Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/arcnet/arc-rimi.c2
-rw-r--r--drivers/net/arcnet/com20020-isa.c2
-rw-r--r--drivers/net/arcnet/com90io.c2
-rw-r--r--drivers/net/bonding/bond_main.c68
-rw-r--r--drivers/net/bonding/bond_netlink.c3
-rw-r--r--drivers/net/dsa/b53/b53_common.c4
-rw-r--r--drivers/net/dsa/bcm_sf2.c20
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c4
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c25
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h1
-rw-r--r--drivers/net/dsa/qca8k.c6
-rw-r--r--drivers/net/ethernet/amazon/Kconfig2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_admin_defs.h425
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c302
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.h72
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_common_defs.h4
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_com.c285
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_com.h72
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h229
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c504
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h42
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_regs_defs.h206
-rw-r--r--drivers/net/ethernet/amd/declance.c10
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c12
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c2
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c2
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c28
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c1699
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h250
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c86
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h7
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c112
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h310
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c9
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c8
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c3
-rw-r--r--drivers/net/ethernet/chelsio/Kconfig1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c17
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/Makefile3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h16
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c114
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c66
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.c68
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c10
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h1
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c6
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h1
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c12
-rw-r--r--drivers/net/ethernet/freescale/Kconfig9
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/Kconfig16
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/Makefile6
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c92
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h5
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c222
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h14
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h40
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dprtc.c194
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dprtc.h45
-rw-r--r--drivers/net/ethernet/freescale/fec.h4
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c24
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c48
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h23
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c465
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c20
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c26
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h69
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c1090
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h83
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c579
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h42
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c64
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h8
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c517
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h24
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c121
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h27
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c14
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h97
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c43
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_port.c32
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_port.h18
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.c295
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c14
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c16
-rw-r--r--drivers/net/ethernet/intel/Kconfig60
-rw-r--r--drivers/net/ethernet/intel/Makefile1
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_iov.c51
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c4
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_type.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c9
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c4
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c1
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile1
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h34
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h20
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c130
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.c46
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devids.h6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h77
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c285
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c264
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c127
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.h34
-rw-r--r--drivers/net/ethernet/intel/ice/ice_status.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c66
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h20
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c2675
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h173
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c9
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c8
-rw-r--r--drivers/net/ethernet/intel/igc/Makefile10
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h443
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.c541
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.h107
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h389
-rw-r--r--drivers/net/ethernet/intel/igc/igc_hw.h321
-rw-r--r--drivers/net/ethernet/intel/igc/igc_i225.c490
-rw-r--r--drivers/net/ethernet/intel/igc/igc_i225.h13
-rw-r--r--drivers/net/ethernet/intel/igc/igc_mac.c806
-rw-r--r--drivers/net/ethernet/intel/igc/igc_mac.h41
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c3901
-rw-r--r--drivers/net/ethernet/intel/igc/igc_nvm.c215
-rw-r--r--drivers/net/ethernet/intel/igc/igc_nvm.h14
-rw-r--r--drivers/net/ethernet/intel/igc/igc_phy.c791
-rw-r--r--drivers/net/ethernet/intel/igc/igc_phy.h21
-rw-r--r--drivers/net/ethernet/intel/igc/igc_regs.h221
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h37
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c11
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c17
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c343
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h50
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c801
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/Makefile2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ipsec.c13
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h4
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c2
-rw-r--r--drivers/net/ethernet/marvell/Kconfig3
-rw-r--r--drivers/net/ethernet/marvell/Makefile1
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2.h1
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c27
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/Kconfig17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/Makefile6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/Makefile11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c721
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.h111
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h186
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/common.h211
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.c303
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h525
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc.h262
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h5709
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c1772
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h368
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c515
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c1959
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c472
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c816
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c71
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h502
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h917
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c43
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cq.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.c208
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c130
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c165
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c45
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c58
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c736
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h55
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c476
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c89
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c195
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c126
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c81
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/srq.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/transobj.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.c125
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Makefile3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h603
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/resources.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c160
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h106
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c225
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c982
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h49
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c249
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c138
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c575
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h2
-rw-r--r--drivers/net/ethernet/microchip/Kconfig1
-rw-r--r--drivers/net/ethernet/mscc/Kconfig2
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c24
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h79
-rw-r--r--drivers/net/ethernet/mscc/ocelot_board.c65
-rw-r--r--drivers/net/ethernet/mscc/ocelot_hsio.h785
-rw-r--r--drivers/net/ethernet/mscc/ocelot_regs.c93
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c1
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/ctrl.c35
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/main.c35
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/main.h35
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/cmsg.c105
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/fw.h45
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c415
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.c91
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.h107
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c77
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/verifier.c234
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c85
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h34
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/lag_conf.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c49
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h57
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/metadata.c179
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c65
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c37
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_abi.h35
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.c39
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.h39
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app_nic.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_asm.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_asm.h43
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_devlink.c54
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_hwmon.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.h34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c65
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h35
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_main.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.h34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.h34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c35
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nic/main.c34
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c75
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c29
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h59
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iwarp.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c13
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c259
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c322
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h51
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c15
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.c7
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c95
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c12
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c46
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c7
-rw-r--r--drivers/net/ethernet/realtek/r8169.c181
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c5
-rw-r--r--drivers/net/ethernet/sfc/efx.c34
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c34
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c3
-rw-r--r--drivers/net/ethernet/socionext/netsec.c45
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c66
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c12
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.h8
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c6
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c6
-rw-r--r--drivers/net/fddi/Kconfig11
-rw-r--r--drivers/net/fddi/Makefile1
-rw-r--r--drivers/net/fddi/defza.c1564
-rw-r--r--drivers/net/fddi/defza.h791
-rw-r--r--drivers/net/fddi/skfp/h/cmtdef.h8
-rw-r--r--drivers/net/geneve.c26
-rw-r--r--drivers/net/hamradio/6pack.c21
-rw-r--r--drivers/net/hamradio/mkiss.c21
-rw-r--r--drivers/net/hamradio/yam.c4
-rw-r--r--drivers/net/hyperv/netvsc_drv.c15
-rw-r--r--drivers/net/ieee802154/adf7242.c3
-rw-r--r--drivers/net/ieee802154/ca8210.c6
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.c8
-rw-r--r--drivers/net/ieee802154/mcr20a.c72
-rw-r--r--drivers/net/loopback.c4
-rw-r--r--drivers/net/macsec.c20
-rw-r--r--drivers/net/macvlan.c2
-rw-r--r--drivers/net/netdevsim/bpf.c8
-rw-r--r--drivers/net/ntb_netdev.c30
-rw-r--r--drivers/net/phy/mdio-mux-bcm-iproc.c6
-rw-r--r--drivers/net/phy/mdio-mux-gpio.c11
-rw-r--r--drivers/net/phy/micrel.c130
-rw-r--r--drivers/net/phy/mscc.c1417
-rw-r--r--drivers/net/phy/phy-c45.c2
-rw-r--r--drivers/net/phy/phy.c95
-rw-r--r--drivers/net/phy/phy_device.c13
-rw-r--r--drivers/net/phy/phylink.c48
-rw-r--r--drivers/net/phy/sfp.c9
-rw-r--r--drivers/net/ppp/ppp_mppe.c27
-rw-r--r--drivers/net/slip/slip.c25
-rw-r--r--drivers/net/team/team.c11
-rw-r--r--drivers/net/tun.c74
-rw-r--r--drivers/net/usb/asix_common.c3
-rw-r--r--drivers/net/usb/ax88179_178a.c3
-rw-r--r--drivers/net/usb/cdc_ncm.c6
-rw-r--r--drivers/net/usb/lan78xx.c17
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/usb/r8152.c3
-rw-r--r--drivers/net/usb/smsc75xx.c4
-rw-r--r--drivers/net/usb/smsc95xx.c3
-rw-r--r--drivers/net/usb/sr9800.c3
-rw-r--r--drivers/net/veth.c175
-rw-r--r--drivers/net/virtio_net.c68
-rw-r--r--drivers/net/vxlan.c201
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c51
-rw-r--r--drivers/net/wan/x25_asy.c19
-rw-r--r--drivers/net/wimax/i2400m/control.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/dma.c6
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c116
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c207
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c11
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c26
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c30
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c14
-rw-r--r--drivers/net/wireless/cisco/airo.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-rs.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/1000.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h81
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c438
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h46
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.c64
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h41
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c77
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c64
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h54
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c373
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c118
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c837
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c436
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c31
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_usb.c1
-rw-r--r--drivers/net/wireless/intersil/prism54/isl_38xx.c1
-rw-r--r--drivers/net/wireless/intersil/prism54/isl_ioctl.c1
-rw-r--r--drivers/net/wireless/intersil/prism54/islpci_dev.c1
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c47
-rw-r--r--drivers/net/wireless/marvell/libertas/if_cs.c4
-rw-r--r--drivers/net/wireless/marvell/libertas/if_sdio.c8
-rw-r--r--drivers/net/wireless/marvell/libertas/if_spi.c8
-rw-r--r--drivers/net/wireless/marvell/libertas/if_usb.c7
-rw-r--r--drivers/net/wireless/marvell/libertas/main.c17
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ie.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/Kconfig43
-rw-r--r--drivers/net/wireless/mediatek/mt76/Makefile26
-rw-r--r--drivers/net/wireless/mediatek/mt76/mmio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/dma.h126
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c83
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h24
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/init.c59
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mac.c82
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mac.h20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/main.c41
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h134
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c91
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/phy.c395
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/trace.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/tx.c101
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c68
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h215
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h)40
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dma.h8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c33
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h37
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c419
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.h61
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c74
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c179
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_phy.c201
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_phy.h38
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_regs.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_trace.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_trace.c)2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_trace.h (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_trace.h)31
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c202
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb.h15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c43
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c27
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c175
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.h78
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2.h210
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/Makefile16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/debugfs.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c)12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/dfs.h26
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c)122
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h)35
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/init.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c)17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mac.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_common.c)46
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mac.h (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_dma.c)32
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_mcu_common.c)18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h)6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h105
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h (renamed from drivers/net/wireless/mediatek/mt76/mt76x2u.h)41
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_pci.c)7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_dfs.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c)146
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_init.c)88
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_mac.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_mac.c)99
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_main.c)33
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c)17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_phy.c)125
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_tx.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_tx.c)43
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/phy.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c)128
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_usb.c)10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2u_init.c)51
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c)20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2u_main.c)23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c)38
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c)45
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_core.c76
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_mac.h64
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c239
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c118
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2u_core.c53
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c13
-rw-r--r--drivers/net/wireless/quantenna/Kconfig2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/Kconfig17
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/Makefile9
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c193
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c554
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c39
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.h8
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/event.c18
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c287
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h20
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c228
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_ipc.h22
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_regs.h245
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c1219
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie_ipc.h94
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie_regs.h45
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink.h5
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink_util.h8
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h4
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c13
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h4
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/util.c18
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/util.h2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2400pci.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2500pci.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt61pci.c2
-rw-r--r--drivers/net/wireless/ray_cs.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c71
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h4
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio_ops.c4
-rw-r--r--drivers/net/wireless/st/cw1200/sta.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c48
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c17
-rw-r--r--drivers/net/wireless/ti/wlcore/vendor_cmd.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h2
-rw-r--r--drivers/net/wireless/zydas/zd1201.c1
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_mac.c2
-rw-r--r--drivers/net/xen-netback/common.h3
-rw-r--r--drivers/net/xen-netback/hash.c51
-rw-r--r--drivers/net/xen-netback/interface.c3
619 files changed, 54423 insertions, 13994 deletions
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c
index a07e24970be4..11c5bad95226 100644
--- a/drivers/net/arcnet/arc-rimi.c
+++ b/drivers/net/arcnet/arc-rimi.c
@@ -33,7 +33,7 @@
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c
index 38fa60ddaf2e..28510e33924f 100644
--- a/drivers/net/arcnet/com20020-isa.c
+++ b/drivers/net/arcnet/com20020-isa.c
@@ -38,7 +38,7 @@
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/io.h>
#include "arcdevice.h"
diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c
index 4e56aaf2b984..2c546013a980 100644
--- a/drivers/net/arcnet/com90io.c
+++ b/drivers/net/arcnet/com90io.c
@@ -34,7 +34,7 @@
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 0d87e11e7f1d..ffa37adb7681 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -210,6 +210,7 @@ static void bond_get_stats(struct net_device *bond_dev,
static void bond_slave_arr_handler(struct work_struct *work);
static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
int mod);
+static void bond_netdev_notify_work(struct work_struct *work);
/*---------------------------- General routines -----------------------------*/
@@ -962,7 +963,8 @@ static inline void slave_disable_netpoll(struct slave *slave)
return;
slave->np = NULL;
- __netpoll_free_async(np);
+
+ __netpoll_free(np);
}
static void bond_poll_controller(struct net_device *bond_dev)
@@ -1170,9 +1172,27 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
}
}
- /* don't change skb->dev for link-local packets */
- if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
+ /* Link-local multicast packets should be passed to the
+	 * stack on the link they arrive on, as well as to the
+	 * bond-master device. These packets are mostly useful when the
+	 * stack receives them on the link on which they arrive
+	 * (e.g. LLDP); they must also be available on the master. Some of
+	 * the use cases include (but are not limited to): LLDP agents
+ * that must be able to operate both on enslaved interfaces as
+ * well as on bonds themselves; linux bridges that must be able
+ * to process/pass BPDUs from attached bonds when any kind of
+ * STP version is enabled on the network.
+ */
+ if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) {
+ struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
+
+ if (nskb) {
+ nskb->dev = bond->dev;
+ nskb->queue_mapping = 0;
+ netif_rx(nskb);
+ }
return RX_HANDLER_PASS;
+ }
if (bond_should_deliver_exact_match(skb, slave, bond))
return RX_HANDLER_EXACT;
@@ -1269,6 +1289,8 @@ static struct slave *bond_alloc_slave(struct bonding *bond)
return NULL;
}
}
+ INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
+
return slave;
}
@@ -1276,6 +1298,7 @@ static void bond_free_slave(struct slave *slave)
{
struct bonding *bond = bond_get_bond_by_slave(slave);
+ cancel_delayed_work_sync(&slave->notify_work);
if (BOND_MODE(bond) == BOND_MODE_8023AD)
kfree(SLAVE_AD_INFO(slave));
@@ -1297,39 +1320,26 @@ static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
info->link_failure_count = slave->link_failure_count;
}
-static void bond_netdev_notify(struct net_device *dev,
- struct netdev_bonding_info *info)
-{
- rtnl_lock();
- netdev_bonding_info_change(dev, info);
- rtnl_unlock();
-}
-
static void bond_netdev_notify_work(struct work_struct *_work)
{
- struct netdev_notify_work *w =
- container_of(_work, struct netdev_notify_work, work.work);
+ struct slave *slave = container_of(_work, struct slave,
+ notify_work.work);
+
+ if (rtnl_trylock()) {
+ struct netdev_bonding_info binfo;
- bond_netdev_notify(w->dev, &w->bonding_info);
- dev_put(w->dev);
- kfree(w);
+ bond_fill_ifslave(slave, &binfo.slave);
+ bond_fill_ifbond(slave->bond, &binfo.master);
+ netdev_bonding_info_change(slave->dev, &binfo);
+ rtnl_unlock();
+ } else {
+ queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
+ }
}
void bond_queue_slave_event(struct slave *slave)
{
- struct bonding *bond = slave->bond;
- struct netdev_notify_work *nnw = kzalloc(sizeof(*nnw), GFP_ATOMIC);
-
- if (!nnw)
- return;
-
- dev_hold(slave->dev);
- nnw->dev = slave->dev;
- bond_fill_ifslave(slave, &nnw->bonding_info.slave);
- bond_fill_ifbond(bond, &nnw->bonding_info.master);
- INIT_DELAYED_WORK(&nnw->work, bond_netdev_notify_work);
-
- queue_delayed_work(slave->bond->wq, &nnw->work, 0);
+ queue_delayed_work(slave->bond->wq, &slave->notify_work, 0);
}
void bond_lower_state_changed(struct slave *slave)
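For context, the bond_main.c hunks above make bond_handle_frame() deliver link-local frames twice: the original skb keeps flowing up the stack on the slave it arrived on, while a clone is re-injected with the bond as its device. Below is a minimal standalone sketch of that pattern, not part of the patch; demo_handle_frame() and demo_get_master() are illustrative names, whereas is_link_local_ether_addr(), eth_hdr(), skb_clone() and netif_rx() are the same core helpers used in the hunk.

#include <linux/etherdevice.h>	/* is_link_local_ether_addr(), eth_hdr() */
#include <linux/netdevice.h>	/* rx_handler_result_t, netif_rx() */
#include <linux/skbuff.h>	/* skb_clone() */

static struct net_device *demo_get_master(struct net_device *slave);	/* hypothetical lookup */

/* Sketch of the clone-to-master pattern from bond_handle_frame() above. */
static rx_handler_result_t demo_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *master = demo_get_master(skb->dev);

	if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) {
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

		if (nskb) {
			nskb->dev = master;	/* second copy is seen on the bond */
			nskb->queue_mapping = 0;
			netif_rx(nskb);
		}
		/* the original copy is still delivered on the slave */
	}

	return RX_HANDLER_PASS;
}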
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 9697977b80f0..6b9ad8673218 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -638,8 +638,7 @@ static int bond_fill_info(struct sk_buff *skb,
goto nla_put_failure;
if (nla_put(skb, IFLA_BOND_AD_ACTOR_SYSTEM,
- sizeof(bond->params.ad_actor_system),
- &bond->params.ad_actor_system))
+ ETH_ALEN, &bond->params.ad_actor_system))
goto nla_put_failure;
}
if (!bond_3ad_get_active_agg_info(bond, &info)) {
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 700d86dd5e13..0e4bbdcc614f 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1291,7 +1291,7 @@ void b53_vlan_add(struct dsa_switch *ds, int port,
b53_get_vlan_entry(dev, vid, vl);
vl->members |= BIT(port);
- if (untagged)
+ if (untagged && !dsa_is_cpu_port(ds, port))
vl->untag |= BIT(port);
else
vl->untag &= ~BIT(port);
@@ -1333,7 +1333,7 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
pvid = 0;
}
- if (untagged)
+ if (untagged && !dsa_is_cpu_port(ds, port))
vl->untag &= ~(BIT(port));
b53_set_vlan_entry(dev, vid, vl);
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 1fc27e149e7f..2eb68769562c 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -702,7 +702,6 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
static int bcm_sf2_sw_resume(struct dsa_switch *ds)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- unsigned int port;
int ret;
ret = bcm_sf2_sw_rst(priv);
@@ -714,14 +713,7 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
if (priv->hw_params.num_gphy == 1)
bcm_sf2_gphy_enable_set(ds, true);
- for (port = 0; port < DSA_MAX_PORTS; port++) {
- if (dsa_is_user_port(ds, port))
- bcm_sf2_port_setup(ds, port, NULL);
- else if (dsa_is_cpu_port(ds, port))
- bcm_sf2_imp_setup(ds, port);
- }
-
- bcm_sf2_enable_acb(ds);
+ ds->ops->setup(ds);
return 0;
}
@@ -1172,10 +1164,10 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
{
struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
- /* Disable all ports and interrupts */
priv->wol_ports_mask = 0;
- bcm_sf2_sw_suspend(priv->dev->ds);
dsa_unregister_switch(priv->dev->ds);
+ /* Disable all ports and interrupts */
+ bcm_sf2_sw_suspend(priv->dev->ds);
bcm_sf2_mdio_unregister(priv);
return 0;
@@ -1198,16 +1190,14 @@ static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int bcm_sf2_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
+ struct bcm_sf2_priv *priv = dev_get_drvdata(dev);
return dsa_switch_suspend(priv->dev->ds);
}
static int bcm_sf2_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
+ struct bcm_sf2_priv *priv = dev_get_drvdata(dev);
return dsa_switch_resume(priv->dev->ds);
}
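The suspend/resume cleanup above (and the identical one in qca8k.c further down) relies on platform_set_drvdata() storing its pointer through dev_set_drvdata(), so a dev_pm_ops callback can read it straight from the struct device. A minimal sketch under that assumption follows, not part of the patch; foo_priv, foo_suspend() and foo_power_down() are purely hypothetical names.

#include <linux/device.h>

struct foo_priv { void *regs; };			/* hypothetical private data */

static int foo_power_down(struct foo_priv *priv)	/* hypothetical helper */
{
	return 0;
}

static int foo_suspend(struct device *dev)
{
	/* Equivalent to platform_get_drvdata(to_platform_device(dev)):
	 * both read the pointer saved by platform_set_drvdata(). */
	struct foo_priv *priv = dev_get_drvdata(dev);

	return foo_power_down(priv);
}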
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 78ce820b5257..e05d4eddc935 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -2907,7 +2907,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.port_set_link = mv88e6xxx_port_set_link,
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
- .port_set_speed = mv88e6390_port_set_speed,
+ .port_set_speed = mv88e6341_port_set_speed,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -3528,7 +3528,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.port_set_link = mv88e6xxx_port_set_link,
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
- .port_set_speed = mv88e6390_port_set_speed,
+ .port_set_speed = mv88e6341_port_set_speed,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 92945841c8e8..cd7db60a508b 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -228,8 +228,11 @@ static int mv88e6xxx_port_set_speed(struct mv88e6xxx_chip *chip, int port,
ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_1000;
break;
case 2500:
- ctrl = MV88E6390_PORT_MAC_CTL_SPEED_10000 |
- MV88E6390_PORT_MAC_CTL_ALTSPEED;
+ if (alt_bit)
+ ctrl = MV88E6390_PORT_MAC_CTL_SPEED_10000 |
+ MV88E6390_PORT_MAC_CTL_ALTSPEED;
+ else
+ ctrl = MV88E6390_PORT_MAC_CTL_SPEED_10000;
break;
case 10000:
/* all bits set, fall through... */
@@ -291,6 +294,24 @@ int mv88e6185_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
return mv88e6xxx_port_set_speed(chip, port, speed, false, false);
}
+/* Support 10, 100, 200, 1000, 2500 Mbps (e.g. 88E6341) */
+int mv88e6341_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
+{
+ if (speed == SPEED_MAX)
+ speed = port < 5 ? 1000 : 2500;
+
+ if (speed > 2500)
+ return -EOPNOTSUPP;
+
+ if (speed == 200 && port != 0)
+ return -EOPNOTSUPP;
+
+ if (speed == 2500 && port < 5)
+ return -EOPNOTSUPP;
+
+ return mv88e6xxx_port_set_speed(chip, port, speed, !port, true);
+}
+
/* Support 10, 100, 200, 1000 Mbps (e.g. 88E6352 family) */
int mv88e6352_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
{
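For quick reference, the new 88E6341 helper added above accepts the following speed/port combinations; the calls below are illustrative only (chip is assumed to be a valid struct mv88e6xxx_chip pointer) and the outcomes follow the checks in the function.

/* Illustrative calls against mv88e6341_port_set_speed() as added above. */
mv88e6341_port_set_speed(chip, 0, SPEED_MAX);	/* SPEED_MAX resolves to 1000 Mbps on ports 0-4 */
mv88e6341_port_set_speed(chip, 5, SPEED_MAX);	/* SPEED_MAX resolves to 2500 Mbps on port 5 */
mv88e6341_port_set_speed(chip, 0, 200);		/* 200 Mbps is accepted on port 0 only */
mv88e6341_port_set_speed(chip, 3, 2500);	/* returns -EOPNOTSUPP: 2500 Mbps requires port 5 */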
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index f32f56af8e35..36904c9bf955 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -269,6 +269,7 @@ int mv88e6xxx_port_set_duplex(struct mv88e6xxx_chip *chip, int port, int dup);
int mv88e6065_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
int mv88e6185_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
+int mv88e6341_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
int mv88e6352_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
int mv88e6390_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index cdcde7f8e0b2..7e97e620bd44 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -955,8 +955,7 @@ qca8k_set_pm(struct qca8k_priv *priv, int enable)
static int qca8k_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct qca8k_priv *priv = platform_get_drvdata(pdev);
+ struct qca8k_priv *priv = dev_get_drvdata(dev);
qca8k_set_pm(priv, 0);
@@ -965,8 +964,7 @@ static int qca8k_suspend(struct device *dev)
static int qca8k_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct qca8k_priv *priv = platform_get_drvdata(pdev);
+ struct qca8k_priv *priv = dev_get_drvdata(dev);
qca8k_set_pm(priv, 1);
diff --git a/drivers/net/ethernet/amazon/Kconfig b/drivers/net/ethernet/amazon/Kconfig
index 99b30353541a..9e87d7b8360f 100644
--- a/drivers/net/ethernet/amazon/Kconfig
+++ b/drivers/net/ethernet/amazon/Kconfig
@@ -17,7 +17,7 @@ if NET_VENDOR_AMAZON
config ENA_ETHERNET
tristate "Elastic Network Adapter (ENA) support"
- depends on (PCI_MSI && X86)
+ depends on PCI_MSI && !CPU_BIG_ENDIAN
---help---
This driver supports Elastic Network Adapter (ENA)
diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
index 4532e574ebcd..9f80b73f90b1 100644
--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -32,115 +32,81 @@
#ifndef _ENA_ADMIN_H_
#define _ENA_ADMIN_H_
-enum ena_admin_aq_opcode {
- ENA_ADMIN_CREATE_SQ = 1,
-
- ENA_ADMIN_DESTROY_SQ = 2,
-
- ENA_ADMIN_CREATE_CQ = 3,
-
- ENA_ADMIN_DESTROY_CQ = 4,
- ENA_ADMIN_GET_FEATURE = 8,
-
- ENA_ADMIN_SET_FEATURE = 9,
-
- ENA_ADMIN_GET_STATS = 11,
+enum ena_admin_aq_opcode {
+ ENA_ADMIN_CREATE_SQ = 1,
+ ENA_ADMIN_DESTROY_SQ = 2,
+ ENA_ADMIN_CREATE_CQ = 3,
+ ENA_ADMIN_DESTROY_CQ = 4,
+ ENA_ADMIN_GET_FEATURE = 8,
+ ENA_ADMIN_SET_FEATURE = 9,
+ ENA_ADMIN_GET_STATS = 11,
};
enum ena_admin_aq_completion_status {
- ENA_ADMIN_SUCCESS = 0,
-
- ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE = 1,
-
- ENA_ADMIN_BAD_OPCODE = 2,
-
- ENA_ADMIN_UNSUPPORTED_OPCODE = 3,
-
- ENA_ADMIN_MALFORMED_REQUEST = 4,
-
+ ENA_ADMIN_SUCCESS = 0,
+ ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE = 1,
+ ENA_ADMIN_BAD_OPCODE = 2,
+ ENA_ADMIN_UNSUPPORTED_OPCODE = 3,
+ ENA_ADMIN_MALFORMED_REQUEST = 4,
/* Additional status is provided in ACQ entry extended_status */
- ENA_ADMIN_ILLEGAL_PARAMETER = 5,
-
- ENA_ADMIN_UNKNOWN_ERROR = 6,
+ ENA_ADMIN_ILLEGAL_PARAMETER = 5,
+ ENA_ADMIN_UNKNOWN_ERROR = 6,
+ ENA_ADMIN_RESOURCE_BUSY = 7,
};
enum ena_admin_aq_feature_id {
- ENA_ADMIN_DEVICE_ATTRIBUTES = 1,
-
- ENA_ADMIN_MAX_QUEUES_NUM = 2,
-
- ENA_ADMIN_HW_HINTS = 3,
-
- ENA_ADMIN_RSS_HASH_FUNCTION = 10,
-
- ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11,
-
- ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12,
-
- ENA_ADMIN_MTU = 14,
-
- ENA_ADMIN_RSS_HASH_INPUT = 18,
-
- ENA_ADMIN_INTERRUPT_MODERATION = 20,
-
- ENA_ADMIN_AENQ_CONFIG = 26,
-
- ENA_ADMIN_LINK_CONFIG = 27,
-
- ENA_ADMIN_HOST_ATTR_CONFIG = 28,
-
- ENA_ADMIN_FEATURES_OPCODE_NUM = 32,
+ ENA_ADMIN_DEVICE_ATTRIBUTES = 1,
+ ENA_ADMIN_MAX_QUEUES_NUM = 2,
+ ENA_ADMIN_HW_HINTS = 3,
+ ENA_ADMIN_LLQ = 4,
+ ENA_ADMIN_RSS_HASH_FUNCTION = 10,
+ ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11,
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12,
+ ENA_ADMIN_MTU = 14,
+ ENA_ADMIN_RSS_HASH_INPUT = 18,
+ ENA_ADMIN_INTERRUPT_MODERATION = 20,
+ ENA_ADMIN_AENQ_CONFIG = 26,
+ ENA_ADMIN_LINK_CONFIG = 27,
+ ENA_ADMIN_HOST_ATTR_CONFIG = 28,
+ ENA_ADMIN_FEATURES_OPCODE_NUM = 32,
};
enum ena_admin_placement_policy_type {
/* descriptors and headers are in host memory */
- ENA_ADMIN_PLACEMENT_POLICY_HOST = 1,
-
+ ENA_ADMIN_PLACEMENT_POLICY_HOST = 1,
/* descriptors and headers are in device memory (a.k.a Low Latency
* Queue)
*/
- ENA_ADMIN_PLACEMENT_POLICY_DEV = 3,
+ ENA_ADMIN_PLACEMENT_POLICY_DEV = 3,
};
enum ena_admin_link_types {
- ENA_ADMIN_LINK_SPEED_1G = 0x1,
-
- ENA_ADMIN_LINK_SPEED_2_HALF_G = 0x2,
-
- ENA_ADMIN_LINK_SPEED_5G = 0x4,
-
- ENA_ADMIN_LINK_SPEED_10G = 0x8,
-
- ENA_ADMIN_LINK_SPEED_25G = 0x10,
-
- ENA_ADMIN_LINK_SPEED_40G = 0x20,
-
- ENA_ADMIN_LINK_SPEED_50G = 0x40,
-
- ENA_ADMIN_LINK_SPEED_100G = 0x80,
-
- ENA_ADMIN_LINK_SPEED_200G = 0x100,
-
- ENA_ADMIN_LINK_SPEED_400G = 0x200,
+ ENA_ADMIN_LINK_SPEED_1G = 0x1,
+ ENA_ADMIN_LINK_SPEED_2_HALF_G = 0x2,
+ ENA_ADMIN_LINK_SPEED_5G = 0x4,
+ ENA_ADMIN_LINK_SPEED_10G = 0x8,
+ ENA_ADMIN_LINK_SPEED_25G = 0x10,
+ ENA_ADMIN_LINK_SPEED_40G = 0x20,
+ ENA_ADMIN_LINK_SPEED_50G = 0x40,
+ ENA_ADMIN_LINK_SPEED_100G = 0x80,
+ ENA_ADMIN_LINK_SPEED_200G = 0x100,
+ ENA_ADMIN_LINK_SPEED_400G = 0x200,
};
enum ena_admin_completion_policy_type {
/* completion queue entry for each sq descriptor */
- ENA_ADMIN_COMPLETION_POLICY_DESC = 0,
-
+ ENA_ADMIN_COMPLETION_POLICY_DESC = 0,
/* completion queue entry upon request in sq descriptor */
- ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND = 1,
-
+ ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND = 1,
/* current queue head pointer is updated in OS memory upon sq
* descriptor request
*/
- ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND = 2,
-
+ ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND = 2,
/* current queue head pointer is updated in OS memory for each sq
* descriptor
*/
- ENA_ADMIN_COMPLETION_POLICY_HEAD = 3,
+ ENA_ADMIN_COMPLETION_POLICY_HEAD = 3,
};
/* basic stats return ena_admin_basic_stats while extended stats return a
@@ -148,15 +114,13 @@ enum ena_admin_completion_policy_type {
* device id
*/
enum ena_admin_get_stats_type {
- ENA_ADMIN_GET_STATS_TYPE_BASIC = 0,
-
- ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1,
+ ENA_ADMIN_GET_STATS_TYPE_BASIC = 0,
+ ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1,
};
enum ena_admin_get_stats_scope {
- ENA_ADMIN_SPECIFIC_QUEUE = 0,
-
- ENA_ADMIN_ETH_TRAFFIC = 1,
+ ENA_ADMIN_SPECIFIC_QUEUE = 0,
+ ENA_ADMIN_ETH_TRAFFIC = 1,
};
struct ena_admin_aq_common_desc {
@@ -227,7 +191,9 @@ struct ena_admin_acq_common_desc {
u16 extended_status;
- /* serves as a hint what AQ entries can be revoked */
+ /* indicates to the driver which AQ entry has been consumed by the
+ * device and could be reused
+ */
u16 sq_head_indx;
};
@@ -296,9 +262,8 @@ struct ena_admin_aq_create_sq_cmd {
};
enum ena_admin_sq_direction {
- ENA_ADMIN_SQ_DIRECTION_TX = 1,
-
- ENA_ADMIN_SQ_DIRECTION_RX = 2,
+ ENA_ADMIN_SQ_DIRECTION_TX = 1,
+ ENA_ADMIN_SQ_DIRECTION_RX = 2,
};
struct ena_admin_acq_create_sq_resp_desc {
@@ -483,8 +448,85 @@ struct ena_admin_device_attr_feature_desc {
u32 max_mtu;
};
+enum ena_admin_llq_header_location {
+ /* header is in descriptor list */
+ ENA_ADMIN_INLINE_HEADER = 1,
+ /* header in a separate ring, implies 16B descriptor list entry */
+ ENA_ADMIN_HEADER_RING = 2,
+};
+
+enum ena_admin_llq_ring_entry_size {
+ ENA_ADMIN_LIST_ENTRY_SIZE_128B = 1,
+ ENA_ADMIN_LIST_ENTRY_SIZE_192B = 2,
+ ENA_ADMIN_LIST_ENTRY_SIZE_256B = 4,
+};
+
+enum ena_admin_llq_num_descs_before_header {
+ ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_0 = 0,
+ ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1 = 1,
+ ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2 = 2,
+ ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4 = 4,
+ ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8 = 8,
+};
+
+/* packet descriptor list entry always starts with one or more descriptors,
+ * followed by a header. The rest of the descriptors are located in the
+ * beginning of the subsequent entry. Stride refers to how the rest of the
+ * descriptors are placed. This field is relevant only for inline header
+ * mode
+ */
+enum ena_admin_llq_stride_ctrl {
+ ENA_ADMIN_SINGLE_DESC_PER_ENTRY = 1,
+ ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY = 2,
+};
+
+struct ena_admin_feature_llq_desc {
+ u32 max_llq_num;
+
+ u32 max_llq_depth;
+
+ /* specify the header locations the device supports. bitfield of
+ * enum ena_admin_llq_header_location.
+ */
+ u16 header_location_ctrl_supported;
+
+ /* the header location the driver selected to use. */
+ u16 header_location_ctrl_enabled;
+
+	/* if inline header is specified - this is the size of the descriptor
+	 * list entry. If a separate header ring is specified - this is the
+	 * size of a header ring entry. Bitfield of enum
+	 * ena_admin_llq_ring_entry_size, specifying the entry sizes the
+	 * device supports
+ */
+ u16 entry_size_ctrl_supported;
+
+ /* the entry size the driver selected to use. */
+ u16 entry_size_ctrl_enabled;
+
+	/* valid only if inline header is specified. The first entry associated
+	 * with the packet includes descriptors and header. The rest of the
+	 * entries are occupied by descriptors. This parameter defines the max
+	 * number of descriptors preceding the header in the first entry.
+	 * The field is a bitfield of enum
+	 * ena_admin_llq_num_descs_before_header and specifies the values the
+ * device supports
+ */
+ u16 desc_num_before_header_supported;
+
+	/* the desired value the driver selected to use */
+ u16 desc_num_before_header_enabled;
+
+ /* valid only if inline was chosen. bitfield of enum
+ * ena_admin_llq_stride_ctrl
+ */
+ u16 descriptors_stride_ctrl_supported;
+
+ /* the stride control the driver selected to use */
+ u16 descriptors_stride_ctrl_enabled;
+};
+
struct ena_admin_queue_feature_desc {
- /* including LLQs */
u32 max_sq_num;
u32 max_sq_depth;
@@ -493,9 +535,9 @@ struct ena_admin_queue_feature_desc {
u32 max_cq_depth;
- u32 max_llq_num;
+ u32 max_legacy_llq_num;
- u32 max_llq_depth;
+ u32 max_legacy_llq_depth;
u32 max_header_size;
@@ -583,9 +625,8 @@ struct ena_admin_feature_offload_desc {
};
enum ena_admin_hash_functions {
- ENA_ADMIN_TOEPLITZ = 1,
-
- ENA_ADMIN_CRC32 = 2,
+ ENA_ADMIN_TOEPLITZ = 1,
+ ENA_ADMIN_CRC32 = 2,
};
struct ena_admin_feature_rss_flow_hash_control {
@@ -611,50 +652,35 @@ struct ena_admin_feature_rss_flow_hash_function {
/* RSS flow hash protocols */
enum ena_admin_flow_hash_proto {
- ENA_ADMIN_RSS_TCP4 = 0,
-
- ENA_ADMIN_RSS_UDP4 = 1,
-
- ENA_ADMIN_RSS_TCP6 = 2,
-
- ENA_ADMIN_RSS_UDP6 = 3,
-
- ENA_ADMIN_RSS_IP4 = 4,
-
- ENA_ADMIN_RSS_IP6 = 5,
-
- ENA_ADMIN_RSS_IP4_FRAG = 6,
-
- ENA_ADMIN_RSS_NOT_IP = 7,
-
+ ENA_ADMIN_RSS_TCP4 = 0,
+ ENA_ADMIN_RSS_UDP4 = 1,
+ ENA_ADMIN_RSS_TCP6 = 2,
+ ENA_ADMIN_RSS_UDP6 = 3,
+ ENA_ADMIN_RSS_IP4 = 4,
+ ENA_ADMIN_RSS_IP6 = 5,
+ ENA_ADMIN_RSS_IP4_FRAG = 6,
+ ENA_ADMIN_RSS_NOT_IP = 7,
/* TCPv6 with extension header */
- ENA_ADMIN_RSS_TCP6_EX = 8,
-
+ ENA_ADMIN_RSS_TCP6_EX = 8,
/* IPv6 with extension header */
- ENA_ADMIN_RSS_IP6_EX = 9,
-
- ENA_ADMIN_RSS_PROTO_NUM = 16,
+ ENA_ADMIN_RSS_IP6_EX = 9,
+ ENA_ADMIN_RSS_PROTO_NUM = 16,
};
/* RSS flow hash fields */
enum ena_admin_flow_hash_fields {
/* Ethernet Dest Addr */
- ENA_ADMIN_RSS_L2_DA = BIT(0),
-
+ ENA_ADMIN_RSS_L2_DA = BIT(0),
/* Ethernet Src Addr */
- ENA_ADMIN_RSS_L2_SA = BIT(1),
-
+ ENA_ADMIN_RSS_L2_SA = BIT(1),
/* ipv4/6 Dest Addr */
- ENA_ADMIN_RSS_L3_DA = BIT(2),
-
+ ENA_ADMIN_RSS_L3_DA = BIT(2),
/* ipv4/6 Src Addr */
- ENA_ADMIN_RSS_L3_SA = BIT(3),
-
+ ENA_ADMIN_RSS_L3_SA = BIT(3),
/* tcp/udp Dest Port */
- ENA_ADMIN_RSS_L4_DP = BIT(4),
-
+ ENA_ADMIN_RSS_L4_DP = BIT(4),
/* tcp/udp Src Port */
- ENA_ADMIN_RSS_L4_SP = BIT(5),
+ ENA_ADMIN_RSS_L4_SP = BIT(5),
};
struct ena_admin_proto_input {
@@ -693,15 +719,13 @@ struct ena_admin_feature_rss_flow_hash_input {
};
enum ena_admin_os_type {
- ENA_ADMIN_OS_LINUX = 1,
-
- ENA_ADMIN_OS_WIN = 2,
-
- ENA_ADMIN_OS_DPDK = 3,
-
- ENA_ADMIN_OS_FREEBSD = 4,
-
- ENA_ADMIN_OS_IPXE = 5,
+ ENA_ADMIN_OS_LINUX = 1,
+ ENA_ADMIN_OS_WIN = 2,
+ ENA_ADMIN_OS_DPDK = 3,
+ ENA_ADMIN_OS_FREEBSD = 4,
+ ENA_ADMIN_OS_IPXE = 5,
+ ENA_ADMIN_OS_ESXI = 6,
+ ENA_ADMIN_OS_GROUPS_NUM = 6,
};
struct ena_admin_host_info {
@@ -723,11 +747,27 @@ struct ena_admin_host_info {
/* 7:0 : major
* 15:8 : minor
* 23:16 : sub_minor
+ * 31:24 : module_type
*/
u32 driver_version;
/* features bitmap */
- u32 supported_network_features[4];
+ u32 supported_network_features[2];
+
+ /* ENA spec version of driver */
+ u16 ena_spec_version;
+
+ /* ENA device's Bus, Device and Function
+ * 2:0 : function
+ * 7:3 : device
+ * 15:8 : bus
+ */
+ u16 bdf;
+
+ /* Number of CPUs */
+ u16 num_cpus;
+
+ u16 reserved;
};
struct ena_admin_rss_ind_table_entry {
@@ -800,6 +840,8 @@ struct ena_admin_get_feat_resp {
struct ena_admin_device_attr_feature_desc dev_attr;
+ struct ena_admin_feature_llq_desc llq;
+
struct ena_admin_queue_feature_desc max_queue;
struct ena_admin_feature_aenq_desc aenq;
@@ -847,6 +889,9 @@ struct ena_admin_set_feat_cmd {
/* rss indirection table */
struct ena_admin_feature_rss_ind_table ind_table;
+
+ /* LLQ configuration */
+ struct ena_admin_feature_llq_desc llq;
} u;
};
@@ -875,25 +920,18 @@ struct ena_admin_aenq_common_desc {
/* asynchronous event notification groups */
enum ena_admin_aenq_group {
- ENA_ADMIN_LINK_CHANGE = 0,
-
- ENA_ADMIN_FATAL_ERROR = 1,
-
- ENA_ADMIN_WARNING = 2,
-
- ENA_ADMIN_NOTIFICATION = 3,
-
- ENA_ADMIN_KEEP_ALIVE = 4,
-
- ENA_ADMIN_AENQ_GROUPS_NUM = 5,
+ ENA_ADMIN_LINK_CHANGE = 0,
+ ENA_ADMIN_FATAL_ERROR = 1,
+ ENA_ADMIN_WARNING = 2,
+ ENA_ADMIN_NOTIFICATION = 3,
+ ENA_ADMIN_KEEP_ALIVE = 4,
+ ENA_ADMIN_AENQ_GROUPS_NUM = 5,
};
enum ena_admin_aenq_notification_syndrom {
- ENA_ADMIN_SUSPEND = 0,
-
- ENA_ADMIN_RESUME = 1,
-
- ENA_ADMIN_UPDATE_HINTS = 2,
+ ENA_ADMIN_SUSPEND = 0,
+ ENA_ADMIN_RESUME = 1,
+ ENA_ADMIN_UPDATE_HINTS = 2,
};
struct ena_admin_aenq_entry {
@@ -928,27 +966,27 @@ struct ena_admin_ena_mmio_req_read_less_resp {
};
/* aq_common_desc */
-#define ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
-#define ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0)
-#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT 1
-#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK BIT(1)
-#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT 2
-#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK BIT(2)
+#define ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
+#define ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0)
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT 1
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK BIT(1)
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT 2
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK BIT(2)
/* sq */
-#define ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT 5
-#define ENA_ADMIN_SQ_SQ_DIRECTION_MASK GENMASK(7, 5)
+#define ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT 5
+#define ENA_ADMIN_SQ_SQ_DIRECTION_MASK GENMASK(7, 5)
/* acq_common_desc */
-#define ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
-#define ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK BIT(0)
+#define ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
+#define ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK BIT(0)
/* aq_create_sq_cmd */
-#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT 5
-#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK GENMASK(7, 5)
-#define ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK GENMASK(3, 0)
-#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT 4
-#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK GENMASK(6, 4)
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT 5
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK GENMASK(7, 5)
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK GENMASK(3, 0)
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT 4
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK GENMASK(6, 4)
#define ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK BIT(0)
/* aq_create_cq_cmd */
@@ -957,12 +995,12 @@ struct ena_admin_ena_mmio_req_read_less_resp {
#define ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0)
/* get_set_feature_common_desc */
-#define ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK GENMASK(1, 0)
+#define ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK GENMASK(1, 0)
/* get_feature_link_desc */
-#define ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK BIT(0)
-#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT 1
-#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK BIT(1)
+#define ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK BIT(0)
+#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT 1
+#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK BIT(1)
/* feature_offload_desc */
#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK BIT(0)
@@ -974,19 +1012,19 @@ struct ena_admin_ena_mmio_req_read_less_resp {
#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK BIT(3)
#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT 4
#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK BIT(4)
-#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT 5
-#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK BIT(5)
-#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT 6
-#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK BIT(6)
-#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT 7
-#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK BIT(7)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT 5
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK BIT(5)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT 6
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK BIT(6)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT 7
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK BIT(7)
#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK BIT(0)
#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT 1
#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK BIT(1)
#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT 2
#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK BIT(2)
-#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT 3
-#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK BIT(3)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT 3
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK BIT(3)
/* feature_rss_flow_hash_function */
#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK GENMASK(7, 0)
@@ -994,25 +1032,32 @@ struct ena_admin_ena_mmio_req_read_less_resp {
/* feature_rss_flow_hash_input */
#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT 1
-#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK BIT(1)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK BIT(1)
#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT 2
-#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK BIT(2)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK BIT(2)
#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT 1
#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK BIT(1)
#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT 2
#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK BIT(2)
/* host_info */
-#define ENA_ADMIN_HOST_INFO_MAJOR_MASK GENMASK(7, 0)
-#define ENA_ADMIN_HOST_INFO_MINOR_SHIFT 8
-#define ENA_ADMIN_HOST_INFO_MINOR_MASK GENMASK(15, 8)
-#define ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT 16
-#define ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK GENMASK(23, 16)
+#define ENA_ADMIN_HOST_INFO_MAJOR_MASK GENMASK(7, 0)
+#define ENA_ADMIN_HOST_INFO_MINOR_SHIFT 8
+#define ENA_ADMIN_HOST_INFO_MINOR_MASK GENMASK(15, 8)
+#define ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT 16
+#define ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK GENMASK(23, 16)
+#define ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT 24
+#define ENA_ADMIN_HOST_INFO_MODULE_TYPE_MASK GENMASK(31, 24)
+#define ENA_ADMIN_HOST_INFO_FUNCTION_MASK GENMASK(2, 0)
+#define ENA_ADMIN_HOST_INFO_DEVICE_SHIFT 3
+#define ENA_ADMIN_HOST_INFO_DEVICE_MASK GENMASK(7, 3)
+#define ENA_ADMIN_HOST_INFO_BUS_SHIFT 8
+#define ENA_ADMIN_HOST_INFO_BUS_MASK GENMASK(15, 8)
/* aenq_common_desc */
-#define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0)
+#define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0)
/* aenq_link_change_desc */
-#define ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK BIT(0)
+#define ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK BIT(0)
#endif /*_ENA_ADMIN_H_ */
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 7635c38e77dd..420cede41ca4 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -41,9 +41,6 @@
#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32
-#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
- ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
- | (ENA_COMMON_SPEC_VERSION_MINOR))
#define ENA_CTRL_MAJOR 0
#define ENA_CTRL_MINOR 0
@@ -61,6 +58,8 @@
#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
+#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT 4
+
#define ENA_REGS_ADMIN_INTR_MASK 1
#define ENA_POLL_MS 5
@@ -236,7 +235,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
tail_masked = admin_queue->sq.tail & queue_size_mask;
/* In case of queue FULL */
- cnt = atomic_read(&admin_queue->outstanding_cmds);
+ cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
if (cnt >= admin_queue->q_depth) {
pr_debug("admin queue is full.\n");
admin_queue->stats.out_of_space++;
@@ -305,7 +304,7 @@ static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue
struct ena_admin_acq_entry *comp,
size_t comp_size_in_bytes)
{
- unsigned long flags;
+ unsigned long flags = 0;
struct ena_comp_ctx *comp_ctx;
spin_lock_irqsave(&admin_queue->q_lock, flags);
@@ -333,7 +332,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
- io_sq->dma_addr_bits = ena_dev->dma_addr_bits;
+ io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
io_sq->desc_entry_size =
(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
sizeof(struct ena_eth_io_tx_desc) :
@@ -355,21 +354,48 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
&io_sq->desc_addr.phys_addr,
GFP_KERNEL);
}
- } else {
+
+ if (!io_sq->desc_addr.virt_addr) {
+ pr_err("memory allocation failed");
+ return -ENOMEM;
+ }
+ }
+
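+	/* For LLQ (device placement) queues, descriptors and headers are
+	 * staged in host-side bounce buffers before being copied to device
+	 * memory.
+	 */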
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+ /* Allocate bounce buffers */
+ io_sq->bounce_buf_ctrl.buffer_size =
+ ena_dev->llq_info.desc_list_entry_size;
+ io_sq->bounce_buf_ctrl.buffers_num =
+ ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
+ io_sq->bounce_buf_ctrl.next_to_use = 0;
+
+ size = io_sq->bounce_buf_ctrl.buffer_size *
+ io_sq->bounce_buf_ctrl.buffers_num;
+
dev_node = dev_to_node(ena_dev->dmadev);
set_dev_node(ena_dev->dmadev, ctx->numa_node);
- io_sq->desc_addr.virt_addr =
+ io_sq->bounce_buf_ctrl.base_buffer =
devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
set_dev_node(ena_dev->dmadev, dev_node);
- if (!io_sq->desc_addr.virt_addr) {
- io_sq->desc_addr.virt_addr =
+ if (!io_sq->bounce_buf_ctrl.base_buffer)
+ io_sq->bounce_buf_ctrl.base_buffer =
devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
+
+ if (!io_sq->bounce_buf_ctrl.base_buffer) {
+ pr_err("bounce buffer memory allocation failed");
+ return -ENOMEM;
}
- }
- if (!io_sq->desc_addr.virt_addr) {
- pr_err("memory allocation failed");
- return -ENOMEM;
+ memcpy(&io_sq->llq_info, &ena_dev->llq_info,
+ sizeof(io_sq->llq_info));
+
+ /* Initiate the first bounce buffer */
+ io_sq->llq_buf_ctrl.curr_bounce_buf =
+ ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
+ memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
+ 0x0, io_sq->llq_info.desc_list_entry_size);
+ io_sq->llq_buf_ctrl.descs_left_in_line =
+ io_sq->llq_info.descs_num_before_header;
}
io_sq->tail = 0;
@@ -460,7 +486,7 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_qu
/* Go over all the completions */
while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
- ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
+ ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
/* Do not read the rest of the completion entry before the
* phase bit was validated
*/
@@ -511,7 +537,8 @@ static int ena_com_comp_status_to_errno(u8 comp_status)
static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
struct ena_com_admin_queue *admin_queue)
{
- unsigned long flags, timeout;
+ unsigned long flags = 0;
+ unsigned long timeout;
int ret;
timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);
@@ -557,10 +584,160 @@ err:
return ret;
}
+/**
+ * Set the LLQ configuration in the device firmware
+ *
+ * The driver provides only the enabled feature values to the device,
+ * which in turn, checks if they are supported.
+ */
+static int ena_com_set_llq(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_admin_queue *admin_queue;
+ struct ena_admin_set_feat_cmd cmd;
+ struct ena_admin_set_feat_resp resp;
+ struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
+ int ret;
+
+ memset(&cmd, 0x0, sizeof(cmd));
+ admin_queue = &ena_dev->admin_queue;
+
+ cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ cmd.feat_common.feature_id = ENA_ADMIN_LLQ;
+
+ cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
+ cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
+ cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
+ cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct ena_admin_acq_entry *)&resp,
+ sizeof(resp));
+
+ if (unlikely(ret))
+ pr_err("Failed to set LLQ configurations: %d\n", ret);
+
+ return ret;
+}
+
+static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
+ struct ena_admin_feature_llq_desc *llq_features,
+ struct ena_llq_configurations *llq_default_cfg)
+{
+ struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
+ u16 supported_feat;
+ int rc;
+
+ memset(llq_info, 0, sizeof(*llq_info));
+
+ supported_feat = llq_features->header_location_ctrl_supported;
+
+ if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
+ llq_info->header_location_ctrl =
+ llq_default_cfg->llq_header_location;
+ } else {
+ pr_err("Invalid header location control, supported: 0x%x\n",
+ supported_feat);
+ return -EINVAL;
+ }
+
+ if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
+ supported_feat = llq_features->descriptors_stride_ctrl_supported;
+ if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
+ llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
+ } else {
+ if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
+ llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
+ } else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
+ llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
+ } else {
+ pr_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
+ supported_feat);
+ return -EINVAL;
+ }
+
+ pr_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+ llq_default_cfg->llq_stride_ctrl, supported_feat,
+ llq_info->desc_stride_ctrl);
+ }
+ } else {
+ llq_info->desc_stride_ctrl = 0;
+ }
+
+ supported_feat = llq_features->entry_size_ctrl_supported;
+ if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
+ llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
+ llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
+ } else {
+ if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
+ llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
+ llq_info->desc_list_entry_size = 128;
+ } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
+ llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
+ llq_info->desc_list_entry_size = 192;
+ } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
+ llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
+ llq_info->desc_list_entry_size = 256;
+ } else {
+ pr_err("Invalid entry_size_ctrl, supported: 0x%x\n",
+ supported_feat);
+ return -EINVAL;
+ }
+
+ pr_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+ llq_default_cfg->llq_ring_entry_size, supported_feat,
+ llq_info->desc_list_entry_size);
+ }
+ if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
+	/* The desc list entry size should be a whole multiple of 8
+ * This requirement comes from __iowrite64_copy()
+ */
+ pr_err("illegal entry size %d\n",
+ llq_info->desc_list_entry_size);
+ return -EINVAL;
+ }
+
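+	/* With multiple descriptors per entry, a single LLQ line can carry
+	 * several TX descriptors; otherwise each line carries exactly one.
+	 */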
+ if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
+ llq_info->descs_per_entry = llq_info->desc_list_entry_size /
+ sizeof(struct ena_eth_io_tx_desc);
+ else
+ llq_info->descs_per_entry = 1;
+
+ supported_feat = llq_features->desc_num_before_header_supported;
+ if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
+ llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
+ } else {
+ if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
+ llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
+ } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
+ llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
+ } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
+ llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
+ } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
+ llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
+ } else {
+ pr_err("Invalid descs_num_before_header, supported: 0x%x\n",
+ supported_feat);
+ return -EINVAL;
+ }
+
+ pr_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+ llq_default_cfg->llq_num_decs_before_header,
+ supported_feat, llq_info->descs_num_before_header);
+ }
+
+ rc = ena_com_set_llq(ena_dev);
+ if (rc)
+ pr_err("Cannot set LLQ configuration: %d\n", rc);
+
+ return 0;
+}
+
static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
struct ena_com_admin_queue *admin_queue)
{
- unsigned long flags;
+ unsigned long flags = 0;
int ret;
wait_for_completion_timeout(&comp_ctx->wait_event,
@@ -606,7 +783,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
mmio_read->read_resp;
u32 mmio_read_reg, ret, i;
- unsigned long flags;
+ unsigned long flags = 0;
u32 timeout = mmio_read->reg_read_to;
might_sleep();
@@ -728,15 +905,17 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
if (io_sq->desc_addr.virt_addr) {
size = io_sq->desc_entry_size * io_sq->q_depth;
- if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
- dma_free_coherent(ena_dev->dmadev, size,
- io_sq->desc_addr.virt_addr,
- io_sq->desc_addr.phys_addr);
- else
- devm_kfree(ena_dev->dmadev, io_sq->desc_addr.virt_addr);
+ dma_free_coherent(ena_dev->dmadev, size,
+ io_sq->desc_addr.virt_addr,
+ io_sq->desc_addr.phys_addr);
io_sq->desc_addr.virt_addr = NULL;
}
+
+ if (io_sq->bounce_buf_ctrl.base_buffer) {
+ devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
+ io_sq->bounce_buf_ctrl.base_buffer = NULL;
+ }
}
static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
@@ -1248,7 +1427,7 @@ void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
{
struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
- unsigned long flags;
+ unsigned long flags = 0;
spin_lock_irqsave(&admin_queue->q_lock, flags);
while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
@@ -1292,7 +1471,7 @@ bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
{
struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
- unsigned long flags;
+ unsigned long flags = 0;
spin_lock_irqsave(&admin_queue->q_lock, flags);
ena_dev->admin_queue.running_state = state;
@@ -1326,7 +1505,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
}
if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
- pr_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
+ pr_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
get_resp.u.aenq.supported_groups, groups_flag);
return -EOPNOTSUPP;
}
@@ -1400,11 +1579,6 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev)
ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
- if (ver < MIN_ENA_VER) {
- pr_err("ENA version is lower than the minimal version the driver supports\n");
- return -1;
- }
-
pr_info("ena controller version: %d.%d.%d implementation version %d\n",
(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
@@ -1479,7 +1653,7 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
sizeof(*mmio_read->read_resp),
&mmio_read->read_resp_dma_addr, GFP_KERNEL);
if (unlikely(!mmio_read->read_resp))
- return -ENOMEM;
+ goto err;
ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
@@ -1488,6 +1662,10 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
mmio_read->readless_supported = true;
return 0;
+
+err:
+
+ return -ENOMEM;
}
void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
@@ -1523,8 +1701,7 @@ void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
}
int ena_com_admin_init(struct ena_com_dev *ena_dev,
- struct ena_aenq_handlers *aenq_handlers,
- bool init_spinlock)
+ struct ena_aenq_handlers *aenq_handlers)
{
struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
@@ -1550,8 +1727,7 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev,
atomic_set(&admin_queue->outstanding_cmds, 0);
- if (init_spinlock)
- spin_lock_init(&admin_queue->q_lock);
+ spin_lock_init(&admin_queue->q_lock);
ret = ena_com_init_comp_ctxt(admin_queue);
if (ret)
@@ -1748,6 +1924,15 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
else
return rc;
+ rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ);
+ if (!rc)
+ memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
+ sizeof(get_resp.u.llq));
+ else if (rc == -EOPNOTSUPP)
+ memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
+ else
+ return rc;
+
return 0;
}
@@ -1779,6 +1964,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
struct ena_admin_aenq_entry *aenq_e;
struct ena_admin_aenq_common_desc *aenq_common;
struct ena_com_aenq *aenq = &dev->aenq;
+ unsigned long long timestamp;
ena_aenq_handler handler_cb;
u16 masked_head, processed = 0;
u8 phase;
@@ -1796,10 +1982,11 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
*/
dma_rmb();
+ timestamp =
+ (unsigned long long)aenq_common->timestamp_low |
+ ((unsigned long long)aenq_common->timestamp_high << 32);
pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
- aenq_common->group, aenq_common->syndrom,
- (u64)aenq_common->timestamp_low +
- ((u64)aenq_common->timestamp_high << 32));
+ aenq_common->group, aenq_common->syndrom, timestamp);
/* Handle specific event*/
handler_cb = ena_com_get_specific_aenq_cb(dev,
@@ -2441,6 +2628,10 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
if (unlikely(!host_attr->host_info))
return -ENOMEM;
+ host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
+ ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
+ (ENA_COMMON_SPEC_VERSION_MINOR));
+
return 0;
}
@@ -2712,3 +2903,34 @@ void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
intr_moder_tbl[level].pkts_per_interval;
entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
}
+
+int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
+ struct ena_admin_feature_llq_desc *llq_features,
+ struct ena_llq_configurations *llq_default_cfg)
+{
+ int rc;
+ int size;
+
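+	/* A device that reports no LLQs only supports host-memory placement */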
+ if (!llq_features->max_llq_num) {
+ ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+ return 0;
+ }
+
+ rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
+ if (rc)
+ return rc;
+
+	/* Validate the LLQ entry can hold the header and the descriptors
+	 * preceding it
+	 */
+ size = ena_dev->tx_max_header_size;
+ size += ena_dev->llq_info.descs_num_before_header *
+ sizeof(struct ena_eth_io_tx_desc);
+
+ if (unlikely(ena_dev->llq_info.desc_list_entry_size < size)) {
+ pr_err("the size of the LLQ entry is smaller than needed\n");
+ return -EINVAL;
+ }
+
+ ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index 7b784f8a06a6..078d6f2b4f39 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -37,6 +37,8 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
+#include <linux/io.h>
+#include <linux/prefetch.h>
#include <linux/sched.h>
#include <linux/sizes.h>
#include <linux/spinlock.h>
@@ -108,6 +110,14 @@ enum ena_intr_moder_level {
ENA_INTR_MAX_NUM_OF_LEVELS,
};
+struct ena_llq_configurations {
+ enum ena_admin_llq_header_location llq_header_location;
+ enum ena_admin_llq_ring_entry_size llq_ring_entry_size;
+ enum ena_admin_llq_stride_ctrl llq_stride_ctrl;
+ enum ena_admin_llq_num_descs_before_header llq_num_decs_before_header;
+ u16 llq_ring_entry_size_value;
+};
+
struct ena_intr_moder_entry {
unsigned int intr_moder_interval;
unsigned int pkts_per_interval;
@@ -142,6 +152,15 @@ struct ena_com_tx_meta {
u16 l4_hdr_len; /* In words */
};
+struct ena_com_llq_info {
+ u16 header_location_ctrl;
+ u16 desc_stride_ctrl;
+ u16 desc_list_entry_size_ctrl;
+ u16 desc_list_entry_size;
+ u16 descs_num_before_header;
+ u16 descs_per_entry;
+};
+
struct ena_com_io_cq {
struct ena_com_io_desc_addr cdesc_addr;
@@ -179,6 +198,20 @@ struct ena_com_io_cq {
} ____cacheline_aligned;
+struct ena_com_io_bounce_buffer_control {
+ u8 *base_buffer;
+ u16 next_to_use;
+ u16 buffer_size;
+ u16 buffers_num; /* Must be a power of 2 */
+};
+
+/* This struct is used to track the current location of the next llq entry */
+struct ena_com_llq_pkt_ctrl {
+ u8 *curr_bounce_buf;
+ u16 idx;
+ u16 descs_left_in_line;
+};
+
struct ena_com_io_sq {
struct ena_com_io_desc_addr desc_addr;
@@ -190,6 +223,9 @@ struct ena_com_io_sq {
u32 msix_vector;
struct ena_com_tx_meta cached_tx_meta;
+ struct ena_com_llq_info llq_info;
+ struct ena_com_llq_pkt_ctrl llq_buf_ctrl;
+ struct ena_com_io_bounce_buffer_control bounce_buf_ctrl;
u16 q_depth;
u16 qid;
@@ -197,6 +233,7 @@ struct ena_com_io_sq {
u16 idx;
u16 tail;
u16 next_to_comp;
+ u16 llq_last_copy_tail;
u32 tx_max_header_size;
u8 phase;
u8 desc_entry_size;
@@ -334,6 +371,8 @@ struct ena_com_dev {
u16 intr_delay_resolution;
u32 intr_moder_tx_interval;
struct ena_intr_moder_entry *intr_moder_tbl;
+
+ struct ena_com_llq_info llq_info;
};
struct ena_com_dev_get_features_ctx {
@@ -342,6 +381,7 @@ struct ena_com_dev_get_features_ctx {
struct ena_admin_feature_aenq_desc aenq;
struct ena_admin_feature_offload_desc offload;
struct ena_admin_ena_hw_hints hw_hints;
+ struct ena_admin_feature_llq_desc llq;
};
struct ena_com_create_io_ctx {
@@ -397,8 +437,6 @@ void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev);
/* ena_com_admin_init - Init the admin and the async queues
* @ena_dev: ENA communication layer struct
* @aenq_handlers: Those handlers to be called upon event.
- * @init_spinlock: Indicate if this method should init the admin spinlock or
- * the spinlock was init before (for example, in a case of FLR).
*
* Initialize the admin submission and completion queues.
* Initialize the asynchronous events notification queues.
@@ -406,8 +444,7 @@ void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev);
* @return - 0 on success, negative value on failure.
*/
int ena_com_admin_init(struct ena_com_dev *ena_dev,
- struct ena_aenq_handlers *aenq_handlers,
- bool init_spinlock);
+ struct ena_aenq_handlers *aenq_handlers);
/* ena_com_admin_destroy - Destroy the admin and the async events queues.
* @ena_dev: ENA communication layer struct
@@ -935,6 +972,16 @@ void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
enum ena_intr_moder_level level,
struct ena_intr_moder_entry *entry);
+/* ena_com_config_dev_mode - Configure the placement policy of the device.
+ * @ena_dev: ENA communication layer struct
+ * @llq_features: LLQ feature descriptor, retrieve via
+ * ena_com_get_dev_attr_feat.
+ * @ena_llq_config: The default driver LLQ parameters configurations
+ */
+int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
+ struct ena_admin_feature_llq_desc *llq_features,
+ struct ena_llq_configurations *llq_default_config);
+
static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev)
{
return ena_dev->adaptive_coalescing;
@@ -1044,4 +1091,21 @@ static inline void ena_com_update_intr_reg(struct ena_eth_io_intr_reg *intr_reg,
intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
}
+static inline u8 *ena_com_get_next_bounce_buffer(struct ena_com_io_bounce_buffer_control *bounce_buf_ctrl)
+{
+ u16 size, buffers_num;
+ u8 *buf;
+
+ size = bounce_buf_ctrl->buffer_size;
+ buffers_num = bounce_buf_ctrl->buffers_num;
+
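+	/* Wrap around using a mask; relies on buffers_num being a power of 2 */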
+ buf = bounce_buf_ctrl->base_buffer +
+ (bounce_buf_ctrl->next_to_use++ & (buffers_num - 1)) * size;
+
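+	/* Prefetch the buffer that will be handed out on the next call */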
+ prefetchw(bounce_buf_ctrl->base_buffer +
+ (bounce_buf_ctrl->next_to_use & (buffers_num - 1)) * size);
+
+ return buf;
+}
+
#endif /* !(ENA_COM) */
diff --git a/drivers/net/ethernet/amazon/ena/ena_common_defs.h b/drivers/net/ethernet/amazon/ena/ena_common_defs.h
index bb8d73676eab..23beb7e7ed7b 100644
--- a/drivers/net/ethernet/amazon/ena/ena_common_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_common_defs.h
@@ -32,8 +32,8 @@
#ifndef _ENA_COMMON_H_
#define _ENA_COMMON_H_
-#define ENA_COMMON_SPEC_VERSION_MAJOR 0 /* */
-#define ENA_COMMON_SPEC_VERSION_MINOR 10 /* */
+#define ENA_COMMON_SPEC_VERSION_MAJOR 2
+#define ENA_COMMON_SPEC_VERSION_MINOR 0
/* ENA operates with 48-bit memory addresses. ena_mem_addr_t */
struct ena_common_mem_addr {
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index 1c682b76190f..f6c2d3855be8 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -59,16 +59,7 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
return cdesc;
}
-static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
-{
- io_cq->head++;
-
- /* Switch phase bit in case of wrap around */
- if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
- io_cq->phase ^= 1;
-}
-
-static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
+static inline void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
{
u16 tail_masked;
u32 offset;
@@ -80,45 +71,159 @@ static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}
-static inline void ena_com_copy_curr_sq_desc_to_dev(struct ena_com_io_sq *io_sq)
+static inline int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
+ u8 *bounce_buffer)
{
- u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
- u32 offset = tail_masked * io_sq->desc_entry_size;
+ struct ena_com_llq_info *llq_info = &io_sq->llq_info;
- /* In case this queue isn't a LLQ */
- if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
- return;
+ u16 dst_tail_mask;
+ u32 dst_offset;
- memcpy_toio(io_sq->desc_addr.pbuf_dev_addr + offset,
- io_sq->desc_addr.virt_addr + offset,
- io_sq->desc_entry_size);
-}
+ dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
+ dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;
+
+ /* Make sure everything was written into the bounce buffer before
+ * writing the bounce buffer to the device
+ */
+ wmb();
+
+ /* The line is completed. Copy it to dev */
+ __iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
+ bounce_buffer, (llq_info->desc_list_entry_size) / 8);
-static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
-{
io_sq->tail++;
/* Switch phase bit in case of wrap around */
if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
io_sq->phase ^= 1;
+
+ return 0;
}
-static inline int ena_com_write_header(struct ena_com_io_sq *io_sq,
- u8 *head_src, u16 header_len)
+static inline int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
+ u8 *header_src,
+ u16 header_len)
{
- u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
- u8 __iomem *dev_head_addr =
- io_sq->header_addr + (tail_masked * io_sq->tx_max_header_size);
+ struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+ struct ena_com_llq_info *llq_info = &io_sq->llq_info;
+ u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
+ u16 header_offset;
- if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+ if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
return 0;
- if (unlikely(!io_sq->header_addr)) {
- pr_err("Push buffer header ptr is NULL\n");
- return -EINVAL;
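+	/* The header is copied into the bounce buffer right after the
+	 * descriptors that precede it in the LLQ line.
+	 */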
+ header_offset =
+ llq_info->descs_num_before_header * io_sq->desc_entry_size;
+
+ if (unlikely((header_offset + header_len) >
+ llq_info->desc_list_entry_size)) {
+ pr_err("trying to write header larger than llq entry can accommodate\n");
+ return -EFAULT;
+ }
+
+ if (unlikely(!bounce_buffer)) {
+ pr_err("bounce buffer is NULL\n");
+ return -EFAULT;
+ }
+
+ memcpy(bounce_buffer + header_offset, header_src, header_len);
+
+ return 0;
+}
+
+static inline void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
+{
+ struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+ u8 *bounce_buffer;
+ void *sq_desc;
+
+ bounce_buffer = pkt_ctrl->curr_bounce_buf;
+
+ if (unlikely(!bounce_buffer)) {
+ pr_err("bounce buffer is NULL\n");
+ return NULL;
+ }
+
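+	/* Descriptors are handed out from the current bounce buffer; they
+	 * reach device memory only when the line is written out.
+	 */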
+ sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
+ pkt_ctrl->idx++;
+ pkt_ctrl->descs_left_in_line--;
+
+ return sq_desc;
+}
+
+static inline int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
+{
+ struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+ struct ena_com_llq_info *llq_info = &io_sq->llq_info;
+ int rc;
+
+ if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
+ return 0;
+
+ /* bounce buffer was used, so write it and get a new one */
+ if (pkt_ctrl->idx) {
+ rc = ena_com_write_bounce_buffer_to_dev(io_sq,
+ pkt_ctrl->curr_bounce_buf);
+ if (unlikely(rc))
+ return rc;
+
+ pkt_ctrl->curr_bounce_buf =
+ ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
+ memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
+ 0x0, llq_info->desc_list_entry_size);
+ }
+
+ pkt_ctrl->idx = 0;
+ pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
+ return 0;
+}
+
+static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
+{
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+ return get_sq_desc_llq(io_sq);
+
+ return get_sq_desc_regular_queue(io_sq);
+}
+
+static inline int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
+{
+ struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+ struct ena_com_llq_info *llq_info = &io_sq->llq_info;
+ int rc;
+
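+
+	/* The current bounce line is exhausted: write it to device memory
+	 * and start a new line in the next bounce buffer.
+	 */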
+ if (!pkt_ctrl->descs_left_in_line) {
+ rc = ena_com_write_bounce_buffer_to_dev(io_sq,
+ pkt_ctrl->curr_bounce_buf);
+ if (unlikely(rc))
+ return rc;
+
+ pkt_ctrl->curr_bounce_buf =
+ ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
+ memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
+ 0x0, llq_info->desc_list_entry_size);
+
+ pkt_ctrl->idx = 0;
+ if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
+ pkt_ctrl->descs_left_in_line = 1;
+ else
+ pkt_ctrl->descs_left_in_line =
+ llq_info->desc_list_entry_size / io_sq->desc_entry_size;
}
- memcpy_toio(dev_head_addr, head_src, header_len);
+ return 0;
+}
+
+static inline int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
+{
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+ return ena_com_sq_update_llq_tail(io_sq);
+
+ io_sq->tail++;
+
+ /* Switch phase bit in case of wrap around */
+ if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
+ io_sq->phase ^= 1;
return 0;
}
@@ -186,8 +291,8 @@ static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
return false;
}
-static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
- struct ena_com_tx_ctx *ena_tx_ctx)
+static inline int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
+ struct ena_com_tx_ctx *ena_tx_ctx)
{
struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
@@ -232,8 +337,7 @@ static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *i
memcpy(&io_sq->cached_tx_meta, ena_meta,
sizeof(struct ena_com_tx_meta));
- ena_com_copy_curr_sq_desc_to_dev(io_sq);
- ena_com_sq_update_tail(io_sq);
+ return ena_com_sq_update_tail(io_sq);
}
static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
@@ -245,11 +349,14 @@ static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
ena_rx_ctx->l3_csum_err =
- (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
- ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT;
+ !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
ena_rx_ctx->l4_csum_err =
- (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
- ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT;
+ !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
+ ena_rx_ctx->l4_csum_checked =
+ !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT);
ena_rx_ctx->hash = cdesc->hash;
ena_rx_ctx->frag =
(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
@@ -271,18 +378,19 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
{
struct ena_eth_io_tx_desc *desc = NULL;
struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
- void *push_header = ena_tx_ctx->push_header;
+ void *buffer_to_push = ena_tx_ctx->push_header;
u16 header_len = ena_tx_ctx->header_len;
u16 num_bufs = ena_tx_ctx->num_bufs;
- int total_desc, i, rc;
+ u16 start_tail = io_sq->tail;
+ int i, rc;
bool have_meta;
u64 addr_hi;
WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type");
/* num_bufs +1 for potential meta desc */
- if (ena_com_sq_empty_space(io_sq) < (num_bufs + 1)) {
- pr_err("Not enough space in the tx queue\n");
+ if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
+ pr_debug("Not enough space in the tx queue\n");
return -ENOMEM;
}
@@ -292,23 +400,32 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
return -EINVAL;
}
- /* start with pushing the header (if needed) */
- rc = ena_com_write_header(io_sq, push_header, header_len);
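+	/* In LLQ (device placement) mode the caller must supply a push buffer */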
+ if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
+ !buffer_to_push))
+ return -EINVAL;
+
+ rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
if (unlikely(rc))
return rc;
have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq,
ena_tx_ctx);
- if (have_meta)
- ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);
+ if (have_meta) {
+ rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);
+ if (unlikely(rc))
+ return rc;
+ }
- /* If the caller doesn't want send packets */
+ /* If the caller doesn't want to send packets */
if (unlikely(!num_bufs && !header_len)) {
- *nb_hw_desc = have_meta ? 0 : 1;
- return 0;
+ rc = ena_com_close_bounce_buffer(io_sq);
+ *nb_hw_desc = io_sq->tail - start_tail;
+ return rc;
}
desc = get_sq_desc(io_sq);
+ if (unlikely(!desc))
+ return -EFAULT;
memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
/* Set first desc when we don't have meta descriptor */
@@ -360,10 +477,14 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
for (i = 0; i < num_bufs; i++) {
/* The first desc share the same desc as the header */
if (likely(i != 0)) {
- ena_com_copy_curr_sq_desc_to_dev(io_sq);
- ena_com_sq_update_tail(io_sq);
+ rc = ena_com_sq_update_tail(io_sq);
+ if (unlikely(rc))
+ return rc;
desc = get_sq_desc(io_sq);
+ if (unlikely(!desc))
+ return -EFAULT;
+
memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
desc->len_ctrl |= (io_sq->phase <<
@@ -386,15 +507,14 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
/* set the last desc indicator */
desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;
- ena_com_copy_curr_sq_desc_to_dev(io_sq);
-
- ena_com_sq_update_tail(io_sq);
+ rc = ena_com_sq_update_tail(io_sq);
+ if (unlikely(rc))
+ return rc;
- total_desc = max_t(u16, num_bufs, 1);
- total_desc += have_meta ? 1 : 0;
+ rc = ena_com_close_bounce_buffer(io_sq);
- *nb_hw_desc = total_desc;
- return 0;
+ *nb_hw_desc = io_sq->tail - start_tail;
+ return rc;
}
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
@@ -453,15 +573,18 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");
- if (unlikely(ena_com_sq_empty_space(io_sq) == 0))
+ if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
return -ENOSPC;
desc = get_sq_desc(io_sq);
+ if (unlikely(!desc))
+ return -EFAULT;
+
memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));
desc->length = ena_buf->len;
- desc->ctrl |= ENA_ETH_IO_RX_DESC_FIRST_MASK;
+ desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK;
desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK;
desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK;
desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
@@ -472,43 +595,7 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
desc->buff_addr_hi =
((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
- ena_com_sq_update_tail(io_sq);
-
- return 0;
-}
-
-int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
-{
- u8 expected_phase, cdesc_phase;
- struct ena_eth_io_tx_cdesc *cdesc;
- u16 masked_head;
-
- masked_head = io_cq->head & (io_cq->q_depth - 1);
- expected_phase = io_cq->phase;
-
- cdesc = (struct ena_eth_io_tx_cdesc *)
- ((uintptr_t)io_cq->cdesc_addr.virt_addr +
- (masked_head * io_cq->cdesc_entry_size_in_bytes));
-
- /* When the current completion descriptor phase isn't the same as the
- * expected, it mean that the device still didn't update
- * this completion.
- */
- cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
- if (cdesc_phase != expected_phase)
- return -EAGAIN;
-
- dma_rmb();
- if (unlikely(cdesc->req_id >= io_cq->q_depth)) {
- pr_err("Invalid req id %d\n", cdesc->req_id);
- return -EINVAL;
- }
-
- ena_com_cq_inc_head(io_cq);
-
- *req_id = READ_ONCE(cdesc->req_id);
-
- return 0;
+ return ena_com_sq_update_tail(io_sq);
}
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index 2f7657227cfe..340d02b64ca6 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -67,6 +67,7 @@ struct ena_com_rx_ctx {
enum ena_eth_io_l4_proto_index l4_proto;
bool l3_csum_err;
bool l4_csum_err;
+ u8 l4_csum_checked;
/* fragmented packet */
bool frag;
u32 hash;
@@ -86,8 +87,6 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
struct ena_com_buf *ena_buf,
u16 req_id);
-int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id);
-
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);
static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
@@ -96,7 +95,7 @@ static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
writel(intr_reg->intr_control, io_cq->unmask_reg);
}
-static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
+static inline int ena_com_free_desc(struct ena_com_io_sq *io_sq)
{
u16 tail, next_to_comp, cnt;
@@ -107,11 +106,28 @@ static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
return io_sq->q_depth - 1 - cnt;
}
-static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
+/* Check if the submission queue has enough space to hold required_buffers */
+static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
+ u16 required_buffers)
{
- u16 tail;
+ int temp;
- tail = io_sq->tail;
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+ return ena_com_free_desc(io_sq) >= required_buffers;
+
+	/* This calculation doesn't need to be 100% accurate. To reduce
+	 * the calculation overhead just subtract 2 lines from the free descs
+	 * (one for the header line and one to compensate for the round-down
+	 * division).
+ */
+ temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;
+
+ return ena_com_free_desc(io_sq) > temp;
+}
+
+static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
+{
+ u16 tail = io_sq->tail;
pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
io_sq->qid, tail);
@@ -159,4 +175,48 @@ static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
io_sq->next_to_comp += elem;
}
+static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
+{
+ io_cq->head++;
+
+ /* Switch phase bit in case of wrap around */
+ if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
+ io_cq->phase ^= 1;
+}
+
+static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
+ u16 *req_id)
+{
+ u8 expected_phase, cdesc_phase;
+ struct ena_eth_io_tx_cdesc *cdesc;
+ u16 masked_head;
+
+ masked_head = io_cq->head & (io_cq->q_depth - 1);
+ expected_phase = io_cq->phase;
+
+ cdesc = (struct ena_eth_io_tx_cdesc *)
+ ((uintptr_t)io_cq->cdesc_addr.virt_addr +
+ (masked_head * io_cq->cdesc_entry_size_in_bytes));
+
+ /* When the current completion descriptor phase isn't the same as the
+	 * expected phase, it means that the device hasn't updated
+	 * this completion yet.
+ */
+ cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
+ if (cdesc_phase != expected_phase)
+ return -EAGAIN;
+
+ dma_rmb();
+
+ *req_id = READ_ONCE(cdesc->req_id);
+ if (unlikely(*req_id >= io_cq->q_depth)) {
+ pr_err("Invalid req id %d\n", cdesc->req_id);
+ return -EINVAL;
+ }
+
+ ena_com_cq_inc_head(io_cq);
+
+ return 0;
+}
+
#endif /* ENA_ETH_COM_H_ */
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h
index f320c58793a5..00e0f056a741 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h
@@ -33,25 +33,18 @@
#define _ENA_ETH_IO_H_
enum ena_eth_io_l3_proto_index {
- ENA_ETH_IO_L3_PROTO_UNKNOWN = 0,
-
- ENA_ETH_IO_L3_PROTO_IPV4 = 8,
-
- ENA_ETH_IO_L3_PROTO_IPV6 = 11,
-
- ENA_ETH_IO_L3_PROTO_FCOE = 21,
-
- ENA_ETH_IO_L3_PROTO_ROCE = 22,
+ ENA_ETH_IO_L3_PROTO_UNKNOWN = 0,
+ ENA_ETH_IO_L3_PROTO_IPV4 = 8,
+ ENA_ETH_IO_L3_PROTO_IPV6 = 11,
+ ENA_ETH_IO_L3_PROTO_FCOE = 21,
+ ENA_ETH_IO_L3_PROTO_ROCE = 22,
};
enum ena_eth_io_l4_proto_index {
- ENA_ETH_IO_L4_PROTO_UNKNOWN = 0,
-
- ENA_ETH_IO_L4_PROTO_TCP = 12,
-
- ENA_ETH_IO_L4_PROTO_UDP = 13,
-
- ENA_ETH_IO_L4_PROTO_ROUTEABLE_ROCE = 23,
+ ENA_ETH_IO_L4_PROTO_UNKNOWN = 0,
+ ENA_ETH_IO_L4_PROTO_TCP = 12,
+ ENA_ETH_IO_L4_PROTO_UDP = 13,
+ ENA_ETH_IO_L4_PROTO_ROUTEABLE_ROCE = 23,
};
struct ena_eth_io_tx_desc {
@@ -242,9 +235,13 @@ struct ena_eth_io_rx_cdesc_base {
* checksum error detected, or, the controller didn't
* validate the checksum. This bit is valid only when
* l4_proto_idx indicates TCP/UDP packet, and,
- * ipv4_frag is not set
+ * ipv4_frag is not set. This bit is valid only when
+ * l4_csum_checked below is set.
* 15 : ipv4_frag - Indicates IPv4 fragmented packet
- * 23:16 : reserved16
+ * 16 : l4_csum_checked - L4 checksum was verified
+ *    (could be OK or error); when cleared, the checksum
+ *    status is unknown
+ * 23:17 : reserved17 - MBZ
* 24 : phase
* 25 : l3_csum2 - second checksum engine result
* 26 : first - Indicates first descriptor in
@@ -303,114 +300,116 @@ struct ena_eth_io_numa_node_cfg_reg {
};
/* tx_desc */
-#define ENA_ETH_IO_TX_DESC_LENGTH_MASK GENMASK(15, 0)
-#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT 16
-#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK GENMASK(21, 16)
-#define ENA_ETH_IO_TX_DESC_META_DESC_SHIFT 23
-#define ENA_ETH_IO_TX_DESC_META_DESC_MASK BIT(23)
-#define ENA_ETH_IO_TX_DESC_PHASE_SHIFT 24
-#define ENA_ETH_IO_TX_DESC_PHASE_MASK BIT(24)
-#define ENA_ETH_IO_TX_DESC_FIRST_SHIFT 26
-#define ENA_ETH_IO_TX_DESC_FIRST_MASK BIT(26)
-#define ENA_ETH_IO_TX_DESC_LAST_SHIFT 27
-#define ENA_ETH_IO_TX_DESC_LAST_MASK BIT(27)
-#define ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT 28
-#define ENA_ETH_IO_TX_DESC_COMP_REQ_MASK BIT(28)
-#define ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK GENMASK(3, 0)
-#define ENA_ETH_IO_TX_DESC_DF_SHIFT 4
-#define ENA_ETH_IO_TX_DESC_DF_MASK BIT(4)
-#define ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT 7
-#define ENA_ETH_IO_TX_DESC_TSO_EN_MASK BIT(7)
-#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT 8
-#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK GENMASK(12, 8)
-#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT 13
-#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK BIT(13)
-#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT 14
-#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK BIT(14)
-#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT 15
-#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK BIT(15)
-#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT 17
-#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK BIT(17)
-#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT 22
-#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK GENMASK(31, 22)
-#define ENA_ETH_IO_TX_DESC_ADDR_HI_MASK GENMASK(15, 0)
-#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT 24
-#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK GENMASK(31, 24)
+#define ENA_ETH_IO_TX_DESC_LENGTH_MASK GENMASK(15, 0)
+#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT 16
+#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK GENMASK(21, 16)
+#define ENA_ETH_IO_TX_DESC_META_DESC_SHIFT 23
+#define ENA_ETH_IO_TX_DESC_META_DESC_MASK BIT(23)
+#define ENA_ETH_IO_TX_DESC_PHASE_SHIFT 24
+#define ENA_ETH_IO_TX_DESC_PHASE_MASK BIT(24)
+#define ENA_ETH_IO_TX_DESC_FIRST_SHIFT 26
+#define ENA_ETH_IO_TX_DESC_FIRST_MASK BIT(26)
+#define ENA_ETH_IO_TX_DESC_LAST_SHIFT 27
+#define ENA_ETH_IO_TX_DESC_LAST_MASK BIT(27)
+#define ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT 28
+#define ENA_ETH_IO_TX_DESC_COMP_REQ_MASK BIT(28)
+#define ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK GENMASK(3, 0)
+#define ENA_ETH_IO_TX_DESC_DF_SHIFT 4
+#define ENA_ETH_IO_TX_DESC_DF_MASK BIT(4)
+#define ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT 7
+#define ENA_ETH_IO_TX_DESC_TSO_EN_MASK BIT(7)
+#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT 8
+#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK GENMASK(12, 8)
+#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT 13
+#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK BIT(13)
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT 14
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK BIT(14)
+#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT 15
+#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK BIT(15)
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT 17
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK BIT(17)
+#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT 22
+#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK GENMASK(31, 22)
+#define ENA_ETH_IO_TX_DESC_ADDR_HI_MASK GENMASK(15, 0)
+#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT 24
+#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK GENMASK(31, 24)
/* tx_meta_desc */
-#define ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK GENMASK(9, 0)
-#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT 14
-#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK BIT(14)
-#define ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT 16
-#define ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK GENMASK(19, 16)
-#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT 20
-#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK BIT(20)
-#define ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT 21
-#define ENA_ETH_IO_TX_META_DESC_META_STORE_MASK BIT(21)
-#define ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT 23
-#define ENA_ETH_IO_TX_META_DESC_META_DESC_MASK BIT(23)
-#define ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT 24
-#define ENA_ETH_IO_TX_META_DESC_PHASE_MASK BIT(24)
-#define ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT 26
-#define ENA_ETH_IO_TX_META_DESC_FIRST_MASK BIT(26)
-#define ENA_ETH_IO_TX_META_DESC_LAST_SHIFT 27
-#define ENA_ETH_IO_TX_META_DESC_LAST_MASK BIT(27)
-#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT 28
-#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK BIT(28)
-#define ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK GENMASK(5, 0)
-#define ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK GENMASK(7, 0)
-#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT 8
-#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK GENMASK(15, 8)
-#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT 16
-#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK GENMASK(21, 16)
-#define ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT 22
-#define ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK GENMASK(31, 22)
+#define ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK GENMASK(9, 0)
+#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT 14
+#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK BIT(14)
+#define ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT 16
+#define ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK GENMASK(19, 16)
+#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT 20
+#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK BIT(20)
+#define ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT 21
+#define ENA_ETH_IO_TX_META_DESC_META_STORE_MASK BIT(21)
+#define ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT 23
+#define ENA_ETH_IO_TX_META_DESC_META_DESC_MASK BIT(23)
+#define ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT 24
+#define ENA_ETH_IO_TX_META_DESC_PHASE_MASK BIT(24)
+#define ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT 26
+#define ENA_ETH_IO_TX_META_DESC_FIRST_MASK BIT(26)
+#define ENA_ETH_IO_TX_META_DESC_LAST_SHIFT 27
+#define ENA_ETH_IO_TX_META_DESC_LAST_MASK BIT(27)
+#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT 28
+#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK BIT(28)
+#define ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK GENMASK(5, 0)
+#define ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK GENMASK(7, 0)
+#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT 8
+#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK GENMASK(15, 8)
+#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT 16
+#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK GENMASK(21, 16)
+#define ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT 22
+#define ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK GENMASK(31, 22)
/* tx_cdesc */
-#define ENA_ETH_IO_TX_CDESC_PHASE_MASK BIT(0)
+#define ENA_ETH_IO_TX_CDESC_PHASE_MASK BIT(0)
/* rx_desc */
-#define ENA_ETH_IO_RX_DESC_PHASE_MASK BIT(0)
-#define ENA_ETH_IO_RX_DESC_FIRST_SHIFT 2
-#define ENA_ETH_IO_RX_DESC_FIRST_MASK BIT(2)
-#define ENA_ETH_IO_RX_DESC_LAST_SHIFT 3
-#define ENA_ETH_IO_RX_DESC_LAST_MASK BIT(3)
-#define ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT 4
-#define ENA_ETH_IO_RX_DESC_COMP_REQ_MASK BIT(4)
+#define ENA_ETH_IO_RX_DESC_PHASE_MASK BIT(0)
+#define ENA_ETH_IO_RX_DESC_FIRST_SHIFT 2
+#define ENA_ETH_IO_RX_DESC_FIRST_MASK BIT(2)
+#define ENA_ETH_IO_RX_DESC_LAST_SHIFT 3
+#define ENA_ETH_IO_RX_DESC_LAST_MASK BIT(3)
+#define ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT 4
+#define ENA_ETH_IO_RX_DESC_COMP_REQ_MASK BIT(4)
/* rx_cdesc_base */
-#define ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK GENMASK(4, 0)
-#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT 5
-#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK GENMASK(6, 5)
-#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT 8
-#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK GENMASK(12, 8)
-#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT 13
-#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK BIT(13)
-#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT 14
-#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK BIT(14)
-#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT 15
-#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK BIT(15)
-#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT 24
-#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK BIT(24)
-#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT 25
-#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK BIT(25)
-#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT 26
-#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK BIT(26)
-#define ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT 27
-#define ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK BIT(27)
-#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT 30
-#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK BIT(30)
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK GENMASK(4, 0)
+#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT 5
+#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK GENMASK(6, 5)
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT 8
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK GENMASK(12, 8)
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT 13
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK BIT(13)
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT 14
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK BIT(14)
+#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT 15
+#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK BIT(15)
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT 16
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK BIT(16)
+#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT 24
+#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK BIT(24)
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT 25
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK BIT(25)
+#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT 26
+#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK BIT(26)
+#define ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT 27
+#define ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK BIT(27)
+#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT 30
+#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK BIT(30)
/* intr_reg */
-#define ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK GENMASK(14, 0)
-#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT 15
-#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK GENMASK(29, 15)
-#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT 30
-#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK BIT(30)
+#define ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK GENMASK(14, 0)
+#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT 15
+#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK GENMASK(29, 15)
+#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT 30
+#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK BIT(30)
/* numa_node_cfg_reg */
-#define ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK GENMASK(7, 0)
-#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT 31
-#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK BIT(31)
+#define ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK GENMASK(7, 0)
+#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT 31
+#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK BIT(31)
#endif /*_ENA_ETH_IO_H_ */
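The defines above follow the header's SHIFT/MASK convention: each field of a 32-bit descriptor word gets a shift plus a mask that already includes the shift, so a field is written with (val << _SHIFT) & _MASK and read with (word & _MASK) >> _SHIFT. A minimal sketch of composing the first control word of a TX descriptor; the helper name is hypothetical, not part of the driver, and assumes the defines above plus the kernel types from <linux/types.h> are in scope:

	static inline u32 ena_tx_desc_ctrl_word(u16 len, u8 phase, bool first, bool last)
	{
		u32 ctrl = 0;

		/* buffer length lives in bits 15:0 */
		ctrl |= len & ENA_ETH_IO_TX_DESC_LENGTH_MASK;
		/* phase bit toggles on every wrap of the submission queue */
		ctrl |= ((u32)phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
			ENA_ETH_IO_TX_DESC_PHASE_MASK;
		if (first)
			ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;
		if (last)
			ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;
		/* ask the device to post a completion for this descriptor */
		ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;

		return ctrl;
	}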
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 521607bc4393..f3a5a384e6e8 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -81,6 +81,7 @@ static const struct ena_stats ena_stats_tx_strings[] = {
ENA_STAT_TX_ENTRY(doorbells),
ENA_STAT_TX_ENTRY(prepare_ctx_err),
ENA_STAT_TX_ENTRY(bad_req_id),
+ ENA_STAT_TX_ENTRY(llq_buffer_copy),
ENA_STAT_TX_ENTRY(missed_tx),
};
@@ -96,6 +97,7 @@ static const struct ena_stats ena_stats_rx_strings[] = {
ENA_STAT_RX_ENTRY(rx_copybreak_pkt),
ENA_STAT_RX_ENTRY(bad_req_id),
ENA_STAT_RX_ENTRY(empty_rx_ring),
+ ENA_STAT_RX_ENTRY(csum_unchecked),
};
static const struct ena_stats ena_stats_ena_com_strings[] = {
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index b2522e84f482..18956e7604a3 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -237,6 +237,17 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
}
}
+ size = tx_ring->tx_max_header_size;
+ tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node);
+ if (!tx_ring->push_buf_intermediate_buf) {
+ tx_ring->push_buf_intermediate_buf = vzalloc(size);
+ if (!tx_ring->push_buf_intermediate_buf) {
+ vfree(tx_ring->tx_buffer_info);
+ vfree(tx_ring->free_tx_ids);
+ return -ENOMEM;
+ }
+ }
+
/* Req id ring for TX out of order completions */
for (i = 0; i < tx_ring->ring_size; i++)
tx_ring->free_tx_ids[i] = i;
@@ -265,6 +276,9 @@ static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
vfree(tx_ring->free_tx_ids);
tx_ring->free_tx_ids = NULL;
+
+ vfree(tx_ring->push_buf_intermediate_buf);
+ tx_ring->push_buf_intermediate_buf = NULL;
}
/* ena_setup_all_tx_resources - allocate I/O Tx queues resources for All queues
@@ -602,6 +616,36 @@ static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
ena_free_rx_bufs(adapter, i);
}
+static inline void ena_unmap_tx_skb(struct ena_ring *tx_ring,
+ struct ena_tx_buffer *tx_info)
+{
+ struct ena_com_buf *ena_buf;
+ u32 cnt;
+ int i;
+
+ ena_buf = tx_info->bufs;
+ cnt = tx_info->num_of_bufs;
+
+ if (unlikely(!cnt))
+ return;
+
+ if (tx_info->map_linear_data) {
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(ena_buf, paddr),
+ dma_unmap_len(ena_buf, len),
+ DMA_TO_DEVICE);
+ ena_buf++;
+ cnt--;
+ }
+
+ /* unmap remaining mapped pages */
+ for (i = 0; i < cnt; i++) {
+ dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
+ dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
+ ena_buf++;
+ }
+}
+
/* ena_free_tx_bufs - Free Tx Buffers per Queue
* @tx_ring: TX ring for which buffers be freed
*/
@@ -612,9 +656,6 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
for (i = 0; i < tx_ring->ring_size; i++) {
struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
- struct ena_com_buf *ena_buf;
- int nr_frags;
- int j;
if (!tx_info->skb)
continue;
@@ -630,21 +671,7 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
tx_ring->qid, i);
}
- ena_buf = tx_info->bufs;
- dma_unmap_single(tx_ring->dev,
- ena_buf->paddr,
- ena_buf->len,
- DMA_TO_DEVICE);
-
- /* unmap remaining mapped pages */
- nr_frags = tx_info->num_of_bufs - 1;
- for (j = 0; j < nr_frags; j++) {
- ena_buf++;
- dma_unmap_page(tx_ring->dev,
- ena_buf->paddr,
- ena_buf->len,
- DMA_TO_DEVICE);
- }
+ ena_unmap_tx_skb(tx_ring, tx_info);
dev_kfree_skb_any(tx_info->skb);
}
@@ -735,8 +762,6 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
while (tx_pkts < budget) {
struct ena_tx_buffer *tx_info;
struct sk_buff *skb;
- struct ena_com_buf *ena_buf;
- int i, nr_frags;
rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
&req_id);
@@ -756,24 +781,7 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
tx_info->skb = NULL;
tx_info->last_jiffies = 0;
- if (likely(tx_info->num_of_bufs != 0)) {
- ena_buf = tx_info->bufs;
-
- dma_unmap_single(tx_ring->dev,
- dma_unmap_addr(ena_buf, paddr),
- dma_unmap_len(ena_buf, len),
- DMA_TO_DEVICE);
-
- /* unmap remaining mapped pages */
- nr_frags = tx_info->num_of_bufs - 1;
- for (i = 0; i < nr_frags; i++) {
- ena_buf++;
- dma_unmap_page(tx_ring->dev,
- dma_unmap_addr(ena_buf, paddr),
- dma_unmap_len(ena_buf, len),
- DMA_TO_DEVICE);
- }
- }
+ ena_unmap_tx_skb(tx_ring, tx_info);
netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
"tx_poll: q %d skb %p completed\n", tx_ring->qid,
@@ -804,12 +812,13 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
*/
smp_mb();
- above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
- ENA_TX_WAKEUP_THRESH;
+ above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
+ ENA_TX_WAKEUP_THRESH);
if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
__netif_tx_lock(txq, smp_processor_id());
- above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
- ENA_TX_WAKEUP_THRESH;
+ above_thresh =
+ ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
+ ENA_TX_WAKEUP_THRESH);
if (netif_tx_queue_stopped(txq) && above_thresh) {
netif_tx_wake_queue(txq);
u64_stats_update_begin(&tx_ring->syncp);
@@ -985,8 +994,19 @@ static inline void ena_rx_checksum(struct ena_ring *rx_ring,
return;
}
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (likely(ena_rx_ctx->l4_csum_checked)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->rx_stats.csum_unchecked++;
+ u64_stats_update_end(&rx_ring->syncp);
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ return;
}
+
}
static void ena_set_rx_hash(struct ena_ring *rx_ring,
@@ -1101,8 +1121,10 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
rx_ring->next_to_clean = next_to_clean;
- refill_required = ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
- refill_threshold = rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER;
+ refill_required = ena_com_free_desc(rx_ring->ena_com_io_sq);
+ refill_threshold =
+ min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER,
+ ENA_RX_REFILL_THRESH_PACKET);
/* Optimization, try to batch new rx buffers */
if (refill_required > refill_threshold) {
@@ -1299,7 +1321,6 @@ static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
/* Reserved the max msix vectors we might need */
msix_vecs = ENA_MAX_MSIX_VEC(num_queues);
-
netif_dbg(adapter, probe, adapter->netdev,
"trying to enable MSI-X, vectors %d\n", msix_vecs);
@@ -1574,8 +1595,6 @@ static int ena_up_complete(struct ena_adapter *adapter)
if (rc)
return rc;
- ena_init_napi(adapter);
-
ena_change_mtu(adapter->netdev, adapter->netdev->mtu);
ena_refill_all_rx_bufs(adapter);
@@ -1592,7 +1611,7 @@ static int ena_up_complete(struct ena_adapter *adapter)
static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
{
- struct ena_com_create_io_ctx ctx = { 0 };
+ struct ena_com_create_io_ctx ctx;
struct ena_com_dev *ena_dev;
struct ena_ring *tx_ring;
u32 msix_vector;
@@ -1605,6 +1624,8 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
msix_vector = ENA_IO_IRQ_IDX(qid);
ena_qid = ENA_IO_TXQ_IDX(qid);
+ memset(&ctx, 0x0, sizeof(ctx));
+
ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
ctx.qid = ena_qid;
ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
@@ -1658,7 +1679,7 @@ create_err:
static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
{
struct ena_com_dev *ena_dev;
- struct ena_com_create_io_ctx ctx = { 0 };
+ struct ena_com_create_io_ctx ctx;
struct ena_ring *rx_ring;
u32 msix_vector;
u16 ena_qid;
@@ -1670,6 +1691,8 @@ static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
msix_vector = ENA_IO_IRQ_IDX(qid);
ena_qid = ENA_IO_RXQ_IDX(qid);
+ memset(&ctx, 0x0, sizeof(ctx));
+
ctx.qid = ena_qid;
ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
@@ -1729,6 +1752,13 @@ static int ena_up(struct ena_adapter *adapter)
ena_setup_io_intr(adapter);
+ /* NAPI poll functions should be initialized before running
+ * request_irq() to handle a rare condition where there is a pending
+ * interrupt, causing the ISR to fire immediately while the poll
+ * function is not yet set, which leads to a NULL dereference
+ */
+ ena_init_napi(adapter);
+
rc = ena_request_io_irq(adapter);
if (rc)
goto err_req_irq;
@@ -1980,73 +2010,70 @@ static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
return rc;
}
-/* Called with netif_tx_lock. */
-static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static int ena_tx_map_skb(struct ena_ring *tx_ring,
+ struct ena_tx_buffer *tx_info,
+ struct sk_buff *skb,
+ void **push_hdr,
+ u16 *header_len)
{
- struct ena_adapter *adapter = netdev_priv(dev);
- struct ena_tx_buffer *tx_info;
- struct ena_com_tx_ctx ena_tx_ctx;
- struct ena_ring *tx_ring;
- struct netdev_queue *txq;
+ struct ena_adapter *adapter = tx_ring->adapter;
struct ena_com_buf *ena_buf;
- void *push_hdr;
- u32 len, last_frag;
- u16 next_to_use;
- u16 req_id;
- u16 push_len;
- u16 header_len;
dma_addr_t dma;
- int qid, rc, nb_hw_desc;
- int i = -1;
-
- netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
- /* Determine which tx ring we will be placed on */
- qid = skb_get_queue_mapping(skb);
- tx_ring = &adapter->tx_ring[qid];
- txq = netdev_get_tx_queue(dev, qid);
-
- rc = ena_check_and_linearize_skb(tx_ring, skb);
- if (unlikely(rc))
- goto error_drop_packet;
+ u32 skb_head_len, frag_len, last_frag;
+ u16 push_len = 0;
+ u16 delta = 0;
+ int i = 0;
- skb_tx_timestamp(skb);
- len = skb_headlen(skb);
-
- next_to_use = tx_ring->next_to_use;
- req_id = tx_ring->free_tx_ids[next_to_use];
- tx_info = &tx_ring->tx_buffer_info[req_id];
- tx_info->num_of_bufs = 0;
-
- WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
- ena_buf = tx_info->bufs;
+ skb_head_len = skb_headlen(skb);
tx_info->skb = skb;
+ ena_buf = tx_info->bufs;
if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
- /* prepared the push buffer */
- push_len = min_t(u32, len, tx_ring->tx_max_header_size);
- header_len = push_len;
- push_hdr = skb->data;
+ /* When the device is in LLQ mode, the driver will copy
+ * the header into the device memory space.
+ * The ena_com layer assumes the header is in a linear
+ * memory space.
+ * This assumption might be wrong since part of the header
+ * can be in the fragmented buffers.
+ * Use skb_header_pointer to make sure the header is in a
+ * linear memory space.
+ */
+
+ push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size);
+ *push_hdr = skb_header_pointer(skb, 0, push_len,
+ tx_ring->push_buf_intermediate_buf);
+ *header_len = push_len;
+ if (unlikely(skb->data != *push_hdr)) {
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->tx_stats.llq_buffer_copy++;
+ u64_stats_update_end(&tx_ring->syncp);
+
+ delta = push_len - skb_head_len;
+ }
} else {
- push_len = 0;
- header_len = min_t(u32, len, tx_ring->tx_max_header_size);
- push_hdr = NULL;
+ *push_hdr = NULL;
+ *header_len = min_t(u32, skb_head_len,
+ tx_ring->tx_max_header_size);
}
- netif_dbg(adapter, tx_queued, dev,
+ netif_dbg(adapter, tx_queued, adapter->netdev,
"skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
- push_hdr, push_len);
+ *push_hdr, push_len);
- if (len > push_len) {
+ if (skb_head_len > push_len) {
dma = dma_map_single(tx_ring->dev, skb->data + push_len,
- len - push_len, DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, dma))
+ skb_head_len - push_len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
goto error_report_dma_error;
ena_buf->paddr = dma;
- ena_buf->len = len - push_len;
+ ena_buf->len = skb_head_len - push_len;
ena_buf++;
tx_info->num_of_bufs++;
+ tx_info->map_linear_data = 1;
+ } else {
+ tx_info->map_linear_data = 0;
}
last_frag = skb_shinfo(skb)->nr_frags;
@@ -2054,18 +2081,75 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
for (i = 0; i < last_frag; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- len = skb_frag_size(frag);
- dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, dma))
+ frag_len = skb_frag_size(frag);
+
+ if (unlikely(delta >= frag_len)) {
+ delta -= frag_len;
+ continue;
+ }
+
+ dma = skb_frag_dma_map(tx_ring->dev, frag, delta,
+ frag_len - delta, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
goto error_report_dma_error;
ena_buf->paddr = dma;
- ena_buf->len = len;
+ ena_buf->len = frag_len - delta;
ena_buf++;
+ tx_info->num_of_bufs++;
+ delta = 0;
}
- tx_info->num_of_bufs += last_frag;
+ return 0;
+
+error_report_dma_error:
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->tx_stats.dma_mapping_err++;
+ u64_stats_update_end(&tx_ring->syncp);
+ netdev_warn(adapter->netdev, "failed to map skb\n");
+
+ tx_info->skb = NULL;
+
+ tx_info->num_of_bufs += i;
+ ena_unmap_tx_skb(tx_ring, tx_info);
+
+ return -EINVAL;
+}
+
+/* Called with netif_tx_lock. */
+static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ena_adapter *adapter = netdev_priv(dev);
+ struct ena_tx_buffer *tx_info;
+ struct ena_com_tx_ctx ena_tx_ctx;
+ struct ena_ring *tx_ring;
+ struct netdev_queue *txq;
+ void *push_hdr;
+ u16 next_to_use, req_id, header_len;
+ int qid, rc, nb_hw_desc;
+
+ netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
+ /* Determine which tx ring we will be placed on */
+ qid = skb_get_queue_mapping(skb);
+ tx_ring = &adapter->tx_ring[qid];
+ txq = netdev_get_tx_queue(dev, qid);
+
+ rc = ena_check_and_linearize_skb(tx_ring, skb);
+ if (unlikely(rc))
+ goto error_drop_packet;
+
+ skb_tx_timestamp(skb);
+
+ next_to_use = tx_ring->next_to_use;
+ req_id = tx_ring->free_tx_ids[next_to_use];
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+ tx_info->num_of_bufs = 0;
+
+ WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
+
+ rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len);
+ if (unlikely(rc))
+ goto error_drop_packet;
memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
ena_tx_ctx.ena_bufs = tx_info->bufs;
@@ -2081,14 +2165,22 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
&nb_hw_desc);
+ /* ena_com_prepare_tx() can't fail due to overflow of the tx queue,
+ * since the number of free descriptors in the queue is checked
+ * after sending the previous packet. In case there isn't enough
+ * space in the queue for the next packet, the queue is stopped
+ * until enough space becomes available again.
+ * All other failure reasons of ena_com_prepare_tx() are fatal
+ * and therefore require a device reset.
+ */
if (unlikely(rc)) {
netif_err(adapter, tx_queued, dev,
"failed to prepare tx bufs\n");
u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.queue_stop++;
tx_ring->tx_stats.prepare_ctx_err++;
u64_stats_update_end(&tx_ring->syncp);
- netif_tx_stop_queue(txq);
+ adapter->reset_reason = ENA_REGS_RESET_DRIVER_INVALID_STATE;
+ set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
goto error_unmap_dma;
}
@@ -2110,8 +2202,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
* to sgl_size + 2. one for the meta descriptor and one for header
* (if the header is larger than tx_max_header_size).
*/
- if (unlikely(ena_com_sq_empty_space(tx_ring->ena_com_io_sq) <
- (tx_ring->sgl_size + 2))) {
+ if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
+ tx_ring->sgl_size + 2))) {
netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
__func__, qid);
@@ -2130,8 +2222,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
smp_mb();
- if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq)
- > ENA_TX_WAKEUP_THRESH) {
+ if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
+ ENA_TX_WAKEUP_THRESH)) {
netif_tx_wake_queue(txq);
u64_stats_update_begin(&tx_ring->syncp);
tx_ring->tx_stats.queue_wakeup++;
@@ -2151,58 +2243,15 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
-error_report_dma_error:
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.dma_mapping_err++;
- u64_stats_update_end(&tx_ring->syncp);
- netdev_warn(adapter->netdev, "failed to map skb\n");
-
- tx_info->skb = NULL;
-
error_unmap_dma:
- if (i >= 0) {
- /* save value of frag that failed */
- last_frag = i;
-
- /* start back at beginning and unmap skb */
- tx_info->skb = NULL;
- ena_buf = tx_info->bufs;
- dma_unmap_single(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
- dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
-
- /* unmap remaining mapped pages */
- for (i = 0; i < last_frag; i++) {
- ena_buf++;
- dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
- dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
- }
- }
+ ena_unmap_tx_skb(tx_ring, tx_info);
+ tx_info->skb = NULL;
error_drop_packet:
-
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ena_netpoll(struct net_device *netdev)
-{
- struct ena_adapter *adapter = netdev_priv(netdev);
- int i;
-
- /* Dont schedule NAPI if the driver is in the middle of reset
- * or netdev is down.
- */
-
- if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags) ||
- test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
- return;
-
- for (i = 0; i < adapter->num_queues; i++)
- napi_schedule(&adapter->ena_napi[i].napi);
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback)
@@ -2220,7 +2269,8 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
return qid;
}
-static void ena_config_host_info(struct ena_com_dev *ena_dev)
+static void ena_config_host_info(struct ena_com_dev *ena_dev,
+ struct pci_dev *pdev)
{
struct ena_admin_host_info *host_info;
int rc;
@@ -2234,6 +2284,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev)
host_info = ena_dev->host_attr.host_info;
+ host_info->bdf = (pdev->bus->number << 8) | pdev->devfn;
host_info->os_type = ENA_ADMIN_OS_LINUX;
host_info->kernel_ver = LINUX_VERSION_CODE;
strncpy(host_info->kernel_ver_str, utsname()->version,
@@ -2244,7 +2295,9 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev)
host_info->driver_version =
(DRV_MODULE_VER_MAJOR) |
(DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
- (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
+ (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) |
+ ("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT);
+ host_info->num_cpus = num_online_cpus();
rc = ena_com_set_host_attributes(ena_dev);
if (rc) {
@@ -2368,9 +2421,6 @@ static const struct net_device_ops ena_netdev_ops = {
.ndo_change_mtu = ena_change_mtu,
.ndo_set_mac_address = NULL,
.ndo_validate_addr = eth_validate_addr,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ena_netpoll,
-#endif /* CONFIG_NET_POLL_CONTROLLER */
};
static int ena_device_validate_params(struct ena_adapter *adapter,
@@ -2458,7 +2508,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
}
/* ENA admin level init */
- rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
+ rc = ena_com_admin_init(ena_dev, &aenq_handlers);
if (rc) {
dev_err(dev,
"Can not initialize ena admin queue with device\n");
@@ -2471,7 +2521,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
*/
ena_com_set_admin_polling_mode(ena_dev, true);
- ena_config_host_info(ena_dev);
+ ena_config_host_info(ena_dev, pdev);
/* Get Device Attributes*/
rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
@@ -2556,15 +2606,14 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
adapter->dev_up_before_reset = dev_up;
-
if (!graceful)
ena_com_set_admin_running_state(ena_dev, false);
if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
ena_down(adapter);
- /* Before releasing the ENA resources, a device reset is required.
- * (to prevent the device from accessing them).
+ /* Stop the device from sending AENQ events (in case the reset flag is set
+ * and the device is up, ena_close has already reset the device).
* In case the reset flag is set and the device is up, ena_down()
* already performs the reset, so it can be skipped.
*/
@@ -2633,14 +2682,20 @@ static int ena_restore_device(struct ena_adapter *adapter)
set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
- dev_err(&pdev->dev, "Device reset completed successfully\n");
+ dev_err(&pdev->dev,
+ "Device reset completed successfully, Driver info: %s\n",
+ version);
return rc;
err_disable_msix:
ena_free_mgmnt_irq(adapter);
ena_disable_msix(adapter);
err_device_destroy:
+ ena_com_abort_admin_commands(ena_dev);
+ ena_com_wait_for_abort_completion(ena_dev);
ena_com_admin_destroy(ena_dev);
+ ena_com_mmio_reg_read_request_destroy(ena_dev);
+ ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
err:
clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
@@ -2822,7 +2877,7 @@ static void check_for_empty_rx_ring(struct ena_adapter *adapter)
rx_ring = &adapter->rx_ring[i];
refill_required =
- ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
+ ena_com_free_desc(rx_ring->ena_com_io_sq);
if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
rx_ring->empty_rx_queue++;
@@ -2967,20 +3022,10 @@ static int ena_calc_io_queue_num(struct pci_dev *pdev,
int io_sq_num, io_queue_num;
/* In case of LLQ use the llq number in the get feature cmd */
- if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
- io_sq_num = get_feat_ctx->max_queues.max_llq_num;
-
- if (io_sq_num == 0) {
- dev_err(&pdev->dev,
- "Trying to use LLQ but llq_num is 0. Fall back into regular queues\n");
-
- ena_dev->tx_mem_queue_type =
- ENA_ADMIN_PLACEMENT_POLICY_HOST;
- io_sq_num = get_feat_ctx->max_queues.max_sq_num;
- }
- } else {
+ if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+ io_sq_num = get_feat_ctx->llq.max_llq_num;
+ else
io_sq_num = get_feat_ctx->max_queues.max_sq_num;
- }
io_queue_num = min_t(int, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
io_queue_num = min_t(int, io_queue_num, io_sq_num);
@@ -2996,18 +3041,52 @@ static int ena_calc_io_queue_num(struct pci_dev *pdev,
return io_queue_num;
}
-static void ena_set_push_mode(struct pci_dev *pdev, struct ena_com_dev *ena_dev,
- struct ena_com_dev_get_features_ctx *get_feat_ctx)
+static int ena_set_queues_placement_policy(struct pci_dev *pdev,
+ struct ena_com_dev *ena_dev,
+ struct ena_admin_feature_llq_desc *llq,
+ struct ena_llq_configurations *llq_default_configurations)
{
bool has_mem_bar;
+ int rc;
+ u32 llq_feature_mask;
+
+ llq_feature_mask = 1 << ENA_ADMIN_LLQ;
+ if (!(ena_dev->supported_features & llq_feature_mask)) {
+ dev_err(&pdev->dev,
+ "LLQ is not supported Fallback to host mode policy.\n");
+ ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+ return 0;
+ }
has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR);
- /* Enable push mode if device supports LLQ */
- if (has_mem_bar && (get_feat_ctx->max_queues.max_llq_num > 0))
- ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
- else
+ rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
+ if (unlikely(rc)) {
+ dev_err(&pdev->dev,
+ "Failed to configure the device mode. Fallback to host mode policy.\n");
ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+ return 0;
+ }
+
+ /* Nothing to config, exit */
+ if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+ return 0;
+
+ if (!has_mem_bar) {
+ dev_err(&pdev->dev,
+ "ENA device does not expose LLQ bar. Fallback to host mode policy.\n");
+ ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+ return 0;
+ }
+
+ ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
+ pci_resource_start(pdev, ENA_MEM_BAR),
+ pci_resource_len(pdev, ENA_MEM_BAR));
+
+ if (!ena_dev->mem_bar)
+ return -EFAULT;
+
+ return 0;
}
static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
@@ -3120,18 +3199,20 @@ err_rss_init:
static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
{
- int release_bars;
-
- if (ena_dev->mem_bar)
- devm_iounmap(&pdev->dev, ena_dev->mem_bar);
-
- if (ena_dev->reg_bar)
- devm_iounmap(&pdev->dev, ena_dev->reg_bar);
+ int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
- release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
pci_release_selected_regions(pdev, release_bars);
}
+static inline void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
+{
+ llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
+ llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
+ llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
+ llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
+ llq_config->llq_ring_entry_size_value = 128;
+}
+
static int ena_calc_queue_size(struct pci_dev *pdev,
struct ena_com_dev *ena_dev,
u16 *max_tx_sgl_size,
@@ -3147,7 +3228,7 @@ static int ena_calc_queue_size(struct pci_dev *pdev,
if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
queue_size = min_t(u32, queue_size,
- get_feat_ctx->max_queues.max_llq_depth);
+ get_feat_ctx->llq.max_llq_depth);
queue_size = rounddown_pow_of_two(queue_size);
@@ -3180,7 +3261,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
static int version_printed;
struct net_device *netdev;
struct ena_adapter *adapter;
+ struct ena_llq_configurations llq_config;
struct ena_com_dev *ena_dev = NULL;
+ char *queue_type_str;
static int adapters_found;
int io_queue_num, bars, rc;
int queue_size;
@@ -3234,16 +3317,13 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_free_region;
}
- ena_set_push_mode(pdev, ena_dev, &get_feat_ctx);
+ set_default_llq_configurations(&llq_config);
- if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
- ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
- pci_resource_start(pdev, ENA_MEM_BAR),
- pci_resource_len(pdev, ENA_MEM_BAR));
- if (!ena_dev->mem_bar) {
- rc = -EFAULT;
- goto err_device_destroy;
- }
+ rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq,
+ &llq_config);
+ if (rc) {
+ dev_err(&pdev->dev, "ena device init failed\n");
+ goto err_device_destroy;
}
/* initial Tx interrupt delay, Assumes 1 usec granularity.
@@ -3258,8 +3338,10 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_device_destroy;
}
- dev_info(&pdev->dev, "creating %d io queues. queue size: %d\n",
- io_queue_num, queue_size);
+ dev_info(&pdev->dev, "creating %d io queues. queue size: %d. LLQ is %s\n",
+ io_queue_num, queue_size,
+ (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) ?
+ "ENABLED" : "DISABLED");
/* dev zeroed in init_etherdev */
netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), io_queue_num);
@@ -3349,9 +3431,15 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
timer_setup(&adapter->timer_service, ena_timer_service, 0);
mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
- dev_info(&pdev->dev, "%s found at mem %lx, mac addr %pM Queues %d\n",
+ if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+ queue_type_str = "Regular";
+ else
+ queue_type_str = "Low Latency";
+
+ dev_info(&pdev->dev,
+ "%s found at mem %lx, mac addr %pM Queues %d, Placement policy: %s\n",
DEVICE_NAME, (long)pci_resource_start(pdev, 0),
- netdev->dev_addr, io_queue_num);
+ netdev->dev_addr, io_queue_num, queue_type_str);
set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
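The core of the ena_netdev.c changes above is the LLQ push-header rule: the header pushed to device memory must be linear, so when it extends past the skb's linear part, skb_header_pointer() copies it into the new push_buf_intermediate_buf (counted by the llq_buffer_copy stat). A stand-alone sketch of that rule, outside the kernel and with made-up names, purely to illustrate the decision:

	#include <stddef.h>
	#include <string.h>

	/* Return a pointer to push_len contiguous header bytes.  If the header
	 * fits inside the linear part, no copy is needed; otherwise the linear
	 * part and the start of the first fragment are copied into bounce_buf
	 * (the analogue of push_buf_intermediate_buf).  Assumes
	 * push_len - linear_len <= frag_len.
	 */
	static const void *get_push_header(const void *linear, size_t linear_len,
					   const void *frag, size_t frag_len,
					   size_t push_len, void *bounce_buf)
	{
		if (push_len <= linear_len)
			return linear;		/* header already linear */

		memcpy(bounce_buf, linear, linear_len);
		memcpy((char *)bounce_buf + linear_len, frag,
		       push_len - linear_len);
		return bounce_buf;
	}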
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 7c7ae56c52cf..521873642339 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -43,9 +43,9 @@
#include "ena_com.h"
#include "ena_eth_com.h"
-#define DRV_MODULE_VER_MAJOR 1
-#define DRV_MODULE_VER_MINOR 5
-#define DRV_MODULE_VER_SUBMINOR 0
+#define DRV_MODULE_VER_MAJOR 2
+#define DRV_MODULE_VER_MINOR 0
+#define DRV_MODULE_VER_SUBMINOR 1
#define DRV_MODULE_NAME "ena"
#ifndef DRV_MODULE_VERSION
@@ -61,6 +61,17 @@
#define ENA_ADMIN_MSIX_VEC 1
#define ENA_MAX_MSIX_VEC(io_queues) (ENA_ADMIN_MSIX_VEC + (io_queues))
+/* The ENA buffer length field is 16 bit long. So when PAGE_SIZE == 64kB the
+ * driver passes 0.
+ * Since the max packet size the ENA handles is ~9kB, limit the buffer length to
+ * 16kB.
+ */
+#if PAGE_SIZE > SZ_16K
+#define ENA_PAGE_SIZE SZ_16K
+#else
+#define ENA_PAGE_SIZE PAGE_SIZE
+#endif
+
#define ENA_MIN_MSIX_VEC 2
#define ENA_REG_BAR 0
@@ -70,7 +81,7 @@
#define ENA_DEFAULT_RING_SIZE (1024)
#define ENA_TX_WAKEUP_THRESH (MAX_SKB_FRAGS + 2)
-#define ENA_DEFAULT_RX_COPYBREAK (128 - NET_IP_ALIGN)
+#define ENA_DEFAULT_RX_COPYBREAK (256 - NET_IP_ALIGN)
/* limit the buffer size to 600 bytes to handle MTU changes from very
* small to very large, in which case the number of buffers per packet
@@ -95,10 +106,11 @@
*/
#define ENA_TX_POLL_BUDGET_DIVIDER 4
-/* Refill Rx queue when number of available descriptors is below
- * QUEUE_SIZE / ENA_RX_REFILL_THRESH_DIVIDER
+/* Refill Rx queue when number of required descriptors is above
+ * QUEUE_SIZE / ENA_RX_REFILL_THRESH_DIVIDER or ENA_RX_REFILL_THRESH_PACKET
*/
#define ENA_RX_REFILL_THRESH_DIVIDER 8
+#define ENA_RX_REFILL_THRESH_PACKET 256
/* Number of queues to check for missing queues per timer service */
#define ENA_MONITORED_TX_QUEUES 4
@@ -151,6 +163,9 @@ struct ena_tx_buffer {
/* num of buffers used by this skb */
u32 num_of_bufs;
+ /* Indicate if bufs[0] maps the linear data of the skb. */
+ u8 map_linear_data;
+
/* Used for detect missing tx packets to limit the number of prints */
u32 print_once;
/* Save the last jiffies to detect missing tx packets
@@ -186,6 +201,7 @@ struct ena_stats_tx {
u64 tx_poll;
u64 doorbells;
u64 bad_req_id;
+ u64 llq_buffer_copy;
u64 missed_tx;
};
@@ -201,6 +217,7 @@ struct ena_stats_rx {
u64 rx_copybreak_pkt;
u64 bad_req_id;
u64 empty_rx_ring;
+ u64 csum_unchecked;
};
struct ena_ring {
@@ -257,6 +274,8 @@ struct ena_ring {
struct ena_stats_tx tx_stats;
struct ena_stats_rx rx_stats;
};
+
+ u8 *push_buf_intermediate_buf;
int empty_rx_queue;
} ____cacheline_aligned;
@@ -355,15 +374,4 @@ void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
int ena_get_sset_count(struct net_device *netdev, int sset);
-/* The ENA buffer length fields is 16 bit long. So when PAGE_SIZE == 64kB the
- * driver passas 0.
- * Since the max packet size the ENA handles is ~9kB limit the buffer length to
- * 16kB.
- */
-#if PAGE_SIZE > SZ_16K
-#define ENA_PAGE_SIZE SZ_16K
-#else
-#define ENA_PAGE_SIZE PAGE_SIZE
-#endif
-
#endif /* !(ENA_H) */
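The RX refill thresholds above are combined in ena_clean_rx_irq() as min(ring_size / ENA_RX_REFILL_THRESH_DIVIDER, ENA_RX_REFILL_THRESH_PACKET), so large rings no longer wait for an eighth of the ring before refilling. A throwaway user-space check of the resulting values, with the constants copied from the header:

	#include <stdio.h>

	#define ENA_RX_REFILL_THRESH_DIVIDER	8
	#define ENA_RX_REFILL_THRESH_PACKET	256

	int main(void)
	{
		const int ring_sizes[] = { 256, 1024, 4096, 8192 };

		for (int i = 0; i < 4; i++) {
			int by_divider = ring_sizes[i] / ENA_RX_REFILL_THRESH_DIVIDER;
			int thresh = by_divider < ENA_RX_REFILL_THRESH_PACKET ?
				     by_divider : ENA_RX_REFILL_THRESH_PACKET;

			/* e.g. a 4096-entry ring: 512 is capped to 256 */
			printf("ring_size %4d -> refill threshold %d\n",
			       ring_sizes[i], thresh);
		}
		return 0;
	}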
diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
index 48ca97fbe7bc..04fcafcc059c 100644
--- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
@@ -33,137 +33,125 @@
#define _ENA_REGS_H_
enum ena_regs_reset_reason_types {
- ENA_REGS_RESET_NORMAL = 0,
-
- ENA_REGS_RESET_KEEP_ALIVE_TO = 1,
-
- ENA_REGS_RESET_ADMIN_TO = 2,
-
- ENA_REGS_RESET_MISS_TX_CMPL = 3,
-
- ENA_REGS_RESET_INV_RX_REQ_ID = 4,
-
- ENA_REGS_RESET_INV_TX_REQ_ID = 5,
-
- ENA_REGS_RESET_TOO_MANY_RX_DESCS = 6,
-
- ENA_REGS_RESET_INIT_ERR = 7,
-
- ENA_REGS_RESET_DRIVER_INVALID_STATE = 8,
-
- ENA_REGS_RESET_OS_TRIGGER = 9,
-
- ENA_REGS_RESET_OS_NETDEV_WD = 10,
-
- ENA_REGS_RESET_SHUTDOWN = 11,
-
- ENA_REGS_RESET_USER_TRIGGER = 12,
-
- ENA_REGS_RESET_GENERIC = 13,
-
- ENA_REGS_RESET_MISS_INTERRUPT = 14,
+ ENA_REGS_RESET_NORMAL = 0,
+ ENA_REGS_RESET_KEEP_ALIVE_TO = 1,
+ ENA_REGS_RESET_ADMIN_TO = 2,
+ ENA_REGS_RESET_MISS_TX_CMPL = 3,
+ ENA_REGS_RESET_INV_RX_REQ_ID = 4,
+ ENA_REGS_RESET_INV_TX_REQ_ID = 5,
+ ENA_REGS_RESET_TOO_MANY_RX_DESCS = 6,
+ ENA_REGS_RESET_INIT_ERR = 7,
+ ENA_REGS_RESET_DRIVER_INVALID_STATE = 8,
+ ENA_REGS_RESET_OS_TRIGGER = 9,
+ ENA_REGS_RESET_OS_NETDEV_WD = 10,
+ ENA_REGS_RESET_SHUTDOWN = 11,
+ ENA_REGS_RESET_USER_TRIGGER = 12,
+ ENA_REGS_RESET_GENERIC = 13,
+ ENA_REGS_RESET_MISS_INTERRUPT = 14,
};
/* ena_registers offsets */
-#define ENA_REGS_VERSION_OFF 0x0
-#define ENA_REGS_CONTROLLER_VERSION_OFF 0x4
-#define ENA_REGS_CAPS_OFF 0x8
-#define ENA_REGS_CAPS_EXT_OFF 0xc
-#define ENA_REGS_AQ_BASE_LO_OFF 0x10
-#define ENA_REGS_AQ_BASE_HI_OFF 0x14
-#define ENA_REGS_AQ_CAPS_OFF 0x18
-#define ENA_REGS_ACQ_BASE_LO_OFF 0x20
-#define ENA_REGS_ACQ_BASE_HI_OFF 0x24
-#define ENA_REGS_ACQ_CAPS_OFF 0x28
-#define ENA_REGS_AQ_DB_OFF 0x2c
-#define ENA_REGS_ACQ_TAIL_OFF 0x30
-#define ENA_REGS_AENQ_CAPS_OFF 0x34
-#define ENA_REGS_AENQ_BASE_LO_OFF 0x38
-#define ENA_REGS_AENQ_BASE_HI_OFF 0x3c
-#define ENA_REGS_AENQ_HEAD_DB_OFF 0x40
-#define ENA_REGS_AENQ_TAIL_OFF 0x44
-#define ENA_REGS_INTR_MASK_OFF 0x4c
-#define ENA_REGS_DEV_CTL_OFF 0x54
-#define ENA_REGS_DEV_STS_OFF 0x58
-#define ENA_REGS_MMIO_REG_READ_OFF 0x5c
-#define ENA_REGS_MMIO_RESP_LO_OFF 0x60
-#define ENA_REGS_MMIO_RESP_HI_OFF 0x64
-#define ENA_REGS_RSS_IND_ENTRY_UPDATE_OFF 0x68
+
+/* 0 base */
+#define ENA_REGS_VERSION_OFF 0x0
+#define ENA_REGS_CONTROLLER_VERSION_OFF 0x4
+#define ENA_REGS_CAPS_OFF 0x8
+#define ENA_REGS_CAPS_EXT_OFF 0xc
+#define ENA_REGS_AQ_BASE_LO_OFF 0x10
+#define ENA_REGS_AQ_BASE_HI_OFF 0x14
+#define ENA_REGS_AQ_CAPS_OFF 0x18
+#define ENA_REGS_ACQ_BASE_LO_OFF 0x20
+#define ENA_REGS_ACQ_BASE_HI_OFF 0x24
+#define ENA_REGS_ACQ_CAPS_OFF 0x28
+#define ENA_REGS_AQ_DB_OFF 0x2c
+#define ENA_REGS_ACQ_TAIL_OFF 0x30
+#define ENA_REGS_AENQ_CAPS_OFF 0x34
+#define ENA_REGS_AENQ_BASE_LO_OFF 0x38
+#define ENA_REGS_AENQ_BASE_HI_OFF 0x3c
+#define ENA_REGS_AENQ_HEAD_DB_OFF 0x40
+#define ENA_REGS_AENQ_TAIL_OFF 0x44
+#define ENA_REGS_INTR_MASK_OFF 0x4c
+#define ENA_REGS_DEV_CTL_OFF 0x54
+#define ENA_REGS_DEV_STS_OFF 0x58
+#define ENA_REGS_MMIO_REG_READ_OFF 0x5c
+#define ENA_REGS_MMIO_RESP_LO_OFF 0x60
+#define ENA_REGS_MMIO_RESP_HI_OFF 0x64
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_OFF 0x68
/* version register */
-#define ENA_REGS_VERSION_MINOR_VERSION_MASK 0xff
-#define ENA_REGS_VERSION_MAJOR_VERSION_SHIFT 8
-#define ENA_REGS_VERSION_MAJOR_VERSION_MASK 0xff00
+#define ENA_REGS_VERSION_MINOR_VERSION_MASK 0xff
+#define ENA_REGS_VERSION_MAJOR_VERSION_SHIFT 8
+#define ENA_REGS_VERSION_MAJOR_VERSION_MASK 0xff00
/* controller_version register */
-#define ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK 0xff
-#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT 8
-#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK 0xff00
-#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT 16
-#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK 0xff0000
-#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT 24
-#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK 0xff000000
+#define ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK 0xff
+#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT 8
+#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK 0xff00
+#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT 16
+#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK 0xff0000
+#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT 24
+#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK 0xff000000
/* caps register */
-#define ENA_REGS_CAPS_CONTIGUOUS_QUEUE_REQUIRED_MASK 0x1
-#define ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT 1
-#define ENA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e
-#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8
-#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00
-#define ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT 16
-#define ENA_REGS_CAPS_ADMIN_CMD_TO_MASK 0xf0000
+#define ENA_REGS_CAPS_CONTIGUOUS_QUEUE_REQUIRED_MASK 0x1
+#define ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT 1
+#define ENA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e
+#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8
+#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00
+#define ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT 16
+#define ENA_REGS_CAPS_ADMIN_CMD_TO_MASK 0xf0000
/* aq_caps register */
-#define ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff
-#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT 16
-#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK 0xffff0000
+#define ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff
+#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT 16
+#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK 0xffff0000
/* acq_caps register */
-#define ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK 0xffff
-#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT 16
-#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK 0xffff0000
+#define ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK 0xffff
+#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT 16
+#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK 0xffff0000
/* aenq_caps register */
-#define ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK 0xffff
-#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT 16
-#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK 0xffff0000
+#define ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK 0xffff
+#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT 16
+#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK 0xffff0000
/* dev_ctl register */
-#define ENA_REGS_DEV_CTL_DEV_RESET_MASK 0x1
-#define ENA_REGS_DEV_CTL_AQ_RESTART_SHIFT 1
-#define ENA_REGS_DEV_CTL_AQ_RESTART_MASK 0x2
-#define ENA_REGS_DEV_CTL_QUIESCENT_SHIFT 2
-#define ENA_REGS_DEV_CTL_QUIESCENT_MASK 0x4
-#define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT 3
-#define ENA_REGS_DEV_CTL_IO_RESUME_MASK 0x8
-#define ENA_REGS_DEV_CTL_RESET_REASON_SHIFT 28
-#define ENA_REGS_DEV_CTL_RESET_REASON_MASK 0xf0000000
+#define ENA_REGS_DEV_CTL_DEV_RESET_MASK 0x1
+#define ENA_REGS_DEV_CTL_AQ_RESTART_SHIFT 1
+#define ENA_REGS_DEV_CTL_AQ_RESTART_MASK 0x2
+#define ENA_REGS_DEV_CTL_QUIESCENT_SHIFT 2
+#define ENA_REGS_DEV_CTL_QUIESCENT_MASK 0x4
+#define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT 3
+#define ENA_REGS_DEV_CTL_IO_RESUME_MASK 0x8
+#define ENA_REGS_DEV_CTL_RESET_REASON_SHIFT 28
+#define ENA_REGS_DEV_CTL_RESET_REASON_MASK 0xf0000000
/* dev_sts register */
-#define ENA_REGS_DEV_STS_READY_MASK 0x1
-#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_SHIFT 1
-#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_MASK 0x2
-#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_SHIFT 2
-#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_MASK 0x4
-#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_SHIFT 3
-#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK 0x8
-#define ENA_REGS_DEV_STS_RESET_FINISHED_SHIFT 4
-#define ENA_REGS_DEV_STS_RESET_FINISHED_MASK 0x10
-#define ENA_REGS_DEV_STS_FATAL_ERROR_SHIFT 5
-#define ENA_REGS_DEV_STS_FATAL_ERROR_MASK 0x20
-#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_SHIFT 6
-#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_MASK 0x40
-#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_SHIFT 7
-#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_MASK 0x80
+#define ENA_REGS_DEV_STS_READY_MASK 0x1
+#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_SHIFT 1
+#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_MASK 0x2
+#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_SHIFT 2
+#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_MASK 0x4
+#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_SHIFT 3
+#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK 0x8
+#define ENA_REGS_DEV_STS_RESET_FINISHED_SHIFT 4
+#define ENA_REGS_DEV_STS_RESET_FINISHED_MASK 0x10
+#define ENA_REGS_DEV_STS_FATAL_ERROR_SHIFT 5
+#define ENA_REGS_DEV_STS_FATAL_ERROR_MASK 0x20
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_SHIFT 6
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_MASK 0x40
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_SHIFT 7
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_MASK 0x80
/* mmio_reg_read register */
-#define ENA_REGS_MMIO_REG_READ_REQ_ID_MASK 0xffff
-#define ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT 16
-#define ENA_REGS_MMIO_REG_READ_REG_OFF_MASK 0xffff0000
+#define ENA_REGS_MMIO_REG_READ_REQ_ID_MASK 0xffff
+#define ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT 16
+#define ENA_REGS_MMIO_REG_READ_REG_OFF_MASK 0xffff0000
/* rss_ind_entry_update register */
-#define ENA_REGS_RSS_IND_ENTRY_UPDATE_INDEX_MASK 0xffff
-#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16
-#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_INDEX_MASK 0xffff
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000
#endif /*_ENA_REGS_H_ */
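The register defines above are consumed the same way as the descriptor fields: mask first, then shift down. A small hypothetical helper (not driver code) decoding the device version register read from ENA_REGS_VERSION_OFF, assuming the defines above and the usual kernel headers are available:

	static void ena_decode_version(u32 ver)
	{
		u32 major = (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
			    ENA_REGS_VERSION_MAJOR_VERSION_SHIFT;
		u32 minor = ver & ENA_REGS_VERSION_MINOR_VERSION_MASK;

		pr_info("ENA device version: %u.%u\n", major, minor);
	}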
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 29ebbf582010..9f23703dd509 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -1031,6 +1031,7 @@ static int dec_lance_probe(struct device *bdev, const int type)
int i, ret;
unsigned long esar_base;
unsigned char *esar;
+ const char *desc;
if (dec_lance_debug && version_printed++ == 0)
printk(version);
@@ -1216,19 +1217,20 @@ static int dec_lance_probe(struct device *bdev, const int type)
*/
switch (type) {
case ASIC_LANCE:
- printk("%s: IOASIC onboard LANCE", name);
+ desc = "IOASIC onboard LANCE";
break;
case PMAD_LANCE:
- printk("%s: PMAD-AA", name);
+ desc = "PMAD-AA";
break;
case PMAX_LANCE:
- printk("%s: PMAX onboard LANCE", name);
+ desc = "PMAX onboard LANCE";
break;
}
for (i = 0; i < 6; i++)
dev->dev_addr[i] = esar[i * 4];
- printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq);
+ printk("%s: %s, addr = %pM, irq = %d\n",
+ name, desc, dev->dev_addr, dev->irq);
dev->netdev_ops = &lance_netdev_ops;
dev->watchdog_timeo = 5*HZ;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index d96a84a62d78..0cc911f928b1 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -119,7 +119,6 @@
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
-#include <net/busy_poll.h>
#include <linux/clk.h>
#include <linux/if_ether.h>
#include <linux/net_tstamp.h>
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 750007513f9d..1d5d6b8df855 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -84,7 +84,7 @@ static int aq_pci_probe_get_hw_by_id(struct pci_dev *pdev,
const struct aq_hw_ops **ops,
const struct aq_hw_caps_s **caps)
{
- int i = 0;
+ int i;
if (pdev->vendor != PCI_VENDOR_ID_AQUANTIA)
return -EINVAL;
@@ -107,7 +107,7 @@ static int aq_pci_probe_get_hw_by_id(struct pci_dev *pdev,
int aq_pci_func_init(struct pci_dev *pdev)
{
- int err = 0;
+ int err;
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (!err) {
@@ -141,7 +141,7 @@ int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
char *name, void *aq_vec, cpumask_t *affinity_mask)
{
struct pci_dev *pdev = self->pdev;
- int err = 0;
+ int err;
if (pdev->msix_enabled || pdev->msi_enabled)
err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr, 0,
@@ -164,7 +164,7 @@ int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
void aq_pci_func_free_irqs(struct aq_nic_s *self)
{
struct pci_dev *pdev = self->pdev;
- unsigned int i = 0U;
+ unsigned int i;
for (i = 32U; i--;) {
if (!((1U << i) & self->msix_entry_mask))
@@ -194,8 +194,8 @@ static void aq_pci_free_irq_vectors(struct aq_nic_s *self)
static int aq_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_id)
{
- struct aq_nic_s *self = NULL;
- int err = 0;
+ struct aq_nic_s *self;
+ int err;
struct net_device *ndev;
resource_size_t mmio_pa;
u32 bar;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index c0568465e10b..096ca5730887 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -279,7 +279,7 @@ static int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
return err;
}
-int aq_fw2x_update_stats(struct aq_hw_s *self)
+static int aq_fw2x_update_stats(struct aq_hw_s *self)
{
int err = 0;
u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 6d3221134927..7968c644ad86 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1964,8 +1964,6 @@ static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev)
if (!alx_reset_mac(hw))
rc = PCI_ERS_RESULT_RECOVERED;
out:
- pci_cleanup_aer_uncorrect_error_status(pdev);
-
rtnl_unlock();
return rc;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index faba55fd656a..4122553e224b 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1070,9 +1070,6 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
u32 reg;
- /* Stop monitoring MPD interrupt */
- intrl2_0_mask_set(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG);
-
/* Disable RXCHK, active filters and Broadcom tag matching */
reg = rxchk_readl(priv, RXCHK_CONTROL);
reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
@@ -1082,6 +1079,17 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
/* Clear the MagicPacket detection logic */
mpd_enable_set(priv, false);
+ reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
+ if (reg & INTRL2_0_MPD)
+ netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
+
+ if (reg & INTRL2_0_BRCM_MATCH_TAG) {
+ reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
+ RXCHK_BRCM_TAG_MATCH_MASK;
+ netdev_info(priv->netdev,
+ "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
+ }
+
netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}
@@ -1106,7 +1114,6 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
struct bcm_sysport_priv *priv = netdev_priv(dev);
struct bcm_sysport_tx_ring *txr;
unsigned int ring, ring_bit;
- u32 reg;
priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
@@ -1132,16 +1139,6 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
bcm_sysport_tx_reclaim_all(priv);
- if (priv->irq0_stat & INTRL2_0_MPD)
- netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
-
- if (priv->irq0_stat & INTRL2_0_BRCM_MATCH_TAG) {
- reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
- RXCHK_BRCM_TAG_MATCH_MASK;
- netdev_info(priv->netdev,
- "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
- }
-
if (!priv->is_lite)
goto out;
@@ -2645,9 +2642,6 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
/* UniMAC receive needs to be turned on */
umac_enable_set(priv, CMD_RX_EN, 1);
- /* Enable the interrupt wake-up source */
- intrl2_0_mask_clear(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG);
-
netif_dbg(priv, wol, ndev, "entered WOL mode\n");
return 0;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 122fdb80a789..bbb247116045 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8793,13 +8793,6 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
return result;
- err = pci_cleanup_aer_uncorrect_error_status(pdev);
- if (err) {
- dev_err(&pdev->dev,
- "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
- err); /* non-fatal, continue */
- }
-
return result;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 5a727d4729da..686899d7e555 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -27,7 +27,6 @@
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
-#include <net/busy_poll.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 40093d88353f..95309b27c7d1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -14380,14 +14380,6 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
rtnl_unlock();
- /* If AER, perform cleanup of the PCIe registers */
- if (bp->flags & AER_ENABLED) {
- if (pci_cleanup_aer_uncorrect_error_status(pdev))
- BNX2X_ERR("pci_cleanup_aer_uncorrect_error_status failed\n");
- else
- DP(NETIF_MSG_HW, "pci_cleanup_aer_uncorrect_error_status succeeded\n");
- }
-
return PCI_ERS_RESULT_RECOVERED;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 61957b0bbd8c..dd85d790f638 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -111,6 +111,7 @@ enum board_idx {
BCM57452,
BCM57454,
BCM5745x_NPAR,
+ BCM57508,
BCM58802,
BCM58804,
BCM58808,
@@ -152,6 +153,7 @@ static const struct {
[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
+ [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
@@ -196,6 +198,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
+ { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
@@ -241,15 +244,46 @@ static bool bnxt_vf_pciid(enum board_idx idx)
#define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS)
-#define BNXT_CP_DB_REARM(db, raw_cons) \
- writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)
-
-#define BNXT_CP_DB(db, raw_cons) \
- writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)
-
#define BNXT_CP_DB_IRQ_DIS(db) \
writel(DB_CP_IRQ_DIS_FLAGS, db)
+#define BNXT_DB_CQ(db, idx) \
+ writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)
+
+#define BNXT_DB_NQ_P5(db, idx) \
+ writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)
+
+#define BNXT_DB_CQ_ARM(db, idx) \
+ writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)
+
+#define BNXT_DB_NQ_ARM_P5(db, idx) \
+ writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)
+
+static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
+{
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ BNXT_DB_NQ_P5(db, idx);
+ else
+ BNXT_DB_CQ(db, idx);
+}
+
+static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
+{
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ BNXT_DB_NQ_ARM_P5(db, idx);
+ else
+ BNXT_DB_CQ_ARM(db, idx);
+}
+
+static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
+{
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
+ db->doorbell);
+ else
+ BNXT_DB_CQ(db, idx);
+}
+
const u16 bnxt_lhint_arr[] = {
TX_BD_FLAGS_LHINT_512_AND_SMALLER,
TX_BD_FLAGS_LHINT_512_TO_1023,
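The helpers added in the hunk above give the rest of the driver one entry point for ringing a completion/notification doorbell while hiding the chip difference: legacy chips take a 32-bit write of the DB_CP_* flags plus the ring index, while P5 (57500-series) chips take a 64-bit write combining the ring's key, a DBR_TYPE_* field and the index. A standalone sketch of the same dispatch pattern, using stand-in constants rather than the driver's definitions:

#include <stdint.h>
#include <stdio.h>

#define FLAG_CHIP_P5    0x1u
#define DBR_TYPE_NQ     (0xaULL << 60)   /* placeholder bit layout */
#define RING_CMP(idx)   ((idx) & 0xffffu)

struct db_info {
	uint64_t db_key64;	/* P5: key with the ring id baked in */
	uint32_t db_key32;	/* legacy: DB_KEY_* value */
};

static void db_nq(uint32_t chip_flags, const struct db_info *db, uint32_t idx)
{
	if (chip_flags & FLAG_CHIP_P5)
		printf("writeq(0x%016llx)\n",
		       (unsigned long long)(db->db_key64 | DBR_TYPE_NQ |
					    RING_CMP(idx)));
	else
		printf("writel(0x%08x)\n", db->db_key32 | RING_CMP(idx));
}

int main(void)
{
	struct db_info db = { .db_key64 = 1ULL << 32, .db_key32 = 0x10000000u };

	db_nq(FLAG_CHIP_P5, &db, 5);	/* 64-bit NQ doorbell */
	db_nq(0, &db, 5);		/* legacy 32-bit doorbell */
	return 0;
}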
@@ -341,6 +375,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct tx_push_buffer *tx_push_buf = txr->tx_push;
struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
+ void __iomem *db = txr->tx_db.doorbell;
void *pdata = tx_push_buf->data;
u64 *end;
int j, push_len;
@@ -398,12 +433,11 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
push_len = (length + sizeof(*tx_push) + 7) / 8;
if (push_len > 16) {
- __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
- __iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
+ __iowrite64_copy(db, tx_push_buf, 16);
+ __iowrite32_copy(db + 4, tx_push_buf + 1,
(push_len - 16) << 1);
} else {
- __iowrite64_copy(txr->tx_doorbell, tx_push_buf,
- push_len);
+ __iowrite64_copy(db, tx_push_buf, push_len);
}
goto tx_done;
@@ -505,7 +539,7 @@ normal_tx:
txr->tx_prod = prod;
if (!skb->xmit_more || netif_xmit_stopped(txq))
- bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod);
+ bnxt_db_write(bp, &txr->tx_db, prod);
tx_done:
@@ -513,7 +547,7 @@ tx_done:
if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
if (skb->xmit_more && !tx_buf->is_push)
- bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod);
+ bnxt_db_write(bp, &txr->tx_db, prod);
netif_tx_stop_queue(txq);
@@ -776,11 +810,11 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
return 0;
}
-static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
+static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
u32 agg_bufs)
{
+ struct bnxt_napi *bnapi = cpr->bnapi;
struct bnxt *bp = bnapi->bp;
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
u16 prod = rxr->rx_agg_prod;
u16 sw_prod = rxr->rx_sw_agg_prod;
@@ -903,12 +937,13 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
return skb;
}
-static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
+static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
struct sk_buff *skb, u16 cp_cons,
u32 agg_bufs)
{
+ struct bnxt_napi *bnapi = cpr->bnapi;
struct pci_dev *pdev = bp->pdev;
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
u16 prod = rxr->rx_agg_prod;
u32 i;
@@ -955,7 +990,7 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
* allocated already.
*/
rxr->rx_agg_prod = prod;
- bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
+ bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs - i);
return NULL;
}
@@ -1012,10 +1047,9 @@ static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
return skb;
}
-static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
+static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
u32 *raw_cons, void *cmp)
{
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
struct rx_cmp *rxcmp = cmp;
u32 tmp_raw_cons = *raw_cons;
u8 cmp_type, agg_bufs = 0;
@@ -1141,11 +1175,11 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
cons_rx_buf->data = NULL;
}
-static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
- u16 cp_cons, u32 agg_bufs)
+static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
+ u32 agg_bufs)
{
if (agg_bufs)
- bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
+ bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
}
static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
@@ -1339,13 +1373,13 @@ static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
}
static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
- struct bnxt_napi *bnapi,
+ struct bnxt_cp_ring_info *cpr,
u32 *raw_cons,
struct rx_tpa_end_cmp *tpa_end,
struct rx_tpa_end_cmp_ext *tpa_end1,
u8 *event)
{
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_napi *bnapi = cpr->bnapi;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
u8 agg_id = TPA_END_AGG_ID(tpa_end);
u8 *data_ptr, agg_bufs;
@@ -1357,7 +1391,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
void *data;
if (unlikely(bnapi->in_reset)) {
- int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end);
+ int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
if (rc < 0)
return ERR_PTR(-EBUSY);
@@ -1383,7 +1417,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
}
if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
- bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+ bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
if (agg_bufs > MAX_SKB_FRAGS)
netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
agg_bufs, (int)MAX_SKB_FRAGS);
@@ -1393,7 +1427,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
if (len <= bp->rx_copy_thresh) {
skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
if (!skb) {
- bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+ bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
return NULL;
}
} else {
@@ -1402,7 +1436,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
if (!new_data) {
- bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+ bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
return NULL;
}
@@ -1417,7 +1451,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
if (!skb) {
kfree(data);
- bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+ bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
return NULL;
}
skb_reserve(skb, bp->rx_offset);
@@ -1425,7 +1459,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
}
if (agg_bufs) {
- skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
+ skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs);
if (!skb) {
/* Page reuse already handled by bnxt_rx_pages(). */
return NULL;
@@ -1479,10 +1513,10 @@ static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
* -ENOMEM - packet aborted due to out of memory
* -EIO - packet aborted due to hw error indicated in BD
*/
-static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
- u8 *event)
+static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ u32 *raw_cons, u8 *event)
{
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_napi *bnapi = cpr->bnapi;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
struct net_device *dev = bp->dev;
struct rx_cmp *rxcmp;
@@ -1521,7 +1555,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
goto next_rx_no_prod_no_len;
} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
- skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
+ skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
(struct rx_tpa_end_cmp *)rxcmp,
(struct rx_tpa_end_cmp_ext *)rxcmp1, event);
@@ -1542,7 +1576,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
data = rx_buf->data;
data_ptr = rx_buf->data_ptr;
if (unlikely(cons != rxr->rx_next_cons)) {
- int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
+ int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
bnxt_sched_reset(bp, rxr);
return rc1;
@@ -1565,7 +1599,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
bnxt_reuse_rx_data(rxr, cons, data);
if (agg_bufs)
- bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
+ bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
rc = -EIO;
goto next_rx;
@@ -1602,7 +1636,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
}
if (agg_bufs) {
- skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
+ skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs);
if (!skb) {
rc = -ENOMEM;
goto next_rx;
@@ -1664,10 +1698,10 @@ next_rx_no_prod_no_len:
/* In netpoll mode, if we are using a combined completion ring, we need to
* discard the rx packets and recycle the buffers.
*/
-static int bnxt_force_rx_discard(struct bnxt *bp, struct bnxt_napi *bnapi,
+static int bnxt_force_rx_discard(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
u32 *raw_cons, u8 *event)
{
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
u32 tmp_raw_cons = *raw_cons;
struct rx_cmp_ext *rxcmp1;
struct rx_cmp *rxcmp;
@@ -1697,7 +1731,7 @@ static int bnxt_force_rx_discard(struct bnxt *bp, struct bnxt_napi *bnapi,
tpa_end1->rx_tpa_end_cmp_errors_v2 |=
cpu_to_le32(RX_TPA_END_CMP_ERRORS);
}
- return bnxt_rx_pkt(bp, bnapi, raw_cons, event);
+ return bnxt_rx_pkt(bp, cpr, raw_cons, event);
}
#define BNXT_GET_EVENT_PORT(data) \
@@ -1848,7 +1882,7 @@ static irqreturn_t bnxt_inta(int irq, void *dev_instance)
}
/* disable ring IRQ */
- BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);
+ BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
/* Return here if interrupt is shared and is disabled. */
if (unlikely(atomic_read(&bp->intr_sem) != 0))
@@ -1858,9 +1892,10 @@ static irqreturn_t bnxt_inta(int irq, void *dev_instance)
return IRQ_HANDLED;
}
-static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
+static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ int budget)
{
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_napi *bnapi = cpr->bnapi;
u32 raw_cons = cpr->cp_raw_cons;
u32 cons;
int tx_pkts = 0;
@@ -1868,6 +1903,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
u8 event = 0;
struct tx_cmp *txcmp;
+ cpr->has_more_work = 0;
while (1) {
int rc;
@@ -1881,16 +1917,22 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
* reading any further.
*/
dma_rmb();
+ cpr->had_work_done = 1;
if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
tx_pkts++;
/* return full budget so NAPI will complete. */
- if (unlikely(tx_pkts > bp->tx_wake_thresh))
+ if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
rx_pkts = budget;
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+ if (budget)
+ cpr->has_more_work = 1;
+ break;
+ }
} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
if (likely(budget))
- rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
+ rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
else
- rc = bnxt_force_rx_discard(bp, bnapi, &raw_cons,
+ rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
&event);
if (likely(rc >= 0))
rx_pkts += rc;
@@ -1913,39 +1955,60 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
}
raw_cons = NEXT_RAW_CMP(raw_cons);
- if (rx_pkts == budget)
+ if (rx_pkts && rx_pkts == budget) {
+ cpr->has_more_work = 1;
break;
+ }
}
if (event & BNXT_TX_EVENT) {
struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
- void __iomem *db = txr->tx_doorbell;
u16 prod = txr->tx_prod;
/* Sync BD data before updating doorbell */
wmb();
- bnxt_db_write_relaxed(bp, db, DB_KEY_TX | prod);
+ bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
}
cpr->cp_raw_cons = raw_cons;
- /* ACK completion ring before freeing tx ring and producing new
- * buffers in rx/agg rings to prevent overflowing the completion
- * ring.
- */
- BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+ bnapi->tx_pkts += tx_pkts;
+ bnapi->events |= event;
+ return rx_pkts;
+}
- if (tx_pkts)
- bnapi->tx_int(bp, bnapi, tx_pkts);
+static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
+{
+ if (bnapi->tx_pkts) {
+ bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
+ bnapi->tx_pkts = 0;
+ }
- if (event & BNXT_RX_EVENT) {
+ if (bnapi->events & BNXT_RX_EVENT) {
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
- bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod);
- if (event & BNXT_AGG_EVENT)
- bnxt_db_write(bp, rxr->rx_agg_doorbell,
- DB_KEY_RX | rxr->rx_agg_prod);
+ bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
+ if (bnapi->events & BNXT_AGG_EVENT)
+ bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
}
+ bnapi->events = 0;
+}
+
+static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ int budget)
+{
+ struct bnxt_napi *bnapi = cpr->bnapi;
+ int rx_pkts;
+
+ rx_pkts = __bnxt_poll_work(bp, cpr, budget);
+
+ /* ACK completion ring before freeing tx ring and producing new
+ * buffers in rx/agg rings to prevent overflowing the completion
+ * ring.
+ */
+ bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
+
+ __bnxt_poll_work_done(bp, bnapi);
return rx_pkts;
}
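After this hunk, __bnxt_poll_work() only accumulates work (tx_pkts and event flags are parked on the bnxt_napi), and __bnxt_poll_work_done() flushes them: TX reclaim runs once and the RX/AGG producer doorbells are written once, even when a P5 NAPI instance drains two completion rings before the flush. The accumulate-then-flush shape, reduced to a toy model with illustrative names:

#include <stdint.h>
#include <stdio.h>

#define EV_RX  0x1u
#define EV_AGG 0x2u

struct napi_ctx {
	int tx_pkts;		/* TX completions seen since last flush */
	uint32_t events;	/* producers that still need a doorbell */
};

/* Called once per completion ring; only records what happened. */
static void poll_one_ring(struct napi_ctx *n, int tx_done, uint32_t ev)
{
	n->tx_pkts += tx_done;
	n->events |= ev;
}

/* Called once per NAPI pass; performs the deferred side effects. */
static void poll_done(struct napi_ctx *n)
{
	if (n->tx_pkts) {
		printf("reclaim %d TX buffers\n", n->tx_pkts);
		n->tx_pkts = 0;
	}
	if (n->events & EV_RX)
		printf("ring RX doorbell%s\n",
		       (n->events & EV_AGG) ? " and AGG doorbell" : "");
	n->events = 0;
}

int main(void)
{
	struct napi_ctx n = { 0 };

	poll_one_ring(&n, 3, EV_RX);		/* e.g. TX completion ring */
	poll_one_ring(&n, 0, EV_RX | EV_AGG);	/* e.g. RX completion ring */
	poll_done(&n);				/* one flush covers both rings */
	return 0;
}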
@@ -1984,7 +2047,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
rxcmp1->rx_cmp_cfa_code_errors_v2 |=
cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
- rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
+ rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
if (likely(rc == -EIO) && budget)
rx_pkts++;
else if (rc == -EBUSY) /* partial completion */
@@ -2003,16 +2066,15 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
}
cpr->cp_raw_cons = raw_cons;
- BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
- bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod);
+ BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
+ bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
if (event & BNXT_AGG_EVENT)
- bnxt_db_write(bp, rxr->rx_agg_doorbell,
- DB_KEY_RX | rxr->rx_agg_prod);
+ bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
napi_complete_done(napi, rx_pkts);
- BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+ BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
}
return rx_pkts;
}
@@ -2025,15 +2087,17 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
int work_done = 0;
while (1) {
- work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
+ work_done += bnxt_poll_work(bp, cpr, budget - work_done);
- if (work_done >= budget)
+ if (work_done >= budget) {
+ if (!budget)
+ BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
break;
+ }
if (!bnxt_has_work(bp, cpr)) {
if (napi_complete_done(napi, work_done))
- BNXT_CP_DB_REARM(cpr->cp_doorbell,
- cpr->cp_raw_cons);
+ BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
break;
}
}
@@ -2050,6 +2114,104 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
return work_done;
}
+static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
+{
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ int i, work_done = 0;
+
+ for (i = 0; i < 2; i++) {
+ struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
+
+ if (cpr2) {
+ work_done += __bnxt_poll_work(bp, cpr2,
+ budget - work_done);
+ cpr->has_more_work |= cpr2->has_more_work;
+ }
+ }
+ return work_done;
+}
+
+static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
+ u64 dbr_type, bool all)
+{
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
+ struct bnxt_db_info *db;
+
+ if (cpr2 && (all || cpr2->had_work_done)) {
+ db = &cpr2->cp_db;
+ writeq(db->db_key64 | dbr_type |
+ RING_CMP(cpr2->cp_raw_cons), db->doorbell);
+ cpr2->had_work_done = 0;
+ }
+ }
+ __bnxt_poll_work_done(bp, bnapi);
+}
+
+static int bnxt_poll_p5(struct napi_struct *napi, int budget)
+{
+ struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ u32 raw_cons = cpr->cp_raw_cons;
+ struct bnxt *bp = bnapi->bp;
+ struct nqe_cn *nqcmp;
+ int work_done = 0;
+ u32 cons;
+
+ if (cpr->has_more_work) {
+ cpr->has_more_work = 0;
+ work_done = __bnxt_poll_cqs(bp, bnapi, budget);
+ if (cpr->has_more_work) {
+ __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, false);
+ return work_done;
+ }
+ __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, true);
+ if (napi_complete_done(napi, work_done))
+ BNXT_DB_NQ_ARM_P5(&cpr->cp_db, cpr->cp_raw_cons);
+ return work_done;
+ }
+ while (1) {
+ cons = RING_CMP(raw_cons);
+ nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
+
+ if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
+ __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
+ false);
+ cpr->cp_raw_cons = raw_cons;
+ if (napi_complete_done(napi, work_done))
+ BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
+ cpr->cp_raw_cons);
+ return work_done;
+ }
+
+ /* The valid test of the entry must be done first before
+ * reading any further.
+ */
+ dma_rmb();
+
+ if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
+ u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
+ struct bnxt_cp_ring_info *cpr2;
+
+ cpr2 = cpr->cp_ring_arr[idx];
+ work_done += __bnxt_poll_work(bp, cpr2,
+ budget - work_done);
+ cpr->has_more_work = cpr2->has_more_work;
+ } else {
+ bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
+ }
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+ if (cpr->has_more_work)
+ break;
+ }
+ __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, true);
+ cpr->cp_raw_cons = raw_cons;
+ return work_done;
+}
+
static void bnxt_free_tx_skbs(struct bnxt *bp)
{
int i, max_idx;
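bnxt_poll_p5(), added above, polls the notification queue (NQ) rather than a completion ring directly: each NQ_CN_TYPE_CQ_NOTIFICATION entry carries cq_handle_low, which is the index (BNXT_RX_HDL or BNXT_TX_HDL) into cp_ring_arr[] of the CQ that has work, and that CQ is then drained with __bnxt_poll_work() within the remaining budget. A rough, self-contained model of the two-level dispatch, with stand-in types and constants:

#include <stdint.h>
#include <stdio.h>

#define HDL_RX 0
#define HDL_TX 1

struct cq {
	const char *name;
	int pending;		/* completions waiting on this CQ */
};

struct nq_entry {
	int valid;
	uint32_t cq_handle;	/* index of the CQ that has work */
};

/* Drain one CQ within the remaining budget, return packets processed. */
static int poll_cq(struct cq *cq, int budget)
{
	int done = cq->pending < budget ? cq->pending : budget;

	cq->pending -= done;
	printf("%s: processed %d\n", cq->name, done);
	return done;
}

int main(void)
{
	struct cq rings[2] = { { "rx-cq", 7 }, { "tx-cq", 2 } };
	struct nq_entry nq[] = { { 1, HDL_RX }, { 1, HDL_TX }, { 0, 0 } };
	int budget = 64, work = 0;

	/* Walk NQ entries; each one names the CQ that needs servicing. */
	for (int i = 0; nq[i].valid; i++)
		work += poll_cq(&rings[nq[i].cq_handle], budget - work);

	printf("total %d\n", work);
	return 0;
}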
@@ -2195,60 +2357,73 @@ static void bnxt_free_skbs(struct bnxt *bp)
bnxt_free_rx_skbs(bp);
}
-static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
+static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
{
struct pci_dev *pdev = bp->pdev;
int i;
- for (i = 0; i < ring->nr_pages; i++) {
- if (!ring->pg_arr[i])
+ for (i = 0; i < rmem->nr_pages; i++) {
+ if (!rmem->pg_arr[i])
continue;
- dma_free_coherent(&pdev->dev, ring->page_size,
- ring->pg_arr[i], ring->dma_arr[i]);
+ dma_free_coherent(&pdev->dev, rmem->page_size,
+ rmem->pg_arr[i], rmem->dma_arr[i]);
- ring->pg_arr[i] = NULL;
+ rmem->pg_arr[i] = NULL;
}
- if (ring->pg_tbl) {
- dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
- ring->pg_tbl, ring->pg_tbl_map);
- ring->pg_tbl = NULL;
+ if (rmem->pg_tbl) {
+ dma_free_coherent(&pdev->dev, rmem->nr_pages * 8,
+ rmem->pg_tbl, rmem->pg_tbl_map);
+ rmem->pg_tbl = NULL;
}
- if (ring->vmem_size && *ring->vmem) {
- vfree(*ring->vmem);
- *ring->vmem = NULL;
+ if (rmem->vmem_size && *rmem->vmem) {
+ vfree(*rmem->vmem);
+ *rmem->vmem = NULL;
}
}
-static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
+static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
{
- int i;
struct pci_dev *pdev = bp->pdev;
+ u64 valid_bit = 0;
+ int i;
- if (ring->nr_pages > 1) {
- ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
- ring->nr_pages * 8,
- &ring->pg_tbl_map,
+ if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
+ valid_bit = PTU_PTE_VALID;
+ if (rmem->nr_pages > 1) {
+ rmem->pg_tbl = dma_alloc_coherent(&pdev->dev,
+ rmem->nr_pages * 8,
+ &rmem->pg_tbl_map,
GFP_KERNEL);
- if (!ring->pg_tbl)
+ if (!rmem->pg_tbl)
return -ENOMEM;
}
- for (i = 0; i < ring->nr_pages; i++) {
- ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
- ring->page_size,
- &ring->dma_arr[i],
+ for (i = 0; i < rmem->nr_pages; i++) {
+ u64 extra_bits = valid_bit;
+
+ rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
+ rmem->page_size,
+ &rmem->dma_arr[i],
GFP_KERNEL);
- if (!ring->pg_arr[i])
+ if (!rmem->pg_arr[i])
return -ENOMEM;
- if (ring->nr_pages > 1)
- ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]);
+ if (rmem->nr_pages > 1) {
+ if (i == rmem->nr_pages - 2 &&
+ (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
+ extra_bits |= PTU_PTE_NEXT_TO_LAST;
+ else if (i == rmem->nr_pages - 1 &&
+ (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
+ extra_bits |= PTU_PTE_LAST;
+ rmem->pg_tbl[i] =
+ cpu_to_le64(rmem->dma_arr[i] | extra_bits);
+ }
}
- if (ring->vmem_size) {
- *ring->vmem = vzalloc(ring->vmem_size);
- if (!(*ring->vmem))
+ if (rmem->vmem_size) {
+ *rmem->vmem = vzalloc(rmem->vmem_size);
+ if (!(*rmem->vmem))
return -ENOMEM;
}
return 0;
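With the bnxt_ring_mem_info conversion above, bnxt_alloc_ring() also stamps each page-table entry when the ring spans multiple pages: a valid bit on every entry when the BNXT_RMEM_*_PTE_FLAG flags are set, and next-to-last/last markers on the final two entries of a ring's table so P5 hardware can detect the wrap point. A small sketch of building such a table; the bit values are placeholders, not the hardware encoding:

#include <stdint.h>
#include <stdio.h>

#define PTE_VALID	 0x1ULL	/* placeholder bit positions */
#define PTE_NEXT_TO_LAST 0x2ULL
#define PTE_LAST	 0x4ULL

static void fill_pg_tbl(uint64_t *tbl, const uint64_t *dma, int nr_pages,
			int ring_pte)
{
	for (int i = 0; i < nr_pages; i++) {
		uint64_t extra = ring_pte ? PTE_VALID : 0;

		if (ring_pte && i == nr_pages - 2)
			extra |= PTE_NEXT_TO_LAST;
		else if (ring_pte && i == nr_pages - 1)
			extra |= PTE_LAST;
		tbl[i] = dma[i] | extra;	/* low address bits assumed free */
	}
}

int main(void)
{
	uint64_t dma[4] = { 0x1000, 0x2000, 0x3000, 0x4000 };
	uint64_t tbl[4];

	fill_pg_tbl(tbl, dma, 4, 1);
	for (int i = 0; i < 4; i++)
		printf("pte[%d] = 0x%llx\n", i, (unsigned long long)tbl[i]);
	return 0;
}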
@@ -2278,10 +2453,10 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
rxr->rx_agg_bmap = NULL;
ring = &rxr->rx_ring_struct;
- bnxt_free_ring(bp, ring);
+ bnxt_free_ring(bp, &ring->ring_mem);
ring = &rxr->rx_agg_ring_struct;
- bnxt_free_ring(bp, ring);
+ bnxt_free_ring(bp, &ring->ring_mem);
}
}
@@ -2308,15 +2483,16 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
if (rc < 0)
return rc;
- rc = bnxt_alloc_ring(bp, ring);
+ rc = bnxt_alloc_ring(bp, &ring->ring_mem);
if (rc)
return rc;
+ ring->grp_idx = i;
if (agg_rings) {
u16 mem_size;
ring = &rxr->rx_agg_ring_struct;
- rc = bnxt_alloc_ring(bp, ring);
+ rc = bnxt_alloc_ring(bp, &ring->ring_mem);
if (rc)
return rc;
@@ -2359,7 +2535,7 @@ static void bnxt_free_tx_rings(struct bnxt *bp)
ring = &txr->tx_ring_struct;
- bnxt_free_ring(bp, ring);
+ bnxt_free_ring(bp, &ring->ring_mem);
}
}
@@ -2390,7 +2566,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
ring = &txr->tx_ring_struct;
- rc = bnxt_alloc_ring(bp, ring);
+ rc = bnxt_alloc_ring(bp, &ring->ring_mem);
if (rc)
return rc;
@@ -2436,6 +2612,7 @@ static void bnxt_free_cp_rings(struct bnxt *bp)
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr;
struct bnxt_ring_struct *ring;
+ int j;
if (!bnapi)
continue;
@@ -2443,12 +2620,51 @@ static void bnxt_free_cp_rings(struct bnxt *bp)
cpr = &bnapi->cp_ring;
ring = &cpr->cp_ring_struct;
- bnxt_free_ring(bp, ring);
+ bnxt_free_ring(bp, &ring->ring_mem);
+
+ for (j = 0; j < 2; j++) {
+ struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
+
+ if (cpr2) {
+ ring = &cpr2->cp_ring_struct;
+ bnxt_free_ring(bp, &ring->ring_mem);
+ kfree(cpr2);
+ cpr->cp_ring_arr[j] = NULL;
+ }
+ }
}
}
+static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
+{
+ struct bnxt_ring_mem_info *rmem;
+ struct bnxt_ring_struct *ring;
+ struct bnxt_cp_ring_info *cpr;
+ int rc;
+
+ cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
+ if (!cpr)
+ return NULL;
+
+ ring = &cpr->cp_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bp->cp_nr_pages;
+ rmem->page_size = HW_CMPD_RING_SIZE;
+ rmem->pg_arr = (void **)cpr->cp_desc_ring;
+ rmem->dma_arr = cpr->cp_desc_mapping;
+ rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
+ rc = bnxt_alloc_ring(bp, rmem);
+ if (rc) {
+ bnxt_free_ring(bp, rmem);
+ kfree(cpr);
+ cpr = NULL;
+ }
+ return cpr;
+}
+
static int bnxt_alloc_cp_rings(struct bnxt *bp)
{
+ bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
int i, rc, ulp_base_vec, ulp_msix;
ulp_msix = bnxt_get_ulp_msix_num(bp);
@@ -2462,9 +2678,10 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
continue;
cpr = &bnapi->cp_ring;
+ cpr->bnapi = bnapi;
ring = &cpr->cp_ring_struct;
- rc = bnxt_alloc_ring(bp, ring);
+ rc = bnxt_alloc_ring(bp, &ring->ring_mem);
if (rc)
return rc;
@@ -2472,6 +2689,29 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
ring->map_idx = i + ulp_msix;
else
ring->map_idx = i;
+
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ continue;
+
+ if (i < bp->rx_nr_rings) {
+ struct bnxt_cp_ring_info *cpr2 =
+ bnxt_alloc_cp_sub_ring(bp);
+
+ cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
+ if (!cpr2)
+ return -ENOMEM;
+ cpr2->bnapi = bnapi;
+ }
+ if ((sh && i < bp->tx_nr_rings) ||
+ (!sh && i >= bp->rx_nr_rings)) {
+ struct bnxt_cp_ring_info *cpr2 =
+ bnxt_alloc_cp_sub_ring(bp);
+
+ cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
+ if (!cpr2)
+ return -ENOMEM;
+ cpr2->bnapi = bnapi;
+ }
}
return 0;
}
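On P5 the per-NAPI ring in cp_ring_struct is really the NQ; the actual completion rings are allocated in the hunk above as sub-rings in cp_ring_arr[]: slot BNXT_RX_HDL for NAPI indices that own an RX ring, and slot BNXT_TX_HDL when the index also owns a TX ring (i < tx_nr_rings with shared rings, or i >= rx_nr_rings without). A tiny, purely illustrative sketch of that index-to-handles mapping:

#include <stdio.h>
#include <stdbool.h>

static void cq_handles(int i, int rx_nr, int tx_nr, bool shared)
{
	bool has_rx = i < rx_nr;
	bool has_tx = shared ? i < tx_nr : i >= rx_nr;

	printf("napi %d: %s%s%s\n", i,
	       has_rx ? "RX-CQ " : "",
	       has_tx ? "TX-CQ" : "",
	       (!has_rx && !has_tx) ? "none" : "");
}

int main(void)
{
	/* 4 RX + 4 TX, shared rings: every NAPI gets both sub-rings. */
	for (int i = 0; i < 4; i++)
		cq_handles(i, 4, 4, true);

	/* 2 RX + 2 TX, not shared: NAPIs 0-1 get RX, 2-3 get TX. */
	for (int i = 0; i < 4; i++)
		cq_handles(i, 2, 2, false);
	return 0;
}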
@@ -2482,6 +2722,7 @@ static void bnxt_init_ring_struct(struct bnxt *bp)
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_ring_mem_info *rmem;
struct bnxt_cp_ring_info *cpr;
struct bnxt_rx_ring_info *rxr;
struct bnxt_tx_ring_info *txr;
@@ -2492,31 +2733,34 @@ static void bnxt_init_ring_struct(struct bnxt *bp)
cpr = &bnapi->cp_ring;
ring = &cpr->cp_ring_struct;
- ring->nr_pages = bp->cp_nr_pages;
- ring->page_size = HW_CMPD_RING_SIZE;
- ring->pg_arr = (void **)cpr->cp_desc_ring;
- ring->dma_arr = cpr->cp_desc_mapping;
- ring->vmem_size = 0;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bp->cp_nr_pages;
+ rmem->page_size = HW_CMPD_RING_SIZE;
+ rmem->pg_arr = (void **)cpr->cp_desc_ring;
+ rmem->dma_arr = cpr->cp_desc_mapping;
+ rmem->vmem_size = 0;
rxr = bnapi->rx_ring;
if (!rxr)
goto skip_rx;
ring = &rxr->rx_ring_struct;
- ring->nr_pages = bp->rx_nr_pages;
- ring->page_size = HW_RXBD_RING_SIZE;
- ring->pg_arr = (void **)rxr->rx_desc_ring;
- ring->dma_arr = rxr->rx_desc_mapping;
- ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
- ring->vmem = (void **)&rxr->rx_buf_ring;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bp->rx_nr_pages;
+ rmem->page_size = HW_RXBD_RING_SIZE;
+ rmem->pg_arr = (void **)rxr->rx_desc_ring;
+ rmem->dma_arr = rxr->rx_desc_mapping;
+ rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
+ rmem->vmem = (void **)&rxr->rx_buf_ring;
ring = &rxr->rx_agg_ring_struct;
- ring->nr_pages = bp->rx_agg_nr_pages;
- ring->page_size = HW_RXBD_RING_SIZE;
- ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
- ring->dma_arr = rxr->rx_agg_desc_mapping;
- ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
- ring->vmem = (void **)&rxr->rx_agg_ring;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bp->rx_agg_nr_pages;
+ rmem->page_size = HW_RXBD_RING_SIZE;
+ rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
+ rmem->dma_arr = rxr->rx_agg_desc_mapping;
+ rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
+ rmem->vmem = (void **)&rxr->rx_agg_ring;
skip_rx:
txr = bnapi->tx_ring;
@@ -2524,12 +2768,13 @@ skip_rx:
continue;
ring = &txr->tx_ring_struct;
- ring->nr_pages = bp->tx_nr_pages;
- ring->page_size = HW_RXBD_RING_SIZE;
- ring->pg_arr = (void **)txr->tx_desc_ring;
- ring->dma_arr = txr->tx_desc_mapping;
- ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
- ring->vmem = (void **)&txr->tx_buf_ring;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bp->tx_nr_pages;
+ rmem->page_size = HW_RXBD_RING_SIZE;
+ rmem->pg_arr = (void **)txr->tx_desc_ring;
+ rmem->dma_arr = txr->tx_desc_mapping;
+ rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
+ rmem->vmem = (void **)&txr->tx_buf_ring;
}
}
@@ -2539,8 +2784,8 @@ static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
u32 prod;
struct rx_bd **rx_buf_ring;
- rx_buf_ring = (struct rx_bd **)ring->pg_arr;
- for (i = 0, prod = 0; i < ring->nr_pages; i++) {
+ rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
+ for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
int j;
struct rx_bd *rxbd;
@@ -2642,7 +2887,7 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
static void bnxt_init_cp_rings(struct bnxt *bp)
{
- int i;
+ int i, j;
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
@@ -2651,6 +2896,17 @@ static void bnxt_init_cp_rings(struct bnxt *bp)
ring->fw_ring_id = INVALID_HW_RING_ID;
cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
+ for (j = 0; j < 2; j++) {
+ struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
+
+ if (!cpr2)
+ continue;
+
+ ring = &cpr2->cp_ring_struct;
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
+ cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
+ }
}
}
@@ -2754,10 +3010,12 @@ static void bnxt_init_vnics(struct bnxt *bp)
for (i = 0; i < bp->nr_vnics; i++) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+ int j;
vnic->fw_vnic_id = INVALID_HW_RING_ID;
- vnic->fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
- vnic->fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
+ for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
+ vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
+
vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
if (bp->vnic_info[i].rss_hash_key) {
@@ -2971,6 +3229,9 @@ static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
}
}
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ goto vnic_skip_grps;
+
if (vnic->flags & BNXT_VNIC_RSS_FLAG)
max_rings = bp->rx_nr_rings;
else
@@ -2981,7 +3242,7 @@ static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
rc = -ENOMEM;
goto out;
}
-
+vnic_skip_grps:
if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
!(vnic->flags & BNXT_VNIC_RSS_FLAG))
continue;
@@ -3010,10 +3271,11 @@ static void bnxt_free_hwrm_resources(struct bnxt *bp)
{
struct pci_dev *pdev = bp->pdev;
- dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
- bp->hwrm_cmd_resp_dma_addr);
-
- bp->hwrm_cmd_resp_addr = NULL;
+ if (bp->hwrm_cmd_resp_addr) {
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
+ bp->hwrm_cmd_resp_dma_addr);
+ bp->hwrm_cmd_resp_addr = NULL;
+ }
}
static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
@@ -3034,7 +3296,7 @@ static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
if (bp->hwrm_short_cmd_req_addr) {
struct pci_dev *pdev = bp->pdev;
- dma_free_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN,
+ dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
bp->hwrm_short_cmd_req_addr,
bp->hwrm_short_cmd_req_dma_addr);
bp->hwrm_short_cmd_req_addr = NULL;
@@ -3046,7 +3308,7 @@ static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
struct pci_dev *pdev = bp->pdev;
bp->hwrm_short_cmd_req_addr =
- dma_alloc_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN,
+ dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
&bp->hwrm_short_cmd_req_dma_addr,
GFP_KERNEL);
if (!bp->hwrm_short_cmd_req_addr)
@@ -3070,6 +3332,13 @@ static void bnxt_free_stats(struct bnxt *bp)
bp->hw_rx_port_stats = NULL;
}
+ if (bp->hw_tx_port_stats_ext) {
+ dma_free_coherent(&pdev->dev, sizeof(struct tx_port_stats_ext),
+ bp->hw_tx_port_stats_ext,
+ bp->hw_tx_port_stats_ext_map);
+ bp->hw_tx_port_stats_ext = NULL;
+ }
+
if (bp->hw_rx_port_stats_ext) {
dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
bp->hw_rx_port_stats_ext,
@@ -3144,6 +3413,13 @@ static int bnxt_alloc_stats(struct bnxt *bp)
if (!bp->hw_rx_port_stats_ext)
return 0;
+ if (bp->hwrm_spec_code >= 0x10902) {
+ bp->hw_tx_port_stats_ext =
+ dma_zalloc_coherent(&pdev->dev,
+ sizeof(struct tx_port_stats_ext),
+ &bp->hw_tx_port_stats_ext_map,
+ GFP_KERNEL);
+ }
bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
}
return 0;
@@ -3282,6 +3558,13 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
bp->bnapi[i] = bnapi;
bp->bnapi[i]->index = i;
bp->bnapi[i]->bp = bp;
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ struct bnxt_cp_ring_info *cpr =
+ &bp->bnapi[i]->cp_ring;
+
+ cpr->cp_ring_struct.ring_mem.flags =
+ BNXT_RMEM_RING_PTE_FLAG;
+ }
}
bp->rx_ring = kcalloc(bp->rx_nr_rings,
@@ -3291,7 +3574,15 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
return -ENOMEM;
for (i = 0; i < bp->rx_nr_rings; i++) {
- bp->rx_ring[i].bnapi = bp->bnapi[i];
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ rxr->rx_ring_struct.ring_mem.flags =
+ BNXT_RMEM_RING_PTE_FLAG;
+ rxr->rx_agg_ring_struct.ring_mem.flags =
+ BNXT_RMEM_RING_PTE_FLAG;
+ }
+ rxr->bnapi = bp->bnapi[i];
bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
}
@@ -3313,12 +3604,16 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
j = bp->rx_nr_rings;
for (i = 0; i < bp->tx_nr_rings; i++, j++) {
- bp->tx_ring[i].bnapi = bp->bnapi[j];
- bp->bnapi[j]->tx_ring = &bp->tx_ring[i];
+ struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
+
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ txr->tx_ring_struct.ring_mem.flags =
+ BNXT_RMEM_RING_PTE_FLAG;
+ txr->bnapi = bp->bnapi[j];
+ bp->bnapi[j]->tx_ring = txr;
bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
if (i >= bp->tx_nr_rings_xdp) {
- bp->tx_ring[i].txq_index = i -
- bp->tx_nr_rings_xdp;
+ txr->txq_index = i - bp->tx_nr_rings_xdp;
bp->bnapi[j]->tx_int = bnxt_tx_int;
} else {
bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
@@ -3378,7 +3673,7 @@ static void bnxt_disable_int(struct bnxt *bp)
struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
if (ring->fw_ring_id != INVALID_HW_RING_ID)
- BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+ bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
}
}
@@ -3414,7 +3709,7 @@ static void bnxt_enable_int(struct bnxt *bp)
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+ bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
}
}
@@ -3447,12 +3742,27 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
cp_ring_id = le16_to_cpu(req->cmpl_ring);
intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
- if (bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) {
+ if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
+ if (msg_len > bp->hwrm_max_ext_req_len ||
+ !bp->hwrm_short_cmd_req_addr)
+ return -EINVAL;
+ }
+
+ if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
+ msg_len > BNXT_HWRM_MAX_REQ_LEN) {
void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
+ u16 max_msg_len;
+
+ /* Set boundary for maximum extended request length for short
+ * cmd format. If passed up from device use the max supported
+ * internal req length.
+ */
+ max_msg_len = bp->hwrm_max_ext_req_len;
memcpy(short_cmd_req, req, msg_len);
- memset(short_cmd_req + msg_len, 0, BNXT_HWRM_MAX_REQ_LEN -
- msg_len);
+ if (msg_len < max_msg_len)
+ memset(short_cmd_req + msg_len, 0,
+ max_msg_len - msg_len);
short_input.req_type = req->req_type;
short_input.signature =
@@ -3981,13 +4291,48 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
+static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
+{
+ struct bnxt_ring_grp_info *grp_info;
+
+ grp_info = &bp->grp_info[ring->grp_idx];
+ return grp_info->cp_fw_ring_id;
+}
+
+static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ struct bnxt_napi *bnapi = rxr->bnapi;
+ struct bnxt_cp_ring_info *cpr;
+
+ cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
+ return cpr->cp_ring_struct.fw_ring_id;
+ } else {
+ return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
+ }
+}
+
+static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
+{
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ struct bnxt_napi *bnapi = txr->bnapi;
+ struct bnxt_cp_ring_info *cpr;
+
+ cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
+ return cpr->cp_ring_struct.fw_ring_id;
+ } else {
+ return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
+ }
+}
+
static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
{
u32 i, j, max_rings;
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_rss_cfg_input req = {0};
- if (vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
+ if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
+ vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
return 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
@@ -4018,6 +4363,51 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
+static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
+{
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+ u32 i, j, k, nr_ctxs, max_rings = bp->rx_nr_rings;
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
+ struct hwrm_vnic_rss_cfg_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
+ req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+ if (!set_rss) {
+ hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ return 0;
+ }
+ req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
+ req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
+ req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
+ req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
+ nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
+ for (i = 0, k = 0; i < nr_ctxs; i++) {
+ __le16 *ring_tbl = vnic->rss_table;
+ int rc;
+
+ req.ring_table_pair_index = i;
+ req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
+ for (j = 0; j < 64; j++) {
+ u16 ring_id;
+
+ ring_id = rxr->rx_ring_struct.fw_ring_id;
+ *ring_tbl++ = cpu_to_le16(ring_id);
+ ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+ *ring_tbl++ = cpu_to_le16(ring_id);
+ rxr++;
+ k++;
+ if (k == max_rings) {
+ k = 0;
+ rxr = &bp->rx_ring[0];
+ }
+ }
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ return -EIO;
+ }
+ return 0;
+}
+
static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
{
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
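bnxt_hwrm_vnic_set_rss_p5(), added above, programs the P5 RSS indirection table as (RX ring id, companion CQ id) pairs, 64 pairs per RSS context, so the context count is DIV_ROUND_UP(rx_nr_rings, 64) and the RX rings are walked round-robin to fill each table. A compact model of that fill order; the firmware ids here are faked as small integers:

#include <stdio.h>

#define TBL_PER_CTX 64

int main(void)
{
	int rx_rings = 5;				/* example ring count */
	int nr_ctxs = (rx_rings + TBL_PER_CTX - 1) / TBL_PER_CTX;
	int k = 0;

	for (int ctx = 0; ctx < nr_ctxs; ctx++) {
		printf("ctx %d:", ctx);
		for (int j = 0; j < 8; j++) {		/* show 8 of 64 slots */
			int rx_id = 100 + k;		/* fake RX fw ring id */
			int cq_id = 200 + k;		/* fake companion CQ id */

			printf(" (%d,%d)", rx_id, cq_id);
			if (++k == rx_rings)
				k = 0;			/* wrap over RX rings */
		}
		printf(" ...\n");
	}
	return 0;
}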
@@ -4101,6 +4491,18 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
+
+ req.default_rx_ring_id =
+ cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
+ req.default_cmpl_ring_id =
+ cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
+ req.enables =
+ cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
+ VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
+ goto vnic_mru;
+ }
req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
/* Only RSS support for now TBD: COS & LB */
if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
@@ -4133,13 +4535,13 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
ring = bp->rx_nr_rings - 1;
grp_idx = bp->rx_ring[ring].bnapi->index;
- req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
-
req.lb_rule = cpu_to_le16(0xffff);
+vnic_mru:
req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
VLAN_HLEN);
+ req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
#ifdef CONFIG_BNXT_SRIOV
if (BNXT_VF(bp))
def_vlan = bp->vf.vlan;
@@ -4187,6 +4589,10 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
struct hwrm_vnic_alloc_input req = {0};
struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ goto vnic_no_ring_grps;
/* map ring groups to this vnic */
for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
@@ -4196,12 +4602,12 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
j, nr_rings);
break;
}
- bp->vnic_info[vnic_id].fw_grp_ids[j] =
- bp->grp_info[grp_idx].fw_grp_id;
+ vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
}
- bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
- bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
+vnic_no_ring_grps:
+ for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
+ vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
if (vnic_id == 0)
req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
@@ -4210,7 +4616,7 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc)
- bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id);
+ vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
@@ -4230,7 +4636,8 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
if (!rc) {
u32 flags = le32_to_cpu(resp->flags);
- if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
+ (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
if (flags &
VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
@@ -4245,6 +4652,9 @@ static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
u16 i;
u32 rc = 0;
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ return 0;
+
mutex_lock(&bp->hwrm_cmd_lock);
for (i = 0; i < bp->rx_nr_rings; i++) {
struct hwrm_ring_grp_alloc_input req = {0};
@@ -4277,7 +4687,7 @@ static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
u32 rc = 0;
struct hwrm_ring_grp_free_input req = {0};
- if (!bp->grp_info)
+ if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
return 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
@@ -4306,45 +4716,90 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
int rc = 0, err = 0;
struct hwrm_ring_alloc_input req = {0};
struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
struct bnxt_ring_grp_info *grp_info;
u16 ring_id;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
req.enables = 0;
- if (ring->nr_pages > 1) {
- req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map);
+ if (rmem->nr_pages > 1) {
+ req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
/* Page size is in log2 units */
req.page_size = BNXT_PAGE_SHIFT;
req.page_tbl_depth = 1;
} else {
- req.page_tbl_addr = cpu_to_le64(ring->dma_arr[0]);
+ req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
}
req.fbo = 0;
/* Association of ring index with doorbell index and MSIX number */
req.logical_id = cpu_to_le16(map_index);
switch (ring_type) {
- case HWRM_RING_ALLOC_TX:
+ case HWRM_RING_ALLOC_TX: {
+ struct bnxt_tx_ring_info *txr;
+
+ txr = container_of(ring, struct bnxt_tx_ring_info,
+ tx_ring_struct);
req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
/* Association of transmit ring with completion ring */
grp_info = &bp->grp_info[ring->grp_idx];
- req.cmpl_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
+ req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
req.length = cpu_to_le32(bp->tx_ring_mask + 1);
req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
req.queue_id = cpu_to_le16(ring->queue_id);
break;
+ }
case HWRM_RING_ALLOC_RX:
req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
req.length = cpu_to_le32(bp->rx_ring_mask + 1);
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ u16 flags = 0;
+
+ /* Association of rx ring with stats context */
+ grp_info = &bp->grp_info[ring->grp_idx];
+ req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
+ req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
+ req.enables |= cpu_to_le32(
+ RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
+ if (NET_IP_ALIGN == 2)
+ flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
+ req.flags = cpu_to_le16(flags);
+ }
break;
case HWRM_RING_ALLOC_AGG:
- req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
+ /* Association of agg ring with rx ring */
+ grp_info = &bp->grp_info[ring->grp_idx];
+ req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
+ req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
+ req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
+ req.enables |= cpu_to_le32(
+ RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
+ RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
+ } else {
+ req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+ }
req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
break;
case HWRM_RING_ALLOC_CMPL:
req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
req.length = cpu_to_le32(bp->cp_ring_mask + 1);
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ /* Association of cp ring with nq */
+ grp_info = &bp->grp_info[map_index];
+ req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
+ req.cq_handle = cpu_to_le64(ring->handle);
+ req.enables |= cpu_to_le32(
+ RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
+ } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
+ req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+ }
+ break;
+ case HWRM_RING_ALLOC_NQ:
+ req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
+ req.length = cpu_to_le32(bp->cp_ring_mask + 1);
if (bp->flags & BNXT_FLAG_USING_MSIX)
req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
break;
@@ -4393,22 +4848,67 @@ static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
return rc;
}
+static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
+ u32 map_idx, u32 xid)
+{
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (BNXT_PF(bp))
+ db->doorbell = bp->bar1 + 0x10000;
+ else
+ db->doorbell = bp->bar1 + 0x4000;
+ switch (ring_type) {
+ case HWRM_RING_ALLOC_TX:
+ db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
+ break;
+ case HWRM_RING_ALLOC_RX:
+ case HWRM_RING_ALLOC_AGG:
+ db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
+ break;
+ case HWRM_RING_ALLOC_CMPL:
+ db->db_key64 = DBR_PATH_L2;
+ break;
+ case HWRM_RING_ALLOC_NQ:
+ db->db_key64 = DBR_PATH_L2;
+ break;
+ }
+ db->db_key64 |= (u64)xid << DBR_XID_SFT;
+ } else {
+ db->doorbell = bp->bar1 + map_idx * 0x80;
+ switch (ring_type) {
+ case HWRM_RING_ALLOC_TX:
+ db->db_key32 = DB_KEY_TX;
+ break;
+ case HWRM_RING_ALLOC_RX:
+ case HWRM_RING_ALLOC_AGG:
+ db->db_key32 = DB_KEY_RX;
+ break;
+ case HWRM_RING_ALLOC_CMPL:
+ db->db_key32 = DB_KEY_CP;
+ break;
+ }
+ }
+}
+
static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
{
int i, rc = 0;
+ u32 type;
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ type = HWRM_RING_ALLOC_NQ;
+ else
+ type = HWRM_RING_ALLOC_CMPL;
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
u32 map_idx = ring->map_idx;
- cpr->cp_doorbell = bp->bar1 + map_idx * 0x80;
- rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL,
- map_idx);
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
if (rc)
goto err_out;
- BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+ bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
+ bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
if (!i) {
@@ -4418,33 +4918,69 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
}
}
+ type = HWRM_RING_ALLOC_TX;
for (i = 0; i < bp->tx_nr_rings; i++) {
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
- struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
- u32 map_idx = i;
-
- rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX,
- map_idx);
+ struct bnxt_ring_struct *ring;
+ u32 map_idx;
+
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ struct bnxt_napi *bnapi = txr->bnapi;
+ struct bnxt_cp_ring_info *cpr, *cpr2;
+ u32 type2 = HWRM_RING_ALLOC_CMPL;
+
+ cpr = &bnapi->cp_ring;
+ cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
+ ring = &cpr2->cp_ring_struct;
+ ring->handle = BNXT_TX_HDL;
+ map_idx = bnapi->index;
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
+ if (rc)
+ goto err_out;
+ bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
+ ring->fw_ring_id);
+ bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
+ }
+ ring = &txr->tx_ring_struct;
+ map_idx = i;
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
if (rc)
goto err_out;
- txr->tx_doorbell = bp->bar1 + map_idx * 0x80;
+ bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
}
+ type = HWRM_RING_ALLOC_RX;
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
- u32 map_idx = rxr->bnapi->index;
+ struct bnxt_napi *bnapi = rxr->bnapi;
+ u32 map_idx = bnapi->index;
- rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX,
- map_idx);
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
if (rc)
goto err_out;
- rxr->rx_doorbell = bp->bar1 + map_idx * 0x80;
- writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
+ bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
+ bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ u32 type2 = HWRM_RING_ALLOC_CMPL;
+ struct bnxt_cp_ring_info *cpr2;
+
+ cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
+ ring = &cpr2->cp_ring_struct;
+ ring->handle = BNXT_RX_HDL;
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
+ if (rc)
+ goto err_out;
+ bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
+ ring->fw_ring_id);
+ bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
+ }
}
if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+ type = HWRM_RING_ALLOC_AGG;
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring =
@@ -4452,15 +4988,13 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
u32 grp_idx = ring->grp_idx;
u32 map_idx = grp_idx + bp->rx_nr_rings;
- rc = hwrm_ring_alloc_send_msg(bp, ring,
- HWRM_RING_ALLOC_AGG,
- map_idx);
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
if (rc)
goto err_out;
- rxr->rx_agg_doorbell = bp->bar1 + map_idx * 0x80;
- writel(DB_KEY_RX | rxr->rx_agg_prod,
- rxr->rx_agg_doorbell);
+ bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
+ ring->fw_ring_id);
+ bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
}
}
@@ -4496,6 +5030,7 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp,
static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
{
+ u32 type;
int i;
if (!bp->bnapi)
@@ -4504,9 +5039,9 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
for (i = 0; i < bp->tx_nr_rings; i++) {
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
- u32 grp_idx = txr->bnapi->index;
- u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
+ u32 cmpl_ring_id;
+ cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
hwrm_ring_free_send_msg(bp, ring,
RING_FREE_REQ_RING_TYPE_TX,
@@ -4520,8 +5055,9 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
u32 grp_idx = rxr->bnapi->index;
- u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
+ u32 cmpl_ring_id;
+ cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
hwrm_ring_free_send_msg(bp, ring,
RING_FREE_REQ_RING_TYPE_RX,
@@ -4533,15 +5069,19 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
}
}
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ type = RING_FREE_REQ_RING_TYPE_RX_AGG;
+ else
+ type = RING_FREE_REQ_RING_TYPE_RX;
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
u32 grp_idx = rxr->bnapi->index;
- u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
+ u32 cmpl_ring_id;
+ cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
- hwrm_ring_free_send_msg(bp, ring,
- RING_FREE_REQ_RING_TYPE_RX,
+ hwrm_ring_free_send_msg(bp, ring, type,
close_path ? cmpl_ring_id :
INVALID_HW_RING_ID);
ring->fw_ring_id = INVALID_HW_RING_ID;
@@ -4556,14 +5096,32 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
*/
bnxt_disable_int_sync(bp);
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ type = RING_FREE_REQ_RING_TYPE_NQ;
+ else
+ type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+ struct bnxt_ring_struct *ring;
+ int j;
+
+ for (j = 0; j < 2; j++) {
+ struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
+ if (cpr2) {
+ ring = &cpr2->cp_ring_struct;
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ continue;
+ hwrm_ring_free_send_msg(bp, ring,
+ RING_FREE_REQ_RING_TYPE_L2_CMPL,
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ }
+ }
+ ring = &cpr->cp_ring_struct;
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
- hwrm_ring_free_send_msg(bp, ring,
- RING_FREE_REQ_RING_TYPE_L2_CMPL,
+ hwrm_ring_free_send_msg(bp, ring, type,
INVALID_HW_RING_ID);
ring->fw_ring_id = INVALID_HW_RING_ID;
bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
@@ -4571,6 +5129,9 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
}
}
+static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
+ bool shared);
+
static int bnxt_hwrm_get_rings(struct bnxt *bp)
{
struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
@@ -4601,6 +5162,22 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
cp = le16_to_cpu(resp->alloc_cmpl_rings);
stats = le16_to_cpu(resp->alloc_stat_ctx);
cp = min_t(u16, cp, stats);
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ int rx = hw_resc->resv_rx_rings;
+ int tx = hw_resc->resv_tx_rings;
+
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ rx >>= 1;
+ if (cp < (rx + tx)) {
+ bnxt_trim_rings(bp, &rx, &tx, cp, false);
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ rx <<= 1;
+ hw_resc->resv_rx_rings = rx;
+ hw_resc->resv_tx_rings = tx;
+ }
+ cp = le16_to_cpu(resp->alloc_msix);
+ hw_resc->resv_hw_ring_grps = rx;
+ }
hw_resc->resv_cp_rings = cp;
}
mutex_unlock(&bp->hwrm_cmd_lock);
@@ -4626,6 +5203,8 @@ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
return rc;
}
+static bool bnxt_rfs_supported(struct bnxt *bp);
+
static void
__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
int tx_rings, int rx_rings, int ring_grps,
@@ -4639,15 +5218,38 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
req->num_tx_rings = cpu_to_le16(tx_rings);
if (BNXT_NEW_RM(bp)) {
enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
- enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
- FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
- enables |= ring_grps ?
- FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
- enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
+ enables |= tx_rings + ring_grps ?
+ FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+ FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= rx_rings ?
+ FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
+ } else {
+ enables |= cp_rings ?
+ FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+ FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= ring_grps ?
+ FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
+ FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
+ }
+ enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
req->num_rx_rings = cpu_to_le16(rx_rings);
- req->num_hw_ring_grps = cpu_to_le16(ring_grps);
- req->num_cmpl_rings = cpu_to_le16(cp_rings);
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
+ req->num_msix = cpu_to_le16(cp_rings);
+ req->num_rsscos_ctxs =
+ cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
+ } else {
+ req->num_cmpl_rings = cpu_to_le16(cp_rings);
+ req->num_hw_ring_grps = cpu_to_le16(ring_grps);
+ req->num_rsscos_ctxs = cpu_to_le16(1);
+ if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
+ bnxt_rfs_supported(bp))
+ req->num_rsscos_ctxs =
+ cpu_to_le16(ring_grps + 1);
+ }
req->num_stat_ctxs = req->num_cmpl_rings;
req->num_vnics = cpu_to_le16(vnics);
}
@@ -4664,16 +5266,33 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
- enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
- enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
- FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
- enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
+ enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ enables |= tx_rings + ring_grps ?
+ FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ } else {
+ enables |= cp_rings ?
+ FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= ring_grps ?
+ FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
+ }
enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
+ enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
+ req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
req->num_tx_rings = cpu_to_le16(tx_rings);
req->num_rx_rings = cpu_to_le16(rx_rings);
- req->num_hw_ring_grps = cpu_to_le16(ring_grps);
- req->num_cmpl_rings = cpu_to_le16(cp_rings);
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
+ req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
+ } else {
+ req->num_cmpl_rings = cpu_to_le16(cp_rings);
+ req->num_hw_ring_grps = cpu_to_le16(ring_grps);
+ req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
+ }
req->num_stat_ctxs = req->num_cmpl_rings;
req->num_vnics = cpu_to_le16(vnics);
@@ -4717,10 +5336,6 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
cp_rings, vnics);
- req.enables |= cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
- FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS);
- req.num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
- req.num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
return -ENOMEM;
@@ -4766,20 +5381,19 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
return true;
- if (bp->flags & BNXT_FLAG_RFS)
+ if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
vnic = rx + 1;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1;
if (BNXT_NEW_RM(bp) &&
(hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
- hw_resc->resv_hw_ring_grps != grp || hw_resc->resv_vnics != vnic))
+ hw_resc->resv_vnics != vnic ||
+ (hw_resc->resv_hw_ring_grps != grp &&
+ !(bp->flags & BNXT_FLAG_CHIP_P5))))
return true;
return false;
}
-static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
- bool shared);
-
static int __bnxt_reserve_rings(struct bnxt *bp)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
@@ -4795,7 +5409,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
sh = true;
- if (bp->flags & BNXT_FLAG_RFS)
+ if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
vnic = rx + 1;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1;
@@ -4858,9 +5472,11 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
- FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
- FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
+ FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
+ FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
req.flags = cpu_to_le32(flags);
rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
@@ -4879,12 +5495,16 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
cp_rings, vnics);
flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
- if (BNXT_NEW_RM(bp))
+ if (BNXT_NEW_RM(bp)) {
flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
- FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
+ else
+ flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
+ }
req.flags = cpu_to_le32(flags);
rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
@@ -4907,46 +5527,140 @@ static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
cp_rings, vnics);
}
-static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
+static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
+{
+ struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
+ struct hwrm_ring_aggint_qcaps_input req = {0};
+ int rc;
+
+ coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
+ coal_cap->num_cmpl_dma_aggr_max = 63;
+ coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
+ coal_cap->cmpl_aggr_dma_tmr_max = 65535;
+ coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
+ coal_cap->int_lat_tmr_min_max = 65535;
+ coal_cap->int_lat_tmr_max_max = 65535;
+ coal_cap->num_cmpl_aggr_int_max = 65535;
+ coal_cap->timer_units = 80;
+
+ if (bp->hwrm_spec_code < 0x10902)
+ return;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
+ coal_cap->nq_params = le32_to_cpu(resp->nq_params);
+ coal_cap->num_cmpl_dma_aggr_max =
+ le16_to_cpu(resp->num_cmpl_dma_aggr_max);
+ coal_cap->num_cmpl_dma_aggr_during_int_max =
+ le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
+ coal_cap->cmpl_aggr_dma_tmr_max =
+ le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
+ coal_cap->cmpl_aggr_dma_tmr_during_int_max =
+ le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
+ coal_cap->int_lat_tmr_min_max =
+ le16_to_cpu(resp->int_lat_tmr_min_max);
+ coal_cap->int_lat_tmr_max_max =
+ le16_to_cpu(resp->int_lat_tmr_max_max);
+ coal_cap->num_cmpl_aggr_int_max =
+ le16_to_cpu(resp->num_cmpl_aggr_int_max);
+ coal_cap->timer_units = le16_to_cpu(resp->timer_units);
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+}
+
+static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
+{
+ struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
+
+ return usec * 1000 / coal_cap->timer_units;
+}
+
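As a quick sanity check on the conversion helper above (illustrative arithmetic only, not part of the diff): with the default timer_units of 80, i.e. 80 ns per timer tick, bnxt_usec_to_coal_tmr(bp, 12) returns 12 * 1000 / 80 = 150 ticks, which matches the legacy BNXT_USEC_TO_COAL_TIMER(12) = 12 * 25 / 2 = 150 macro that this patch removes from bnxt.h.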
+static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
+ struct bnxt_coal *hw_coal,
struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
{
- u16 val, tmr, max, flags;
+ struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
+ u32 cmpl_params = coal_cap->cmpl_params;
+ u16 val, tmr, max, flags = 0;
max = hw_coal->bufs_per_record * 128;
if (hw_coal->budget)
max = hw_coal->bufs_per_record * hw_coal->budget;
+ max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
req->num_cmpl_aggr_int = cpu_to_le16(val);
- /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */
- val = min_t(u16, val, 63);
+ val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
req->num_cmpl_dma_aggr = cpu_to_le16(val);
- /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */
- val = clamp_t(u16, hw_coal->coal_bufs_irq, 1, 63);
+ val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
+ coal_cap->num_cmpl_dma_aggr_during_int_max);
req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
- tmr = BNXT_USEC_TO_COAL_TIMER(hw_coal->coal_ticks);
- tmr = max_t(u16, tmr, 1);
+ tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
+ tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
req->int_lat_tmr_max = cpu_to_le16(tmr);
/* min timer set to 1/2 of interrupt timer */
- val = tmr / 2;
- req->int_lat_tmr_min = cpu_to_le16(val);
+ if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
+ val = tmr / 2;
+ val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
+ req->int_lat_tmr_min = cpu_to_le16(val);
+ req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
+ }
/* buf timer set to 1/4 of interrupt timer */
- val = max_t(u16, tmr / 4, 1);
+ val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
- tmr = BNXT_USEC_TO_COAL_TIMER(hw_coal->coal_ticks_irq);
- tmr = max_t(u16, tmr, 1);
- req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(tmr);
+ if (cmpl_params &
+ RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
+ tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
+ val = clamp_t(u16, tmr, 1,
+ coal_cap->cmpl_aggr_dma_tmr_during_int_max);
+ req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
+ req->enables |=
+ cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
+ }
- flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
- if (hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
+ if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
+ flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
+ if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
+ hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
req->flags = cpu_to_le16(flags);
+ req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
+}
+
+/* Caller holds bp->hwrm_cmd_lock */
+static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
+ struct bnxt_coal *hw_coal)
+{
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
+ u32 nq_params = coal_cap->nq_params;
+ u16 tmr;
+
+ if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
+ -1, -1);
+ req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
+ req.flags =
+ cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
+
+ tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
+ tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
+ req.int_lat_tmr_min = cpu_to_le16(tmr);
+ req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
+ return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
@@ -4954,7 +5668,6 @@ int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
struct bnxt_coal coal;
- unsigned int grp_idx;
/* Tick values in micro seconds.
* 1 coal_buf x bufs_per_record = 1 completion record.
@@ -4970,10 +5683,9 @@ int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
- bnxt_hwrm_set_coal_params(&coal, &req_rx);
+ bnxt_hwrm_set_coal_params(bp, &coal, &req_rx);
- grp_idx = bnapi->index;
- req_rx.ring_id = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
+ req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
HWRM_CMD_TIMEOUT);
@@ -4990,22 +5702,46 @@ int bnxt_hwrm_set_coal(struct bnxt *bp)
bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
- bnxt_hwrm_set_coal_params(&bp->rx_coal, &req_rx);
- bnxt_hwrm_set_coal_params(&bp->tx_coal, &req_tx);
+ bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx);
+ bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx);
mutex_lock(&bp->hwrm_cmd_lock);
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_coal *hw_coal;
+ u16 ring_id;
req = &req_rx;
- if (!bnapi->rx_ring)
+ if (!bnapi->rx_ring) {
+ ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
req = &req_tx;
- req->ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
+ } else {
+ ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
+ }
+ req->ring_id = cpu_to_le16(ring_id);
rc = _hwrm_send_message(bp, req, sizeof(*req),
HWRM_CMD_TIMEOUT);
if (rc)
break;
+
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ continue;
+
+ if (bnapi->rx_ring && bnapi->tx_ring) {
+ req = &req_tx;
+ ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
+ req->ring_id = cpu_to_le16(ring_id);
+ rc = _hwrm_send_message(bp, req, sizeof(*req),
+ HWRM_CMD_TIMEOUT);
+ if (rc)
+ break;
+ }
+ if (bnapi->rx_ring)
+ hw_coal = &bp->rx_coal;
+ else
+ hw_coal = &bp->tx_coal;
+ __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
}
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -5132,6 +5868,304 @@ func_qcfg_exit:
return rc;
}
+static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
+{
+ struct hwrm_func_backing_store_qcaps_input req = {0};
+ struct hwrm_func_backing_store_qcaps_output *resp =
+ bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ struct bnxt_ctx_pg_info *ctx_pg;
+ struct bnxt_ctx_mem_info *ctx;
+ int i;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ rc = -ENOMEM;
+ goto ctx_err;
+ }
+ ctx_pg = kzalloc(sizeof(*ctx_pg) * (bp->max_q + 1), GFP_KERNEL);
+ if (!ctx_pg) {
+ kfree(ctx);
+ rc = -ENOMEM;
+ goto ctx_err;
+ }
+ for (i = 0; i < bp->max_q + 1; i++, ctx_pg++)
+ ctx->tqm_mem[i] = ctx_pg;
+
+ bp->ctx = ctx;
+ ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
+ ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
+ ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
+ ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
+ ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
+ ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
+ ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
+ ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
+ ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
+ ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
+ ctx->vnic_max_vnic_entries =
+ le16_to_cpu(resp->vnic_max_vnic_entries);
+ ctx->vnic_max_ring_table_entries =
+ le16_to_cpu(resp->vnic_max_ring_table_entries);
+ ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
+ ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
+ ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
+ ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
+ ctx->tqm_min_entries_per_ring =
+ le32_to_cpu(resp->tqm_min_entries_per_ring);
+ ctx->tqm_max_entries_per_ring =
+ le32_to_cpu(resp->tqm_max_entries_per_ring);
+ ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
+ if (!ctx->tqm_entries_multiple)
+ ctx->tqm_entries_multiple = 1;
+ ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
+ ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
+ ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
+ ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
+ } else {
+ rc = 0;
+ }
+ctx_err:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
+ __le64 *pg_dir)
+{
+ u8 pg_size = 0;
+
+ if (BNXT_PAGE_SHIFT == 13)
+ pg_size = 1 << 4;
+ else if (BNXT_PAGE_SIZE == 16)
+ pg_size = 2 << 4;
+
+ *pg_attr = pg_size;
+ if (rmem->nr_pages > 1) {
+ *pg_attr |= 1;
+ *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
+ } else {
+ *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
+ }
+}
+
+#define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
+ (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
+ FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
+ FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
+ FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
+ FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
+
+static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
+{
+ struct hwrm_func_backing_store_cfg_input req = {0};
+ struct bnxt_ctx_mem_info *ctx = bp->ctx;
+ struct bnxt_ctx_pg_info *ctx_pg;
+ __le32 *num_entries;
+ __le64 *pg_dir;
+ u8 *pg_attr;
+ int i, rc;
+ u32 ena;
+
+ if (!ctx)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
+ req.enables = cpu_to_le32(enables);
+
+ if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
+ ctx_pg = &ctx->qp_mem;
+ req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
+ req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
+ req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
+ req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req.qpc_pg_size_qpc_lvl,
+ &req.qpc_page_dir);
+ }
+ if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
+ ctx_pg = &ctx->srq_mem;
+ req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
+ req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
+ req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req.srq_pg_size_srq_lvl,
+ &req.srq_page_dir);
+ }
+ if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
+ ctx_pg = &ctx->cq_mem;
+ req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
+ req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
+ req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
+ &req.cq_page_dir);
+ }
+ if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
+ ctx_pg = &ctx->vnic_mem;
+ req.vnic_num_vnic_entries =
+ cpu_to_le16(ctx->vnic_max_vnic_entries);
+ req.vnic_num_ring_table_entries =
+ cpu_to_le16(ctx->vnic_max_ring_table_entries);
+ req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req.vnic_pg_size_vnic_lvl,
+ &req.vnic_page_dir);
+ }
+ if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
+ ctx_pg = &ctx->stat_mem;
+ req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
+ req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req.stat_pg_size_stat_lvl,
+ &req.stat_page_dir);
+ }
+ for (i = 0, num_entries = &req.tqm_sp_num_entries,
+ pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
+ pg_dir = &req.tqm_sp_page_dir,
+ ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
+ i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
+ if (!(enables & ena))
+ continue;
+
+ req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
+ ctx_pg = ctx->tqm_mem[i];
+ *num_entries = cpu_to_le32(ctx_pg->entries);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
+ }
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ rc = -EIO;
+ return rc;
+}
+
+static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
+ struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size)
+{
+ struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
+
+ if (!mem_size)
+ return 0;
+
+ rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
+ if (rmem->nr_pages > MAX_CTX_PAGES) {
+ rmem->nr_pages = 0;
+ return -EINVAL;
+ }
+ rmem->page_size = BNXT_PAGE_SIZE;
+ rmem->pg_arr = ctx_pg->ctx_pg_arr;
+ rmem->dma_arr = ctx_pg->ctx_dma_arr;
+ rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
+ return bnxt_alloc_ring(bp, rmem);
+}
+
+static void bnxt_free_ctx_mem(struct bnxt *bp)
+{
+ struct bnxt_ctx_mem_info *ctx = bp->ctx;
+ int i;
+
+ if (!ctx)
+ return;
+
+ if (ctx->tqm_mem[0]) {
+ for (i = 0; i < bp->max_q + 1; i++)
+ bnxt_free_ring(bp, &ctx->tqm_mem[i]->ring_mem);
+ kfree(ctx->tqm_mem[0]);
+ ctx->tqm_mem[0] = NULL;
+ }
+
+ bnxt_free_ring(bp, &ctx->stat_mem.ring_mem);
+ bnxt_free_ring(bp, &ctx->vnic_mem.ring_mem);
+ bnxt_free_ring(bp, &ctx->cq_mem.ring_mem);
+ bnxt_free_ring(bp, &ctx->srq_mem.ring_mem);
+ bnxt_free_ring(bp, &ctx->qp_mem.ring_mem);
+ ctx->flags &= ~BNXT_CTX_FLAG_INITED;
+}
+
+static int bnxt_alloc_ctx_mem(struct bnxt *bp)
+{
+ struct bnxt_ctx_pg_info *ctx_pg;
+ struct bnxt_ctx_mem_info *ctx;
+ u32 mem_size, ena, entries;
+ int i, rc;
+
+ rc = bnxt_hwrm_func_backing_store_qcaps(bp);
+ if (rc) {
+ netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
+ rc);
+ return rc;
+ }
+ ctx = bp->ctx;
+ if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
+ return 0;
+
+ ctx_pg = &ctx->qp_mem;
+ ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries;
+ mem_size = ctx->qp_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
+ if (rc)
+ return rc;
+
+ ctx_pg = &ctx->srq_mem;
+ ctx_pg->entries = ctx->srq_max_l2_entries;
+ mem_size = ctx->srq_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
+ if (rc)
+ return rc;
+
+ ctx_pg = &ctx->cq_mem;
+ ctx_pg->entries = ctx->cq_max_l2_entries;
+ mem_size = ctx->cq_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
+ if (rc)
+ return rc;
+
+ ctx_pg = &ctx->vnic_mem;
+ ctx_pg->entries = ctx->vnic_max_vnic_entries +
+ ctx->vnic_max_ring_table_entries;
+ mem_size = ctx->vnic_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
+ if (rc)
+ return rc;
+
+ ctx_pg = &ctx->stat_mem;
+ ctx_pg->entries = ctx->stat_max_entries;
+ mem_size = ctx->stat_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
+ if (rc)
+ return rc;
+
+ entries = ctx->qp_max_l2_entries;
+ entries = roundup(entries, ctx->tqm_entries_multiple);
+ entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring,
+ ctx->tqm_max_entries_per_ring);
+ for (i = 0, ena = 0; i < bp->max_q + 1; i++) {
+ ctx_pg = ctx->tqm_mem[i];
+ ctx_pg->entries = entries;
+ mem_size = ctx->tqm_entry_size * entries;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
+ if (rc)
+ return rc;
+ ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
+ }
+ ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
+ rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
+ if (rc)
+ netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
+ rc);
+ else
+ ctx->flags |= BNXT_CTX_FLAG_INITED;
+
+ return 0;
+}
+
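To make the sizing above concrete (hypothetical entry counts, assuming a 4 KB BNXT_PAGE_SIZE): a QP context block with qp_entry_size = 256 and 1024 entries gives mem_size = 256 * 1024 = 262144 bytes, so bnxt_alloc_ctx_mem_blk() computes nr_pages = DIV_ROUND_UP(262144, 4096) = 64, well under MAX_CTX_PAGES (BNXT_PAGE_SIZE / 8 = 512); and because nr_pages > 1, bnxt_hwrm_set_pg_attr() reports an indirect page table via pg_tbl_map instead of a single page address.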
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
{
struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
@@ -5170,6 +6204,13 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ u16 max_msix = le16_to_cpu(resp->max_msix);
+
+ hw_resc->max_irqs = min_t(u16, hw_resc->max_irqs, max_msix);
+ hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
+ }
+
if (BNXT_PF(bp)) {
struct bnxt_pf_info *pf = &bp->pf;
@@ -5259,6 +6300,9 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
if (rc)
return rc;
if (bp->hwrm_spec_code >= 0x10803) {
+ rc = bnxt_alloc_ctx_mem(bp);
+ if (rc)
+ return rc;
rc = bnxt_hwrm_func_resc_qcaps(bp, true);
if (!rc)
bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
@@ -5303,13 +6347,15 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
qptr = &resp->queue_id0;
for (i = 0, j = 0; i < bp->max_tc; i++) {
- bp->q_info[j].queue_id = *qptr++;
+ bp->q_info[j].queue_id = *qptr;
+ bp->q_ids[i] = *qptr++;
bp->q_info[j].queue_profile = *qptr++;
bp->tc_to_qidx[j] = j;
if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
(no_rdma && BNXT_PF(bp)))
j++;
}
+ bp->max_q = bp->max_tc;
bp->max_tc = max_t(u8, j, 1);
if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
@@ -5359,8 +6405,12 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
if (!bp->hwrm_cmd_timeout)
bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
- if (resp->hwrm_intf_maj_8b >= 1)
+ if (resp->hwrm_intf_maj_8b >= 1) {
bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
+ bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
+ }
+ if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
+ bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
bp->chip_num = le16_to_cpu(resp->chip_num);
if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
@@ -5417,8 +6467,10 @@ static int bnxt_hwrm_port_qstats(struct bnxt *bp)
static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
{
+ struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_port_qstats_ext_input req = {0};
struct bnxt_pf_info *pf = &bp->pf;
+ int rc;
if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
return 0;
@@ -5427,7 +6479,19 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
req.port_id = cpu_to_le16(pf->port_id);
req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ req.tx_stat_size = cpu_to_le16(sizeof(struct tx_port_stats_ext));
+ req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
+ bp->fw_tx_stats_ext_size = le16_to_cpu(resp->tx_stat_size) / 8;
+ } else {
+ bp->fw_rx_stats_ext_size = 0;
+ bp->fw_tx_stats_ext_size = 0;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
}
static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
@@ -5532,7 +6596,7 @@ static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
return rc;
}
-static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
+static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
{
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
int rc;
@@ -5588,6 +6652,53 @@ vnic_setup_err:
return rc;
}
+static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
+{
+ int rc, i, nr_ctxs;
+
+ nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
+ for (i = 0; i < nr_ctxs; i++) {
+ rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
+ vnic_id, i, rc);
+ break;
+ }
+ bp->rsscos_nr_ctxs++;
+ }
+ if (i < nr_ctxs)
+ return -ENOMEM;
+
+ rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
+ vnic_id, rc);
+ return rc;
+ }
+ rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
+ vnic_id, rc);
+ return rc;
+ }
+ if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+ rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
+ vnic_id, rc);
+ }
+ }
+ return rc;
+}
+
+static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
+{
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ return __bnxt_setup_vnic_p5(bp, vnic_id);
+ else
+ return __bnxt_setup_vnic(bp, vnic_id);
+}
+
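Worked example for the per-VNIC RSS context math above (ring count is hypothetical): each RSS context on a P5 chip covers up to 64 RX rings, so a configuration with 128 RX rings allocates DIV_ROUND_UP(128, 64) = 2 contexts per VNIC. The same divisor appears in __bnxt_hwrm_reserve_pf_rings(), and it is why BNXT_MAX_CTX_PER_VNIC grows from 2 to 8 in bnxt.h.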
static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
{
#ifdef CONFIG_RFS_ACCEL
@@ -6206,12 +7317,15 @@ static void bnxt_init_napi(struct bnxt *bp)
struct bnxt_napi *bnapi;
if (bp->flags & BNXT_FLAG_USING_MSIX) {
- if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+ int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
+
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ poll_fn = bnxt_poll_p5;
+ else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
cp_nr_rings--;
for (i = 0; i < cp_nr_rings; i++) {
bnapi = bp->bnapi[i];
- netif_napi_add(bp->dev, &bnapi->napi,
- bnxt_poll, 64);
+ netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
}
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
bnapi = bp->bnapi[cp_nr_rings];
@@ -6968,10 +8082,10 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
netdev_err(bp->dev, "Failed to reserve default rings at open\n");
return rc;
}
- rc = bnxt_reserve_rings(bp);
- if (rc)
- return rc;
}
+ rc = bnxt_reserve_rings(bp);
+ if (rc)
+ return rc;
if ((bp->flags & BNXT_FLAG_RFS) &&
!(bp->flags & BNXT_FLAG_USING_MSIX)) {
/* disable RFS if falling back to INTA */
@@ -7443,6 +8557,8 @@ static bool bnxt_can_reserve_rings(struct bnxt *bp)
/* If the chip and firmware supports RFS */
static bool bnxt_rfs_supported(struct bnxt *bp)
{
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ return false;
if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
return true;
if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
@@ -7456,6 +8572,8 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
#ifdef CONFIG_RFS_ACCEL
int vnics, max_vnics, max_rss_ctxs;
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ return false;
if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
return false;
@@ -7976,6 +9094,9 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
INIT_WORK(&bp->sp_task, bnxt_sp_task);
spin_lock_init(&bp->ntp_fltr_lock);
+#if BITS_PER_LONG == 32
+ spin_lock_init(&bp->db_lock);
+#endif
bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
@@ -8541,6 +9662,9 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_dcb_free(bp);
kfree(bp->edev);
bp->edev = NULL;
+ bnxt_free_ctx_mem(bp);
+ kfree(bp->ctx);
+ bp->ctx = NULL;
bnxt_cleanup_pci(bp);
free_netdev(dev);
}
@@ -8614,7 +9738,7 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
*max_tx = hw_resc->max_tx_rings;
*max_rx = hw_resc->max_rx_rings;
*max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp),
- hw_resc->max_irqs);
+ hw_resc->max_irqs - bnxt_get_ulp_msix_num(bp));
*max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs);
max_ring_grps = hw_resc->max_hw_ring_grps;
if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
@@ -8846,6 +9970,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
bp = netdev_priv(dev);
+ bnxt_set_max_func_irqs(bp, max_irqs);
if (bnxt_vf_pciid(ent->driver_data))
bp->flags |= BNXT_FLAG_VF;
@@ -8872,12 +9997,16 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
- if (bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) {
+ if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
+ bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
rc = bnxt_alloc_hwrm_short_cmd_req(bp);
if (rc)
goto init_err_pci_clean;
}
+ if (BNXT_CHIP_P5(bp))
+ bp->flags |= BNXT_FLAG_CHIP_P5;
+
rc = bnxt_hwrm_func_reset(bp);
if (rc)
goto init_err_pci_clean;
@@ -8892,7 +10021,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
NETIF_F_RXCSUM | NETIF_F_GRO;
- if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
+ if (BNXT_SUPPORTS_TPA(bp))
dev->hw_features |= NETIF_F_LRO;
dev->hw_enc_features =
@@ -8906,7 +10035,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
- if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
+ if (BNXT_SUPPORTS_TPA(bp))
dev->hw_features |= NETIF_F_GRO_HW;
dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
if (dev->features & NETIF_F_GRO_HW)
@@ -8917,10 +10046,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
init_waitqueue_head(&bp->sriov_cfg_wait);
mutex_init(&bp->sriov_lock);
#endif
- bp->gro_func = bnxt_gro_func_5730x;
- if (BNXT_CHIP_P4_PLUS(bp))
- bp->gro_func = bnxt_gro_func_5731x;
- else
+ if (BNXT_SUPPORTS_TPA(bp)) {
+ bp->gro_func = bnxt_gro_func_5730x;
+ if (BNXT_CHIP_P4(bp))
+ bp->gro_func = bnxt_gro_func_5731x;
+ }
+ if (!BNXT_CHIP_P4_PLUS(bp))
bp->flags |= BNXT_FLAG_DOUBLE_DB;
rc = bnxt_hwrm_func_drv_rgtr(bp);
@@ -8933,6 +10064,13 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bp->ulp_probe = bnxt_ulp_probe;
+ rc = bnxt_hwrm_queue_qportcfg(bp);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
+ rc);
+ rc = -1;
+ goto init_err_pci_clean;
+ }
/* Get the MAX capabilities for this function */
rc = bnxt_hwrm_func_qcaps(bp);
if (rc) {
@@ -8947,13 +10085,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = -EADDRNOTAVAIL;
goto init_err_pci_clean;
}
- rc = bnxt_hwrm_queue_qportcfg(bp);
- if (rc) {
- netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
- rc);
- rc = -1;
- goto init_err_pci_clean;
- }
bnxt_hwrm_func_qcfg(bp);
bnxt_hwrm_port_led_qcaps(bp);
@@ -8971,7 +10102,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_set_rx_skb_mode(bp, false);
bnxt_set_tpa_flags(bp);
bnxt_set_ring_params(bp);
- bnxt_set_max_func_irqs(bp, max_irqs);
rc = bnxt_set_dflt_rings(bp, true);
if (rc) {
netdev_err(bp->dev, "Not enough rings available.\n");
@@ -8984,7 +10114,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
- if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
+ if (BNXT_CHIP_P4(bp) && bp->hwrm_spec_code >= 0x10501) {
bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
@@ -9019,6 +10149,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
+ bnxt_hwrm_coal_params_qcaps(bp);
+
if (BNXT_PF(bp)) {
if (!bnxt_pf_wq) {
bnxt_pf_wq =
@@ -9050,6 +10182,10 @@ init_err_cleanup_tc:
bnxt_clear_int_mode(bp);
init_err_pci_clean:
+ bnxt_free_hwrm_resources(bp);
+ bnxt_free_ctx_mem(bp);
+ kfree(bp->ctx);
+ bp->ctx = NULL;
bnxt_cleanup_pci(bp);
init_err_free:
@@ -9218,13 +10354,6 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
rtnl_unlock();
- err = pci_cleanup_aer_uncorrect_error_status(pdev);
- if (err) {
- dev_err(&pdev->dev,
- "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
- err); /* non-fatal, continue */
- }
-
return PCI_ERS_RESULT_RECOVERED;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index bde384630a75..498b373c992d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -12,11 +12,11 @@
#define BNXT_H
#define DRV_MODULE_NAME "bnxt_en"
-#define DRV_MODULE_VERSION "1.9.2"
+#define DRV_MODULE_VERSION "1.10.0"
#define DRV_VER_MAJ 1
-#define DRV_VER_MIN 9
-#define DRV_VER_UPD 2
+#define DRV_VER_MIN 10
+#define DRV_VER_UPD 0
#include <linux/interrupt.h>
#include <linux/rhashtable.h>
@@ -403,6 +403,19 @@ struct rx_tpa_end_cmp_ext {
((rx_tpa_end_ext)->rx_tpa_end_cmp_errors_v2 & \
cpu_to_le32(RX_TPA_END_CMP_ERRORS))
+struct nqe_cn {
+ __le16 type;
+ #define NQ_CN_TYPE_MASK 0x3fUL
+ #define NQ_CN_TYPE_SFT 0
+ #define NQ_CN_TYPE_CQ_NOTIFICATION 0x30UL
+ #define NQ_CN_TYPE_LAST NQ_CN_TYPE_CQ_NOTIFICATION
+ __le16 reserved16;
+ __le32 cq_handle_low;
+ __le32 v;
+ #define NQ_CN_V 0x1UL
+ __le32 cq_handle_high;
+};
+
#define DB_IDX_MASK 0xffffff
#define DB_IDX_VALID (0x1 << 26)
#define DB_IRQ_DIS (0x1 << 27)
@@ -416,6 +429,25 @@ struct rx_tpa_end_cmp_ext {
#define BNXT_MIN_ROCE_CP_RINGS 2
#define BNXT_MIN_ROCE_STAT_CTXS 1
+/* 64-bit doorbell */
+#define DBR_INDEX_MASK 0x0000000000ffffffULL
+#define DBR_XID_MASK 0x000fffff00000000ULL
+#define DBR_XID_SFT 32
+#define DBR_PATH_L2 (0x1ULL << 56)
+#define DBR_TYPE_SQ (0x0ULL << 60)
+#define DBR_TYPE_RQ (0x1ULL << 60)
+#define DBR_TYPE_SRQ (0x2ULL << 60)
+#define DBR_TYPE_SRQ_ARM (0x3ULL << 60)
+#define DBR_TYPE_CQ (0x4ULL << 60)
+#define DBR_TYPE_CQ_ARMSE (0x5ULL << 60)
+#define DBR_TYPE_CQ_ARMALL (0x6ULL << 60)
+#define DBR_TYPE_CQ_ARMENA (0x7ULL << 60)
+#define DBR_TYPE_SRQ_ARMENA (0x8ULL << 60)
+#define DBR_TYPE_CQ_CUTOFF_ACK (0x9ULL << 60)
+#define DBR_TYPE_NQ (0xaULL << 60)
+#define DBR_TYPE_NQ_ARM (0xbULL << 60)
+#define DBR_TYPE_NULL (0xfULL << 60)
+
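A rough sketch of how these fields combine into the 64-bit value that bnxt_db_write() below writes with writeq(); fw_ring_id and prod are hypothetical locals, and the actual key setup is done elsewhere in the patch:

	/* illustration only: the XID field carries the ring ID, the type and
	 * path bits select an L2 send-queue doorbell, and the producer index
	 * is OR'ed into the low 24 bits at ring time.
	 */
	u64 db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ |
		       ((u64)fw_ring_id << DBR_XID_SFT);
	writeq(db_key64 | (prod & DBR_INDEX_MASK), db->doorbell);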
#define INVALID_HW_RING_ID ((u16)-1)
/* The hardware supports certain page sizes. Use the supported page sizes
@@ -505,6 +537,9 @@ struct rx_tpa_end_cmp_ext {
(!!((agg)->rx_agg_cmp_v & cpu_to_le32(RX_AGG_CMP_V)) == \
!((raw_cons) & bp->cp_bit))
+#define NQ_CMP_VALID(nqcmp, raw_cons) \
+ (!!((nqcmp)->v & cpu_to_le32(NQ_CN_V)) == !((raw_cons) & bp->cp_bit))
+
#define TX_CMP_TYPE(txcmp) \
(le32_to_cpu((txcmp)->tx_cmp_flags_type) & CMP_TYPE)
@@ -577,9 +612,13 @@ struct bnxt_sw_rx_agg_bd {
dma_addr_t mapping;
};
-struct bnxt_ring_struct {
+struct bnxt_ring_mem_info {
int nr_pages;
int page_size;
+ u32 flags;
+#define BNXT_RMEM_VALID_PTE_FLAG 1
+#define BNXT_RMEM_RING_PTE_FLAG 2
+
void **pg_arr;
dma_addr_t *dma_arr;
@@ -588,12 +627,17 @@ struct bnxt_ring_struct {
int vmem_size;
void **vmem;
+};
+
+struct bnxt_ring_struct {
+ struct bnxt_ring_mem_info ring_mem;
u16 fw_ring_id; /* Ring id filled by Chimp FW */
union {
u16 grp_idx;
u16 map_idx; /* Used by cmpl rings */
};
+ u32 handle;
u8 queue_id;
};
@@ -609,12 +653,20 @@ struct tx_push_buffer {
u32 data[25];
};
+struct bnxt_db_info {
+ void __iomem *doorbell;
+ union {
+ u64 db_key64;
+ u32 db_key32;
+ };
+};
+
struct bnxt_tx_ring_info {
struct bnxt_napi *bnapi;
u16 tx_prod;
u16 tx_cons;
u16 txq_index;
- void __iomem *tx_doorbell;
+ struct bnxt_db_info tx_db;
struct tx_bd *tx_desc_ring[MAX_TX_PAGES];
struct bnxt_sw_tx_bd *tx_buf_ring;
@@ -631,6 +683,42 @@ struct bnxt_tx_ring_info {
struct bnxt_ring_struct tx_ring_struct;
};
+#define BNXT_LEGACY_COAL_CMPL_PARAMS \
+ (RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN | \
+ RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MAX | \
+ RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET | \
+ RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE | \
+ RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR | \
+ RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT | \
+ RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR | \
+ RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR_DURING_INT | \
+ RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_AGGR_INT)
+
+#define BNXT_COAL_CMPL_ENABLES \
+ (RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR | \
+ RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_CMPL_AGGR_DMA_TMR | \
+ RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MAX | \
+ RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_AGGR_INT)
+
+#define BNXT_COAL_CMPL_MIN_TMR_ENABLE \
+ RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MIN
+
+#define BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE \
+ RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR_DURING_INT
+
+struct bnxt_coal_cap {
+ u32 cmpl_params;
+ u32 nq_params;
+ u16 num_cmpl_dma_aggr_max;
+ u16 num_cmpl_dma_aggr_during_int_max;
+ u16 cmpl_aggr_dma_tmr_max;
+ u16 cmpl_aggr_dma_tmr_during_int_max;
+ u16 int_lat_tmr_min_max;
+ u16 int_lat_tmr_max_max;
+ u16 num_cmpl_aggr_int_max;
+ u16 timer_units;
+};
+
struct bnxt_coal {
u16 coal_ticks;
u16 coal_ticks_irq;
@@ -675,8 +763,8 @@ struct bnxt_rx_ring_info {
u16 rx_agg_prod;
u16 rx_sw_agg_prod;
u16 rx_next_cons;
- void __iomem *rx_doorbell;
- void __iomem *rx_agg_doorbell;
+ struct bnxt_db_info rx_db;
+ struct bnxt_db_info rx_agg_db;
struct bpf_prog *xdp_prog;
@@ -703,8 +791,12 @@ struct bnxt_rx_ring_info {
};
struct bnxt_cp_ring_info {
+ struct bnxt_napi *bnapi;
u32 cp_raw_cons;
- void __iomem *cp_doorbell;
+ struct bnxt_db_info cp_db;
+
+ u8 had_work_done:1;
+ u8 has_more_work:1;
struct bnxt_coal rx_ring_coal;
u64 rx_packets;
@@ -713,7 +805,10 @@ struct bnxt_cp_ring_info {
struct net_dim dim;
- struct tx_cmp *cp_desc_ring[MAX_CP_PAGES];
+ union {
+ struct tx_cmp *cp_desc_ring[MAX_CP_PAGES];
+ struct nqe_cn *nq_desc_ring[MAX_CP_PAGES];
+ };
dma_addr_t cp_desc_mapping[MAX_CP_PAGES];
@@ -723,6 +818,10 @@ struct bnxt_cp_ring_info {
u64 rx_l4_csum_errors;
struct bnxt_ring_struct cp_ring_struct;
+
+ struct bnxt_cp_ring_info *cp_ring_arr[2];
+#define BNXT_RX_HDL 0
+#define BNXT_TX_HDL 1
};
struct bnxt_napi {
@@ -736,6 +835,9 @@ struct bnxt_napi {
void (*tx_int)(struct bnxt *, struct bnxt_napi *,
int);
+ int tx_pkts;
+ u8 events;
+
u32 flags;
#define BNXT_NAPI_FLAG_XDP 0x1
@@ -755,6 +857,7 @@ struct bnxt_irq {
#define HWRM_RING_ALLOC_RX 0x2
#define HWRM_RING_ALLOC_AGG 0x4
#define HWRM_RING_ALLOC_CMPL 0x8
+#define HWRM_RING_ALLOC_NQ 0x10
#define INVALID_STATS_CTX_ID -1
@@ -768,7 +871,7 @@ struct bnxt_ring_grp_info {
struct bnxt_vnic_info {
u16 fw_vnic_id; /* returned by Chimp during alloc */
-#define BNXT_MAX_CTX_PER_VNIC 2
+#define BNXT_MAX_CTX_PER_VNIC 8
u16 fw_rss_cos_lb_ctx[BNXT_MAX_CTX_PER_VNIC];
u16 fw_l2_ctx_id;
#define BNXT_MAX_UC_ADDRS 4
@@ -1069,6 +1172,55 @@ struct bnxt_vf_rep {
struct bnxt_vf_rep_stats tx_stats;
};
+#define PTU_PTE_VALID 0x1UL
+#define PTU_PTE_LAST 0x2UL
+#define PTU_PTE_NEXT_TO_LAST 0x4UL
+
+#define MAX_CTX_PAGES (BNXT_PAGE_SIZE / 8)
+
+struct bnxt_ctx_pg_info {
+ u32 entries;
+ void *ctx_pg_arr[MAX_CTX_PAGES];
+ dma_addr_t ctx_dma_arr[MAX_CTX_PAGES];
+ struct bnxt_ring_mem_info ring_mem;
+};
+
+struct bnxt_ctx_mem_info {
+ u32 qp_max_entries;
+ u16 qp_min_qp1_entries;
+ u16 qp_max_l2_entries;
+ u16 qp_entry_size;
+ u16 srq_max_l2_entries;
+ u32 srq_max_entries;
+ u16 srq_entry_size;
+ u16 cq_max_l2_entries;
+ u32 cq_max_entries;
+ u16 cq_entry_size;
+ u16 vnic_max_vnic_entries;
+ u16 vnic_max_ring_table_entries;
+ u16 vnic_entry_size;
+ u32 stat_max_entries;
+ u16 stat_entry_size;
+ u16 tqm_entry_size;
+ u32 tqm_min_entries_per_ring;
+ u32 tqm_max_entries_per_ring;
+ u32 mrav_max_entries;
+ u16 mrav_entry_size;
+ u16 tim_entry_size;
+ u32 tim_max_entries;
+ u8 tqm_entries_multiple;
+
+ u32 flags;
+ #define BNXT_CTX_FLAG_INITED 0x01
+
+ struct bnxt_ctx_pg_info qp_mem;
+ struct bnxt_ctx_pg_info srq_mem;
+ struct bnxt_ctx_pg_info cq_mem;
+ struct bnxt_ctx_pg_info vnic_mem;
+ struct bnxt_ctx_pg_info stat_mem;
+ struct bnxt_ctx_pg_info *tqm_mem[9];
+};
+
struct bnxt {
void __iomem *bar0;
void __iomem *bar1;
@@ -1098,6 +1250,8 @@ struct bnxt {
#define CHIP_NUM_5745X 0xd730
+#define CHIP_NUM_57500 0x1750
+
#define CHIP_NUM_58802 0xd802
#define CHIP_NUM_58804 0xd804
#define CHIP_NUM_58808 0xd808
@@ -1144,6 +1298,7 @@ struct bnxt {
atomic_t intr_sem;
u32 flags;
+ #define BNXT_FLAG_CHIP_P5 0x1
#define BNXT_FLAG_VF 0x2
#define BNXT_FLAG_LRO 0x4
#ifdef CONFIG_INET
@@ -1190,15 +1345,24 @@ struct bnxt {
#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp))
#define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
#define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
+#define BNXT_SUPPORTS_TPA(bp) (!BNXT_CHIP_TYPE_NITRO_A0(bp) && \
+ !(bp->flags & BNXT_FLAG_CHIP_P5))
-/* Chip class phase 4 and later */
-#define BNXT_CHIP_P4_PLUS(bp) \
+/* Chip class phase 5 */
+#define BNXT_CHIP_P5(bp) \
+ ((bp)->chip_num == CHIP_NUM_57500)
+
+/* Chip class phase 4.x */
+#define BNXT_CHIP_P4(bp) \
(BNXT_CHIP_NUM_57X1X((bp)->chip_num) || \
BNXT_CHIP_NUM_5745X((bp)->chip_num) || \
BNXT_CHIP_NUM_588XX((bp)->chip_num) || \
(BNXT_CHIP_NUM_58700((bp)->chip_num) && \
!BNXT_CHIP_TYPE_NITRO_A0(bp)))
+#define BNXT_CHIP_P4_PLUS(bp) \
+ (BNXT_CHIP_P4(bp) || BNXT_CHIP_P5(bp))
+
struct bnxt_en_dev *edev;
struct bnxt_en_dev * (*ulp_probe)(struct net_device *);
@@ -1261,6 +1425,8 @@ struct bnxt {
u8 max_lltc; /* lossless TCs */
struct bnxt_queue_info q_info[BNXT_MAX_QUEUE];
u8 tc_to_qidx[BNXT_MAX_QUEUE];
+ u8 q_ids[BNXT_MAX_QUEUE];
+ u8 max_q;
unsigned int current_interval;
#define BNXT_TIMER_INTERVAL HZ
@@ -1305,12 +1471,17 @@ struct bnxt {
struct rx_port_stats *hw_rx_port_stats;
struct tx_port_stats *hw_tx_port_stats;
struct rx_port_stats_ext *hw_rx_port_stats_ext;
+ struct tx_port_stats_ext *hw_tx_port_stats_ext;
dma_addr_t hw_rx_port_stats_map;
dma_addr_t hw_tx_port_stats_map;
dma_addr_t hw_rx_port_stats_ext_map;
+ dma_addr_t hw_tx_port_stats_ext_map;
int hw_port_stats_size;
+ u16 fw_rx_stats_ext_size;
+ u16 fw_tx_stats_ext_size;
u16 hwrm_max_req_len;
+ u16 hwrm_max_ext_req_len;
int hwrm_cmd_timeout;
struct mutex hwrm_cmd_lock; /* serialize hwrm messages */
struct hwrm_ver_get_output ver_resp;
@@ -1328,11 +1499,10 @@ struct bnxt {
u8 port_count;
u16 br_mode;
+ struct bnxt_coal_cap coal_cap;
struct bnxt_coal rx_coal;
struct bnxt_coal tx_coal;
-#define BNXT_USEC_TO_COAL_TIMER(x) ((x) * 25 / 2)
-
u32 stats_coal_ticks;
#define BNXT_DEF_STATS_COAL_TICKS 1000000
#define BNXT_MIN_STATS_COAL_TICKS 250000
@@ -1360,6 +1530,7 @@ struct bnxt {
struct bnxt_hw_resc hw_resc;
struct bnxt_pf_info pf;
+ struct bnxt_ctx_mem_info *ctx;
#ifdef CONFIG_BNXT_SRIOV
int nr_vfs;
struct bnxt_vf_info vf;
@@ -1374,6 +1545,11 @@ struct bnxt {
struct mutex sriov_lock;
#endif
+#if BITS_PER_LONG == 32
+ /* ensure atomic 64-bit doorbell writes on 32-bit systems. */
+ spinlock_t db_lock;
+#endif
+
#define BNXT_NTP_FLTR_MAX_FLTR 4096
#define BNXT_NTP_FLTR_HASH_SIZE 512
#define BNXT_NTP_FLTR_HASH_MASK (BNXT_NTP_FLTR_HASH_SIZE - 1)
@@ -1425,6 +1601,9 @@ struct bnxt {
#define BNXT_RX_STATS_EXT_OFFSET(counter) \
(offsetof(struct rx_port_stats_ext, counter) / 8)
+#define BNXT_TX_STATS_EXT_OFFSET(counter) \
+ (offsetof(struct tx_port_stats_ext, counter) / 8)
+
#define I2C_DEV_ADDR_A0 0xa0
#define I2C_DEV_ADDR_A2 0xa2
#define SFF_DIAG_SUPPORT_OFFSET 0x5c
@@ -1443,21 +1622,46 @@ static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
}
+#if BITS_PER_LONG == 32
+#define writeq(val64, db) \
+do { \
+ spin_lock(&bp->db_lock); \
+ writel((val64) & 0xffffffff, db); \
+ writel((val64) >> 32, (db) + 4); \
+ spin_unlock(&bp->db_lock); \
+} while (0)
+
+#define writeq_relaxed writeq
+#endif
+
/* For TX and RX ring doorbells with no ordering guarantee */
-static inline void bnxt_db_write_relaxed(struct bnxt *bp, void __iomem *db,
- u32 val)
+static inline void bnxt_db_write_relaxed(struct bnxt *bp,
+ struct bnxt_db_info *db, u32 idx)
{
- writel_relaxed(val, db);
- if (bp->flags & BNXT_FLAG_DOUBLE_DB)
- writel_relaxed(val, db);
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ writeq_relaxed(db->db_key64 | idx, db->doorbell);
+ } else {
+ u32 db_val = db->db_key32 | idx;
+
+ writel_relaxed(db_val, db->doorbell);
+ if (bp->flags & BNXT_FLAG_DOUBLE_DB)
+ writel_relaxed(db_val, db->doorbell);
+ }
}
/* For TX and RX ring doorbells */
-static inline void bnxt_db_write(struct bnxt *bp, void __iomem *db, u32 val)
+static inline void bnxt_db_write(struct bnxt *bp, struct bnxt_db_info *db,
+ u32 idx)
{
- writel(val, db);
- if (bp->flags & BNXT_FLAG_DOUBLE_DB)
- writel(val, db);
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ writeq(db->db_key64 | idx, db->doorbell);
+ } else {
+ u32 db_val = db->db_key32 | idx;
+
+ writel(db_val, db->doorbell);
+ if (bp->flags & BNXT_FLAG_DOUBLE_DB)
+ writel(db_val, db->doorbell);
+ }
}
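Minimal usage sketch with the new signature (txr names as used by the loopback test later in this patch): callers now pass only the producer index, since the doorbell key is precomputed in the bnxt_db_info.

	/* ring the TX doorbell with the bare producer index */
	bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);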
extern const u16 bnxt_lhint_arr[];
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index ddc98c359488..a85d2be986af 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -98,13 +98,13 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1);
for (i = 0; i < max_tc; i++) {
- u8 qidx;
+ u8 qidx = bp->tc_to_qidx[i];
req.enables |= cpu_to_le32(
- QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << i);
+ QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID <<
+ qidx);
memset(&cos2bw, 0, sizeof(cos2bw));
- qidx = bp->tc_to_qidx[i];
cos2bw.queue_id = bp->q_info[qidx].queue_id;
if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) {
cos2bw.tsa =
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 790c684f08ab..140dbd62106d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -21,9 +21,22 @@ static const struct devlink_ops bnxt_dl_ops = {
#endif /* CONFIG_BNXT_SRIOV */
};
+enum bnxt_dl_param_id {
+ BNXT_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK,
+};
+
static const struct bnxt_dl_nvm_param nvm_params[] = {
{DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, NVM_OFF_ENABLE_SRIOV,
BNXT_NVM_SHARED_CFG, 1},
+ {DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI, NVM_OFF_IGNORE_ARI,
+ BNXT_NVM_SHARED_CFG, 1},
+ {DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
+ NVM_OFF_MSIX_VEC_PER_PF_MAX, BNXT_NVM_SHARED_CFG, 10},
+ {DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
+ NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7},
+ {BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, NVM_OFF_DIS_GRE_VER_CHECK,
+ BNXT_NVM_SHARED_CFG, 1},
};
static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
@@ -55,8 +68,22 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID;
bytesize = roundup(nvm_param.num_bits, BITS_PER_BYTE) / BITS_PER_BYTE;
- if (nvm_param.num_bits == 1)
- buf = &val->vbool;
+ switch (bytesize) {
+ case 1:
+ if (nvm_param.num_bits == 1)
+ buf = &val->vbool;
+ else
+ buf = &val->vu8;
+ break;
+ case 2:
+ buf = &val->vu16;
+ break;
+ case 4:
+ buf = &val->vu32;
+ break;
+ default:
+ return -EFAULT;
+ }
data_addr = dma_zalloc_coherent(&bp->pdev->dev, bytesize,
&data_dma_addr, GFP_KERNEL);
@@ -78,8 +105,12 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
memcpy(buf, data_addr, bytesize);
dma_free_coherent(&bp->pdev->dev, bytesize, data_addr, data_dma_addr);
- if (rc)
+ if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) {
+ netdev_err(bp->dev, "PF does not have admin privileges to modify NVM config\n");
+ return -EACCES;
+ } else if (rc) {
return -EIO;
+ }
return 0;
}
@@ -88,9 +119,15 @@ static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
{
struct hwrm_nvm_get_variable_input req = {0};
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+ int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1);
- return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val);
+ rc = bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val);
+ if (!rc)
+ if (id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK)
+ ctx->val.vbool = !ctx->val.vbool;
+
+ return rc;
}
static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id,
@@ -100,14 +137,55 @@ static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id,
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_SET_VARIABLE, -1, -1);
+
+ if (id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK)
+ ctx->val.vbool = !ctx->val.vbool;
+
return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val);
}
+static int bnxt_dl_msix_validate(struct devlink *dl, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ int max_val = -1;
+
+ if (id == DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX)
+ max_val = BNXT_MSIX_VEC_MAX;
+
+ if (id == DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN)
+ max_val = BNXT_MSIX_VEC_MIN_MAX;
+
+ if (val.vu32 > max_val) {
+ NL_SET_ERR_MSG_MOD(extack, "MSIX value exceeds the supported range");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct devlink_param bnxt_dl_params[] = {
DEVLINK_PARAM_GENERIC(ENABLE_SRIOV,
BIT(DEVLINK_PARAM_CMODE_PERMANENT),
bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
NULL),
+ DEVLINK_PARAM_GENERIC(IGNORE_ARI,
+ BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
+ NULL),
+ DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MAX,
+ BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
+ bnxt_dl_msix_validate),
+ DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MIN,
+ BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
+ bnxt_dl_msix_validate),
+ DEVLINK_PARAM_DRIVER(BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK,
+ "gre_ver_check", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
+ NULL),
};
int bnxt_dl_register(struct bnxt *bp)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index 2f68dc048390..5b6b2c7d97cf 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -33,8 +33,15 @@ static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl)
}
}
+#define NVM_OFF_MSIX_VEC_PER_PF_MAX 108
+#define NVM_OFF_MSIX_VEC_PER_PF_MIN 114
+#define NVM_OFF_IGNORE_ARI 164
+#define NVM_OFF_DIS_GRE_VER_CHECK 171
#define NVM_OFF_ENABLE_SRIOV 401
+#define BNXT_MSIX_VEC_MAX 1280
+#define BNXT_MSIX_VEC_MIN_MAX 128
+
enum bnxt_nvm_dir_type {
BNXT_NVM_SHARED_CFG = 40,
BNXT_NVM_PORT_CFG,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index e52d7af3ab3e..48078564f025 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -148,6 +148,65 @@ reset_coalesce:
#define BNXT_RX_STATS_EXT_ENTRY(counter) \
{ BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) }
+#define BNXT_TX_STATS_EXT_ENTRY(counter) \
+ { BNXT_TX_STATS_EXT_OFFSET(counter), __stringify(counter) }
+
+#define BNXT_RX_STATS_EXT_PFC_ENTRY(n) \
+ BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_duration_us), \
+ BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_transitions)
+
+#define BNXT_TX_STATS_EXT_PFC_ENTRY(n) \
+ BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_duration_us), \
+ BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_transitions)
+
+#define BNXT_RX_STATS_EXT_PFC_ENTRIES \
+ BNXT_RX_STATS_EXT_PFC_ENTRY(0), \
+ BNXT_RX_STATS_EXT_PFC_ENTRY(1), \
+ BNXT_RX_STATS_EXT_PFC_ENTRY(2), \
+ BNXT_RX_STATS_EXT_PFC_ENTRY(3), \
+ BNXT_RX_STATS_EXT_PFC_ENTRY(4), \
+ BNXT_RX_STATS_EXT_PFC_ENTRY(5), \
+ BNXT_RX_STATS_EXT_PFC_ENTRY(6), \
+ BNXT_RX_STATS_EXT_PFC_ENTRY(7)
+
+#define BNXT_TX_STATS_EXT_PFC_ENTRIES \
+ BNXT_TX_STATS_EXT_PFC_ENTRY(0), \
+ BNXT_TX_STATS_EXT_PFC_ENTRY(1), \
+ BNXT_TX_STATS_EXT_PFC_ENTRY(2), \
+ BNXT_TX_STATS_EXT_PFC_ENTRY(3), \
+ BNXT_TX_STATS_EXT_PFC_ENTRY(4), \
+ BNXT_TX_STATS_EXT_PFC_ENTRY(5), \
+ BNXT_TX_STATS_EXT_PFC_ENTRY(6), \
+ BNXT_TX_STATS_EXT_PFC_ENTRY(7)
+
+#define BNXT_RX_STATS_EXT_COS_ENTRY(n) \
+ BNXT_RX_STATS_EXT_ENTRY(rx_bytes_cos##n), \
+ BNXT_RX_STATS_EXT_ENTRY(rx_packets_cos##n)
+
+#define BNXT_TX_STATS_EXT_COS_ENTRY(n) \
+ BNXT_TX_STATS_EXT_ENTRY(tx_bytes_cos##n), \
+ BNXT_TX_STATS_EXT_ENTRY(tx_packets_cos##n)
+
+#define BNXT_RX_STATS_EXT_COS_ENTRIES \
+ BNXT_RX_STATS_EXT_COS_ENTRY(0), \
+ BNXT_RX_STATS_EXT_COS_ENTRY(1), \
+ BNXT_RX_STATS_EXT_COS_ENTRY(2), \
+ BNXT_RX_STATS_EXT_COS_ENTRY(3), \
+ BNXT_RX_STATS_EXT_COS_ENTRY(4), \
+ BNXT_RX_STATS_EXT_COS_ENTRY(5), \
+ BNXT_RX_STATS_EXT_COS_ENTRY(6), \
+ BNXT_RX_STATS_EXT_COS_ENTRY(7) \
+
+#define BNXT_TX_STATS_EXT_COS_ENTRIES \
+ BNXT_TX_STATS_EXT_COS_ENTRY(0), \
+ BNXT_TX_STATS_EXT_COS_ENTRY(1), \
+ BNXT_TX_STATS_EXT_COS_ENTRY(2), \
+ BNXT_TX_STATS_EXT_COS_ENTRY(3), \
+ BNXT_TX_STATS_EXT_COS_ENTRY(4), \
+ BNXT_TX_STATS_EXT_COS_ENTRY(5), \
+ BNXT_TX_STATS_EXT_COS_ENTRY(6), \
+ BNXT_TX_STATS_EXT_COS_ENTRY(7) \
+
enum {
RX_TOTAL_DISCARDS,
TX_TOTAL_DISCARDS,
@@ -256,11 +315,20 @@ static const struct {
BNXT_RX_STATS_EXT_ENTRY(resume_pause_events),
BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events),
BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
+ BNXT_RX_STATS_EXT_COS_ENTRIES,
+ BNXT_RX_STATS_EXT_PFC_ENTRIES,
+};
+
+static const struct {
+ long offset;
+ char string[ETH_GSTRING_LEN];
+} bnxt_tx_port_stats_ext_arr[] = {
+ BNXT_TX_STATS_EXT_COS_ENTRIES,
+ BNXT_TX_STATS_EXT_PFC_ENTRIES,
};
#define BNXT_NUM_SW_FUNC_STATS ARRAY_SIZE(bnxt_sw_func_stats)
#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
-#define BNXT_NUM_PORT_STATS_EXT ARRAY_SIZE(bnxt_port_stats_ext_arr)
static int bnxt_get_num_stats(struct bnxt *bp)
{
@@ -272,7 +340,8 @@ static int bnxt_get_num_stats(struct bnxt *bp)
num_stats += BNXT_NUM_PORT_STATS;
if (bp->flags & BNXT_FLAG_PORT_STATS_EXT)
- num_stats += BNXT_NUM_PORT_STATS_EXT;
+ num_stats += bp->fw_rx_stats_ext_size +
+ bp->fw_tx_stats_ext_size;
return num_stats;
}
@@ -334,12 +403,17 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
}
}
if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
- __le64 *port_stats_ext = (__le64 *)bp->hw_rx_port_stats_ext;
+ __le64 *rx_port_stats_ext = (__le64 *)bp->hw_rx_port_stats_ext;
+ __le64 *tx_port_stats_ext = (__le64 *)bp->hw_tx_port_stats_ext;
- for (i = 0; i < BNXT_NUM_PORT_STATS_EXT; i++, j++) {
- buf[j] = le64_to_cpu(*(port_stats_ext +
+ for (i = 0; i < bp->fw_rx_stats_ext_size; i++, j++) {
+ buf[j] = le64_to_cpu(*(rx_port_stats_ext +
bnxt_port_stats_ext_arr[i].offset));
}
+ for (i = 0; i < bp->fw_tx_stats_ext_size; i++, j++) {
+ buf[j] = le64_to_cpu(*(tx_port_stats_ext +
+ bnxt_tx_port_stats_ext_arr[i].offset));
+ }
}
}
@@ -407,10 +481,15 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
}
}
if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
- for (i = 0; i < BNXT_NUM_PORT_STATS_EXT; i++) {
+ for (i = 0; i < bp->fw_rx_stats_ext_size; i++) {
strcpy(buf, bnxt_port_stats_ext_arr[i].string);
buf += ETH_GSTRING_LEN;
}
+ for (i = 0; i < bp->fw_tx_stats_ext_size; i++) {
+ strcpy(buf,
+ bnxt_tx_port_stats_ext_arr[i].string);
+ buf += ETH_GSTRING_LEN;
+ }
}
break;
case ETH_SS_TEST:
@@ -2419,11 +2498,11 @@ static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
-static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_napi *bnapi,
+static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
u32 raw_cons, int pkt_size)
{
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+ struct bnxt_napi *bnapi = cpr->bnapi;
+ struct bnxt_rx_ring_info *rxr;
struct bnxt_sw_rx_bd *rx_buf;
struct rx_cmp *rxcmp;
u16 cp_cons, cons;
@@ -2431,6 +2510,7 @@ static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_napi *bnapi,
u32 len;
int i;
+ rxr = bnapi->rx_ring;
cp_cons = RING_CMP(raw_cons);
rxcmp = (struct rx_cmp *)
&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
@@ -2451,17 +2531,15 @@ static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_napi *bnapi,
return 0;
}
-static int bnxt_poll_loopback(struct bnxt *bp, int pkt_size)
+static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ int pkt_size)
{
- struct bnxt_napi *bnapi = bp->bnapi[0];
- struct bnxt_cp_ring_info *cpr;
struct tx_cmp *txcmp;
int rc = -EIO;
u32 raw_cons;
u32 cons;
int i;
- cpr = &bnapi->cp_ring;
raw_cons = cpr->cp_raw_cons;
for (i = 0; i < 200; i++) {
cons = RING_CMP(raw_cons);
@@ -2477,7 +2555,7 @@ static int bnxt_poll_loopback(struct bnxt *bp, int pkt_size)
*/
dma_rmb();
if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP) {
- rc = bnxt_rx_loopback(bp, bnapi, raw_cons, pkt_size);
+ rc = bnxt_rx_loopback(bp, cpr, raw_cons, pkt_size);
raw_cons = NEXT_RAW_CMP(raw_cons);
raw_cons = NEXT_RAW_CMP(raw_cons);
break;
@@ -2491,12 +2569,14 @@ static int bnxt_poll_loopback(struct bnxt *bp, int pkt_size)
static int bnxt_run_loopback(struct bnxt *bp)
{
struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];
+ struct bnxt_cp_ring_info *cpr;
int pkt_size, i = 0;
struct sk_buff *skb;
dma_addr_t map;
u8 *data;
int rc;
+ cpr = &txr->bnapi->cp_ring;
pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
skb = netdev_alloc_skb(bp->dev, pkt_size);
if (!skb)
@@ -2520,8 +2600,8 @@ static int bnxt_run_loopback(struct bnxt *bp)
/* Sync BD data before updating doorbell */
wmb();
- bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | txr->tx_prod);
- rc = bnxt_poll_loopback(bp, pkt_size);
+ bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
+ rc = bnxt_poll_loopback(bp, cpr, pkt_size);
dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 971ace5d0d4a..5dd086059568 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -37,6 +37,8 @@ struct hwrm_resp_hdr {
#define TLV_TYPE_HWRM_REQUEST 0x1UL
#define TLV_TYPE_HWRM_RESPONSE 0x2UL
#define TLV_TYPE_ROCE_SP_COMMAND 0x3UL
+#define TLV_TYPE_QUERY_ROCE_CC_GEN1 0x4UL
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN1 0x5UL
#define TLV_TYPE_ENGINE_CKV_DEVICE_SERIAL_NUMBER 0x8001UL
#define TLV_TYPE_ENGINE_CKV_NONCE 0x8002UL
#define TLV_TYPE_ENGINE_CKV_IV 0x8003UL
@@ -186,6 +188,7 @@ struct cmd_nums {
#define HWRM_TUNNEL_DST_PORT_QUERY 0xa0UL
#define HWRM_TUNNEL_DST_PORT_ALLOC 0xa1UL
#define HWRM_TUNNEL_DST_PORT_FREE 0xa2UL
+ #define HWRM_STAT_CTX_ENG_QUERY 0xafUL
#define HWRM_STAT_CTX_ALLOC 0xb0UL
#define HWRM_STAT_CTX_FREE 0xb1UL
#define HWRM_STAT_CTX_QUERY 0xb2UL
@@ -235,6 +238,7 @@ struct cmd_nums {
#define HWRM_CFA_PAIR_INFO 0x10fUL
#define HWRM_FW_IPC_MSG 0x110UL
#define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO 0x111UL
+ #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE 0x112UL
#define HWRM_ENGINE_CKV_HELLO 0x12dUL
#define HWRM_ENGINE_CKV_STATUS 0x12eUL
#define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL
@@ -295,6 +299,7 @@ struct cmd_nums {
#define HWRM_DBG_COREDUMP_RETRIEVE 0xff19UL
#define HWRM_DBG_FW_CLI 0xff1aUL
#define HWRM_DBG_I2C_CMD 0xff1bUL
+ #define HWRM_DBG_RING_INFO_GET 0xff1cUL
#define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL
#define HWRM_NVM_VALIDATE_OPTION 0xffefUL
#define HWRM_NVM_FLUSH 0xfff0UL
@@ -320,20 +325,21 @@ struct cmd_nums {
/* ret_codes (size:64b/8B) */
struct ret_codes {
__le16 error_code;
- #define HWRM_ERR_CODE_SUCCESS 0x0UL
- #define HWRM_ERR_CODE_FAIL 0x1UL
- #define HWRM_ERR_CODE_INVALID_PARAMS 0x2UL
- #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED 0x3UL
- #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR 0x4UL
- #define HWRM_ERR_CODE_INVALID_FLAGS 0x5UL
- #define HWRM_ERR_CODE_INVALID_ENABLES 0x6UL
- #define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL
- #define HWRM_ERR_CODE_NO_BUFFER 0x8UL
- #define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL
- #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
- #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
- #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
- #define HWRM_ERR_CODE_LAST HWRM_ERR_CODE_CMD_NOT_SUPPORTED
+ #define HWRM_ERR_CODE_SUCCESS 0x0UL
+ #define HWRM_ERR_CODE_FAIL 0x1UL
+ #define HWRM_ERR_CODE_INVALID_PARAMS 0x2UL
+ #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED 0x3UL
+ #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR 0x4UL
+ #define HWRM_ERR_CODE_INVALID_FLAGS 0x5UL
+ #define HWRM_ERR_CODE_INVALID_ENABLES 0x6UL
+ #define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL
+ #define HWRM_ERR_CODE_NO_BUFFER 0x8UL
+ #define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL
+ #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
+ #define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL
+ #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
+ #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
+ #define HWRM_ERR_CODE_LAST HWRM_ERR_CODE_CMD_NOT_SUPPORTED
__le16 unused_0[3];
};
@@ -355,10 +361,10 @@ struct hwrm_err_output {
#define HW_HASH_KEY_SIZE 40
#define HWRM_RESP_VALID_KEY 1
#define HWRM_VERSION_MAJOR 1
-#define HWRM_VERSION_MINOR 9
-#define HWRM_VERSION_UPDATE 2
-#define HWRM_VERSION_RSVD 25
-#define HWRM_VERSION_STR "1.9.2.25"
+#define HWRM_VERSION_MINOR 10
+#define HWRM_VERSION_UPDATE 0
+#define HWRM_VERSION_RSVD 3
+#define HWRM_VERSION_STR "1.10.0.3"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -396,10 +402,15 @@ struct hwrm_ver_get_output {
u8 netctrl_fw_bld_8b;
u8 netctrl_fw_rsvd_8b;
__le32 dev_caps_cfg;
- #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL
- #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL
- #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL
- #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED 0x10UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED 0x20UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_L2_FILTER_TYPES_ROCE_OR_L2_SUPPORTED 0x40UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_VIRTIO_VSWITCH_OFFLOAD_SUPPORTED 0x80UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED 0x100UL
u8 roce_fw_maj_8b;
u8 roce_fw_min_8b;
u8 roce_fw_bld_8b;
@@ -528,6 +539,7 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
#define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY 0x8UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
@@ -539,6 +551,7 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
#define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL
#define ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE 0x35UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HW_FLOW_AGED 0x36UL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
__le32 event_data2;
@@ -652,10 +665,11 @@ struct hwrm_async_event_cmpl_vf_cfg_change {
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TRUSTED_VF_CFG_CHANGE 0x10UL
};
/* hwrm_func_reset_input (size:192b/24B) */
@@ -852,6 +866,7 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_ADOPTED_PF_SUPPORTED 0x20000UL
#define FUNC_QCAPS_RESP_FLAGS_ADMIN_PF_SUPPORTED 0x40000UL
#define FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED 0x80000UL
+ #define FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE 0x100000UL
u8 mac_address[6];
__le16 max_rsscos_ctx;
__le16 max_cmpl_rings;
@@ -903,6 +918,7 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL
#define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL
#define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
+ #define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
@@ -1014,6 +1030,7 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x40000UL
#define FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x80000UL
#define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL
+ #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL
__le32 enables;
#define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
#define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
@@ -1214,9 +1231,10 @@ struct hwrm_func_drv_rgtr_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE 0x8UL
__le32 enables;
#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
#define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
@@ -1416,7 +1434,9 @@ struct hwrm_func_resource_qcaps_output {
__le16 min_hw_ring_grps;
__le16 max_hw_ring_grps;
__le16 max_tx_scheduler_inputs;
- u8 unused_0[7];
+ __le16 flags;
+ #define FUNC_RESOURCE_QCAPS_RESP_FLAGS_MIN_GUARANTEED 0x1UL
+ u8 unused_0[5];
u8 valid;
};
@@ -1445,7 +1465,9 @@ struct hwrm_func_vf_resource_cfg_input {
__le16 max_stat_ctx;
__le16 min_hw_ring_grps;
__le16 max_hw_ring_grps;
- u8 unused_0[4];
+ __le16 flags;
+ #define FUNC_VF_RESOURCE_CFG_REQ_FLAGS_MIN_GUARANTEED 0x1UL
+ u8 unused_0[2];
};
/* hwrm_func_vf_resource_cfg_output (size:256b/32B) */
@@ -1503,7 +1525,8 @@ struct hwrm_func_backing_store_qcaps_output {
__le16 mrav_entry_size;
__le16 tim_entry_size;
__le32 tim_max_entries;
- u8 unused_0[3];
+ u8 unused_0[2];
+ u8 tqm_entries_multiple;
u8 valid;
};
@@ -1917,6 +1940,7 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB
u8 auto_mode;
@@ -1947,6 +1971,7 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_LAST PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB
__le16 auto_link_speed_mask;
@@ -1964,6 +1989,7 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_200GB 0x4000UL
u8 wirespeed;
#define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL
#define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL
@@ -2048,6 +2074,7 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB 0x190UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_LINK_SPEED_10MB
u8 duplex_cfg;
@@ -2072,6 +2099,7 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_200GB 0x4000UL
__le16 force_link_speed;
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL
@@ -2083,6 +2111,7 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB
u8 auto_mode;
@@ -2107,6 +2136,7 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB
__le16 auto_link_speed_mask;
@@ -2124,6 +2154,7 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_200GB 0x4000UL
u8 wirespeed;
#define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL
#define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL
@@ -2178,7 +2209,11 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET 0x19UL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX 0x1aUL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX 0x1bUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR4 0x1cUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4 0x1dUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4 0x1eUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4 0x1fUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4
u8 media_type;
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
@@ -2644,7 +2679,8 @@ struct hwrm_port_qstats_ext_output {
__le16 tx_stat_size;
__le16 rx_stat_size;
__le16 total_active_cos_queues;
- u8 unused_0;
+ u8 flags;
+ #define PORT_QSTATS_EXT_RESP_FLAGS_CLEAR_ROCE_COUNTERS_SUPPORTED 0x1UL
u8 valid;
};
@@ -2685,7 +2721,9 @@ struct hwrm_port_clr_stats_input {
__le16 target_id;
__le64 resp_addr;
__le16 port_id;
- u8 unused_0[6];
+ u8 flags;
+ #define PORT_CLR_STATS_REQ_FLAGS_ROCE_COUNTERS 0x1UL
+ u8 unused_0[5];
};
/* hwrm_port_clr_stats_output (size:128b/16B) */
@@ -4574,7 +4612,9 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_RING_TYPE_RX_AGG 0x4UL
#define RING_ALLOC_REQ_RING_TYPE_NQ 0x5UL
#define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_NQ
- u8 unused_0[3];
+ u8 unused_0;
+ __le16 flags;
+ #define RING_ALLOC_REQ_FLAGS_RX_SOP_PAD 0x1UL
__le64 page_tbl_addr;
__le32 fbo;
u8 page_size;
@@ -4838,13 +4878,19 @@ struct hwrm_cfa_l2_filter_alloc_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX 0x0UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_MASK 0x30UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_SFT 4
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 4)
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 4)
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 4)
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE
__le32 enables;
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL
@@ -4901,6 +4947,8 @@ struct hwrm_cfa_l2_filter_alloc_input {
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
u8 unused_4;
@@ -4958,11 +5006,17 @@ struct hwrm_cfa_l2_filter_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE
__le32 enables;
#define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL
#define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
@@ -5064,6 +5118,8 @@ struct hwrm_cfa_tunnel_filter_alloc_input {
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
u8 tunnel_flags;
@@ -5140,7 +5196,7 @@ struct hwrm_vxlan_ipv6_hdr {
__be32 dest_ip_addr[4];
};
-/* hwrm_cfa_encap_data_vxlan (size:576b/72B) */
+/* hwrm_cfa_encap_data_vxlan (size:640b/80B) */
struct hwrm_cfa_encap_data_vxlan {
u8 src_mac_addr[6];
__le16 unused_0;
@@ -5159,6 +5215,10 @@ struct hwrm_cfa_encap_data_vxlan {
__be16 src_port;
__be16 dst_port;
__be32 vni;
+ u8 hdr_rsvd0[3];
+ u8 hdr_rsvd1;
+ u8 hdr_flags;
+ u8 unused[3];
};
/* hwrm_cfa_encap_record_alloc_input (size:832b/104B) */
@@ -5171,15 +5231,18 @@ struct hwrm_cfa_encap_record_alloc_input {
__le32 flags;
#define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
u8 encap_type;
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_V4 0x9UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE_V1 0xaUL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2_ETYPE 0xbUL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2_ETYPE
u8 unused_0[3];
__le32 encap_data[20];
};
@@ -5273,6 +5336,8 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
u8 pri_hint;
@@ -5404,6 +5469,8 @@ struct hwrm_cfa_decap_filter_alloc_input {
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
u8 unused_0;
@@ -5476,19 +5543,22 @@ struct hwrm_cfa_flow_alloc_input {
__le16 target_id;
__le64 resp_addr;
__le16 flags;
- #define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL 0x1UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE (0x0UL << 1)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE (0x1UL << 1)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO (0x2UL << 1)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2 (0x0UL << 3)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 (0x1UL << 3)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 (0x2UL << 3)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL 0x1UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE (0x0UL << 1)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE (0x1UL << 1)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO (0x2UL << 1)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2 (0x0UL << 3)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 (0x1UL << 3)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 (0x2UL << 3)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_TX 0x40UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_RX 0x80UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_MATCH_VXLAN_IP_VNI 0x100UL
__le16 src_fid;
__le32 tunnel_handle;
__le16 action_flags;
@@ -5502,6 +5572,7 @@ struct hwrm_cfa_flow_alloc_input {
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL_IP 0x400UL
__le16 dst_fid;
__be16 l2_rewrite_vlan_tpid;
__be16 l2_rewrite_vlan_tci;
@@ -5525,21 +5596,38 @@ struct hwrm_cfa_flow_alloc_input {
__be16 nat_port;
__be16 l2_rewrite_smac[3];
u8 ip_proto;
- u8 unused_0;
-};
-
-/* hwrm_cfa_flow_alloc_output (size:128b/16B) */
+ u8 tunnel_type;
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+};
+
+/* hwrm_cfa_flow_alloc_output (size:256b/32B) */
struct hwrm_cfa_flow_alloc_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
__le16 flow_handle;
- u8 unused_0[5];
+ u8 unused_0[2];
+ __le32 flow_id;
+ __le64 ext_flow_handle;
+ u8 unused_1[7];
u8 valid;
};
-/* hwrm_cfa_flow_free_input (size:192b/24B) */
+/* hwrm_cfa_flow_free_input (size:256b/32B) */
struct hwrm_cfa_flow_free_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -5548,6 +5636,7 @@ struct hwrm_cfa_flow_free_input {
__le64 resp_addr;
__le16 flow_handle;
u8 unused_0[6];
+ __le64 ext_flow_handle;
};
/* hwrm_cfa_flow_free_output (size:256b/32B) */
@@ -5562,7 +5651,7 @@ struct hwrm_cfa_flow_free_output {
u8 valid;
};
-/* hwrm_cfa_flow_stats_input (size:320b/40B) */
+/* hwrm_cfa_flow_stats_input (size:640b/80B) */
struct hwrm_cfa_flow_stats_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -5581,6 +5670,16 @@ struct hwrm_cfa_flow_stats_input {
__le16 flow_handle_8;
__le16 flow_handle_9;
u8 unused_0[2];
+ __le32 flow_id_0;
+ __le32 flow_id_1;
+ __le32 flow_id_2;
+ __le32 flow_id_3;
+ __le32 flow_id_4;
+ __le32 flow_id_5;
+ __le32 flow_id_6;
+ __le32 flow_id_7;
+ __le32 flow_id_8;
+ __le32 flow_id_9;
};
/* hwrm_cfa_flow_stats_output (size:1408b/176B) */
@@ -5670,7 +5769,8 @@ struct hwrm_tunnel_dst_port_query_input {
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE
u8 unused_0[7];
};
@@ -5698,7 +5798,8 @@ struct hwrm_tunnel_dst_port_alloc_input {
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE
u8 unused_0;
__be16 tunnel_dst_port_val;
u8 unused_1[4];
@@ -5727,7 +5828,8 @@ struct hwrm_tunnel_dst_port_free_input {
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE
u8 unused_0;
__le16 tunnel_dst_port_id;
u8 unused_1[4];
@@ -5932,10 +6034,11 @@ struct hwrm_fw_reset_input {
#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT 0x7UL
#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_LAST FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT
u8 selfrst_status;
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
- #define FW_RESET_REQ_SELFRST_STATUS_LAST FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL
+ #define FW_RESET_REQ_SELFRST_STATUS_LAST FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE
u8 host_idx;
u8 unused_0[5];
};
@@ -5947,10 +6050,11 @@ struct hwrm_fw_reset_output {
__le16 seq_id;
__le16 resp_len;
u8 selfrst_status;
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
- #define FW_RESET_RESP_SELFRST_STATUS_LAST FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL
+ #define FW_RESET_RESP_SELFRST_STATUS_LAST FW_RESET_RESP_SELFRST_STATUS_SELFRSTIMMEDIATE
u8 unused_0[6];
u8 valid;
};
@@ -6498,6 +6602,34 @@ struct hwrm_dbg_coredump_retrieve_output {
u8 valid;
};
+/* hwrm_dbg_ring_info_get_input (size:192b/24B) */
+struct hwrm_dbg_ring_info_get_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 ring_type;
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_TX 0x1UL
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_RX 0x2UL
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_LAST DBG_RING_INFO_GET_REQ_RING_TYPE_RX
+ u8 unused_0[3];
+ __le32 fw_ring_id;
+};
+
+/* hwrm_dbg_ring_info_get_output (size:192b/24B) */
+struct hwrm_dbg_ring_info_get_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 producer_index;
+ __le32 consumer_index;
+ u8 unused_0[7];
+ u8 valid;
+};
+
/* hwrm_nvm_read_input (size:320b/40B) */
struct hwrm_nvm_read_input {
__le16 req_type;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index b574fe8e974e..9a25c05aa571 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -521,7 +521,8 @@ int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode)
return 0;
}
-int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode)
+int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = bnxt_get_bp_from_dl(devlink);
int rc = 0;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
index 38b9a75ad724..d7287651422f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
@@ -30,7 +30,8 @@ static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev)
bool bnxt_dev_is_vf_rep(struct net_device *dev);
int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode);
-int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode);
+int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack);
#else
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 0584d07c8c33..bf6de02be396 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -63,7 +63,7 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
tx_buf = &txr->tx_buf_ring[last_tx_cons];
rx_prod = tx_buf->rx_prod;
}
- bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rx_prod);
+ bnxt_db_write(bp, &rxr->rx_db, rx_prod);
}
/* returns the following:
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index b756fc79424e..a6cbaca37e94 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -320,9 +320,12 @@ int bcmgenet_mii_probe(struct net_device *dev)
phydev->advertising = phydev->supported;
/* The internal PHY has its link interrupts routed to the
- * Ethernet MAC ISRs
+ * Ethernet MAC ISRs. On GENETv5 there is a hardware issue
+ * that prevents the signaling of link UP interrupts when
+	 * the link operates at 10Mbps, so fall back to polling for
+ * those versions of GENET.
*/
- if (priv->internal_phy)
+ if (priv->internal_phy && !GENET_IS_V5(priv))
dev->phydev->irq = PHY_IGNORE_INTERRUPT;
return 0;
@@ -338,7 +341,7 @@ static struct device_node *bcmgenet_mii_of_find_mdio(struct bcmgenet_priv *priv)
if (!compat)
return NULL;
- priv->mdio_dn = of_find_compatible_node(dn, NULL, compat);
+ priv->mdio_dn = of_get_compatible_child(dn, compat);
kfree(compat);
if (!priv->mdio_dn) {
dev_err(kdev, "unable to find MDIO bus node\n");
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 0acaef3ef548..1d86b4d5645a 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -1684,7 +1684,7 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
padlen = 0;
/* No room for FCS, need to reallocate skb. */
else
- padlen = ETH_FCS_LEN - tailroom;
+ padlen = ETH_FCS_LEN;
} else {
/* Add room for FCS. */
padlen += ETH_FCS_LEN;
@@ -4156,8 +4156,7 @@ static int macb_remove(struct platform_device *pdev)
static int __maybe_unused macb_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct net_device *netdev = platform_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct macb *bp = netdev_priv(netdev);
netif_carrier_off(netdev);
@@ -4179,8 +4178,7 @@ static int __maybe_unused macb_suspend(struct device *dev)
static int __maybe_unused macb_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct net_device *netdev = platform_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct macb *bp = netdev_priv(netdev);
if (bp->wol & MACB_WOL_ENABLED) {
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index eb96b0613cf6..825a28e5b544 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -1732,7 +1732,7 @@ int liquidio_set_fec(struct lio *lio, int on_off)
if (oct->props[lio->ifidx].fec !=
oct->props[lio->ifidx].fec_boot) {
dev_dbg(&oct->pci_dev->dev,
- "Reloade driver to chang fec to %s\n",
+ "Reload driver to change fec to %s\n",
oct->props[lio->ifidx].fec ? "on" : "off");
}
@@ -1796,7 +1796,7 @@ int liquidio_get_fec(struct lio *lio)
if (oct->props[lio->ifidx].fec !=
oct->props[lio->ifidx].fec_boot) {
dev_dbg(&oct->pci_dev->dev,
- "Reloade driver to chang fec to %s\n",
+ "Reload driver to change fec to %s\n",
oct->props[lio->ifidx].fec ? "on" : "off");
}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 9d70e5c6157f..3d24133e5e49 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -3144,7 +3144,8 @@ liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode)
}
static int
-liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode)
+liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack)
{
struct lio_devlink_priv *priv;
struct octeon_device *oct;
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index e2cdfa75673f..75c1c5ed2387 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -67,6 +67,7 @@ config CHELSIO_T3
config CHELSIO_T4
tristate "Chelsio Communications T4/T5/T6 Ethernet support"
depends on PCI && (IPV6 || IPV6=n)
+ depends on THERMAL || !THERMAL
select FW_LOADER
select MDIO
select ZLIB_DEFLATE
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 8b0a253a18d8..1e82b9efe447 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -2158,6 +2158,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
return -EPERM;
if (copy_from_user(&t, useraddr, sizeof(t)))
return -EFAULT;
+ if (t.cmd != CHELSIO_SET_QSET_PARAMS)
+ return -EINVAL;
if (t.qset_idx >= SGE_QSETS)
return -EINVAL;
if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
@@ -2257,6 +2259,9 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
if (copy_from_user(&t, useraddr, sizeof(t)))
return -EFAULT;
+ if (t.cmd != CHELSIO_GET_QSET_PARAMS)
+ return -EINVAL;
+
/* Display qsets for all ports when offload enabled */
if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
q1 = 0;
@@ -2302,6 +2307,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
return -EBUSY;
if (copy_from_user(&edata, useraddr, sizeof(edata)))
return -EFAULT;
+ if (edata.cmd != CHELSIO_SET_QSET_NUM)
+ return -EINVAL;
if (edata.val < 1 ||
(edata.val > 1 && !(adapter->flags & USING_MSIX)))
return -EINVAL;
@@ -2342,6 +2349,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
return -EPERM;
if (copy_from_user(&t, useraddr, sizeof(t)))
return -EFAULT;
+ if (t.cmd != CHELSIO_LOAD_FW)
+ return -EINVAL;
/* Check t.len sanity ? */
fw_data = memdup_user(useraddr + sizeof(t), t.len);
if (IS_ERR(fw_data))
@@ -2365,6 +2374,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
return -EBUSY;
if (copy_from_user(&m, useraddr, sizeof(m)))
return -EFAULT;
+ if (m.cmd != CHELSIO_SETMTUTAB)
+ return -EINVAL;
if (m.nmtus != NMTUS)
return -EINVAL;
if (m.mtus[0] < 81) /* accommodate SACK */
@@ -2406,6 +2417,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
return -EBUSY;
if (copy_from_user(&m, useraddr, sizeof(m)))
return -EFAULT;
+ if (m.cmd != CHELSIO_SET_PM)
+ return -EINVAL;
if (!is_power_of_2(m.rx_pg_sz) ||
!is_power_of_2(m.tx_pg_sz))
return -EINVAL; /* not power of 2 */
@@ -2439,6 +2452,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
return -EIO; /* need the memory controllers */
if (copy_from_user(&t, useraddr, sizeof(t)))
return -EFAULT;
+ if (t.cmd != CHELSIO_GET_MEM)
+ return -EINVAL;
if ((t.addr & 7) || (t.len & 7))
return -EINVAL;
if (t.mem_id == MEM_CM)
@@ -2491,6 +2506,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
return -EAGAIN;
if (copy_from_user(&t, useraddr, sizeof(t)))
return -EFAULT;
+ if (t.cmd != CHELSIO_SET_TRACE_FILTER)
+ return -EINVAL;
tp = (const struct trace_params *)&t.sip;
if (t.config_tx)
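Each handler above re-checks the embedded cmd field after copying the full request from user space: the ioctl dispatches on a first, separate fetch of the command word, so the value could differ between the two copies and the handler must not trust the dispatch fetch. A minimal user-space sketch of that double-fetch guard follows; the struct qset_params_req layout and the command values are hypothetical, chosen only to mirror the pattern, and are not part of the patch.

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical command codes and request layout, mirroring the pattern in
 * cxgb_extension_ioctl(): the first fetch selects the handler, and the
 * handler then copies the full request and re-checks the embedded cmd.
 */
enum { SET_QSET_PARAMS = 1, GET_QSET_PARAMS = 2 };

struct qset_params_req {
	unsigned int cmd;	/* must still match the selected handler */
	unsigned int qset_idx;
};

static int handle_set_qset_params(const void *useraddr)
{
	struct qset_params_req t;

	/* Second fetch: copy the whole request. */
	memcpy(&t, useraddr, sizeof(t));

	/* Re-validate the command; the value used for dispatch was read
	 * in an earlier, separate copy and may no longer match.
	 */
	if (t.cmd != SET_QSET_PARAMS)
		return -EINVAL;

	printf("SET_QSET_PARAMS for qset %u\n", t.qset_idx);
	return 0;
}

int main(void)
{
	struct qset_params_req good = { .cmd = SET_QSET_PARAMS, .qset_idx = 3 };
	struct qset_params_req bad  = { .cmd = GET_QSET_PARAMS, .qset_idx = 3 };

	printf("good request -> %d\n", handle_set_qset_params(&good));
	printf("mismatched cmd -> %d\n", handle_set_qset_params(&bad));
	return 0;
}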
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index bea6a059a8f1..78e5d17a1d5f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -12,3 +12,6 @@ cxgb4-objs := cxgb4_main.o l2t.o smt.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o
cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o
cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
+ifdef CONFIG_THERMAL
+cxgb4-objs += cxgb4_thermal.o
+endif
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index b5010bd32ea3..b16f4b3ef4c5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -52,6 +52,7 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/ptp_classify.h>
#include <linux/crash_dump.h>
+#include <linux/thermal.h>
#include <asm/io.h>
#include "t4_chip_type.h"
#include "cxgb4_uld.h"
@@ -890,6 +891,14 @@ struct mps_encap_entry {
atomic_t refcnt;
};
+#if IS_ENABLED(CONFIG_THERMAL)
+struct ch_thermal {
+ struct thermal_zone_device *tzdev;
+ int trip_temp;
+ int trip_type;
+};
+#endif
+
struct adapter {
void __iomem *regs;
void __iomem *bar2;
@@ -1008,6 +1017,9 @@ struct adapter {
/* Dump buffer for collecting logs in kdump kernel */
struct vmcoredd_data vmcoredd;
+#if IS_ENABLED(CONFIG_THERMAL)
+ struct ch_thermal ch_thermal;
+#endif
};
/* Support for "sched-class" command to allow a TX Scheduling Class to be
@@ -1862,4 +1874,8 @@ void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n);
int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
u16 vlan);
int cxgb4_dcb_enabled(const struct net_device *dev);
+
+int cxgb4_thermal_init(struct adapter *adap);
+int cxgb4_thermal_remove(struct adapter *adap);
+
#endif /* __CXGB4_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 1a93efa60e71..05a46926016a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -4767,7 +4767,6 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
pci_restore_state(pdev);
pci_save_state(pdev);
- pci_cleanup_aer_uncorrect_error_status(pdev);
if (t4_wait_dev_ready(adap->regs) < 0)
return PCI_ERS_RESULT_DISCONNECT;
@@ -5864,6 +5863,10 @@ fw_attach_fail:
if (!is_t4(adapter->params.chip))
cxgb4_ptp_init(adapter);
+ if (IS_ENABLED(CONFIG_THERMAL) &&
+ !is_t4(adapter->params.chip) && (adapter->flags & FW_OK))
+ cxgb4_thermal_init(adapter);
+
print_adapter_info(adapter);
return 0;
@@ -5929,6 +5932,8 @@ static void remove_one(struct pci_dev *pdev)
if (!is_t4(adapter->params.chip))
cxgb4_ptp_stop(adapter);
+ if (IS_ENABLED(CONFIG_THERMAL))
+ cxgb4_thermal_remove(adapter);
/* If we allocated filters, free up state associated with any
* valid filters ...
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
new file mode 100644
index 000000000000..28052e7504e5
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2017 Chelsio Communications. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Written by: Ganesh Goudar (ganeshgr@chelsio.com)
+ */
+
+#include "cxgb4.h"
+
+#define CXGB4_NUM_TRIPS 1
+
+static int cxgb4_thermal_get_temp(struct thermal_zone_device *tzdev,
+ int *temp)
+{
+ struct adapter *adap = tzdev->devdata;
+ u32 param, val;
+ int ret;
+
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DIAG) |
+ FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_DIAG_TMP));
+
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
+ &param, &val);
+ if (ret < 0 || val == 0)
+ return -1;
+
+ *temp = val * 1000;
+ return 0;
+}
+
+static int cxgb4_thermal_get_trip_type(struct thermal_zone_device *tzdev,
+ int trip, enum thermal_trip_type *type)
+{
+ struct adapter *adap = tzdev->devdata;
+
+ if (!adap->ch_thermal.trip_temp)
+ return -EINVAL;
+
+ *type = adap->ch_thermal.trip_type;
+ return 0;
+}
+
+static int cxgb4_thermal_get_trip_temp(struct thermal_zone_device *tzdev,
+ int trip, int *temp)
+{
+ struct adapter *adap = tzdev->devdata;
+
+ if (!adap->ch_thermal.trip_temp)
+ return -EINVAL;
+
+ *temp = adap->ch_thermal.trip_temp;
+ return 0;
+}
+
+static struct thermal_zone_device_ops cxgb4_thermal_ops = {
+ .get_temp = cxgb4_thermal_get_temp,
+ .get_trip_type = cxgb4_thermal_get_trip_type,
+ .get_trip_temp = cxgb4_thermal_get_trip_temp,
+};
+
+int cxgb4_thermal_init(struct adapter *adap)
+{
+ struct ch_thermal *ch_thermal = &adap->ch_thermal;
+ int num_trip = CXGB4_NUM_TRIPS;
+ u32 param, val;
+ int ret;
+
+	/* On older firmware we may not get the trip temperature;
+	 * set the number of trips to 0.
+ */
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DIAG) |
+ FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_DIAG_MAXTMPTHRESH));
+
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
+ &param, &val);
+ if (ret < 0) {
+ num_trip = 0; /* could not get trip temperature */
+ } else {
+ ch_thermal->trip_temp = val * 1000;
+ ch_thermal->trip_type = THERMAL_TRIP_CRITICAL;
+ }
+
+ ch_thermal->tzdev = thermal_zone_device_register("cxgb4", num_trip,
+ 0, adap,
+ &cxgb4_thermal_ops,
+ NULL, 0, 0);
+ if (IS_ERR(ch_thermal->tzdev)) {
+ ret = PTR_ERR(ch_thermal->tzdev);
+ dev_err(adap->pdev_dev, "Failed to register thermal zone\n");
+ ch_thermal->tzdev = NULL;
+ return ret;
+ }
+ return 0;
+}
+
+int cxgb4_thermal_remove(struct adapter *adap)
+{
+ if (adap->ch_thermal.tzdev)
+ thermal_zone_device_unregister(adap->ch_thermal.tzdev);
+ return 0;
+}
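cxgb4_thermal_get_temp() above reports the firmware diagnostic temperature scaled to millidegrees Celsius (val * 1000), which is the unit the thermal core expects, and the zone is registered under the type string "cxgb4". A rough user-space check of the zone, assuming only the standard thermal sysfs layout (/sys/class/thermal/thermal_zone*/{type,temp}) and nothing added by this patch, is sketched below.

#include <stdio.h>
#include <string.h>
#include <dirent.h>

/* Scan the standard thermal sysfs zones and print the one registered by
 * the cxgb4 driver (thermal_zone_device_register(..., "cxgb4", ...)).
 * Temperatures are exposed in millidegrees Celsius.
 */
int main(void)
{
	DIR *d = opendir("/sys/class/thermal");
	struct dirent *de;

	if (!d) {
		perror("opendir");
		return 1;
	}
	while ((de = readdir(d)) != NULL) {
		char path[512], type[64];
		long temp;
		FILE *f;

		if (strncmp(de->d_name, "thermal_zone", 12))
			continue;

		snprintf(path, sizeof(path), "/sys/class/thermal/%s/type",
			 de->d_name);
		f = fopen(path, "r");
		if (!f || !fgets(type, sizeof(type), f)) {
			if (f)
				fclose(f);
			continue;
		}
		fclose(f);
		if (strncmp(type, "cxgb4", 5))
			continue;

		snprintf(path, sizeof(path), "/sys/class/thermal/%s/temp",
			 de->d_name);
		f = fopen(path, "r");
		if (f && fscanf(f, "%ld", &temp) == 1)
			printf("%s: %ld.%03ld C\n", de->d_name,
			       temp / 1000, temp % 1000);
		if (f)
			fclose(f);
	}
	closedir(d);
	return 0;
}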
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 4bc211093c98..9a6065a3fa46 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -520,10 +520,20 @@ setup_sge_txq_uld(struct adapter *adap, unsigned int uld_type,
txq_info = kzalloc(sizeof(*txq_info), GFP_KERNEL);
if (!txq_info)
return -ENOMEM;
+ if (uld_type == CXGB4_ULD_CRYPTO) {
+ i = min_t(int, adap->vres.ncrypto_fc,
+ num_online_cpus());
+ txq_info->ntxq = rounddown(i, adap->params.nports);
+ if (txq_info->ntxq <= 0) {
+ dev_warn(adap->pdev_dev, "Crypto Tx Queues can't be zero\n");
+ kfree(txq_info);
+ return -EINVAL;
+ }
- i = min_t(int, uld_info->ntxq, num_online_cpus());
- txq_info->ntxq = roundup(i, adap->params.nports);
-
+ } else {
+ i = min_t(int, uld_info->ntxq, num_online_cpus());
+ txq_info->ntxq = roundup(i, adap->params.nports);
+ }
txq_info->uldtxq = kcalloc(txq_info->ntxq, sizeof(struct sge_uld_txq),
GFP_KERNEL);
if (!txq_info->uldtxq) {
@@ -546,11 +556,14 @@ static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
struct cxgb4_lld_info *lli)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+ int tx_uld_type = TX_ULD(uld_type);
+ struct sge_uld_txq_info *txq_info = adap->sge.uld_txq_info[tx_uld_type];
lli->rxq_ids = rxq_info->rspq_id;
lli->nrxq = rxq_info->nrxq;
lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq;
lli->nciq = rxq_info->nciq;
+ lli->ntxq = txq_info->ntxq;
}
int t4_uld_mem_alloc(struct adapter *adap)
@@ -634,7 +647,6 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
lld->ports = adap->port;
lld->vr = &adap->vres;
lld->mtus = adap->params.mtus;
- lld->ntxq = adap->sge.ofldqsets;
lld->nchan = adap->params.nports;
lld->nports = adap->params.nports;
lld->wr_cred = adap->params.ofldq_wr_cred;
@@ -702,15 +714,14 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
* about any presently available devices that support its type. Returns
* %-EBUSY if a ULD of the same type is already registered.
*/
-int cxgb4_register_uld(enum cxgb4_uld type,
- const struct cxgb4_uld_info *p)
+void cxgb4_register_uld(enum cxgb4_uld type,
+ const struct cxgb4_uld_info *p)
{
int ret = 0;
- unsigned int adap_idx = 0;
struct adapter *adap;
if (type >= CXGB4_ULD_MAX)
- return -EINVAL;
+ return;
mutex_lock(&uld_mutex);
list_for_each_entry(adap, &adapter_list, list_node) {
@@ -733,52 +744,29 @@ int cxgb4_register_uld(enum cxgb4_uld type,
}
if (adap->flags & FULL_INIT_DONE)
enable_rx_uld(adap, type);
- if (adap->uld[type].add) {
- ret = -EBUSY;
+ if (adap->uld[type].add)
goto free_irq;
- }
ret = setup_sge_txq_uld(adap, type, p);
if (ret)
goto free_irq;
adap->uld[type] = *p;
uld_attach(adap, type);
- adap_idx++;
- }
- mutex_unlock(&uld_mutex);
- return 0;
-
+ continue;
free_irq:
- if (adap->flags & FULL_INIT_DONE)
- quiesce_rx_uld(adap, type);
- if (adap->flags & USING_MSIX)
- free_msix_queue_irqs_uld(adap, type);
-free_rxq:
- free_sge_queues_uld(adap, type);
-free_queues:
- free_queues_uld(adap, type);
-out:
-
- list_for_each_entry(adap, &adapter_list, list_node) {
- if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
- (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
- continue;
- if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
- continue;
- if (!adap_idx)
- break;
- adap->uld[type].handle = NULL;
- adap->uld[type].add = NULL;
- release_sge_txq_uld(adap, type);
if (adap->flags & FULL_INIT_DONE)
quiesce_rx_uld(adap, type);
if (adap->flags & USING_MSIX)
free_msix_queue_irqs_uld(adap, type);
+free_rxq:
free_sge_queues_uld(adap, type);
+free_queues:
free_queues_uld(adap, type);
- adap_idx--;
+out:
+ dev_warn(adap->pdev_dev,
+ "ULD registration failed for uld type %d\n", type);
}
mutex_unlock(&uld_mutex);
- return ret;
+ return;
}
EXPORT_SYMBOL(cxgb4_register_uld);
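In setup_sge_txq_uld() above, crypto ULD TX queues are now sized with rounddown() to a multiple of the port count rather than roundup(), and a zero result is rejected with -EINVAL: with few crypto channels (adap->vres.ncrypto_fc) or few online CPUs, rounding down can legitimately yield 0. A stand-alone sketch of the arithmetic, using hypothetical port and CPU counts and macros that match the kernel's roundup()/rounddown() for positive integers:

#include <stdio.h>

/* Same arithmetic as the kernel helpers for positive integers. */
#define ROUNDUP(x, y)   ((((x) + (y) - 1) / (y)) * (y))
#define ROUNDDOWN(x, y) (((x) / (y)) * (y))

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	/* Hypothetical adapter: 4 ports, 16 online CPUs. */
	int nports = 4, online_cpus = 16;

	/* Non-crypto ULD: the queue count is rounded *up* to a port multiple. */
	int uld_ntxq = 10;
	int i = min_int(uld_ntxq, online_cpus);
	printf("offload ULD: ntxq = %d\n", ROUNDUP(i, nports));	/* 12 */

	/* Crypto ULD: the count is rounded *down*, so a small resource
	 * count can produce 0, which the driver must reject.
	 */
	int ncrypto_fc = 3;
	i = min_int(ncrypto_fc, online_cpus);
	int ntxq = ROUNDDOWN(i, nports);
	if (ntxq <= 0)
		printf("crypto ULD: would fail with -EINVAL (ntxq = %d)\n",
		       ntxq);
	else
		printf("crypto ULD: ntxq = %d\n", ntxq);
	return 0;
}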
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index de9ad311dacd..5fa9a2d5fc4b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -384,7 +384,7 @@ struct cxgb4_uld_info {
int (*tx_handler)(struct sk_buff *skb, struct net_device *dev);
};
-int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
+void cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
int cxgb4_unregister_uld(enum cxgb4_uld type);
int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb);
int cxgb4_immdata_send(struct net_device *dev, unsigned int idx,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index 7fc656680299..52edb688942b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -38,7 +38,6 @@
#include "cxgb4.h"
#include "sched.h"
-/* Spinlock must be held by caller */
static int t4_sched_class_fw_cmd(struct port_info *pi,
struct ch_sched_params *p,
enum sched_fw_ops op)
@@ -67,7 +66,6 @@ static int t4_sched_class_fw_cmd(struct port_info *pi,
return err;
}
-/* Spinlock must be held by caller */
static int t4_sched_bind_unbind_op(struct port_info *pi, void *arg,
enum sched_bind_type type, bool bind)
{
@@ -163,7 +161,6 @@ static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
if (e && index >= 0) {
int i = 0;
- spin_lock(&e->lock);
list_for_each_entry(qe, &e->queue_list, list) {
if (i == index)
break;
@@ -171,10 +168,8 @@ static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
}
err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE,
false);
- if (err) {
- spin_unlock(&e->lock);
- goto out;
- }
+ if (err)
+ return err;
list_del(&qe->list);
kvfree(qe);
@@ -182,9 +177,7 @@ static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
e->state = SCHED_STATE_UNUSED;
memset(&e->info, 0, sizeof(e->info));
}
- spin_unlock(&e->lock);
}
-out:
return err;
}
@@ -210,10 +203,8 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
/* Unbind queue from any existing class */
err = t4_sched_queue_unbind(pi, p);
- if (err) {
- kvfree(qe);
- goto out;
- }
+ if (err)
+ goto out_err;
/* Bind queue to specified class */
memset(qe, 0, sizeof(*qe));
@@ -221,18 +212,16 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
memcpy(&qe->param, p, sizeof(qe->param));
e = &s->tab[qe->param.class];
- spin_lock(&e->lock);
err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, true);
- if (err) {
- kvfree(qe);
- spin_unlock(&e->lock);
- goto out;
- }
+ if (err)
+ goto out_err;
list_add_tail(&qe->list, &e->queue_list);
atomic_inc(&e->refcnt);
- spin_unlock(&e->lock);
-out:
+ return err;
+
+out_err:
+ kvfree(qe);
return err;
}
@@ -296,8 +285,6 @@ int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
enum sched_bind_type type)
{
struct port_info *pi = netdev2pinfo(dev);
- struct sched_table *s;
- int err = 0;
u8 class_id;
if (!can_sched(dev))
@@ -323,12 +310,8 @@ int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
if (class_id == SCHED_CLS_NONE)
return -ENOTSUPP;
- s = pi->sched_tbl;
- write_lock(&s->rw_lock);
- err = t4_sched_class_bind_unbind_op(pi, arg, type, true);
- write_unlock(&s->rw_lock);
+ return t4_sched_class_bind_unbind_op(pi, arg, type, true);
- return err;
}
/**
@@ -343,8 +326,6 @@ int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
enum sched_bind_type type)
{
struct port_info *pi = netdev2pinfo(dev);
- struct sched_table *s;
- int err = 0;
u8 class_id;
if (!can_sched(dev))
@@ -367,12 +348,7 @@ int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
if (!valid_class_id(dev, class_id))
return -EINVAL;
- s = pi->sched_tbl;
- write_lock(&s->rw_lock);
- err = t4_sched_class_bind_unbind_op(pi, arg, type, false);
- write_unlock(&s->rw_lock);
-
- return err;
+ return t4_sched_class_bind_unbind_op(pi, arg, type, false);
}
/* If @p is NULL, fetch any available unused class */
@@ -425,7 +401,6 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
struct ch_sched_params *p)
{
- struct sched_table *s = pi->sched_tbl;
struct sched_class *e;
u8 class_id;
int err;
@@ -441,7 +416,6 @@ static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
if (class_id != SCHED_CLS_NONE)
return NULL;
- write_lock(&s->rw_lock);
/* See if there's an existing class with same
* requested sched params
*/
@@ -452,27 +426,19 @@ static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
/* Fetch any available unused class */
e = t4_sched_class_lookup(pi, NULL);
if (!e)
- goto out;
+ return NULL;
memcpy(&np, p, sizeof(np));
np.u.params.class = e->idx;
-
- spin_lock(&e->lock);
/* New class */
err = t4_sched_class_fw_cmd(pi, &np, SCHED_FW_OP_ADD);
- if (err) {
- spin_unlock(&e->lock);
- e = NULL;
- goto out;
- }
+ if (err)
+ return NULL;
memcpy(&e->info, &np, sizeof(e->info));
atomic_set(&e->refcnt, 0);
e->state = SCHED_STATE_ACTIVE;
- spin_unlock(&e->lock);
}
-out:
- write_unlock(&s->rw_lock);
return e;
}
@@ -517,14 +483,12 @@ struct sched_table *t4_init_sched(unsigned int sched_size)
return NULL;
s->sched_size = sched_size;
- rwlock_init(&s->rw_lock);
for (i = 0; i < s->sched_size; i++) {
memset(&s->tab[i], 0, sizeof(struct sched_class));
s->tab[i].idx = i;
s->tab[i].state = SCHED_STATE_UNUSED;
INIT_LIST_HEAD(&s->tab[i].queue_list);
- spin_lock_init(&s->tab[i].lock);
atomic_set(&s->tab[i].refcnt, 0);
}
return s;
@@ -545,11 +509,9 @@ void t4_cleanup_sched(struct adapter *adap)
for (i = 0; i < s->sched_size; i++) {
struct sched_class *e;
- write_lock(&s->rw_lock);
e = &s->tab[i];
if (e->state == SCHED_STATE_ACTIVE)
t4_sched_class_free(pi, e);
- write_unlock(&s->rw_lock);
}
kvfree(s);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.h b/drivers/net/ethernet/chelsio/cxgb4/sched.h
index 3a49e00a38a1..168fb4ce3759 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.h
@@ -69,13 +69,11 @@ struct sched_class {
u8 idx;
struct ch_sched_params info;
struct list_head queue_list;
- spinlock_t lock; /* Per class lock */
atomic_t refcnt;
};
struct sched_table { /* per port scheduling table */
u8 sched_size;
- rwlock_t rw_lock; /* Table lock */
struct sched_class tab[0];
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index f85eab57e9e1..cb523949c812 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -4204,6 +4204,7 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
*/
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
{
+ unsigned int fw_caps = adap->params.fw_caps_support;
struct fw_port_cmd c;
memset(&c, 0, sizeof(c));
@@ -4211,9 +4212,14 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
FW_PORT_CMD_PORTID_V(port));
c.action_to_len16 =
- cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
+ cpu_to_be32(FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
+ ? FW_PORT_ACTION_L1_CFG
+ : FW_PORT_ACTION_L1_CFG32) |
FW_LEN16(c));
- c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP32_ANEG);
+ if (fw_caps == FW_CAPS16)
+ c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
+ else
+ c.u.l1cfg32.rcap32 = cpu_to_be32(FW_PORT_CAP32_ANEG);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index b8f75a22fb6c..f152da1ce046 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -753,7 +753,6 @@ struct cpl_abort_req_rss {
};
struct cpl_abort_req_rss6 {
- WR_HDR;
union opcode_tid ot;
__be32 srqidx_status;
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 6d2bc8789223..57584ab32043 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -1332,6 +1332,7 @@ enum fw_params_param_dev_phyfw {
enum fw_params_param_dev_diag {
FW_PARAM_DEV_DIAG_TMP = 0x00,
FW_PARAM_DEV_DIAG_VDD = 0x01,
+ FW_PARAM_DEV_DIAG_MAXTMPTHRESH = 0x02,
};
enum fw_params_param_dev_fwcache {
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 50222b7b81f3..0a82fcf16d35 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1722,8 +1722,7 @@ out:
static int
dm9000_drv_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
struct board_info *db;
if (ndev) {
@@ -1745,8 +1744,7 @@ dm9000_drv_suspend(struct device *dev)
static int
dm9000_drv_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
struct board_info *db = netdev_priv(ndev);
if (ndev) {
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 58bcee8f0a58..ce041c90adb0 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -185,6 +185,7 @@ static inline void queue_tail_inc(struct be_queue_info *q)
struct be_eq_obj {
struct be_queue_info q;
+ char desc[32];
struct be_adapter *adapter;
struct napi_struct napi;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 74d122616e76..c5ad7a4f4d83 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -3488,11 +3488,9 @@ static int be_msix_register(struct be_adapter *adapter)
int status, i, vec;
for_all_evt_queues(adapter, eqo, i) {
- char irq_name[IFNAMSIZ+4];
-
- snprintf(irq_name, sizeof(irq_name), "%s-q%d", netdev->name, i);
+ sprintf(eqo->desc, "%s-q%d", netdev->name, i);
vec = be_msix_vec_get(adapter, eqo);
- status = request_irq(vec, be_msix, 0, irq_name, eqo);
+ status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
if (status)
goto err_msix;
@@ -4002,8 +4000,6 @@ static int be_enable_vxlan_offloads(struct be_adapter *adapter)
netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_GSO_UDP_TUNNEL;
- netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
- netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
be16_to_cpu(port));
@@ -4025,8 +4021,6 @@ static void be_disable_vxlan_offloads(struct be_adapter *adapter)
adapter->vxlan_port = 0;
netdev->hw_enc_features = 0;
- netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
- netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
@@ -5320,6 +5314,7 @@ static void be_netdev_init(struct net_device *netdev)
struct be_adapter *adapter = netdev_priv(netdev);
netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX;
if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
@@ -6151,7 +6146,6 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
if (status)
return PCI_ERS_RESULT_DISCONNECT;
- pci_cleanup_aer_uncorrect_error_status(pdev);
be_clear_error(adapter, BE_CLEAR_ALL);
return PCI_ERS_RESULT_RECOVERED;
}
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 7a30276e1ba6..d3a62bc1f1c6 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -96,13 +96,6 @@ config GIANFAR
on the 8540.
source "drivers/net/ethernet/freescale/dpaa/Kconfig"
-
-config FSL_DPAA2_ETH
- tristate "Freescale DPAA2 Ethernet"
- depends on FSL_MC_BUS && FSL_MC_DPIO
- depends on NETDEVICES && ETHERNET
- ---help---
- Ethernet driver for Freescale DPAA2 SoCs, using the
- Freescale MC bus driver
+source "drivers/net/ethernet/freescale/dpaa2/Kconfig"
endif # NET_VENDOR_FREESCALE
diff --git a/drivers/net/ethernet/freescale/dpaa2/Kconfig b/drivers/net/ethernet/freescale/dpaa2/Kconfig
new file mode 100644
index 000000000000..809a155eb193
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/Kconfig
@@ -0,0 +1,16 @@
+config FSL_DPAA2_ETH
+ tristate "Freescale DPAA2 Ethernet"
+ depends on FSL_MC_BUS && FSL_MC_DPIO
+ help
+ This is the DPAA2 Ethernet driver supporting Freescale SoCs
+ with DPAA2 (DataPath Acceleration Architecture v2).
+ The driver manages network objects discovered on the Freescale
+ MC bus.
+
+config FSL_DPAA2_PTP_CLOCK
+ tristate "Freescale DPAA2 PTP Clock"
+ depends on FSL_DPAA2_ETH && POSIX_TIMERS
+ select PTP_1588_CLOCK
+ help
+ This driver adds support for using the DPAA2 1588 timer module
+ as a PTP clock.
diff --git a/drivers/net/ethernet/freescale/dpaa2/Makefile b/drivers/net/ethernet/freescale/dpaa2/Makefile
index 9315ecdba612..2f424e0a8225 100644
--- a/drivers/net/ethernet/freescale/dpaa2/Makefile
+++ b/drivers/net/ethernet/freescale/dpaa2/Makefile
@@ -3,9 +3,11 @@
# Makefile for the Freescale DPAA2 Ethernet controller
#
-obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
+obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
+obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += fsl-dpaa2-ptp.o
-fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o
+fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o
+fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o
# Needed by the tracing framework
CFLAGS_dpaa2-eth.o := -I$(src)
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 108c137ea593..88f7acce38dc 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -98,8 +98,7 @@ free_buf:
}
/* Build a linear skb based on a single-buffer frame descriptor */
-static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
+static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch,
const struct dpaa2_fd *fd,
void *fd_vaddr)
{
@@ -233,7 +232,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
percpu_extras = this_cpu_ptr(priv->percpu_extras);
if (fd_format == dpaa2_fd_single) {
- skb = build_linear_skb(priv, ch, fd, vaddr);
+ skb = build_linear_skb(ch, fd, vaddr);
} else if (fd_format == dpaa2_fd_sg) {
skb = build_frag_skb(priv, ch, buf_data);
skb_free_frag(vaddr);
@@ -289,10 +288,11 @@ err_frame_format:
*
* Observance of NAPI budget is not our concern, leaving that to the caller.
*/
-static int consume_frames(struct dpaa2_eth_channel *ch)
+static int consume_frames(struct dpaa2_eth_channel *ch,
+ enum dpaa2_eth_fq_type *type)
{
struct dpaa2_eth_priv *priv = ch->priv;
- struct dpaa2_eth_fq *fq;
+ struct dpaa2_eth_fq *fq = NULL;
struct dpaa2_dq *dq;
const struct dpaa2_fd *fd;
int cleaned = 0;
@@ -311,12 +311,23 @@ static int consume_frames(struct dpaa2_eth_channel *ch)
fd = dpaa2_dq_fd(dq);
fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
- fq->stats.frames++;
fq->consume(priv, ch, fd, &ch->napi, fq->flowid);
cleaned++;
} while (!is_last);
+ if (!cleaned)
+ return 0;
+
+ fq->stats.frames += cleaned;
+ ch->stats.frames += cleaned;
+
+ /* A dequeue operation only pulls frames from a single queue
+ * into the store. Return the frame queue type as an out param.
+ */
+ if (type)
+ *type = fq->type;
+
return cleaned;
}
@@ -426,7 +437,7 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv,
dpaa2_fd_set_format(fd, dpaa2_fd_sg);
dpaa2_fd_set_addr(fd, addr);
dpaa2_fd_set_len(fd, skb->len);
- dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA | FD_CTRL_PTV1);
+ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
enable_tx_tstamp(fd, sgt_buf);
@@ -479,7 +490,7 @@ static int build_single_fd(struct dpaa2_eth_priv *priv,
dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
dpaa2_fd_set_len(fd, skb->len);
dpaa2_fd_set_format(fd, dpaa2_fd_single);
- dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA | FD_CTRL_PTV1);
+ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
enable_tx_tstamp(fd, buffer_start);
@@ -648,7 +659,7 @@ err_alloc_headroom:
/* Tx confirmation frame processing routine */
static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
+ struct dpaa2_eth_channel *ch __always_unused,
const struct dpaa2_fd *fd,
struct napi_struct *napi __always_unused,
u16 queue_id __always_unused)
@@ -921,14 +932,16 @@ static int pull_channel(struct dpaa2_eth_channel *ch)
static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
{
struct dpaa2_eth_channel *ch;
- int cleaned = 0, store_cleaned;
struct dpaa2_eth_priv *priv;
+ int rx_cleaned = 0, txconf_cleaned = 0;
+ enum dpaa2_eth_fq_type type = 0;
+ int store_cleaned;
int err;
ch = container_of(napi, struct dpaa2_eth_channel, napi);
priv = ch->priv;
- while (cleaned < budget) {
+ do {
err = pull_channel(ch);
if (unlikely(err))
break;
@@ -936,30 +949,32 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
/* Refill pool if appropriate */
refill_pool(priv, ch, priv->bpid);
- store_cleaned = consume_frames(ch);
- cleaned += store_cleaned;
+ store_cleaned = consume_frames(ch, &type);
+ if (type == DPAA2_RX_FQ)
+ rx_cleaned += store_cleaned;
+ else
+ txconf_cleaned += store_cleaned;
- /* If we have enough budget left for a full store,
- * try a new pull dequeue, otherwise we're done here
+ /* If we either consumed the whole NAPI budget with Rx frames
+ * or we reached the Tx confirmations threshold, we're done.
*/
- if (store_cleaned == 0 ||
- cleaned > budget - DPAA2_ETH_STORE_SIZE)
- break;
- }
-
- if (cleaned < budget && napi_complete_done(napi, cleaned)) {
- /* Re-enable data available notifications */
- do {
- err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
- cpu_relax();
- } while (err == -EBUSY);
- WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
- ch->nctx.desired_cpu);
- }
+ if (rx_cleaned >= budget ||
+ txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI)
+ return budget;
+ } while (store_cleaned);
- ch->stats.frames += cleaned;
+ /* We didn't consume the entire budget, so finish napi and
+ * re-enable data availability notifications
+ */
+ napi_complete_done(napi, rx_cleaned);
+ do {
+ err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
+ cpu_relax();
+ } while (err == -EBUSY);
+ WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
+ ch->nctx.desired_cpu);
- return cleaned;
+ return max(rx_cleaned, 1);
}
static void enable_ch_napi(struct dpaa2_eth_priv *priv)
@@ -986,7 +1001,7 @@ static void disable_ch_napi(struct dpaa2_eth_priv *priv)
static int link_state_update(struct dpaa2_eth_priv *priv)
{
- struct dpni_link_state state;
+ struct dpni_link_state state = {0};
int err;
err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
@@ -1069,14 +1084,13 @@ enable_err:
/* The DPIO store must be empty when we call this,
* at the end of every NAPI cycle.
*/
-static u32 drain_channel(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch)
+static u32 drain_channel(struct dpaa2_eth_channel *ch)
{
u32 drained = 0, total = 0;
do {
pull_channel(ch);
- drained = consume_frames(ch);
+ drained = consume_frames(ch, NULL);
total += drained;
} while (drained);
@@ -1091,7 +1105,7 @@ static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv)
for (i = 0; i < priv->num_channels; i++) {
ch = priv->channel[i];
- drained += drain_channel(priv, ch);
+ drained += drain_channel(ch);
}
return drained;
@@ -1100,7 +1114,7 @@ static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv)
static int dpaa2_eth_stop(struct net_device *net_dev)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- int dpni_enabled;
+ int dpni_enabled = 0;
int retries = 10;
u32 drained;
@@ -2156,8 +2170,8 @@ int dpaa2_eth_cls_fld_off(int prot, int field)
/* Set Rx distribution (hash or flow classification) key
* flags is a combination of RXH_ bits
*/
-int dpaa2_eth_set_dist_key(struct net_device *net_dev,
- enum dpaa2_eth_rx_dist type, u64 flags)
+static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
+ enum dpaa2_eth_rx_dist type, u64 flags)
{
struct device *dev = net_dev->dev.parent;
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
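The dpaa2_eth_poll() rework earlier in this file's diff follows the standard NAPI contract: keep returning the full budget (without completing NAPI) while Rx work remains or the Tx-confirmation cap DPAA2_ETH_TXCONF_PER_NAPI is reached, and only call napi_complete_done() and re-arm notifications once the queues run dry. A generic skeleton of that contract, with hypothetical helper names, for illustration only:

/* Generic NAPI poll skeleton illustrating the contract followed above.
 * example_clean_ring() and example_rearm_irq() are hypothetical helpers.
 */
static int example_poll(struct napi_struct *napi, int budget)
{
	int done = 0;
	int n;

	do {
		n = example_clean_ring(napi, budget - done);
		done += n;
		if (done >= budget)
			return budget;	/* work left: stay scheduled */
	} while (n);

	if (napi_complete_done(napi, done))
		example_rearm_irq(napi);	/* re-enable notifications */

	return done;
}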
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 7a7a3e7bcde2..452a8e9c4f0e 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -40,6 +40,11 @@
*/
#define DPAA2_ETH_TAILDROP_THRESH (64 * 1024)
+/* Maximum number of Tx confirmation frames to be processed
+ * in a single NAPI call
+ */
+#define DPAA2_ETH_TXCONF_PER_NAPI 256
+
/* Buffer quota per queue. Must be large enough such that for minimum sized
* frames taildrop kicks in before the bpool gets depleted, so we compute
* how many 64B frames fit inside the taildrop threshold and add a margin
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
new file mode 100644
index 000000000000..84b942b1eccc
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2018 NXP
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/fsl/mc.h>
+
+#include "dpaa2-ptp.h"
+
+struct ptp_dpaa2_priv {
+ struct fsl_mc_device *ptp_mc_dev;
+ struct ptp_clock *clock;
+ struct ptp_clock_info caps;
+ u32 freq_comp;
+};
+
+/* PTP clock operations */
+static int ptp_dpaa2_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct ptp_dpaa2_priv *ptp_dpaa2 =
+ container_of(ptp, struct ptp_dpaa2_priv, caps);
+ struct fsl_mc_device *mc_dev = ptp_dpaa2->ptp_mc_dev;
+ struct device *dev = &mc_dev->dev;
+ u64 adj;
+ u32 diff, tmr_add;
+ int neg_adj = 0;
+ int err = 0;
+
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
+
+ tmr_add = ptp_dpaa2->freq_comp;
+ adj = tmr_add;
+ adj *= ppb;
+ diff = div_u64(adj, 1000000000ULL);
+
+ tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
+
+ err = dprtc_set_freq_compensation(mc_dev->mc_io, 0,
+ mc_dev->mc_handle, tmr_add);
+ if (err)
+ dev_err(dev, "dprtc_set_freq_compensation err %d\n", err);
+ return err;
+}
+
+static int ptp_dpaa2_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct ptp_dpaa2_priv *ptp_dpaa2 =
+ container_of(ptp, struct ptp_dpaa2_priv, caps);
+ struct fsl_mc_device *mc_dev = ptp_dpaa2->ptp_mc_dev;
+ struct device *dev = &mc_dev->dev;
+ s64 now;
+ int err = 0;
+
+ err = dprtc_get_time(mc_dev->mc_io, 0, mc_dev->mc_handle, &now);
+ if (err) {
+ dev_err(dev, "dprtc_get_time err %d\n", err);
+ return err;
+ }
+
+ now += delta;
+
+ err = dprtc_set_time(mc_dev->mc_io, 0, mc_dev->mc_handle, now);
+ if (err)
+ dev_err(dev, "dprtc_set_time err %d\n", err);
+ return err;
+}
+
+static int ptp_dpaa2_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct ptp_dpaa2_priv *ptp_dpaa2 =
+ container_of(ptp, struct ptp_dpaa2_priv, caps);
+ struct fsl_mc_device *mc_dev = ptp_dpaa2->ptp_mc_dev;
+ struct device *dev = &mc_dev->dev;
+ u64 ns;
+ u32 remainder;
+ int err = 0;
+
+ err = dprtc_get_time(mc_dev->mc_io, 0, mc_dev->mc_handle, &ns);
+ if (err) {
+ dev_err(dev, "dprtc_get_time err %d\n", err);
+ return err;
+ }
+
+ ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
+ ts->tv_nsec = remainder;
+ return err;
+}
+
+static int ptp_dpaa2_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct ptp_dpaa2_priv *ptp_dpaa2 =
+ container_of(ptp, struct ptp_dpaa2_priv, caps);
+ struct fsl_mc_device *mc_dev = ptp_dpaa2->ptp_mc_dev;
+ struct device *dev = &mc_dev->dev;
+ u64 ns;
+ int err = 0;
+
+ ns = ts->tv_sec * 1000000000ULL;
+ ns += ts->tv_nsec;
+
+ err = dprtc_set_time(mc_dev->mc_io, 0, mc_dev->mc_handle, ns);
+ if (err)
+ dev_err(dev, "dprtc_set_time err %d\n", err);
+ return err;
+}
+
+static const struct ptp_clock_info ptp_dpaa2_caps = {
+ .owner = THIS_MODULE,
+ .name = "DPAA2 PTP Clock",
+ .max_adj = 512000,
+ .n_alarm = 2,
+ .n_ext_ts = 2,
+ .n_per_out = 3,
+ .n_pins = 0,
+ .pps = 1,
+ .adjfreq = ptp_dpaa2_adjfreq,
+ .adjtime = ptp_dpaa2_adjtime,
+ .gettime64 = ptp_dpaa2_gettime,
+ .settime64 = ptp_dpaa2_settime,
+};
+
+static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
+{
+ struct device *dev = &mc_dev->dev;
+ struct ptp_dpaa2_priv *ptp_dpaa2;
+ u32 tmr_add = 0;
+ int err;
+
+ ptp_dpaa2 = devm_kzalloc(dev, sizeof(*ptp_dpaa2), GFP_KERNEL);
+ if (!ptp_dpaa2)
+ return -ENOMEM;
+
+ err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
+ if (err) {
+ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
+ goto err_exit;
+ }
+
+ err = dprtc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
+ &mc_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dprtc_open err %d\n", err);
+ goto err_free_mcp;
+ }
+
+ ptp_dpaa2->ptp_mc_dev = mc_dev;
+
+ err = dprtc_get_freq_compensation(mc_dev->mc_io, 0,
+ mc_dev->mc_handle, &tmr_add);
+ if (err) {
+ dev_err(dev, "dprtc_get_freq_compensation err %d\n", err);
+ goto err_close;
+ }
+
+ ptp_dpaa2->freq_comp = tmr_add;
+ ptp_dpaa2->caps = ptp_dpaa2_caps;
+
+ ptp_dpaa2->clock = ptp_clock_register(&ptp_dpaa2->caps, dev);
+ if (IS_ERR(ptp_dpaa2->clock)) {
+ err = PTR_ERR(ptp_dpaa2->clock);
+ goto err_close;
+ }
+
+ dpaa2_phc_index = ptp_clock_index(ptp_dpaa2->clock);
+
+ dev_set_drvdata(dev, ptp_dpaa2);
+
+ return 0;
+
+err_close:
+ dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
+err_free_mcp:
+ fsl_mc_portal_free(mc_dev->mc_io);
+err_exit:
+ return err;
+}
+
+static int dpaa2_ptp_remove(struct fsl_mc_device *mc_dev)
+{
+ struct ptp_dpaa2_priv *ptp_dpaa2;
+ struct device *dev = &mc_dev->dev;
+
+ ptp_dpaa2 = dev_get_drvdata(dev);
+ ptp_clock_unregister(ptp_dpaa2->clock);
+
+ dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
+ fsl_mc_portal_free(mc_dev->mc_io);
+
+ return 0;
+}
+
+static const struct fsl_mc_device_id dpaa2_ptp_match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dprtc",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(fslmc, dpaa2_ptp_match_id_table);
+
+static struct fsl_mc_driver dpaa2_ptp_drv = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = dpaa2_ptp_probe,
+ .remove = dpaa2_ptp_remove,
+ .match_id_table = dpaa2_ptp_match_id_table,
+};
+
+module_fsl_mc_driver(dpaa2_ptp_drv);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DPAA2 PTP Clock Driver");
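A note on the adjfreq math in ptp_dpaa2_adjfreq() above: the current TMR_ADD compensation value is scaled by |ppb| parts per billion and the result is added to or subtracted from it. A standalone sketch of the same computation with made-up numbers (the driver itself uses div_u64() in kernel context):

/* Standalone illustration of the frequency-compensation scaling above;
 * the base value 0x20000000 is made up for the example.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t scale_freq_comp(uint32_t tmr_add, int32_t ppb)
{
	int neg = ppb < 0;
	uint64_t adj = (uint64_t)tmr_add * (uint64_t)(neg ? -ppb : ppb);
	uint32_t diff = (uint32_t)(adj / 1000000000ULL);

	return neg ? tmr_add - diff : tmr_add + diff;
}

int main(void)
{
	/* slow the clock by 100 ppm: diff = 0x20000000 * 100000 / 1e9 */
	printf("0x%x\n", scale_freq_comp(0x20000000, -100000));
	return 0;
}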
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h
new file mode 100644
index 000000000000..ff2e177395d4
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 NXP
+ */
+
+#ifndef __RTC_H
+#define __RTC_H
+
+#include "dprtc.h"
+#include "dprtc-cmd.h"
+
+extern int dpaa2_phc_index;
+
+#endif
diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h
new file mode 100644
index 000000000000..9af4ac71f347
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2018 NXP
+ */
+
+#ifndef _FSL_DPRTC_CMD_H
+#define _FSL_DPRTC_CMD_H
+
+/* Command versioning */
+#define DPRTC_CMD_BASE_VERSION 1
+#define DPRTC_CMD_ID_OFFSET 4
+
+#define DPRTC_CMD(id) (((id) << DPRTC_CMD_ID_OFFSET) | DPRTC_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPRTC_CMDID_CLOSE DPRTC_CMD(0x800)
+#define DPRTC_CMDID_OPEN DPRTC_CMD(0x810)
+
+#define DPRTC_CMDID_SET_FREQ_COMPENSATION DPRTC_CMD(0x1d1)
+#define DPRTC_CMDID_GET_FREQ_COMPENSATION DPRTC_CMD(0x1d2)
+#define DPRTC_CMDID_GET_TIME DPRTC_CMD(0x1d3)
+#define DPRTC_CMDID_SET_TIME DPRTC_CMD(0x1d4)
+
+#pragma pack(push, 1)
+struct dprtc_cmd_open {
+ __le32 dprtc_id;
+};
+
+struct dprtc_get_freq_compensation {
+ __le32 freq_compensation;
+};
+
+struct dprtc_time {
+ __le64 time;
+};
+
+#pragma pack(pop)
+
+#endif /* _FSL_DPRTC_CMD_H */
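The command IDs above encode a 4-bit version in the low nibble: DPRTC_CMD(id) shifts the raw ID left by DPRTC_CMD_ID_OFFSET and ORs in DPRTC_CMD_BASE_VERSION, so DPRTC_CMDID_OPEN becomes (0x810 << 4) | 1 = 0x8101. A tiny standalone check of that encoding (hypothetical test, not part of the patch):

/* Standalone check of the command-ID encoding defined above. */
#include <assert.h>

#define DPRTC_CMD_BASE_VERSION	1
#define DPRTC_CMD_ID_OFFSET	4
#define DPRTC_CMD(id) (((id) << DPRTC_CMD_ID_OFFSET) | DPRTC_CMD_BASE_VERSION)

int main(void)
{
	assert(DPRTC_CMD(0x810) == 0x8101);	/* DPRTC_CMDID_OPEN */
	assert(DPRTC_CMD(0x1d3) == 0x1d31);	/* DPRTC_CMDID_GET_TIME */
	return 0;
}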
diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc.c b/drivers/net/ethernet/freescale/dpaa2/dprtc.c
new file mode 100644
index 000000000000..c13e09bc7b9d
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dprtc.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2018 NXP
+ */
+
+#include <linux/fsl/mc.h>
+
+#include "dprtc.h"
+#include "dprtc-cmd.h"
+
+/**
+ * dprtc_open() - Open a control session for the specified object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dprtc_id: DPRTC unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dprtc_create function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dprtc_id,
+ u16 *token)
+{
+ struct dprtc_cmd_open *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ cmd_params = (struct dprtc_cmd_open *)cmd.params;
+ cmd_params->dprtc_id = cpu_to_le32(dprtc_id);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
+/**
+ * dprtc_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLOSE, cmd_flags,
+ token);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprtc_set_freq_compensation() - Sets a new frequency compensation value.
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ * @freq_compensation: The new frequency compensation value to set.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u32 freq_compensation)
+{
+ struct dprtc_get_freq_compensation *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_FREQ_COMPENSATION,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprtc_get_freq_compensation *)cmd.params;
+ cmd_params->freq_compensation = cpu_to_le32(freq_compensation);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprtc_get_freq_compensation() - Retrieves the frequency compensation value
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ * @freq_compensation: Frequency compensation value
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u32 *freq_compensation)
+{
+ struct dprtc_get_freq_compensation *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_FREQ_COMPENSATION,
+ cmd_flags,
+ token);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dprtc_get_freq_compensation *)cmd.params;
+ *freq_compensation = le32_to_cpu(rsp_params->freq_compensation);
+
+ return 0;
+}
+
+/**
+ * dprtc_get_time() - Returns the current RTC time.
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ * @time: Current RTC time.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_get_time(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ uint64_t *time)
+{
+ struct dprtc_time *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_TIME,
+ cmd_flags,
+ token);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dprtc_time *)cmd.params;
+ *time = le64_to_cpu(rsp_params->time);
+
+ return 0;
+}
+
+/**
+ * dprtc_set_time() - Updates current RTC time.
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ * @time: New RTC time.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_set_time(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ uint64_t time)
+{
+ struct dprtc_time *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_TIME,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprtc_time *)cmd.params;
+ cmd_params->time = cpu_to_le64(time);
+
+ return mc_send_command(mc_io, &cmd);
+}
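Taken together, the dprtc calls above follow the usual MC-object pattern: open the object to obtain a token, issue commands against that token, then close. A condensed, hypothetical usage sketch (error handling trimmed; it mirrors dpaa2_ptp_probe()/dpaa2_ptp_remove() earlier in this patch and assumes an MC portal has already been allocated):

/* Condensed, hypothetical usage sketch of the dprtc API above. */
static int dprtc_example(struct fsl_mc_io *mc_io, int dprtc_id)
{
	u32 freq_comp;
	u64 now;
	u16 token;
	int err;

	err = dprtc_open(mc_io, 0, dprtc_id, &token);
	if (err)
		return err;

	err = dprtc_get_freq_compensation(mc_io, 0, token, &freq_comp);
	if (!err)
		err = dprtc_get_time(mc_io, 0, token, &now);

	dprtc_close(mc_io, 0, token);
	return err;
}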
diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc.h b/drivers/net/ethernet/freescale/dpaa2/dprtc.h
new file mode 100644
index 000000000000..fe19618d6cdf
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dprtc.h
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2018 NXP
+ */
+
+#ifndef __FSL_DPRTC_H
+#define __FSL_DPRTC_H
+
+/* Data Path Real Time Counter API
+ * Contains initialization APIs and runtime control APIs for RTC
+ */
+
+struct fsl_mc_io;
+
+int dprtc_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dprtc_id,
+ u16 *token);
+
+int dprtc_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u32 freq_compensation);
+
+int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u32 *freq_compensation);
+
+int dprtc_get_time(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ uint64_t *time);
+
+int dprtc_set_time(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ uint64_t time);
+
+#endif /* __FSL_DPRTC_H */
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 4778b663653e..bf80855dd0dd 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -452,6 +452,10 @@ struct bufdesc_ex {
* initialisation.
*/
#define FEC_QUIRK_MIB_CLEAR (1 << 15)
+/* Only the i.MX25/i.MX27/i.MX28 controllers support the FRBR,FRSR registers;
+ * these FIFO receive registers are reserved on other platforms.
+ */
+#define FEC_QUIRK_HAS_FRREG (1 << 16)
struct bufdesc_prop {
int qid;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index ce74b7a46d07..6db69ba30dcd 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -91,14 +91,16 @@ static struct platform_device_id fec_devtype[] = {
.driver_data = 0,
}, {
.name = "imx25-fec",
- .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR,
+ .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
+ FEC_QUIRK_HAS_FRREG,
}, {
.name = "imx27-fec",
- .driver_data = FEC_QUIRK_MIB_CLEAR,
+ .driver_data = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG,
}, {
.name = "imx28-fec",
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
- FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC,
+ FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
+ FEC_QUIRK_HAS_FRREG,
}, {
.name = "imx6q-fec",
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
@@ -1158,7 +1160,7 @@ static void fec_enet_timeout_work(struct work_struct *work)
napi_disable(&fep->napi);
netif_tx_lock_bh(ndev);
fec_restart(ndev);
- netif_wake_queue(ndev);
+ netif_tx_wake_all_queues(ndev);
netif_tx_unlock_bh(ndev);
napi_enable(&fep->napi);
}
@@ -1273,7 +1275,7 @@ skb_done:
/* Since we have freed up a buffer, the ring is no longer full
*/
- if (netif_queue_stopped(ndev)) {
+ if (netif_tx_queue_stopped(nq)) {
entries_free = fec_enet_get_free_txdesc_num(txq);
if (entries_free >= txq->tx_wake_threshold)
netif_tx_wake_queue(nq);
@@ -1746,7 +1748,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
napi_disable(&fep->napi);
netif_tx_lock_bh(ndev);
fec_restart(ndev);
- netif_wake_queue(ndev);
+ netif_tx_wake_all_queues(ndev);
netif_tx_unlock_bh(ndev);
napi_enable(&fep->napi);
}
@@ -2162,7 +2164,13 @@ static void fec_enet_get_regs(struct net_device *ndev,
memset(buf, 0, regs->len);
for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
- off = fec_enet_register_offset[i] / 4;
+ off = fec_enet_register_offset[i];
+
+ if ((off == FEC_R_BOUND || off == FEC_R_FSTART) &&
+ !(fep->quirks & FEC_QUIRK_HAS_FRREG))
+ continue;
+
+ off >>= 2;
buf[off] = readl(&theregs[off]);
}
}
@@ -2240,7 +2248,7 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
napi_disable(&fep->napi);
netif_tx_lock_bh(ndev);
fec_restart(ndev);
- netif_wake_queue(ndev);
+ netif_tx_wake_all_queues(ndev);
netif_tx_unlock_bh(ndev);
napi_enable(&fep->napi);
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index a051e582d541..79d03f8ee7b1 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -84,7 +84,7 @@ static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
if (cb->type == DESC_TYPE_SKB)
dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
ring_to_dma_dir(ring));
- else
+ else if (cb->length)
dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
ring_to_dma_dir(ring));
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index f56855e63c96..28e907831b0e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -40,9 +40,9 @@
#define SKB_TMP_LEN(SKB) \
(((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))
-static void fill_v2_desc(struct hnae_ring *ring, void *priv,
- int size, dma_addr_t dma, int frag_end,
- int buf_num, enum hns_desc_type type, int mtu)
+static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
+ int send_sz, dma_addr_t dma, int frag_end,
+ int buf_num, enum hns_desc_type type, int mtu)
{
struct hnae_desc *desc = &ring->desc[ring->next_to_use];
struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
@@ -64,7 +64,7 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
desc_cb->type = type;
desc->addr = cpu_to_le64(dma);
- desc->tx.send_size = cpu_to_le16((u16)size);
+ desc->tx.send_size = cpu_to_le16((u16)send_sz);
/* config bd buffer end */
hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
@@ -133,6 +133,14 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
ring_ptr_move_fw(ring, next_to_use);
}
+static void fill_v2_desc(struct hnae_ring *ring, void *priv,
+ int size, dma_addr_t dma, int frag_end,
+ int buf_num, enum hns_desc_type type, int mtu)
+{
+ fill_v2_desc_hw(ring, priv, size, size, dma, frag_end,
+ buf_num, type, mtu);
+}
+
static const struct acpi_device_id hns_enet_acpi_match[] = {
{ "HISI00C1", 0 },
{ "HISI00C2", 0 },
@@ -289,15 +297,15 @@ static void fill_tso_desc(struct hnae_ring *ring, void *priv,
/* when the frag size is bigger than hardware, split this frag */
for (k = 0; k < frag_buf_num; k++)
- fill_v2_desc(ring, priv,
- (k == frag_buf_num - 1) ?
+ fill_v2_desc_hw(ring, priv, k == 0 ? size : 0,
+ (k == frag_buf_num - 1) ?
sizeoflast : BD_MAX_SEND_SIZE,
- dma + BD_MAX_SEND_SIZE * k,
- frag_end && (k == frag_buf_num - 1) ? 1 : 0,
- buf_num,
- (type == DESC_TYPE_SKB && !k) ?
+ dma + BD_MAX_SEND_SIZE * k,
+ frag_end && (k == frag_buf_num - 1) ? 1 : 0,
+ buf_num,
+ (type == DESC_TYPE_SKB && !k) ?
DESC_TYPE_SKB : DESC_TYPE_PAGE,
- mtu);
+ mtu);
}
netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
@@ -1495,21 +1503,6 @@ static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
return phy_mii_ioctl(phy_dev, ifr, cmd);
}
-/* use only for netconsole to poll with the device without interrupt */
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void hns_nic_poll_controller(struct net_device *ndev)
-{
- struct hns_nic_priv *priv = netdev_priv(ndev);
- unsigned long flags;
- int i;
-
- local_irq_save(flags);
- for (i = 0; i < priv->ae_handle->q_num * 2; i++)
- napi_schedule(&priv->ring_data[i].napi);
- local_irq_restore(flags);
-}
-#endif
-
static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
@@ -1962,9 +1955,6 @@ static const struct net_device_ops hns_nic_netdev_ops = {
.ndo_set_features = hns_nic_set_features,
.ndo_fix_features = hns_nic_fix_features,
.ndo_get_stats64 = hns_nic_get_stats64,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = hns_nic_poll_controller,
-#endif
.ndo_set_rx_mode = hns_nic_set_rx_mode,
.ndo_select_queue = hns_nic_select_queue,
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index be9dc08ccf67..038326cfda93 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -46,9 +46,6 @@ enum hclge_mbx_mac_vlan_subcode {
HCLGE_MBX_MAC_VLAN_MC_MODIFY, /* modify MC mac addr */
HCLGE_MBX_MAC_VLAN_MC_ADD, /* add new MC mac addr */
HCLGE_MBX_MAC_VLAN_MC_REMOVE, /* remove MC mac addr */
- HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE, /* config func MTA enable */
- HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ, /* read func MTA type */
- HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE, /* update MTA status */
};
/* below are per-VF vlan cfg subcodes */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 591ee2ee4bf6..055b40606dbc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -316,8 +316,8 @@ struct hnae3_ae_ops {
int (*set_loopback)(struct hnae3_handle *handle,
enum hnae3_loop loop_mode, bool en);
- void (*set_promisc_mode)(struct hnae3_handle *handle, bool en_uc_pmc,
- bool en_mc_pmc);
+ int (*set_promisc_mode)(struct hnae3_handle *handle, bool en_uc_pmc,
+ bool en_mc_pmc);
int (*set_mtu)(struct hnae3_handle *handle, int new_mtu);
void (*get_pauseparam)(struct hnae3_handle *handle,
@@ -355,8 +355,6 @@ struct hnae3_ae_ops {
const unsigned char *addr);
int (*rm_mc_addr)(struct hnae3_handle *handle,
const unsigned char *addr);
- int (*update_mta_status)(struct hnae3_handle *handle);
-
void (*set_tso_stats)(struct hnae3_handle *handle, int enable);
void (*update_stats)(struct hnae3_handle *handle,
struct net_device_stats *net_stats);
@@ -393,7 +391,7 @@ struct hnae3_ae_ops {
int vector_num,
struct hnae3_ring_chain_node *vr_chain);
- void (*reset_queue)(struct hnae3_handle *handle, u16 queue_id);
+ int (*reset_queue)(struct hnae3_handle *handle, u16 queue_id);
u32 (*get_fw_version)(struct hnae3_handle *handle);
void (*get_mdix_mode)(struct hnae3_handle *handle,
u8 *tp_mdix_ctrl, u8 *tp_mdix);
@@ -404,7 +402,7 @@ struct hnae3_ae_ops {
int (*set_vf_vlan_filter)(struct hnae3_handle *handle, int vfid,
u16 vlan, u8 qos, __be16 proto);
int (*enable_hw_strip_rxvtag)(struct hnae3_handle *handle, bool enable);
- void (*reset_event)(struct hnae3_handle *handle);
+ void (*reset_event)(struct pci_dev *pdev, struct hnae3_handle *handle);
void (*get_channels)(struct hnae3_handle *handle,
struct ethtool_channels *ch);
void (*get_tqps_and_rss_info)(struct hnae3_handle *h,
@@ -431,6 +429,7 @@ struct hnae3_ae_ops {
struct ethtool_rxnfc *cmd, u32 *rule_locs);
int (*restore_fd_rules)(struct hnae3_handle *handle);
void (*enable_fd)(struct hnae3_handle *handle, bool enable);
+ pci_ers_result_t (*process_hw_error)(struct hnae3_ae_dev *ae_dev);
};
struct hnae3_dcb_ops {
@@ -481,6 +480,7 @@ struct hnae3_knic_private_info {
const struct hnae3_dcb_ops *dcb_ops;
u16 int_rl_setting;
+ enum pkt_hash_types rss_type;
};
struct hnae3_roce_private_info {
@@ -504,6 +504,15 @@ struct hnae3_unic_private_info {
#define HNAE3_SUPPORT_VF BIT(3)
#define HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK BIT(4)
+#define HNAE3_USER_UPE BIT(0) /* unicast promisc enabled by user */
+#define HNAE3_USER_MPE BIT(1) /* multicast promisc enabled by user */
+#define HNAE3_BPE BIT(2) /* broadcast promisc enable */
+#define HNAE3_OVERFLOW_UPE BIT(3) /* unicast mac vlan overflow */
+#define HNAE3_OVERFLOW_MPE BIT(4) /* multicast mac vlan overflow */
+#define HNAE3_VLAN_FLTR BIT(5) /* enable vlan filter */
+#define HNAE3_UPE (HNAE3_USER_UPE | HNAE3_OVERFLOW_UPE)
+#define HNAE3_MPE (HNAE3_USER_MPE | HNAE3_OVERFLOW_MPE)
+
struct hnae3_handle {
struct hnae3_client *client;
struct pci_dev *pdev;
@@ -522,6 +531,8 @@ struct hnae3_handle {
};
u32 numa_node_mask; /* for multi-chip support */
+
+ u8 netdev_flags;
};
#define hnae3_set_field(origin, mask, shift, val) \
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index c2692563a4d9..3f96aa30068e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -9,6 +9,7 @@
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/aer.h>
#include <linux/skbuff.h>
#include <linux/sctp.h>
#include <linux/vermagic.h>
@@ -21,6 +22,7 @@
static void hns3_clear_all_ring(struct hnae3_handle *h);
static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
+static void hns3_remove_hw_addr(struct net_device *netdev);
static const char hns3_driver_name[] = "hns3";
const char hns3_driver_version[] = VERMAGIC_STRING;
@@ -458,26 +460,83 @@ static int hns3_nic_mc_unsync(struct net_device *netdev,
return 0;
}
+static u8 hns3_get_netdev_flags(struct net_device *netdev)
+{
+ u8 flags = 0;
+
+ if (netdev->flags & IFF_PROMISC) {
+ flags = HNAE3_USER_UPE | HNAE3_USER_MPE;
+ } else {
+ flags |= HNAE3_VLAN_FLTR;
+ if (netdev->flags & IFF_ALLMULTI)
+ flags |= HNAE3_USER_MPE;
+ }
+
+ return flags;
+}
+
static void hns3_nic_set_rx_mode(struct net_device *netdev)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
+ u8 new_flags;
+ int ret;
- if (h->ae_algo->ops->set_promisc_mode) {
- if (netdev->flags & IFF_PROMISC)
- h->ae_algo->ops->set_promisc_mode(h, true, true);
- else if (netdev->flags & IFF_ALLMULTI)
- h->ae_algo->ops->set_promisc_mode(h, false, true);
- else
- h->ae_algo->ops->set_promisc_mode(h, false, false);
- }
- if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync))
+ new_flags = hns3_get_netdev_flags(netdev);
+
+ ret = __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
+ if (ret) {
netdev_err(netdev, "sync uc address fail\n");
+ if (ret == -ENOSPC)
+ new_flags |= HNAE3_OVERFLOW_UPE;
+ }
+
if (netdev->flags & IFF_MULTICAST) {
- if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
+ ret = __dev_mc_sync(netdev, hns3_nic_mc_sync,
+ hns3_nic_mc_unsync);
+ if (ret) {
netdev_err(netdev, "sync mc address fail\n");
+ if (ret == -ENOSPC)
+ new_flags |= HNAE3_OVERFLOW_MPE;
+ }
+ }
+
+ hns3_update_promisc_mode(netdev, new_flags);
+ /* When the user enables promiscuous mode, vlan filtering is disabled
+ * so that all packets get in. When promiscuous mode is forced by a
+ * MAC-VLAN table overflow, vlan filtering stays enabled.
+ */
+ hns3_enable_vlan_filter(netdev, new_flags & HNAE3_VLAN_FLTR);
+ h->netdev_flags = new_flags;
+}
+
+int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
- if (h->ae_algo->ops->update_mta_status)
- h->ae_algo->ops->update_mta_status(h);
+ if (h->ae_algo->ops->set_promisc_mode) {
+ return h->ae_algo->ops->set_promisc_mode(h,
+ promisc_flags & HNAE3_UPE,
+ promisc_flags & HNAE3_MPE);
+ }
+
+ return 0;
+}
+
+void hns3_enable_vlan_filter(struct net_device *netdev, bool enable)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ bool last_state;
+
+ if (h->pdev->revision >= 0x21 && h->ae_algo->ops->enable_vlan_filter) {
+ last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? true : false;
+ if (enable != last_state) {
+ netdev_info(netdev,
+ "%s vlan filter\n",
+ enable ? "enable" : "disable");
+ h->ae_algo->ops->enable_vlan_filter(h, enable);
+ }
}
}
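The reworked hns3_nic_set_rx_mode() above tries exact unicast/multicast filters first and only falls back to promiscuous mode when the MAC-VLAN table overflows (__dev_uc_sync()/__dev_mc_sync() returning -ENOSPC), recording the cause in netdev_flags so a user-requested promisc can later be told apart from an overflow-forced one. A hypothetical standalone sketch of that decision:

/* Hypothetical sketch of the "exact filters first, promisc on overflow"
 * decision above; the flag values mirror the HNAE3_* bits added in hnae3.h.
 */
#include <errno.h>
#include <stdbool.h>

enum {
	USER_UPE	= 1 << 0,	/* user asked for unicast promisc */
	USER_MPE	= 1 << 1,	/* user asked for multicast promisc */
	OVERFLOW_UPE	= 1 << 3,	/* unicast MAC table overflowed */
	OVERFLOW_MPE	= 1 << 4,	/* multicast MAC table overflowed */
};

static unsigned int compute_promisc_flags(bool user_promisc,
					  int uc_sync_err, int mc_sync_err)
{
	unsigned int flags = user_promisc ? (USER_UPE | USER_MPE) : 0;

	if (uc_sync_err == -ENOSPC)
		flags |= OVERFLOW_UPE;
	if (mc_sync_err == -ENOSPC)
		flags |= OVERFLOW_MPE;

	return flags;
}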
@@ -921,35 +980,28 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
}
static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
- int size, dma_addr_t dma, int frag_end,
- enum hns_desc_type type)
+ int size, int frag_end, enum hns_desc_type type)
{
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
struct hns3_desc *desc = &ring->desc[ring->next_to_use];
+ struct device *dev = ring_to_dev(ring);
u32 ol_type_vlan_len_msec = 0;
u16 bdtp_fe_sc_vld_ra_ri = 0;
+ struct skb_frag_struct *frag;
+ unsigned int frag_buf_num;
u32 type_cs_vlan_tso = 0;
struct sk_buff *skb;
u16 inner_vtag = 0;
u16 out_vtag = 0;
+ unsigned int k;
+ int sizeoflast;
u32 paylen = 0;
+ dma_addr_t dma;
u16 mss = 0;
u8 ol4_proto;
u8 il4_proto;
int ret;
- /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
- desc_cb->priv = priv;
- desc_cb->length = size;
- desc_cb->dma = dma;
- desc_cb->type = type;
-
- /* now, fill the descriptor */
- desc->addr = cpu_to_le64(dma);
- desc->tx.send_size = cpu_to_le16((u16)size);
- hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
- desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
-
if (type == DESC_TYPE_SKB) {
skb = (struct sk_buff *)priv;
paylen = skb->len;
@@ -990,38 +1042,47 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
desc->tx.mss = cpu_to_le16(mss);
desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
- }
- /* move ring pointer to next.*/
- ring_ptr_move_fw(ring, next_to_use);
+ dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
+ } else {
+ frag = (struct skb_frag_struct *)priv;
+ dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
+ }
- return 0;
-}
+ if (dma_mapping_error(ring->dev, dma)) {
+ ring->stats.sw_err_cnt++;
+ return -ENOMEM;
+ }
-static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv,
- int size, dma_addr_t dma, int frag_end,
- enum hns_desc_type type)
-{
- unsigned int frag_buf_num;
- unsigned int k;
- int sizeoflast;
- int ret;
+ desc_cb->length = size;
frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
sizeoflast = size % HNS3_MAX_BD_SIZE;
sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
- /* When the frag size is bigger than hardware, split this frag */
+ /* When frag size is bigger than hardware limit, split this frag */
for (k = 0; k < frag_buf_num; k++) {
- ret = hns3_fill_desc(ring, priv,
- (k == frag_buf_num - 1) ?
- sizeoflast : HNS3_MAX_BD_SIZE,
- dma + HNS3_MAX_BD_SIZE * k,
- frag_end && (k == frag_buf_num - 1) ? 1 : 0,
- (type == DESC_TYPE_SKB && !k) ?
- DESC_TYPE_SKB : DESC_TYPE_PAGE);
- if (ret)
- return ret;
+ /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
+ desc_cb->priv = priv;
+ desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
+ desc_cb->type = (type == DESC_TYPE_SKB && !k) ?
+ DESC_TYPE_SKB : DESC_TYPE_PAGE;
+
+ /* now, fill the descriptor */
+ desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
+ desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
+ (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
+ hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri,
+ frag_end && (k == frag_buf_num - 1) ?
+ 1 : 0);
+ desc->tx.bdtp_fe_sc_vld_ra_ri =
+ cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
+
+ /* move ring pointer to next.*/
+ ring_ptr_move_fw(ring, next_to_use);
+
+ desc_cb = &ring->desc_cb[ring->next_to_use];
+ desc = &ring->desc[ring->next_to_use];
}
return 0;
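The descriptor-fill rework above now maps the buffer itself and splits anything larger than the hardware BD limit across several descriptors: frag_buf_num is the ceiling of size / HNS3_MAX_BD_SIZE and sizeoflast is the remainder (or a full BD when the size divides evenly). A standalone sketch of that split using a made-up 4 KiB limit instead of the driver's constant:

/* Standalone sketch of the BD-splitting arithmetic above; MAX_BD is a
 * made-up limit standing in for HNS3_MAX_BD_SIZE.
 */
#include <stdio.h>

#define MAX_BD 4096

int main(void)
{
	int size = 10000;
	int bd_num = (size + MAX_BD - 1) / MAX_BD;		/* 3 BDs */
	int last = size % MAX_BD ? size % MAX_BD : MAX_BD;	/* 1808 bytes */
	int k;

	for (k = 0; k < bd_num; k++)
		printf("BD %d: %d bytes\n", k, k == bd_num - 1 ? last : MAX_BD);

	return 0;
}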
@@ -1077,7 +1138,7 @@ static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
return 0;
}
-static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig)
+static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
{
struct device *dev = ring_to_dev(ring);
unsigned int i;
@@ -1093,12 +1154,14 @@ static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig)
ring->desc_cb[ring->next_to_use].dma,
ring->desc_cb[ring->next_to_use].length,
DMA_TO_DEVICE);
- else
+ else if (ring->desc_cb[ring->next_to_use].length)
dma_unmap_page(dev,
ring->desc_cb[ring->next_to_use].dma,
ring->desc_cb[ring->next_to_use].length,
DMA_TO_DEVICE);
+ ring->desc_cb[ring->next_to_use].length = 0;
+
/* rollback one */
ring_ptr_move_bw(ring, next_to_use);
}
@@ -1110,12 +1173,10 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
struct hns3_nic_ring_data *ring_data =
&tx_ring_data(priv, skb->queue_mapping);
struct hns3_enet_ring *ring = ring_data->ring;
- struct device *dev = priv->dev;
struct netdev_queue *dev_queue;
struct skb_frag_struct *frag;
int next_to_use_head;
int next_to_use_frag;
- dma_addr_t dma;
int buf_num;
int seg_num;
int size;
@@ -1150,35 +1211,23 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
next_to_use_head = ring->next_to_use;
- dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma)) {
- netdev_err(netdev, "TX head DMA map failed\n");
- ring->stats.sw_err_cnt++;
- goto out_err_tx_ok;
- }
-
- ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
- DESC_TYPE_SKB);
+ ret = priv->ops.fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0,
+ DESC_TYPE_SKB);
if (ret)
- goto head_dma_map_err;
+ goto head_fill_err;
next_to_use_frag = ring->next_to_use;
/* Fill the fragments */
for (i = 1; i < seg_num; i++) {
frag = &skb_shinfo(skb)->frags[i - 1];
size = skb_frag_size(frag);
- dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma)) {
- netdev_err(netdev, "TX frag(%d) DMA map failed\n", i);
- ring->stats.sw_err_cnt++;
- goto frag_dma_map_err;
- }
- ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
- seg_num - 1 == i ? 1 : 0,
- DESC_TYPE_PAGE);
+
+ ret = priv->ops.fill_desc(ring, frag, size,
+ seg_num - 1 == i ? 1 : 0,
+ DESC_TYPE_PAGE);
if (ret)
- goto frag_dma_map_err;
+ goto frag_fill_err;
}
/* Complete translate all packets */
@@ -1191,11 +1240,11 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
-frag_dma_map_err:
- hns_nic_dma_unmap(ring, next_to_use_frag);
+frag_fill_err:
+ hns3_clear_desc(ring, next_to_use_frag);
-head_dma_map_err:
- hns_nic_dma_unmap(ring, next_to_use_head);
+head_fill_err:
+ hns3_clear_desc(ring, next_to_use_head);
out_err_tx_ok:
dev_kfree_skb_any(skb);
@@ -1257,13 +1306,10 @@ static int hns3_nic_set_features(struct net_device *netdev,
int ret;
if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
- if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
- priv->ops.fill_desc = hns3_fill_desc_tso;
+ if (features & (NETIF_F_TSO | NETIF_F_TSO6))
priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
- } else {
- priv->ops.fill_desc = hns3_fill_desc;
+ else
priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
- }
}
if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
@@ -1450,18 +1496,22 @@ static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
return ret;
}
-static void hns3_restore_vlan(struct net_device *netdev)
+static int hns3_restore_vlan(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ int ret = 0;
u16 vid;
- int ret;
for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
ret = hns3_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
- if (ret)
- netdev_warn(netdev, "Restore vlan: %d filter, ret:%d\n",
- vid, ret);
+ if (ret) {
+ netdev_err(netdev, "Restore vlan: %d filter, ret:%d\n",
+ vid, ret);
+ return ret;
+ }
}
+
+ return ret;
}
static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
@@ -1570,7 +1620,7 @@ static void hns3_nic_net_timeout(struct net_device *ndev)
/* request the reset */
if (h->ae_algo->ops->reset_event)
- h->ae_algo->ops->reset_event(h);
+ h->ae_algo->ops->reset_event(h->pdev, h);
}
static const struct net_device_ops hns3_nic_netdev_ops = {
@@ -1728,6 +1778,52 @@ static void hns3_shutdown(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D3hot);
}
+static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+ pci_ers_result_t ret;
+
+ dev_info(&pdev->dev, "PCI error detected, state(=%d)!!\n", state);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (!ae_dev) {
+ dev_err(&pdev->dev,
+ "Can't recover - error happened during device init\n");
+ return PCI_ERS_RESULT_NONE;
+ }
+
+ if (ae_dev->ops->process_hw_error)
+ ret = ae_dev->ops->process_hw_error(ae_dev);
+ else
+ return PCI_ERS_RESULT_NONE;
+
+ return ret;
+}
+
+static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+
+ dev_info(dev, "requesting reset due to PCI error\n");
+
+ /* request the reset */
+ if (ae_dev->ops->reset_event) {
+ ae_dev->ops->reset_event(pdev, NULL);
+ return PCI_ERS_RESULT_RECOVERED;
+ }
+
+ return PCI_ERS_RESULT_DISCONNECT;
+}
+
+static const struct pci_error_handlers hns3_err_handler = {
+ .error_detected = hns3_error_detected,
+ .slot_reset = hns3_slot_reset,
+};
+
static struct pci_driver hns3_driver = {
.name = hns3_driver_name,
.id_table = hns3_pci_tbl,
@@ -1735,6 +1831,7 @@ static struct pci_driver hns3_driver = {
.remove = hns3_remove,
.shutdown = hns3_shutdown,
.sriov_configure = hns3_pci_sriov_configure,
+ .err_handler = &hns3_err_handler,
};
/* set default feature to hns3 */
@@ -1834,7 +1931,7 @@ static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
if (cb->type == DESC_TYPE_SKB)
dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
ring_to_dma_dir(ring));
- else
+ else if (cb->length)
dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
ring_to_dma_dir(ring));
}
@@ -2202,18 +2299,18 @@ static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
napi_gro_receive(&ring->tqp_vector->napi, skb);
}
-static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
- struct hns3_desc *desc, u32 l234info)
+static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
+ struct hns3_desc *desc, u32 l234info,
+ u16 *vlan_tag)
{
struct pci_dev *pdev = ring->tqp->handle->pdev;
- u16 vlan_tag;
if (pdev->revision == 0x20) {
- vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
- if (!(vlan_tag & VLAN_VID_MASK))
- vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+ *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+ if (!(*vlan_tag & VLAN_VID_MASK))
+ *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
- return vlan_tag;
+ return (*vlan_tag != 0);
}
#define HNS3_STRP_OUTER_VLAN 0x1
@@ -2222,17 +2319,29 @@ static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
HNS3_RXD_STRP_TAGP_S)) {
case HNS3_STRP_OUTER_VLAN:
- vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
- break;
+ *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+ return true;
case HNS3_STRP_INNER_VLAN:
- vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
- break;
+ *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+ return true;
default:
- vlan_tag = 0;
- break;
+ return false;
}
+}
- return vlan_tag;
+static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
+ struct sk_buff *skb)
+{
+ struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
+ struct hnae3_handle *handle = ring->tqp->handle;
+ enum pkt_hash_types rss_type;
+
+ if (le32_to_cpu(desc->rx.rss_hash))
+ rss_type = handle->kinfo.rss_type;
+ else
+ rss_type = PKT_HASH_TYPE_NONE;
+
+ skb_set_hash(skb, le32_to_cpu(desc->rx.rss_hash), rss_type);
}
static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
@@ -2334,8 +2443,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
u16 vlan_tag;
- vlan_tag = hns3_parse_vlan_tag(ring, desc, l234info);
- if (vlan_tag & VLAN_VID_MASK)
+ if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
__vlan_hwaccel_put_tag(skb,
htons(ETH_P_8021Q),
vlan_tag);
@@ -2377,6 +2485,8 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
ring->tqp_vector->rx_group.total_bytes += skb->len;
hns3_rx_checksum(ring, skb, desc);
+ hns3_set_rx_skb_rss_type(ring, skb);
+
return 0;
}
@@ -2623,7 +2733,7 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
GFP_KERNEL);
if (!chain)
- return -ENOMEM;
+ goto err_free_chain;
cur_chain->next = chain;
chain->tqp_index = tx_ring->tqp->tqp_index;
@@ -2653,7 +2763,7 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
while (rx_ring) {
chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
if (!chain)
- return -ENOMEM;
+ goto err_free_chain;
cur_chain->next = chain;
chain->tqp_index = rx_ring->tqp->tqp_index;
@@ -2668,6 +2778,16 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
}
return 0;
+
+err_free_chain:
+ cur_chain = head->next;
+ while (cur_chain) {
+ chain = cur_chain->next;
+ devm_kfree(&pdev->dev, chain);
+ cur_chain = chain;
+ }
+
+ return -ENOMEM;
}
static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
@@ -2717,7 +2837,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
struct hnae3_handle *h = priv->ae_handle;
struct hns3_enet_tqp_vector *tqp_vector;
int ret = 0;
- u16 i;
+ int i;
hns3_nic_set_cpumask(priv);
@@ -2764,13 +2884,19 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
if (ret)
- return ret;
+ goto map_ring_fail;
netif_napi_add(priv->netdev, &tqp_vector->napi,
hns3_nic_common_poll, NAPI_POLL_WEIGHT);
}
return 0;
+
+map_ring_fail:
+ while (i--)
+ netif_napi_del(&priv->tqp_vector[i].napi);
+
+ return ret;
}
static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
@@ -2927,8 +3053,10 @@ static int hns3_queue_to_ring(struct hnae3_queue *tqp,
return ret;
ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
- if (ret)
+ if (ret) {
+ devm_kfree(priv->dev, priv->ring_data[tqp->tqp_index].ring);
return ret;
+ }
return 0;
}
@@ -2955,6 +3083,12 @@ static int hns3_get_ring_config(struct hns3_nic_priv *priv)
return 0;
err:
+ while (i--) {
+ devm_kfree(priv->dev, priv->ring_data[i].ring);
+ devm_kfree(priv->dev,
+ priv->ring_data[i + h->kinfo.num_tqps].ring);
+ }
+
devm_kfree(&pdev->dev, priv->ring_data);
return ret;
}
@@ -3122,9 +3256,6 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
int i;
for (i = 0; i < h->kinfo.num_tqps; i++) {
- if (h->ae_algo->ops->reset_queue)
- h->ae_algo->ops->reset_queue(h, i);
-
hns3_fini_ring(priv->ring_data[i].ring);
hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
}
@@ -3132,11 +3263,12 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
}
/* Set mac addr if it is configured, or leave it to the AE driver */
-static void hns3_init_mac_addr(struct net_device *netdev, bool init)
+static int hns3_init_mac_addr(struct net_device *netdev, bool init)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
u8 mac_addr_temp[ETH_ALEN];
+ int ret = 0;
if (h->ae_algo->ops->get_mac_addr && init) {
h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
@@ -3151,17 +3283,9 @@ static void hns3_init_mac_addr(struct net_device *netdev, bool init)
}
if (h->ae_algo->ops->set_mac_addr)
- h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);
-
-}
+ ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);
-static void hns3_uninit_mac_addr(struct net_device *netdev)
-{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
-
- if (h->ae_algo->ops->rm_uc_addr)
- h->ae_algo->ops->rm_uc_addr(h, netdev->dev_addr);
+ return ret;
}
static int hns3_restore_fd_rules(struct net_device *netdev)
@@ -3187,14 +3311,12 @@ static void hns3_nic_set_priv_ops(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ priv->ops.fill_desc = hns3_fill_desc;
if ((netdev->features & NETIF_F_TSO) ||
- (netdev->features & NETIF_F_TSO6)) {
- priv->ops.fill_desc = hns3_fill_desc_tso;
+ (netdev->features & NETIF_F_TSO6))
priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
- } else {
- priv->ops.fill_desc = hns3_fill_desc;
+ else
priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
- }
}
static int hns3_client_init(struct hnae3_handle *handle)
@@ -3296,6 +3418,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret;
+ hns3_remove_hw_addr(netdev);
+
if (netdev->reg_state != NETREG_UNINITIALIZED)
unregister_netdev(netdev);
@@ -3319,8 +3443,6 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
priv->ring_data = NULL;
- hns3_uninit_mac_addr(netdev);
-
free_netdev(netdev);
}
@@ -3376,20 +3498,48 @@ err_out:
return ret;
}
-static void hns3_recover_hw_addr(struct net_device *ndev)
+static int hns3_recover_hw_addr(struct net_device *ndev)
{
struct netdev_hw_addr_list *list;
struct netdev_hw_addr *ha, *tmp;
+ int ret = 0;
/* go through and sync uc_addr entries to the device */
list = &ndev->uc;
- list_for_each_entry_safe(ha, tmp, &list->list, list)
- hns3_nic_uc_sync(ndev, ha->addr);
+ list_for_each_entry_safe(ha, tmp, &list->list, list) {
+ ret = hns3_nic_uc_sync(ndev, ha->addr);
+ if (ret)
+ return ret;
+ }
/* go through and sync mc_addr entries to the device */
list = &ndev->mc;
+ list_for_each_entry_safe(ha, tmp, &list->list, list) {
+ ret = hns3_nic_mc_sync(ndev, ha->addr);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static void hns3_remove_hw_addr(struct net_device *netdev)
+{
+ struct netdev_hw_addr_list *list;
+ struct netdev_hw_addr *ha, *tmp;
+
+ hns3_nic_uc_unsync(netdev, netdev->dev_addr);
+
+ /* go through and unsync uc_addr entries to the device */
+ list = &netdev->uc;
+ list_for_each_entry_safe(ha, tmp, &list->list, list)
+ hns3_nic_uc_unsync(netdev, ha->addr);
+
+ /* go through and unsync mc_addr entries to the device */
+ list = &netdev->mc;
list_for_each_entry_safe(ha, tmp, &list->list, list)
- hns3_nic_mc_sync(ndev, ha->addr);
+ if (ha->refcount > 1)
+ hns3_nic_mc_unsync(netdev, ha->addr);
}
static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
@@ -3497,7 +3647,10 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h)
int ret;
for (i = 0; i < h->kinfo.num_tqps; i++) {
- h->ae_algo->ops->reset_queue(h, i);
+ ret = h->ae_algo->ops->reset_queue(h, i);
+ if (ret)
+ return ret;
+
hns3_init_ring_hw(priv->ring_data[i].ring);
/* We need to clear tx ring here because self test will
@@ -3586,17 +3739,33 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
{
struct net_device *netdev = handle->kinfo.netdev;
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ bool vlan_filter_enable;
int ret;
- hns3_init_mac_addr(netdev, false);
- hns3_nic_set_rx_mode(netdev);
- hns3_recover_hw_addr(netdev);
+ ret = hns3_init_mac_addr(netdev, false);
+ if (ret)
+ return ret;
+
+ ret = hns3_recover_hw_addr(netdev);
+ if (ret)
+ return ret;
+
+ ret = hns3_update_promisc_mode(netdev, handle->netdev_flags);
+ if (ret)
+ return ret;
+
+ vlan_filter_enable = netdev->flags & IFF_PROMISC ? false : true;
+ hns3_enable_vlan_filter(netdev, vlan_filter_enable);
/* Hardware table is only clear when pf resets */
- if (!(handle->flags & HNAE3_SUPPORT_VF))
- hns3_restore_vlan(netdev);
+ if (!(handle->flags & HNAE3_SUPPORT_VF)) {
+ ret = hns3_restore_vlan(netdev);
+ return ret;
+ }
- hns3_restore_fd_rules(netdev);
+ ret = hns3_restore_fd_rules(netdev);
+ if (ret)
+ return ret;
/* Carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
@@ -3637,14 +3806,14 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
if (ret)
netdev_err(netdev, "uninit ring error\n");
- hns3_uninit_mac_addr(netdev);
-
- /* it is cumbersome for hardware to pick-and-choose rules for deletion
- * from TCAM. Hence, for function reset software intervention is
- * required to delete the rules
+ /* it is cumbersome for hardware to pick-and-choose entries for deletion
+ * from table space. Hence, for a function reset, software intervention is
+ * required to delete the entries
*/
- if (hns3_dev_ongoing_func_reset(ae_dev))
+ if (hns3_dev_ongoing_func_reset(ae_dev)) {
+ hns3_remove_hw_addr(netdev);
hns3_del_all_fd_rules(netdev, false);
+ }
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index ac881e8fc05d..d3636d088aa3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -419,8 +419,7 @@ struct hns3_nic_ring_data {
struct hns3_nic_ops {
int (*fill_desc)(struct hns3_enet_ring *ring, void *priv,
- int size, dma_addr_t dma, int frag_end,
- enum hns_desc_type type);
+ int size, int frag_end, enum hns_desc_type type);
int (*maybe_stop_tx)(struct sk_buff **out_skb,
int *bnum, struct hns3_enet_ring *ring);
void (*get_rxd_bnum)(u32 bnum_flag, int *out_bnum);
@@ -640,6 +639,9 @@ void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
u32 rl_value);
+void hns3_enable_vlan_filter(struct net_device *netdev, bool enable);
+int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags);
+
#ifdef CONFIG_HNS3_DCB
void hns3_dcbnl_setup(struct hnae3_handle *handle);
#else
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 7d79a074a214..a4762c2b8ba1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -71,6 +71,7 @@ struct hns3_link_mode_mapping {
static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
+ bool vlan_filter_enable;
int ret;
if (!h->ae_algo->ops->set_loopback ||
@@ -91,7 +92,14 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
if (ret)
return ret;
- h->ae_algo->ops->set_promisc_mode(h, en, en);
+ if (en) {
+ h->ae_algo->ops->set_promisc_mode(h, true, true);
+ } else {
+ /* recover promisc mode before loopback test */
+ hns3_update_promisc_mode(ndev, h->netdev_flags);
+ vlan_filter_enable = ndev->flags & IFF_PROMISC ? false : true;
+ hns3_enable_vlan_filter(ndev, vlan_filter_enable);
+ }
return ret;
}
@@ -678,12 +686,13 @@ static int hns3_set_rss(struct net_device *netdev, const u32 *indir,
if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss)
return -EOPNOTSUPP;
- /* currently we only support Toeplitz hash */
- if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (hfunc != ETH_RSS_HASH_TOP)) {
- netdev_err(netdev,
- "hash func not supported (only Toeplitz hash)\n");
+ if ((h->pdev->revision == 0x20 &&
+ hfunc != ETH_RSS_HASH_TOP) || (hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR)) {
+ netdev_err(netdev, "hash func not supported\n");
return -EOPNOTSUPP;
}
+
if (!indir) {
netdev_err(netdev,
"set rss failed for indir is empty\n");
@@ -1077,6 +1086,7 @@ static const struct ethtool_ops hns3vf_ethtool_ops = {
.get_ethtool_stats = hns3_get_stats,
.get_sset_count = hns3_get_sset_count,
.get_rxnfc = hns3_get_rxnfc,
+ .set_rxnfc = hns3_set_rxnfc,
.get_rxfh_key_size = hns3_get_rss_key_size,
.get_rxfh_indir_size = hns3_get_rss_indir_size,
.get_rxfh = hns3_get_rss,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
index cb8ddd043476..580e81743681 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
@@ -6,6 +6,6 @@
ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
obj-$(CONFIG_HNS3_HCLGE) += hclge.o
-hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o
+hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o
hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index ac13cb2b168e..690f62ed87dc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -24,15 +24,15 @@ static int hclge_ring_space(struct hclge_cmq_ring *ring)
return ring->desc_num - used - 1;
}
-static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int h)
+static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int head)
{
- int u = ring->next_to_use;
- int c = ring->next_to_clean;
+ int ntu = ring->next_to_use;
+ int ntc = ring->next_to_clean;
- if (unlikely(h >= ring->desc_num))
- return 0;
+ if (ntu > ntc)
+ return head >= ntc && head <= ntu;
- return u > c ? (h > c && h <= u) : (h > c || h <= u);
+ return head >= ntc || head <= ntu;
}
static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
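The rewritten is_valid_csq_clean_head() above accepts a hardware-reported head pointer only if it falls inside the window of in-flight descriptors between next_to_clean and next_to_use, including the case where that window wraps around the end of the ring. A standalone sketch of the same window test with a couple of worked values (plain C, illustrative only):

#include <stdbool.h>
#include <stdio.h>

/* Is 'head' inside the window [ntc, ntu] of a circular ring? */
static bool head_in_window(int ntc, int ntu, int head)
{
	if (ntu > ntc)				/* window does not wrap */
		return head >= ntc && head <= ntu;
	return head >= ntc || head <= ntu;	/* window wraps past the end */
}

int main(void)
{
	/* no wrap: ntc=2, ntu=5 -> head 3 is valid, head 7 is not */
	printf("%d %d\n", head_in_window(2, 5, 3), head_in_window(2, 5, 7));
	/* wrap: ntc=6, ntu=1 -> heads 7 and 0 are valid, head 3 is not */
	printf("%d %d %d\n", head_in_window(6, 1, 7), head_in_window(6, 1, 0),
	       head_in_window(6, 1, 3));
	return 0;
}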
@@ -304,6 +304,10 @@ int hclge_cmd_queue_init(struct hclge_dev *hdev)
{
int ret;
+ /* Setup the lock for command queue */
+ spin_lock_init(&hdev->hw.cmq.csq.lock);
+ spin_lock_init(&hdev->hw.cmq.crq.lock);
+
/* Setup the queue entries for use cmd queue */
hdev->hw.cmq.csq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
hdev->hw.cmq.crq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
@@ -337,18 +341,20 @@ int hclge_cmd_init(struct hclge_dev *hdev)
u32 version;
int ret;
+ spin_lock_bh(&hdev->hw.cmq.csq.lock);
+ spin_lock_bh(&hdev->hw.cmq.crq.lock);
+
hdev->hw.cmq.csq.next_to_clean = 0;
hdev->hw.cmq.csq.next_to_use = 0;
hdev->hw.cmq.crq.next_to_clean = 0;
hdev->hw.cmq.crq.next_to_use = 0;
- /* Setup the lock for command queue */
- spin_lock_init(&hdev->hw.cmq.csq.lock);
- spin_lock_init(&hdev->hw.cmq.crq.lock);
-
hclge_cmd_init_regs(&hdev->hw);
clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ spin_unlock_bh(&hdev->hw.cmq.crq.lock);
+ spin_unlock_bh(&hdev->hw.cmq.csq.lock);
+
ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
if (ret) {
dev_err(&hdev->pdev->dev,
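The two hunks above move spin_lock_init() out of hclge_cmd_init(), which can run again on reset, into hclge_cmd_queue_init(), which runs once, and instead take both command-queue locks with the _bh variants while the ring pointers are reset. Re-initialising a lock that a softirq user may currently hold is unsafe; the fix is the usual init-once, lock-on-reinit pattern. A kernel-style sketch of that pattern (the struct and function names here are illustrative, not from the driver):

#include <linux/spinlock.h>

struct ring_ctx {
	spinlock_t lock;
	int next_to_use;
	int next_to_clean;
};

/* Called exactly once at probe time: the only safe place to init the lock. */
static void ring_ctx_setup(struct ring_ctx *ctx)
{
	spin_lock_init(&ctx->lock);
}

/* May run again on reset: reset the ring state under the existing lock
 * instead of re-initialising the lock itself.
 */
static void ring_ctx_reinit(struct ring_ctx *ctx)
{
	spin_lock_bh(&ctx->lock);
	ctx->next_to_use = 0;
	ctx->next_to_clean = 0;
	spin_unlock_bh(&ctx->lock);
}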
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index e5e66b27e03e..872cd4bdd70d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -175,15 +175,9 @@ enum hclge_opcode_type {
HCLGE_OPC_MAC_VLAN_REMOVE = 0x1001,
HCLGE_OPC_MAC_VLAN_TYPE_ID = 0x1002,
HCLGE_OPC_MAC_VLAN_INSERT = 0x1003,
+ HCLGE_OPC_MAC_VLAN_ALLOCATE = 0x1004,
HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010,
HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011,
- HCLGE_OPC_MAC_VLAN_MASK_SET = 0x1012,
-
- /* Multicast linear table commands */
- HCLGE_OPC_MTA_MAC_MODE_CFG = 0x1020,
- HCLGE_OPC_MTA_MAC_FUNC_CFG = 0x1021,
- HCLGE_OPC_MTA_TBL_ITEM_CFG = 0x1022,
- HCLGE_OPC_MTA_TBL_ITEM_QUERY = 0x1023,
/* VLAN commands */
HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100,
@@ -215,6 +209,28 @@ enum hclge_opcode_type {
/* Led command */
HCLGE_OPC_LED_STATUS_CFG = 0xB000,
+
+ /* Error INT commands */
+ HCLGE_TM_SCH_ECC_INT_EN = 0x0829,
+ HCLGE_TM_SCH_ECC_ERR_RINT_CMD = 0x082d,
+ HCLGE_TM_SCH_ECC_ERR_RINT_CE = 0x082f,
+ HCLGE_TM_SCH_ECC_ERR_RINT_NFE = 0x0830,
+ HCLGE_TM_SCH_ECC_ERR_RINT_FE = 0x0831,
+ HCLGE_TM_SCH_MBIT_ECC_INFO_CMD = 0x0833,
+ HCLGE_COMMON_ECC_INT_CFG = 0x1505,
+ HCLGE_IGU_EGU_TNL_INT_QUERY = 0x1802,
+ HCLGE_IGU_EGU_TNL_INT_EN = 0x1803,
+ HCLGE_IGU_EGU_TNL_INT_CLR = 0x1804,
+ HCLGE_IGU_COMMON_INT_QUERY = 0x1805,
+ HCLGE_IGU_COMMON_INT_EN = 0x1806,
+ HCLGE_IGU_COMMON_INT_CLR = 0x1807,
+ HCLGE_TM_QCN_MEM_INT_CFG = 0x1A14,
+ HCLGE_TM_QCN_MEM_INT_INFO_CMD = 0x1A17,
+ HCLGE_PPP_CMD0_INT_CMD = 0x2100,
+ HCLGE_PPP_CMD1_INT_CMD = 0x2101,
+ HCLGE_NCSI_INT_QUERY = 0x2400,
+ HCLGE_NCSI_INT_EN = 0x2401,
+ HCLGE_NCSI_INT_CLR = 0x2402,
};
#define HCLGE_TQP_REG_OFFSET 0x80000
@@ -402,6 +418,8 @@ struct hclge_pf_res_cmd {
#define HCLGE_CFG_RSS_SIZE_M GENMASK(31, 24)
#define HCLGE_CFG_SPEED_ABILITY_S 0
#define HCLGE_CFG_SPEED_ABILITY_M GENMASK(7, 0)
+#define HCLGE_CFG_UMV_TBL_SPACE_S 16
+#define HCLGE_CFG_UMV_TBL_SPACE_M GENMASK(31, 16)
struct hclge_cfg_param_cmd {
__le32 offset;
@@ -591,13 +609,12 @@ struct hclge_mac_vlan_tbl_entry_cmd {
u8 rsv2[6];
};
-#define HCLGE_VLAN_MASK_EN_B 0
-struct hclge_mac_vlan_mask_entry_cmd {
- u8 rsv0[2];
- u8 vlan_mask;
- u8 rsv1;
- u8 mac_mask[6];
- u8 rsv2[14];
+#define HCLGE_UMV_SPC_ALC_B 0
+struct hclge_umv_spc_alc_cmd {
+ u8 allocate;
+ u8 rsv1[3];
+ __le32 space_size;
+ u8 rsv2[16];
};
#define HCLGE_MAC_MGR_MASK_VLAN_B BIT(0)
@@ -622,30 +639,6 @@ struct hclge_mac_mgr_tbl_entry_cmd {
u8 rsv3[2];
};
-#define HCLGE_CFG_MTA_MAC_SEL_S 0
-#define HCLGE_CFG_MTA_MAC_SEL_M GENMASK(1, 0)
-#define HCLGE_CFG_MTA_MAC_EN_B 7
-struct hclge_mta_filter_mode_cmd {
- u8 dmac_sel_en; /* Use lowest 2 bit as sel_mode, bit 7 as enable */
- u8 rsv[23];
-};
-
-#define HCLGE_CFG_FUNC_MTA_ACCEPT_B 0
-struct hclge_cfg_func_mta_filter_cmd {
- u8 accept; /* Only used lowest 1 bit */
- u8 function_id;
- u8 rsv[22];
-};
-
-#define HCLGE_CFG_MTA_ITEM_ACCEPT_B 0
-#define HCLGE_CFG_MTA_ITEM_IDX_S 0
-#define HCLGE_CFG_MTA_ITEM_IDX_M GENMASK(11, 0)
-struct hclge_cfg_func_mta_item_cmd {
- __le16 item_idx; /* Only used lowest 12 bit */
- u8 accept; /* Only used lowest 1 bit */
- u8 rsv[21];
-};
-
struct hclge_mac_vlan_add_cmd {
__le16 flags;
__le16 mac_addr_hi16;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
new file mode 100644
index 000000000000..123c37e653f3
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -0,0 +1,1090 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#include "hclge_err.h"
+
+static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = {
+ { .int_msk = BIT(0), .msg = "imp_itcm0_ecc_1bit_err" },
+ { .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err" },
+ { .int_msk = BIT(2), .msg = "imp_itcm1_ecc_1bit_err" },
+ { .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err" },
+ { .int_msk = BIT(4), .msg = "imp_itcm2_ecc_1bit_err" },
+ { .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err" },
+ { .int_msk = BIT(6), .msg = "imp_itcm3_ecc_1bit_err" },
+ { .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err" },
+ { .int_msk = BIT(8), .msg = "imp_dtcm0_mem0_ecc_1bit_err" },
+ { .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err" },
+ { .int_msk = BIT(10), .msg = "imp_dtcm0_mem1_ecc_1bit_err" },
+ { .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err" },
+ { .int_msk = BIT(12), .msg = "imp_dtcm1_mem0_ecc_1bit_err" },
+ { .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err" },
+ { .int_msk = BIT(14), .msg = "imp_dtcm1_mem1_ecc_1bit_err" },
+ { .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_imp_itcm4_ecc_int[] = {
+ { .int_msk = BIT(0), .msg = "imp_itcm4_ecc_1bit_err" },
+ { .int_msk = BIT(1), .msg = "imp_itcm4_ecc_mbit_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = {
+ { .int_msk = BIT(0), .msg = "cmdq_nic_rx_depth_ecc_1bit_err" },
+ { .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err" },
+ { .int_msk = BIT(2), .msg = "cmdq_nic_tx_depth_ecc_1bit_err" },
+ { .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err" },
+ { .int_msk = BIT(4), .msg = "cmdq_nic_rx_tail_ecc_1bit_err" },
+ { .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err" },
+ { .int_msk = BIT(6), .msg = "cmdq_nic_tx_tail_ecc_1bit_err" },
+ { .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err" },
+ { .int_msk = BIT(8), .msg = "cmdq_nic_rx_head_ecc_1bit_err" },
+ { .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err" },
+ { .int_msk = BIT(10), .msg = "cmdq_nic_tx_head_ecc_1bit_err" },
+ { .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err" },
+ { .int_msk = BIT(12), .msg = "cmdq_nic_rx_addr_ecc_1bit_err" },
+ { .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err" },
+ { .int_msk = BIT(14), .msg = "cmdq_nic_tx_addr_ecc_1bit_err" },
+ { .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_cmdq_rocee_mem_ecc_int[] = {
+ { .int_msk = BIT(0), .msg = "cmdq_rocee_rx_depth_ecc_1bit_err" },
+ { .int_msk = BIT(1), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err" },
+ { .int_msk = BIT(2), .msg = "cmdq_rocee_tx_depth_ecc_1bit_err" },
+ { .int_msk = BIT(3), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err" },
+ { .int_msk = BIT(4), .msg = "cmdq_rocee_rx_tail_ecc_1bit_err" },
+ { .int_msk = BIT(5), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err" },
+ { .int_msk = BIT(6), .msg = "cmdq_rocee_tx_tail_ecc_1bit_err" },
+ { .int_msk = BIT(7), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err" },
+ { .int_msk = BIT(8), .msg = "cmdq_rocee_rx_head_ecc_1bit_err" },
+ { .int_msk = BIT(9), .msg = "cmdq_rocee_rx_head_ecc_mbit_err" },
+ { .int_msk = BIT(10), .msg = "cmdq_rocee_tx_head_ecc_1bit_err" },
+ { .int_msk = BIT(11), .msg = "cmdq_rocee_tx_head_ecc_mbit_err" },
+ { .int_msk = BIT(12), .msg = "cmdq_rocee_rx_addr_ecc_1bit_err" },
+ { .int_msk = BIT(13), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err" },
+ { .int_msk = BIT(14), .msg = "cmdq_rocee_tx_addr_ecc_1bit_err" },
+ { .int_msk = BIT(15), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = {
+ { .int_msk = BIT(0), .msg = "tqp_int_cfg_even_ecc_1bit_err" },
+ { .int_msk = BIT(1), .msg = "tqp_int_cfg_odd_ecc_1bit_err" },
+ { .int_msk = BIT(2), .msg = "tqp_int_ctrl_even_ecc_1bit_err" },
+ { .int_msk = BIT(3), .msg = "tqp_int_ctrl_odd_ecc_1bit_err" },
+ { .int_msk = BIT(4), .msg = "tx_que_scan_int_ecc_1bit_err" },
+ { .int_msk = BIT(5), .msg = "rx_que_scan_int_ecc_1bit_err" },
+ { .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err" },
+ { .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err" },
+ { .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err" },
+ { .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err" },
+ { .int_msk = BIT(10), .msg = "tx_que_scan_int_ecc_mbit_err" },
+ { .int_msk = BIT(11), .msg = "rx_que_scan_int_ecc_mbit_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_igu_com_err_int[] = {
+ { .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err" },
+ { .int_msk = BIT(1), .msg = "igu_rx_buf0_ecc_1bit_err" },
+ { .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err" },
+ { .int_msk = BIT(3), .msg = "igu_rx_buf1_ecc_1bit_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_igu_egu_tnl_err_int[] = {
+ { .int_msk = BIT(0), .msg = "rx_buf_overflow" },
+ { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow" },
+ { .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow" },
+ { .int_msk = BIT(3), .msg = "tx_buf_overflow" },
+ { .int_msk = BIT(4), .msg = "tx_buf_underrun" },
+ { .int_msk = BIT(5), .msg = "rx_stp_buf_overflow" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ncsi_err_int[] = {
+ { .int_msk = BIT(0), .msg = "ncsi_tx_ecc_1bit_err" },
+ { .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ppp_mpf_int0[] = {
+ { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_1bit_err" },
+ { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_1bit_err" },
+ { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_1bit_err" },
+ { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_1bit_err" },
+ { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_1bit_err" },
+ { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_1bit_err" },
+ { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_1bit_err" },
+ { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_1bit_err" },
+ { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_1bit_err" },
+ { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_1bit_err" },
+ { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_1bit_err" },
+ { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_1bit_err" },
+ { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_1bit_err" },
+ { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_1bit_err" },
+ { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_1bit_err" },
+ { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_1bit_err" },
+ { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_1bit_err" },
+ { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_1bit_err" },
+ { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_1bit_err" },
+ { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_1bit_err" },
+ { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_1bit_err" },
+ { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_1bit_err" },
+ { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_1bit_err" },
+ { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_1bit_err" },
+ { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_1bit_err" },
+ { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_1bit_err" },
+ { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_1bit_err" },
+ { .int_msk = BIT(27),
+ .msg = "flow_director_ad_mem0_ecc_1bit_err" },
+ { .int_msk = BIT(28),
+ .msg = "flow_director_ad_mem1_ecc_1bit_err" },
+ { .int_msk = BIT(29),
+ .msg = "rx_vlan_tag_memory_ecc_1bit_err" },
+ { .int_msk = BIT(30),
+ .msg = "Tx_UP_mapping_config_mem_ecc_1bit_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ppp_mpf_int1[] = {
+ { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err" },
+ { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err" },
+ { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err" },
+ { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err" },
+ { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err" },
+ { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err" },
+ { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_erre" },
+ { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err" },
+ { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err" },
+ { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err" },
+ { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err" },
+ { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err" },
+ { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err" },
+ { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err" },
+ { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err" },
+ { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err" },
+ { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err" },
+ { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err" },
+ { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_m1bit_err" },
+ { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err" },
+ { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_mbit_err" },
+ { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err" },
+ { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err" },
+ { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err" },
+ { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err" },
+ { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err" },
+ { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err" },
+ { .int_msk = BIT(27),
+ .msg = "flow_director_ad_mem0_ecc_mbit_err" },
+ { .int_msk = BIT(28),
+ .msg = "flow_director_ad_mem1_ecc_mbit_err" },
+ { .int_msk = BIT(29),
+ .msg = "rx_vlan_tag_memory_ecc_mbit_err" },
+ { .int_msk = BIT(30),
+ .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ppp_pf_int[] = {
+ { .int_msk = BIT(0), .msg = "Tx_vlan_tag_err" },
+ { .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ppp_mpf_int2[] = {
+ { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_1bit_err" },
+ { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_1bit_err" },
+ { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_1bit_err" },
+ { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_1bit_err" },
+ { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_1bit_err" },
+ { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_1bit_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ppp_mpf_int3[] = {
+ { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err" },
+ { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err" },
+ { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err" },
+ { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err" },
+ { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_mbit_err" },
+ { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err" },
+ { /* sentinel */ }
+};
+
+struct hclge_tm_sch_ecc_info {
+ const char *name;
+};
+
+static const struct hclge_tm_sch_ecc_info hclge_tm_sch_ecc_err[7][15] = {
+ {
+ { .name = "QSET_QUEUE_CTRL:PRI_LEN TAB" },
+ { .name = "QSET_QUEUE_CTRL:SPA_LEN TAB" },
+ { .name = "QSET_QUEUE_CTRL:SPB_LEN TAB" },
+ { .name = "QSET_QUEUE_CTRL:WRRA_LEN TAB" },
+ { .name = "QSET_QUEUE_CTRL:WRRB_LEN TAB" },
+ { .name = "QSET_QUEUE_CTRL:SPA_HPTR TAB" },
+ { .name = "QSET_QUEUE_CTRL:SPB_HPTR TAB" },
+ { .name = "QSET_QUEUE_CTRL:WRRA_HPTR TAB" },
+ { .name = "QSET_QUEUE_CTRL:WRRB_HPTR TAB" },
+ { .name = "QSET_QUEUE_CTRL:QS_LINKLIST TAB" },
+ { .name = "QSET_QUEUE_CTRL:SPA_TPTR TAB" },
+ { .name = "QSET_QUEUE_CTRL:SPB_TPTR TAB" },
+ { .name = "QSET_QUEUE_CTRL:WRRA_TPTR TAB" },
+ { .name = "QSET_QUEUE_CTRL:WRRB_TPTR TAB" },
+ { .name = "QSET_QUEUE_CTRL:QS_DEFICITCNT TAB" },
+ },
+ {
+ { .name = "ROCE_QUEUE_CTRL:QS_LEN TAB" },
+ { .name = "ROCE_QUEUE_CTRL:QS_TPTR TAB" },
+ { .name = "ROCE_QUEUE_CTRL:QS_HPTR TAB" },
+ { .name = "ROCE_QUEUE_CTRL:QLINKLIST TAB" },
+ { .name = "ROCE_QUEUE_CTRL:QCLEN TAB" },
+ },
+ {
+ { .name = "NIC_QUEUE_CTRL:QS_LEN TAB" },
+ { .name = "NIC_QUEUE_CTRL:QS_TPTR TAB" },
+ { .name = "NIC_QUEUE_CTRL:QS_HPTR TAB" },
+ { .name = "NIC_QUEUE_CTRL:QLINKLIST TAB" },
+ { .name = "NIC_QUEUE_CTRL:QCLEN TAB" },
+ },
+ {
+ { .name = "RAM_CFG_CTRL:CSHAP TAB" },
+ { .name = "RAM_CFG_CTRL:PSHAP TAB" },
+ },
+ {
+ { .name = "SHAPER_CTRL:PSHAP TAB" },
+ },
+ {
+ { .name = "MSCH_CTRL" },
+ },
+ {
+ { .name = "TOP_CTRL" },
+ },
+};
+
+static const struct hclge_hw_error hclge_tm_sch_err_int[] = {
+ { .int_msk = BIT(0), .msg = "tm_sch_ecc_1bit_err" },
+ { .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err" },
+ { .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_full_err" },
+ { .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_empty_err" },
+ { .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_full_err" },
+ { .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_empty_err" },
+ { .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_full_err" },
+ { .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_empty_err" },
+ { .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_full_err" },
+ { .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_empty_err" },
+ { .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_full_err" },
+ { .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_empty_err" },
+ { .int_msk = BIT(12),
+ .msg = "tm_sch_port_shap_offset_fifo_wr_full_err" },
+ { .int_msk = BIT(13),
+ .msg = "tm_sch_port_shap_offset_fifo_rd_empty_err" },
+ { .int_msk = BIT(14),
+ .msg = "tm_sch_pg_pshap_offset_fifo_wr_full_err" },
+ { .int_msk = BIT(15),
+ .msg = "tm_sch_pg_pshap_offset_fifo_rd_empty_err" },
+ { .int_msk = BIT(16),
+ .msg = "tm_sch_pg_cshap_offset_fifo_wr_full_err" },
+ { .int_msk = BIT(17),
+ .msg = "tm_sch_pg_cshap_offset_fifo_rd_empty_err" },
+ { .int_msk = BIT(18),
+ .msg = "tm_sch_pri_pshap_offset_fifo_wr_full_err" },
+ { .int_msk = BIT(19),
+ .msg = "tm_sch_pri_pshap_offset_fifo_rd_empty_err" },
+ { .int_msk = BIT(20),
+ .msg = "tm_sch_pri_cshap_offset_fifo_wr_full_err" },
+ { .int_msk = BIT(21),
+ .msg = "tm_sch_pri_cshap_offset_fifo_rd_empty_err" },
+ { .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_full_err" },
+ { .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_empty_err" },
+ { .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_full_err" },
+ { .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_empty_err" },
+ { .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_full_err" },
+ { .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_empty_err" },
+ { .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_full_err" },
+ { .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_empty_err" },
+ { .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_full_err" },
+ { .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_empty_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_qcn_ecc_err_int[] = {
+ { .int_msk = BIT(0), .msg = "qcn_byte_mem_ecc_1bit_err" },
+ { .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err" },
+ { .int_msk = BIT(2), .msg = "qcn_time_mem_ecc_1bit_err" },
+ { .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err" },
+ { .int_msk = BIT(4), .msg = "qcn_fb_mem_ecc_1bit_err" },
+ { .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err" },
+ { .int_msk = BIT(6), .msg = "qcn_link_mem_ecc_1bit_err" },
+ { .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err" },
+ { .int_msk = BIT(8), .msg = "qcn_rate_mem_ecc_1bit_err" },
+ { .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err" },
+ { .int_msk = BIT(10), .msg = "qcn_tmplt_mem_ecc_1bit_err" },
+ { .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err" },
+ { .int_msk = BIT(12), .msg = "qcn_shap_cfg_mem_ecc_1bit_err" },
+ { .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err" },
+ { .int_msk = BIT(14), .msg = "qcn_gp0_barrel_mem_ecc_1bit_err" },
+ { .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err" },
+ { .int_msk = BIT(16), .msg = "qcn_gp1_barrel_mem_ecc_1bit_err" },
+ { .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err" },
+ { .int_msk = BIT(18), .msg = "qcn_gp2_barrel_mem_ecc_1bit_err" },
+ { .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err" },
+ { .int_msk = BIT(20), .msg = "qcn_gp3_barral_mem_ecc_1bit_err" },
+ { .int_msk = BIT(21), .msg = "qcn_gp3_barral_mem_ecc_mbit_err" },
+ { /* sentinel */ }
+};
+
+static void hclge_log_error(struct device *dev,
+ const struct hclge_hw_error *err_list,
+ u32 err_sts)
+{
+ const struct hclge_hw_error *err;
+ int i = 0;
+
+ while (err_list[i].msg) {
+ err = &err_list[i];
+ if (!(err->int_msk & err_sts)) {
+ i++;
+ continue;
+ }
+ dev_warn(dev, "%s [error status=0x%x] found\n",
+ err->msg, err_sts);
+ i++;
+ }
+}
+
+/* hclge_cmd_query_error: read the error information
+ * @hdev: pointer to struct hclge_dev
+ * @desc: descriptor for describing the command
+ * @cmd: command opcode
+ * @flag: flag for extended command structure
+ * @w_num: offset for setting the read interrupt type.
+ * @int_type: the type of interrupt for which the error
+ * info will be read (RAS-CE/RAS-NFE/RAS-FE etc.).
+ *
+ * This function queries the error info from the hw register(s) using command
+ */
+static int hclge_cmd_query_error(struct hclge_dev *hdev,
+ struct hclge_desc *desc, u32 cmd,
+ u16 flag, u8 w_num,
+ enum hclge_err_int_type int_type)
+{
+ struct device *dev = &hdev->pdev->dev;
+ int num = 1;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
+ if (flag) {
+ desc[0].flag |= cpu_to_le16(flag);
+ hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
+ num = 2;
+ }
+ if (w_num)
+ desc[0].data[w_num] = cpu_to_le32(int_type);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
+ if (ret)
+ dev_err(dev, "query error cmd failed (%d)\n", ret);
+
+ return ret;
+}
+
+/* hclge_cmd_clear_error: clear the error status
+ * @hdev: pointer to struct hclge_dev
+ * @desc: descriptor for describing the command
+ * @desc_src: prefilled descriptor from the previous command for reusing
+ * @cmd: command opcode
+ * @flag: flag for extended command structure
+ *
+ * This function clears the error status in the hw register(s) using command
+ */
+static int hclge_cmd_clear_error(struct hclge_dev *hdev,
+ struct hclge_desc *desc,
+ struct hclge_desc *desc_src,
+ u32 cmd, u16 flag)
+{
+ struct device *dev = &hdev->pdev->dev;
+ int num = 1;
+ int ret, i;
+
+ if (cmd) {
+ hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
+ if (flag) {
+ desc[0].flag |= cpu_to_le16(flag);
+ hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
+ num = 2;
+ }
+ if (desc_src) {
+ for (i = 0; i < 6; i++) {
+ desc[0].data[i] = desc_src[0].data[i];
+ if (flag)
+ desc[1].data[i] = desc_src[1].data[i];
+ }
+ }
+ } else {
+ hclge_cmd_reuse_desc(&desc[0], false);
+ if (flag) {
+ desc[0].flag |= cpu_to_le16(flag);
+ hclge_cmd_reuse_desc(&desc[1], false);
+ num = 2;
+ }
+ }
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
+ if (ret)
+ dev_err(dev, "clear error cmd failed (%d)\n", ret);
+
+ return ret;
+}
+
+static int hclge_enable_common_error(struct hclge_dev *hdev, bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc[2];
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_COMMON_ECC_INT_CFG, false);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_COMMON_ECC_INT_CFG, false);
+
+ if (en) {
+ /* enable COMMON error interrupts */
+ desc[0].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN);
+ desc[0].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN |
+ HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN);
+ desc[0].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN);
+ desc[0].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN);
+ desc[0].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN);
+ } else {
+ /* disable COMMON error interrupts */
+ desc[0].data[0] = 0;
+ desc[0].data[2] = 0;
+ desc[0].data[3] = 0;
+ desc[0].data[4] = 0;
+ desc[0].data[5] = 0;
+ }
+ desc[1].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN_MASK);
+ desc[1].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN_MASK |
+ HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN_MASK);
+ desc[1].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN_MASK);
+ desc[1].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN_MASK);
+ desc[1].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN_MASK);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
+ if (ret)
+ dev_err(dev,
+ "failed(%d) to enable/disable COMMON err interrupts\n",
+ ret);
+
+ return ret;
+}
+
+static int hclge_enable_ncsi_error(struct hclge_dev *hdev, bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc;
+ int ret;
+
+ if (hdev->pdev->revision < 0x21)
+ return 0;
+
+ /* enable/disable NCSI error interrupts */
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_NCSI_INT_EN, false);
+ if (en)
+ desc.data[0] = cpu_to_le32(HCLGE_NCSI_ERR_INT_EN);
+ else
+ desc.data[0] = 0;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(dev,
+ "failed(%d) to enable/disable NCSI error interrupts\n",
+ ret);
+
+ return ret;
+}
+
+static int hclge_enable_igu_egu_error(struct hclge_dev *hdev, bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc;
+ int ret;
+
+ /* enable/disable error interrupts */
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_COMMON_INT_EN, false);
+ if (en)
+ desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN);
+ else
+ desc.data[0] = 0;
+ desc.data[1] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN_MASK);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(dev,
+ "failed(%d) to enable/disable IGU common interrupts\n",
+ ret);
+ return ret;
+ }
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_EGU_TNL_INT_EN, false);
+ if (en)
+ desc.data[0] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN);
+ else
+ desc.data[0] = 0;
+ desc.data[1] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN_MASK);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(dev,
+ "failed(%d) to enable/disable IGU-EGU TNL interrupts\n",
+ ret);
+ return ret;
+ }
+
+ ret = hclge_enable_ncsi_error(hdev, en);
+ if (ret)
+ dev_err(dev, "fail(%d) to en/disable err int\n", ret);
+
+ return ret;
+}
+
+static int hclge_enable_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd,
+ bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc[2];
+ int ret;
+
+ /* enable/disable PPP error interrupts */
+ hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
+
+ if (cmd == HCLGE_PPP_CMD0_INT_CMD) {
+ if (en) {
+ desc[0].data[0] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN);
+ desc[0].data[1] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN);
+ } else {
+ desc[0].data[0] = 0;
+ desc[0].data[1] = 0;
+ }
+ desc[1].data[0] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN_MASK);
+ desc[1].data[1] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN_MASK);
+ } else if (cmd == HCLGE_PPP_CMD1_INT_CMD) {
+ if (en) {
+ desc[0].data[0] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN);
+ desc[0].data[1] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT3_EN);
+ } else {
+ desc[0].data[0] = 0;
+ desc[0].data[1] = 0;
+ }
+ desc[1].data[0] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN_MASK);
+ desc[1].data[1] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK);
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
+ if (ret)
+ dev_err(dev,
+ "failed(%d) to enable/disable PPP error interrupts\n",
+ ret);
+
+ return ret;
+}
+
+static int hclge_enable_ppp_error(struct hclge_dev *hdev, bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ int ret;
+
+ ret = hclge_enable_ppp_error_interrupt(hdev, HCLGE_PPP_CMD0_INT_CMD,
+ en);
+ if (ret) {
+ dev_err(dev,
+ "failed(%d) to enable/disable PPP error intr 0,1\n",
+ ret);
+ return ret;
+ }
+
+ ret = hclge_enable_ppp_error_interrupt(hdev, HCLGE_PPP_CMD1_INT_CMD,
+ en);
+ if (ret)
+ dev_err(dev,
+ "failed(%d) to enable/disable PPP error intr 2,3\n",
+ ret);
+
+ return ret;
+}
+
+int hclge_enable_tm_hw_error(struct hclge_dev *hdev, bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc;
+ int ret;
+
+ /* enable TM SCH hw errors */
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_TM_SCH_ECC_INT_EN, false);
+ if (en)
+ desc.data[0] = cpu_to_le32(HCLGE_TM_SCH_ECC_ERR_INT_EN);
+ else
+ desc.data[0] = 0;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(dev, "failed(%d) to configure TM SCH errors\n", ret);
+ return ret;
+ }
+
+ /* enable TM QCN hw errors */
+ ret = hclge_cmd_query_error(hdev, &desc, HCLGE_TM_QCN_MEM_INT_CFG,
+ 0, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed(%d) to read TM QCN CFG status\n", ret);
+ return ret;
+ }
+
+ hclge_cmd_reuse_desc(&desc, false);
+ if (en)
+ desc.data[1] = cpu_to_le32(HCLGE_TM_QCN_MEM_ERR_INT_EN);
+ else
+ desc.data[1] = 0;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(dev,
+ "failed(%d) to configure TM QCN mem errors\n", ret);
+
+ return ret;
+}
+
+static void hclge_process_common_error(struct hclge_dev *hdev,
+ enum hclge_err_int_type type)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc[2];
+ u32 err_sts;
+ int ret;
+
+ /* read err sts */
+ ret = hclge_cmd_query_error(hdev, &desc[0],
+ HCLGE_COMMON_ECC_INT_CFG,
+ HCLGE_CMD_FLAG_NEXT, 0, 0);
+ if (ret) {
+ dev_err(dev,
+ "failed(=%d) to query COMMON error interrupt status\n",
+ ret);
+ return;
+ }
+
+ /* log err */
+ err_sts = (le32_to_cpu(desc[0].data[0])) & HCLGE_IMP_TCM_ECC_INT_MASK;
+ hclge_log_error(dev, &hclge_imp_tcm_ecc_int[0], err_sts);
+
+ err_sts = (le32_to_cpu(desc[0].data[1])) & HCLGE_CMDQ_ECC_INT_MASK;
+ hclge_log_error(dev, &hclge_cmdq_nic_mem_ecc_int[0], err_sts);
+
+ err_sts = (le32_to_cpu(desc[0].data[1]) >> HCLGE_CMDQ_ROC_ECC_INT_SHIFT)
+ & HCLGE_CMDQ_ECC_INT_MASK;
+ hclge_log_error(dev, &hclge_cmdq_rocee_mem_ecc_int[0], err_sts);
+
+ if ((le32_to_cpu(desc[0].data[3])) & BIT(0))
+ dev_warn(dev, "imp_rd_data_poison_err found\n");
+
+ err_sts = (le32_to_cpu(desc[0].data[3]) >> HCLGE_TQP_ECC_INT_SHIFT) &
+ HCLGE_TQP_ECC_INT_MASK;
+ hclge_log_error(dev, &hclge_tqp_int_ecc_int[0], err_sts);
+
+ err_sts = (le32_to_cpu(desc[0].data[5])) &
+ HCLGE_IMP_ITCM4_ECC_INT_MASK;
+ hclge_log_error(dev, &hclge_imp_itcm4_ecc_int[0], err_sts);
+
+ /* clear error interrupts */
+ desc[1].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_CLR_MASK);
+ desc[1].data[1] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_CLR_MASK |
+ HCLGE_CMDQ_ROCEE_ECC_CLR_MASK);
+ desc[1].data[3] = cpu_to_le32(HCLGE_TQP_IMP_ERR_CLR_MASK);
+ desc[1].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_CLR_MASK);
+
+ ret = hclge_cmd_clear_error(hdev, &desc[0], NULL, 0,
+ HCLGE_CMD_FLAG_NEXT);
+ if (ret)
+ dev_err(dev,
+ "failed(%d) to clear COMMON error interrupt status\n",
+ ret);
+}
+
+static void hclge_process_ncsi_error(struct hclge_dev *hdev,
+ enum hclge_err_int_type type)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc_rd;
+ struct hclge_desc desc_wr;
+ u32 err_sts;
+ int ret;
+
+ if (hdev->pdev->revision < 0x21)
+ return;
+
+ /* read NCSI error status */
+ ret = hclge_cmd_query_error(hdev, &desc_rd, HCLGE_NCSI_INT_QUERY,
+ 0, 1, HCLGE_NCSI_ERR_INT_TYPE);
+ if (ret) {
+ dev_err(dev,
+ "failed(=%d) to query NCSI error interrupt status\n",
+ ret);
+ return;
+ }
+
+ /* log err */
+ err_sts = le32_to_cpu(desc_rd.data[0]);
+ hclge_log_error(dev, &hclge_ncsi_err_int[0], err_sts);
+
+ /* clear err int */
+ ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd,
+ HCLGE_NCSI_INT_CLR, 0);
+ if (ret)
+ dev_err(dev, "failed(=%d) to clear NCSI interrupt status\n",
+ ret);
+}
+
+static void hclge_process_igu_egu_error(struct hclge_dev *hdev,
+ enum hclge_err_int_type int_type)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc_rd;
+ struct hclge_desc desc_wr;
+ u32 err_sts;
+ int ret;
+
+ /* read IGU common err sts */
+ ret = hclge_cmd_query_error(hdev, &desc_rd,
+ HCLGE_IGU_COMMON_INT_QUERY,
+ 0, 1, int_type);
+ if (ret) {
+ dev_err(dev, "failed(=%d) to query IGU common int status\n",
+ ret);
+ return;
+ }
+
+ /* log err */
+ err_sts = le32_to_cpu(desc_rd.data[0]) &
+ HCLGE_IGU_COM_INT_MASK;
+ hclge_log_error(dev, &hclge_igu_com_err_int[0], err_sts);
+
+ /* clear err int */
+ ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd,
+ HCLGE_IGU_COMMON_INT_CLR, 0);
+ if (ret) {
+ dev_err(dev, "failed(=%d) to clear IGU common int status\n",
+ ret);
+ return;
+ }
+
+ /* read IGU-EGU TNL err sts */
+ ret = hclge_cmd_query_error(hdev, &desc_rd,
+ HCLGE_IGU_EGU_TNL_INT_QUERY,
+ 0, 1, int_type);
+ if (ret) {
+ dev_err(dev, "failed(=%d) to query IGU-EGU TNL int status\n",
+ ret);
+ return;
+ }
+
+ /* log err */
+ err_sts = le32_to_cpu(desc_rd.data[0]) &
+ HCLGE_IGU_EGU_TNL_INT_MASK;
+ hclge_log_error(dev, &hclge_igu_egu_tnl_err_int[0], err_sts);
+
+ /* clear err int */
+ ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd,
+ HCLGE_IGU_EGU_TNL_INT_CLR, 0);
+ if (ret) {
+ dev_err(dev, "failed(=%d) to clear IGU-EGU TNL int status\n",
+ ret);
+ return;
+ }
+
+ hclge_process_ncsi_error(hdev, HCLGE_ERR_INT_RAS_NFE);
+}
+
+static int hclge_log_and_clear_ppp_error(struct hclge_dev *hdev, u32 cmd,
+ enum hclge_err_int_type int_type)
+{
+ enum hnae3_reset_type reset_level = HNAE3_NONE_RESET;
+ struct device *dev = &hdev->pdev->dev;
+ const struct hclge_hw_error *hw_err_lst1, *hw_err_lst2, *hw_err_lst3;
+ struct hclge_desc desc[2];
+ u32 err_sts;
+ int ret;
+
+ /* read PPP INT sts */
+ ret = hclge_cmd_query_error(hdev, &desc[0], cmd,
+ HCLGE_CMD_FLAG_NEXT, 5, int_type);
+ if (ret) {
+ dev_err(dev, "failed(=%d) to query PPP interrupt status\n",
+ ret);
+ return -EIO;
+ }
+
+ /* log error */
+ if (cmd == HCLGE_PPP_CMD0_INT_CMD) {
+ hw_err_lst1 = &hclge_ppp_mpf_int0[0];
+ hw_err_lst2 = &hclge_ppp_mpf_int1[0];
+ hw_err_lst3 = &hclge_ppp_pf_int[0];
+ } else if (cmd == HCLGE_PPP_CMD1_INT_CMD) {
+ hw_err_lst1 = &hclge_ppp_mpf_int2[0];
+ hw_err_lst2 = &hclge_ppp_mpf_int3[0];
+ } else {
+ dev_err(dev, "invalid command(=%d)\n", cmd);
+ return -EINVAL;
+ }
+
+ err_sts = le32_to_cpu(desc[0].data[2]);
+ if (err_sts) {
+ hclge_log_error(dev, hw_err_lst1, err_sts);
+ reset_level = HNAE3_FUNC_RESET;
+ }
+
+ err_sts = le32_to_cpu(desc[0].data[3]);
+ if (err_sts) {
+ hclge_log_error(dev, hw_err_lst2, err_sts);
+ reset_level = HNAE3_FUNC_RESET;
+ }
+
+ if (cmd == HCLGE_PPP_CMD0_INT_CMD) {
+ err_sts = (le32_to_cpu(desc[0].data[4]) >> 8) & 0x3;
+ if (err_sts) {
+ hclge_log_error(dev, hw_err_lst3, err_sts);
+ reset_level = HNAE3_FUNC_RESET;
+ }
+ }
+
+ /* clear PPP INT */
+ ret = hclge_cmd_clear_error(hdev, &desc[0], NULL, 0,
+ HCLGE_CMD_FLAG_NEXT);
+ if (ret) {
+ dev_err(dev, "failed(=%d) to clear PPP interrupt status\n",
+ ret);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void hclge_process_ppp_error(struct hclge_dev *hdev,
+ enum hclge_err_int_type int_type)
+{
+ struct device *dev = &hdev->pdev->dev;
+ int ret;
+
+ /* read PPP INT0,1 sts */
+ ret = hclge_log_and_clear_ppp_error(hdev, HCLGE_PPP_CMD0_INT_CMD,
+ int_type);
+ if (ret < 0) {
+ dev_err(dev, "failed(=%d) to clear PPP interrupt 0,1 status\n",
+ ret);
+ return;
+ }
+
+ /* read err PPP INT2,3 sts */
+ ret = hclge_log_and_clear_ppp_error(hdev, HCLGE_PPP_CMD1_INT_CMD,
+ int_type);
+ if (ret < 0)
+ dev_err(dev, "failed(=%d) to clear PPP interrupt 2,3 status\n",
+ ret);
+}
+
+static void hclge_process_tm_sch_error(struct hclge_dev *hdev)
+{
+ struct device *dev = &hdev->pdev->dev;
+ const struct hclge_tm_sch_ecc_info *tm_sch_ecc_info;
+ struct hclge_desc desc;
+ u32 ecc_info;
+ u8 module_no;
+ u8 ram_no;
+ int ret;
+
+ /* read TM scheduler errors */
+ ret = hclge_cmd_query_error(hdev, &desc,
+ HCLGE_TM_SCH_MBIT_ECC_INFO_CMD, 0, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed(%d) to read SCH mbit ECC err info\n", ret);
+ return;
+ }
+ ecc_info = le32_to_cpu(desc.data[0]);
+
+ ret = hclge_cmd_query_error(hdev, &desc,
+ HCLGE_TM_SCH_ECC_ERR_RINT_CMD, 0, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed(%d) to read SCH ECC err status\n", ret);
+ return;
+ }
+
+ /* log TM scheduler errors */
+ if (le32_to_cpu(desc.data[0])) {
+ hclge_log_error(dev, &hclge_tm_sch_err_int[0],
+ le32_to_cpu(desc.data[0]));
+ if (le32_to_cpu(desc.data[0]) & 0x2) {
+ module_no = (ecc_info >> 20) & 0xF;
+ ram_no = (ecc_info >> 16) & 0xF;
+ tm_sch_ecc_info =
+ &hclge_tm_sch_ecc_err[module_no][ram_no];
+ dev_warn(dev, "ecc err module:ram=%s\n",
+ tm_sch_ecc_info->name);
+ dev_warn(dev, "ecc memory address = 0x%x\n",
+ ecc_info & 0xFFFF);
+ }
+ }
+
+ /* clear TM scheduler errors */
+ ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed(%d) to clear TM SCH error status\n", ret);
+ return;
+ }
+
+ ret = hclge_cmd_query_error(hdev, &desc,
+ HCLGE_TM_SCH_ECC_ERR_RINT_CE, 0, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed(%d) to read SCH CE status\n", ret);
+ return;
+ }
+
+ ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed(%d) to clear TM SCH CE status\n", ret);
+ return;
+ }
+
+ ret = hclge_cmd_query_error(hdev, &desc,
+ HCLGE_TM_SCH_ECC_ERR_RINT_NFE, 0, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed(%d) to read SCH NFE status\n", ret);
+ return;
+ }
+
+ ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed(%d) to clear TM SCH NFE status\n", ret);
+ return;
+ }
+
+ ret = hclge_cmd_query_error(hdev, &desc,
+ HCLGE_TM_SCH_ECC_ERR_RINT_FE, 0, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed(%d) to read SCH FE status\n", ret);
+ return;
+ }
+
+ ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0);
+ if (ret)
+ dev_err(dev, "failed(%d) to clear TM SCH FE status\n", ret);
+}
+
+static void hclge_process_tm_qcn_error(struct hclge_dev *hdev)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc;
+ int ret;
+
+ /* read QCN errors */
+ ret = hclge_cmd_query_error(hdev, &desc,
+ HCLGE_TM_QCN_MEM_INT_INFO_CMD, 0, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed(%d) to read QCN ECC err status\n", ret);
+ return;
+ }
+
+ /* log QCN errors */
+ if (le32_to_cpu(desc.data[0]))
+ hclge_log_error(dev, &hclge_qcn_ecc_err_int[0],
+ le32_to_cpu(desc.data[0]));
+
+ /* clear QCN errors */
+ ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0);
+ if (ret)
+ dev_err(dev, "failed(%d) to clear QCN error status\n", ret);
+}
+
+static void hclge_process_tm_error(struct hclge_dev *hdev,
+ enum hclge_err_int_type type)
+{
+ hclge_process_tm_sch_error(hdev);
+ hclge_process_tm_qcn_error(hdev);
+}
+
+static const struct hclge_hw_blk hw_blk[] = {
+ { .msk = BIT(0), .name = "IGU_EGU",
+ .enable_error = hclge_enable_igu_egu_error,
+ .process_error = hclge_process_igu_egu_error, },
+ { .msk = BIT(5), .name = "COMMON",
+ .enable_error = hclge_enable_common_error,
+ .process_error = hclge_process_common_error, },
+ { .msk = BIT(4), .name = "TM",
+ .enable_error = hclge_enable_tm_hw_error,
+ .process_error = hclge_process_tm_error, },
+ { .msk = BIT(1), .name = "PPP",
+ .enable_error = hclge_enable_ppp_error,
+ .process_error = hclge_process_ppp_error, },
+ { /* sentinel */ }
+};
+
+int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state)
+{
+ struct device *dev = &hdev->pdev->dev;
+ int ret = 0;
+ int i = 0;
+
+ while (hw_blk[i].name) {
+ if (!hw_blk[i].enable_error) {
+ i++;
+ continue;
+ }
+ ret = hw_blk[i].enable_error(hdev, state);
+ if (ret) {
+ dev_err(dev, "fail(%d) to en/disable err int\n", ret);
+ return ret;
+ }
+ i++;
+ }
+
+ return ret;
+}
+
+pci_ers_result_t hclge_process_ras_hw_error(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+ struct device *dev = &hdev->pdev->dev;
+ u32 sts, val;
+ int i = 0;
+
+ sts = hclge_read_dev(&hdev->hw, HCLGE_RAS_PF_OTHER_INT_STS_REG);
+
+ /* Processing Non-fatal errors */
+ if (sts & HCLGE_RAS_REG_NFE_MASK) {
+ val = (sts >> HCLGE_RAS_REG_NFE_SHIFT) & 0xFF;
+ i = 0;
+ while (hw_blk[i].name) {
+ if (!(hw_blk[i].msk & val)) {
+ i++;
+ continue;
+ }
+ dev_warn(dev, "%s ras non-fatal error identified\n",
+ hw_blk[i].name);
+ if (hw_blk[i].process_error)
+ hw_blk[i].process_error(hdev,
+ HCLGE_ERR_INT_RAS_NFE);
+ i++;
+ }
+ }
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
new file mode 100644
index 000000000000..e0e3b5861495
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#ifndef __HCLGE_ERR_H
+#define __HCLGE_ERR_H
+
+#include "hclge_main.h"
+
+#define HCLGE_RAS_PF_OTHER_INT_STS_REG 0x20B00
+#define HCLGE_RAS_REG_FE_MASK 0xFF
+#define HCLGE_RAS_REG_NFE_MASK 0xFF00
+#define HCLGE_RAS_REG_NFE_SHIFT 8
+
+#define HCLGE_IMP_TCM_ECC_ERR_INT_EN 0xFFFF0000
+#define HCLGE_IMP_TCM_ECC_ERR_INT_EN_MASK 0xFFFF0000
+#define HCLGE_IMP_ITCM4_ECC_ERR_INT_EN 0x300
+#define HCLGE_IMP_ITCM4_ECC_ERR_INT_EN_MASK 0x300
+#define HCLGE_CMDQ_NIC_ECC_ERR_INT_EN 0xFFFF
+#define HCLGE_CMDQ_NIC_ECC_ERR_INT_EN_MASK 0xFFFF
+#define HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN 0xFFFF0000
+#define HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN_MASK 0xFFFF0000
+#define HCLGE_IMP_RD_POISON_ERR_INT_EN 0x0100
+#define HCLGE_IMP_RD_POISON_ERR_INT_EN_MASK 0x0100
+#define HCLGE_TQP_ECC_ERR_INT_EN 0x0FFF
+#define HCLGE_TQP_ECC_ERR_INT_EN_MASK 0x0FFF
+#define HCLGE_IGU_ERR_INT_EN 0x0000066F
+#define HCLGE_IGU_ERR_INT_EN_MASK 0x000F
+#define HCLGE_IGU_TNL_ERR_INT_EN 0x0002AABF
+#define HCLGE_IGU_TNL_ERR_INT_EN_MASK 0x003F
+#define HCLGE_PPP_MPF_ECC_ERR_INT0_EN 0xFFFFFFFF
+#define HCLGE_PPP_MPF_ECC_ERR_INT0_EN_MASK 0xFFFFFFFF
+#define HCLGE_PPP_MPF_ECC_ERR_INT1_EN 0xFFFFFFFF
+#define HCLGE_PPP_MPF_ECC_ERR_INT1_EN_MASK 0xFFFFFFFF
+#define HCLGE_PPP_PF_ERR_INT_EN 0x0003
+#define HCLGE_PPP_PF_ERR_INT_EN_MASK 0x0003
+#define HCLGE_PPP_MPF_ECC_ERR_INT2_EN 0x003F
+#define HCLGE_PPP_MPF_ECC_ERR_INT2_EN_MASK 0x003F
+#define HCLGE_PPP_MPF_ECC_ERR_INT3_EN 0x003F
+#define HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK 0x003F
+#define HCLGE_TM_SCH_ECC_ERR_INT_EN 0x3
+#define HCLGE_TM_QCN_MEM_ERR_INT_EN 0xFFFFFF
+#define HCLGE_NCSI_ERR_INT_EN 0x3
+#define HCLGE_NCSI_ERR_INT_TYPE 0x9
+
+#define HCLGE_IMP_TCM_ECC_INT_MASK 0xFFFF
+#define HCLGE_IMP_ITCM4_ECC_INT_MASK 0x3
+#define HCLGE_CMDQ_ECC_INT_MASK 0xFFFF
+#define HCLGE_CMDQ_ROC_ECC_INT_SHIFT 16
+#define HCLGE_TQP_ECC_INT_MASK 0xFFF
+#define HCLGE_TQP_ECC_INT_SHIFT 16
+#define HCLGE_IMP_TCM_ECC_CLR_MASK 0xFFFF
+#define HCLGE_IMP_ITCM4_ECC_CLR_MASK 0x3
+#define HCLGE_CMDQ_NIC_ECC_CLR_MASK 0xFFFF
+#define HCLGE_CMDQ_ROCEE_ECC_CLR_MASK 0xFFFF0000
+#define HCLGE_TQP_IMP_ERR_CLR_MASK 0x0FFF0001
+#define HCLGE_IGU_COM_INT_MASK 0xF
+#define HCLGE_IGU_EGU_TNL_INT_MASK 0x3F
+#define HCLGE_PPP_PF_INT_MASK 0x100
+
+enum hclge_err_int_type {
+ HCLGE_ERR_INT_MSIX = 0,
+ HCLGE_ERR_INT_RAS_CE = 1,
+ HCLGE_ERR_INT_RAS_NFE = 2,
+ HCLGE_ERR_INT_RAS_FE = 3,
+};
+
+struct hclge_hw_blk {
+ u32 msk;
+ const char *name;
+ int (*enable_error)(struct hclge_dev *hdev, bool en);
+ void (*process_error)(struct hclge_dev *hdev,
+ enum hclge_err_int_type type);
+};
+
+struct hclge_hw_error {
+ u32 int_msk;
+ const char *msg;
+};
+
+int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state);
+int hclge_enable_tm_hw_error(struct hclge_dev *hdev, bool en);
+pci_ers_result_t hclge_process_ras_hw_error(struct hnae3_ae_dev *ae_dev);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 21ca4af3b37a..ffdd96020860 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -19,18 +19,18 @@
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
+#include "hclge_err.h"
#include "hnae3.h"
#define HCLGE_NAME "hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
-static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
- enum hclge_mta_dmac_sel_type mta_mac_sel,
- bool enable);
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
+static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
+ u16 *allocated_size, bool is_alloc);
static struct hnae3_ae_algo ae_algo;
@@ -778,6 +778,11 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
HCLGE_CFG_SPEED_ABILITY_M,
HCLGE_CFG_SPEED_ABILITY_S);
+ cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_UMV_TBL_SPACE_M,
+ HCLGE_CFG_UMV_TBL_SPACE_S);
+ if (!cfg->umv_space)
+ cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}
/* hclge_get_cfg: query the static parameter from flash
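The new UMV space field is pulled out of the config word with the usual mask-and-shift helper: HCLGE_CFG_UMV_TBL_SPACE_M selects bits 31:16 and HCLGE_CFG_UMV_TBL_SPACE_S shifts the result down. A standalone sketch of what hnae3_get_field() does with those values (plain C; the sample word and local names are made up):

#include <stdint.h>
#include <stdio.h>

#define UMV_TBL_SPACE_S	16
#define UMV_TBL_SPACE_M	(0xFFFFu << UMV_TBL_SPACE_S)	/* GENMASK(31, 16) */

/* Mask out the field, then shift it down to bit 0. */
static uint32_t get_field(uint32_t word, uint32_t mask, unsigned int shift)
{
	return (word & mask) >> shift;
}

int main(void)
{
	uint32_t param1 = 0x01000200;	/* bits 31:16 hold the UMV table space */

	printf("umv space = %u\n",
	       get_field(param1, UMV_TBL_SPACE_M, UMV_TBL_SPACE_S));
	return 0;
}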
@@ -856,6 +861,7 @@ static int hclge_configure(struct hclge_dev *hdev)
hdev->tm_info.num_pg = 1;
hdev->tc_max = cfg.tc_num;
hdev->tm_info.hw_pfc_map = 0;
+ hdev->wanted_umv_size = cfg.umv_space;
ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
if (ret) {
@@ -1939,40 +1945,13 @@ static int hclge_get_autoneg(struct hnae3_handle *handle)
return hdev->hw.mac.autoneg;
}
-static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev,
- bool mask_vlan,
- u8 *mac_mask)
-{
- struct hclge_mac_vlan_mask_entry_cmd *req;
- struct hclge_desc desc;
- int status;
-
- req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false);
-
- hnae3_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
- mask_vlan ? 1 : 0);
- ether_addr_copy(req->mac_mask, mac_mask);
-
- status = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (status)
- dev_err(&hdev->pdev->dev,
- "Config mac_vlan_mask failed for cmd_send, ret =%d\n",
- status);
-
- return status;
-}
-
static int hclge_mac_init(struct hclge_dev *hdev)
{
struct hnae3_handle *handle = &hdev->vport[0].nic;
struct net_device *netdev = handle->kinfo.netdev;
struct hclge_mac *mac = &hdev->hw.mac;
- u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
- struct hclge_vport *vport;
int mtu;
int ret;
- int i;
hdev->hw.mac.duplex = HCLGE_MAC_FULL;
ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
@@ -1985,39 +1964,6 @@ static int hclge_mac_init(struct hclge_dev *hdev)
mac->link = 0;
- /* Initialize the MTA table work mode */
- hdev->enable_mta = true;
- hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36;
-
- ret = hclge_set_mta_filter_mode(hdev,
- hdev->mta_mac_sel_type,
- hdev->enable_mta);
- if (ret) {
- dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n",
- ret);
- return ret;
- }
-
- for (i = 0; i < hdev->num_alloc_vport; i++) {
- vport = &hdev->vport[i];
- vport->accept_mta_mc = false;
-
- memset(vport->mta_shadow, 0, sizeof(vport->mta_shadow));
- ret = hclge_cfg_func_mta_filter(hdev, vport->vport_id, false);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "set mta filter mode fail ret=%d\n", ret);
- return ret;
- }
- }
-
- ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "set default mac_vlan_mask fail ret=%d\n", ret);
- return ret;
- }
-
if (netdev)
mtu = netdev->mtu;
else
@@ -2290,7 +2236,7 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
}
/* clear the source of interrupt if it is not cause by reset */
- if (event_cause != HCLGE_VECTOR0_EVENT_RST) {
+ if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
hclge_clear_event_cause(hdev, event_cause, clearval);
hclge_enable_vector(&hdev->misc_vector, true);
}
@@ -2524,14 +2470,17 @@ static void hclge_reset(struct hclge_dev *hdev)
handle = &hdev->vport[0].nic;
rtnl_lock();
hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ rtnl_unlock();
if (!hclge_reset_wait(hdev)) {
+ rtnl_lock();
hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
hclge_reset_ae_dev(hdev->ae_dev);
hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
hclge_clear_reset_cause(hdev);
} else {
+ rtnl_lock();
/* schedule again to check pending resets later */
set_bit(hdev->reset_type, &hdev->reset_pending);
hclge_reset_task_schedule(hdev);
@@ -2543,12 +2492,18 @@ static void hclge_reset(struct hclge_dev *hdev)
ae_dev->reset_type = HNAE3_NONE_RESET;
}
-static void hclge_reset_event(struct hnae3_handle *handle)
+static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
{
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+ struct hclge_dev *hdev = ae_dev->priv;
- /* check if this is a new reset request and we are not here just because
+ /* We might end up getting called broadly because of the two cases below:
+ * 1. A recoverable error was conveyed through APEI and the only way to
+ * bring back normalcy is to reset.
+ * 2. A new reset request from the stack due to timeout
+ *
+ * For the first case, the error event might not have an ae handle
+ * available. Check if this is a new reset request and we are not here just because
* last reset attempt did not succeed and watchdog hit us again. We will
* know this if last reset request did not occur very recently (watchdog
* timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz)
@@ -2557,6 +2512,9 @@ static void hclge_reset_event(struct hnae3_handle *handle)
* want to make sure we throttle the reset request. Therefore, we will
* not allow it again before 3*HZ times.
*/
+ if (!handle)
+ handle = &hdev->vport[0].nic;
+
if (time_before(jiffies, (handle->last_reset_time + 3 * HZ)))
return;
else if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ)))
@@ -2828,6 +2786,22 @@ static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
return ret;
}
+static void hclge_get_rss_type(struct hclge_vport *vport)
+{
+ if (vport->rss_tuple_sets.ipv4_tcp_en ||
+ vport->rss_tuple_sets.ipv4_udp_en ||
+ vport->rss_tuple_sets.ipv4_sctp_en ||
+ vport->rss_tuple_sets.ipv6_tcp_en ||
+ vport->rss_tuple_sets.ipv6_udp_en ||
+ vport->rss_tuple_sets.ipv6_sctp_en)
+ vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
+ else if (vport->rss_tuple_sets.ipv4_fragment_en ||
+ vport->rss_tuple_sets.ipv6_fragment_en)
+ vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
+ else
+ vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
+}
+
static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
{
struct hclge_rss_input_tuple_cmd *req;
@@ -2847,6 +2821,7 @@ static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
+ hclge_get_rss_type(&hdev->vport[0]);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(&hdev->pdev->dev,
@@ -2861,8 +2836,19 @@ static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
int i;
/* Get hash algorithm */
- if (hfunc)
- *hfunc = vport->rss_algo;
+ if (hfunc) {
+ switch (vport->rss_algo) {
+ case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
+ *hfunc = ETH_RSS_HASH_TOP;
+ break;
+ case HCLGE_RSS_HASH_ALGO_SIMPLE:
+ *hfunc = ETH_RSS_HASH_XOR;
+ break;
+ default:
+ *hfunc = ETH_RSS_HASH_UNKNOWN;
+ break;
+ }
+ }
/* Get the RSS Key required by the user */
if (key)
@@ -2886,12 +2872,20 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
/* Set the RSS Hash Key if specified by the user */
if (key) {
-
- if (hfunc == ETH_RSS_HASH_TOP ||
- hfunc == ETH_RSS_HASH_NO_CHANGE)
+ switch (hfunc) {
+ case ETH_RSS_HASH_TOP:
hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
- else
+ break;
+ case ETH_RSS_HASH_XOR:
+ hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
+ break;
+ case ETH_RSS_HASH_NO_CHANGE:
+ hash_algo = vport->rss_algo;
+ break;
+ default:
return -EINVAL;
+ }
+
ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
if (ret)
return ret;
@@ -3009,6 +3003,7 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
+ hclge_get_rss_type(vport);
return 0;
}
@@ -3322,8 +3317,8 @@ void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
param->vf_id = vport_id;
}
-static void hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
- bool en_mc_pmc)
+static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
+ bool en_mc_pmc)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -3331,7 +3326,7 @@ static void hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, true,
vport->vport_id);
- hclge_cmd_set_promisc_mode(hdev, &param);
+ return hclge_cmd_set_promisc_mode(hdev, &param);
}
static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
@@ -4978,174 +4973,6 @@ static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}
-static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport,
- const u8 *addr)
-{
- u16 high_val = addr[1] | (addr[0] << 8);
- struct hclge_dev *hdev = vport->back;
- u32 rsh = 4 - hdev->mta_mac_sel_type;
- u16 ret_val = (high_val >> rsh) & 0xfff;
-
- return ret_val;
-}
-
-static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
- enum hclge_mta_dmac_sel_type mta_mac_sel,
- bool enable)
-{
- struct hclge_mta_filter_mode_cmd *req;
- struct hclge_desc desc;
- int ret;
-
- req = (struct hclge_mta_filter_mode_cmd *)desc.data;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);
-
- hnae3_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
- enable);
- hnae3_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
- HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "Config mat filter mode failed for cmd_send, ret =%d.\n",
- ret);
-
- return ret;
-}
-
-int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
- u8 func_id,
- bool enable)
-{
- struct hclge_cfg_func_mta_filter_cmd *req;
- struct hclge_desc desc;
- int ret;
-
- req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);
-
- hnae3_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
- enable);
- req->function_id = func_id;
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "Config func_id enable failed for cmd_send, ret =%d.\n",
- ret);
-
- return ret;
-}
-
-static int hclge_set_mta_table_item(struct hclge_vport *vport,
- u16 idx,
- bool enable)
-{
- struct hclge_dev *hdev = vport->back;
- struct hclge_cfg_func_mta_item_cmd *req;
- struct hclge_desc desc;
- u16 item_idx = 0;
- int ret;
-
- req = (struct hclge_cfg_func_mta_item_cmd *)desc.data;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
- hnae3_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);
-
- hnae3_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
- HCLGE_CFG_MTA_ITEM_IDX_S, idx);
- req->item_idx = cpu_to_le16(item_idx);
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Config mta table item failed for cmd_send, ret =%d.\n",
- ret);
- return ret;
- }
-
- if (enable)
- set_bit(idx, vport->mta_shadow);
- else
- clear_bit(idx, vport->mta_shadow);
-
- return 0;
-}
-
-static int hclge_update_mta_status(struct hnae3_handle *handle)
-{
- unsigned long mta_status[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)];
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct net_device *netdev = handle->kinfo.netdev;
- struct netdev_hw_addr *ha;
- u16 tbl_idx;
-
- memset(mta_status, 0, sizeof(mta_status));
-
- /* update mta_status from mc addr list */
- netdev_for_each_mc_addr(ha, netdev) {
- tbl_idx = hclge_get_mac_addr_to_mta_index(vport, ha->addr);
- set_bit(tbl_idx, mta_status);
- }
-
- return hclge_update_mta_status_common(vport, mta_status,
- 0, HCLGE_MTA_TBL_SIZE, true);
-}
-
-int hclge_update_mta_status_common(struct hclge_vport *vport,
- unsigned long *status,
- u16 idx,
- u16 count,
- bool update_filter)
-{
- struct hclge_dev *hdev = vport->back;
- u16 update_max = idx + count;
- u16 check_max;
- int ret = 0;
- bool used;
- u16 i;
-
- /* setup mta check range */
- if (update_filter) {
- i = 0;
- check_max = HCLGE_MTA_TBL_SIZE;
- } else {
- i = idx;
- check_max = update_max;
- }
-
- used = false;
- /* check and update all mta item */
- for (; i < check_max; i++) {
- /* ignore unused item */
- if (!test_bit(i, vport->mta_shadow))
- continue;
-
- /* if i in update range then update it */
- if (i >= idx && i < update_max)
- if (!test_bit(i - idx, status))
- hclge_set_mta_table_item(vport, i, false);
-
- if (!used && test_bit(i, vport->mta_shadow))
- used = true;
- }
-
- /* no longer use mta, disable it */
- if (vport->accept_mta_mc && update_filter && !used) {
- ret = hclge_cfg_func_mta_filter(hdev,
- vport->vport_id,
- false);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "disable func mta filter fail ret=%d\n",
- ret);
- else
- vport->accept_mta_mc = false;
- }
-
- return ret;
-}
-
static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
struct hclge_mac_vlan_tbl_entry_cmd *req)
{
@@ -5269,6 +5096,118 @@ static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
return cfg_status;
}
+static int hclge_init_umv_space(struct hclge_dev *hdev)
+{
+ u16 allocated_size = 0;
+ int ret;
+
+ ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
+ true);
+ if (ret)
+ return ret;
+
+ if (allocated_size < hdev->wanted_umv_size)
+ dev_warn(&hdev->pdev->dev,
+ "Alloc umv space failed, want %d, get %d\n",
+ hdev->wanted_umv_size, allocated_size);
+
+ mutex_init(&hdev->umv_mutex);
+ hdev->max_umv_size = allocated_size;
+ hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
+ hdev->share_umv_size = hdev->priv_umv_size +
+ hdev->max_umv_size % (hdev->num_req_vfs + 2);
+
+ return 0;
+}
+
+static int hclge_uninit_umv_space(struct hclge_dev *hdev)
+{
+ int ret;
+
+ if (hdev->max_umv_size > 0) {
+ ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
+ false);
+ if (ret)
+ return ret;
+ hdev->max_umv_size = 0;
+ }
+ mutex_destroy(&hdev->umv_mutex);
+
+ return 0;
+}
+
+static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
+ u16 *allocated_size, bool is_alloc)
+{
+ struct hclge_umv_spc_alc_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ req = (struct hclge_umv_spc_alc_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
+ hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
+ req->space_size = cpu_to_le32(space_size);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "%s umv space failed for cmd_send, ret =%d\n",
+ is_alloc ? "allocate" : "free", ret);
+ return ret;
+ }
+
+ if (is_alloc && allocated_size)
+ *allocated_size = le32_to_cpu(desc.data[1]);
+
+ return 0;
+}
+
+static void hclge_reset_umv_space(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport;
+ int i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
+ vport->used_umv_num = 0;
+ }
+
+ mutex_lock(&hdev->umv_mutex);
+ hdev->share_umv_size = hdev->priv_umv_size +
+ hdev->max_umv_size % (hdev->num_req_vfs + 2);
+ mutex_unlock(&hdev->umv_mutex);
+}
+
+static bool hclge_is_umv_space_full(struct hclge_vport *vport)
+{
+ struct hclge_dev *hdev = vport->back;
+ bool is_full;
+
+ mutex_lock(&hdev->umv_mutex);
+ is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
+ hdev->share_umv_size == 0);
+ mutex_unlock(&hdev->umv_mutex);
+
+ return is_full;
+}
+
+static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
+{
+ struct hclge_dev *hdev = vport->back;
+
+ mutex_lock(&hdev->umv_mutex);
+ if (is_free) {
+ if (vport->used_umv_num > hdev->priv_umv_size)
+ hdev->share_umv_size++;
+ vport->used_umv_num--;
+ } else {
+ if (vport->used_umv_num >= hdev->priv_umv_size)
+ hdev->share_umv_size--;
+ vport->used_umv_num++;
+ }
+ mutex_unlock(&hdev->umv_mutex);
+}
+
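
For readers following the new unicast MAC-VLAN (UMV) bookkeeping above: the firmware-granted table space is split into a per-function private quota, and one quota's worth plus the division remainder becomes a shared pool that a function may draw from once its private quota is exhausted. A minimal standalone sketch of that split, using the defaults from hclge_main.h and a hypothetical VF count, follows.

#include <stdio.h>
#include <stdint.h>

#define HCLGE_UMV_TBL_SIZE		3072
#define HCLGE_MAX_PF_NUM		8
#define HCLGE_DEFAULT_UMV_SPACE_PER_PF	(HCLGE_UMV_TBL_SIZE / HCLGE_MAX_PF_NUM)

int main(void)
{
	/* Hypothetical inputs: firmware granted the full request and the
	 * PF was configured with six VFs.
	 */
	uint16_t allocated_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;	/* 384 */
	uint16_t num_req_vfs = 6;
	uint16_t funcs = num_req_vfs + 2;	/* divisor used by the driver */

	uint16_t priv_umv_size = allocated_size / funcs;
	uint16_t share_umv_size = priv_umv_size + allocated_size % funcs;

	printf("private quota per function: %u, shared pool: %u\n",
	       (unsigned int)priv_umv_size, (unsigned int)share_umv_size);
	return 0;
}

With these numbers each function gets 48 private entries and the shared pool holds another 48; hclge_update_umv_space() then charges an added address against the private quota first and only consumes the shared count once that quota is full.
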
static int hclge_add_uc_addr(struct hnae3_handle *handle,
const unsigned char *addr)
{
@@ -5314,8 +5253,19 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
* is not allowed in the mac vlan table.
*/
ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
- if (ret == -ENOENT)
- return hclge_add_mac_vlan_tbl(vport, &req, NULL);
+ if (ret == -ENOENT) {
+ if (!hclge_is_umv_space_full(vport)) {
+ ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
+ if (!ret)
+ hclge_update_umv_space(vport, false);
+ return ret;
+ }
+
+ dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
+ hdev->priv_umv_size);
+
+ return -ENOSPC;
+ }
/* check if we just hit the duplicate */
if (!ret)
@@ -5358,6 +5308,8 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport,
hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr);
ret = hclge_remove_mac_vlan_tbl(vport, &req);
+ if (!ret)
+ hclge_update_umv_space(vport, true);
return ret;
}
@@ -5376,7 +5328,6 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
struct hclge_dev *hdev = vport->back;
struct hclge_mac_vlan_tbl_entry_cmd req;
struct hclge_desc desc[3];
- u16 tbl_idx;
int status;
/* mac addr check */
@@ -5406,25 +5357,8 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
status = hclge_add_mac_vlan_tbl(vport, &req, desc);
}
- /* If mc mac vlan table is full, use MTA table */
- if (status == -ENOSPC) {
- if (!vport->accept_mta_mc) {
- status = hclge_cfg_func_mta_filter(hdev,
- vport->vport_id,
- true);
- if (status) {
- dev_err(&hdev->pdev->dev,
- "set mta filter mode fail ret=%d\n",
- status);
- return status;
- }
- vport->accept_mta_mc = true;
- }
-
- /* Set MTA table for this MAC address */
- tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
- status = hclge_set_mta_table_item(vport, tbl_idx, true);
- }
+ if (status == -ENOSPC)
+ dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
return status;
}
@@ -5639,7 +5573,7 @@ static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
}
static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
- bool filter_en)
+ u8 fe_type, bool filter_en)
{
struct hclge_vlan_filter_ctrl_cmd *req;
struct hclge_desc desc;
@@ -5649,7 +5583,7 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
req->vlan_type = vlan_type;
- req->vlan_fe = filter_en;
+ req->vlan_fe = filter_en ? fe_type : 0;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
@@ -5661,13 +5595,34 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
#define HCLGE_FILTER_TYPE_VF 0
#define HCLGE_FILTER_TYPE_PORT 1
+#define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
+#define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
+#define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
+#define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
+#define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
+#define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
+ | HCLGE_FILTER_FE_ROCE_EGRESS_B)
+#define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
+ | HCLGE_FILTER_FE_ROCE_INGRESS_B)
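
As a quick sanity check of the composite enable masks just defined (standalone arithmetic, not driver code): on rev 0x21 and later parts vlan_fe carries the NIC and RoCE bits together, while older parts only drive bit 0.

#include <stdio.h>

#define BIT(n)	(1U << (n))

int main(void)
{
	unsigned int fe_egress  = BIT(1) | BIT(3);	/* NIC egress | RoCE egress   */
	unsigned int fe_ingress = BIT(0) | BIT(2);	/* NIC ingress | RoCE ingress */

	printf("FE_EGRESS=0x%02x FE_INGRESS=0x%02x\n", fe_egress, fe_ingress);
	/* prints FE_EGRESS=0x0a FE_INGRESS=0x05; the V1 path uses only BIT(0) */
	return 0;
}
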
static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable);
+ if (hdev->pdev->revision >= 0x21) {
+ hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_EGRESS, enable);
+ hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
+ HCLGE_FILTER_FE_INGRESS, enable);
+ } else {
+ hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_EGRESS_V1_B, enable);
+ }
+ if (enable)
+ handle->netdev_flags |= HNAE3_VLAN_FLTR;
+ else
+ handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
}
static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
@@ -5964,18 +5919,30 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
#define HCLGE_DEF_VLAN_TYPE 0x8100
- struct hnae3_handle *handle;
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
struct hclge_vport *vport;
int ret;
int i;
- ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true);
- if (ret)
- return ret;
+ if (hdev->pdev->revision >= 0x21) {
+ ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_EGRESS, true);
+ if (ret)
+ return ret;
- ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true);
- if (ret)
- return ret;
+ ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
+ HCLGE_FILTER_FE_INGRESS, true);
+ if (ret)
+ return ret;
+ } else {
+ ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_EGRESS_V1_B,
+ true);
+ if (ret)
+ return ret;
+ }
+
+ handle->netdev_flags |= HNAE3_VLAN_FLTR;
hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
@@ -6021,7 +5988,6 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
return ret;
}
- handle = &hdev->vport[0].nic;
return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
@@ -6144,31 +6110,28 @@ static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle,
return tqp->index;
}
-void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
+int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
int reset_try_times = 0;
int reset_status;
u16 queue_gid;
- int ret;
-
- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
- return;
+ int ret = 0;
queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
ret = hclge_tqp_enable(hdev, queue_id, 0, false);
if (ret) {
- dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
- return;
+ dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
+ return ret;
}
ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
if (ret) {
- dev_warn(&hdev->pdev->dev,
- "Send reset tqp cmd fail, ret = %d\n", ret);
- return;
+ dev_err(&hdev->pdev->dev,
+ "Send reset tqp cmd fail, ret = %d\n", ret);
+ return ret;
}
reset_try_times = 0;
@@ -6181,16 +6144,16 @@ void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
}
if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
- dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
- return;
+ dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
+ return ret;
}
ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
- if (ret) {
- dev_warn(&hdev->pdev->dev,
- "Deassert the soft reset fail, ret = %d\n", ret);
- return;
- }
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Deassert the soft reset fail, ret = %d\n", ret);
+
+ return ret;
}
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
@@ -6746,6 +6709,12 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
}
}
+ ret = hclge_init_umv_space(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
+ goto err_msi_irq_uninit;
+ }
+
ret = hclge_mac_init(hdev);
if (ret) {
dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
@@ -6790,6 +6759,13 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_mdiobus_unreg;
}
+ ret = hclge_hw_error_set_state(hdev, true);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "hw error interrupts enable failed, ret =%d\n", ret);
+ goto err_mdiobus_unreg;
+ }
+
hclge_dcb_ops_set(hdev);
timer_setup(&hdev->service_timer, hclge_service_timer, 0);
@@ -6866,6 +6842,8 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
+ hclge_reset_umv_space(hdev);
+
ret = hclge_mac_init(hdev);
if (ret) {
dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
@@ -6903,6 +6881,12 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
+ /* Re-enable the TM hw error interrupts because
+ * they get disabled on core/global reset.
+ */
+ if (hclge_enable_tm_hw_error(hdev, true))
+ dev_err(&pdev->dev, "failed to enable TM hw error interrupts\n");
+
dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
HCLGE_DRIVER_NAME);
@@ -6919,10 +6903,13 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
if (mac->phydev)
mdiobus_unregister(mac->mdio_bus);
+ hclge_uninit_umv_space(hdev);
+
/* Disable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, false);
synchronize_irq(hdev->misc_vector.vector_irq);
+ hclge_hw_error_set_state(hdev, false);
hclge_destroy_cmd_queue(&hdev->hw);
hclge_misc_irq_uninit(hdev);
hclge_pci_uninit(hdev);
@@ -7317,7 +7304,6 @@ static const struct hnae3_ae_ops hclge_ops = {
.rm_uc_addr = hclge_rm_uc_addr,
.add_mc_addr = hclge_add_mc_addr,
.rm_mc_addr = hclge_rm_mc_addr,
- .update_mta_status = hclge_update_mta_status,
.set_autoneg = hclge_set_autoneg,
.get_autoneg = hclge_get_autoneg,
.get_pauseparam = hclge_get_pauseparam,
@@ -7350,6 +7336,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_fd_all_rules = hclge_get_all_rules,
.restore_fd_rules = hclge_restore_fd_entries,
.enable_fd = hclge_enable_fd,
+ .process_hw_error = hclge_process_ras_hw_error,
};
static struct hnae3_ae_algo ae_algo = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 06adbdd27b95..0d9215404269 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -14,6 +14,8 @@
#define HCLGE_MOD_VERSION "1.0"
#define HCLGE_DRIVER_NAME "hclge"
+#define HCLGE_MAX_PF_NUM 8
+
#define HCLGE_INVALID_VPORT 0xffff
#define HCLGE_PF_CFG_BLOCK_SIZE 32
@@ -53,7 +55,9 @@
#define HCLGE_RSS_TC_SIZE_6 64
#define HCLGE_RSS_TC_SIZE_7 128
-#define HCLGE_MTA_TBL_SIZE 4096
+#define HCLGE_UMV_TBL_SIZE 3072
+#define HCLGE_DEFAULT_UMV_SPACE_PER_PF \
+ (HCLGE_UMV_TBL_SIZE / HCLGE_MAX_PF_NUM)
#define HCLGE_TQP_RESET_TRY_TIMES 10
@@ -162,13 +166,6 @@ enum HCLGE_MAC_DUPLEX {
HCLGE_MAC_FULL
};
-enum hclge_mta_dmac_sel_type {
- HCLGE_MAC_ADDR_47_36,
- HCLGE_MAC_ADDR_46_35,
- HCLGE_MAC_ADDR_45_34,
- HCLGE_MAC_ADDR_44_33,
-};
-
struct hclge_mac {
u8 phy_addr;
u8 flag;
@@ -251,6 +248,7 @@ struct hclge_cfg {
u8 default_speed;
u32 numa_node_map;
u8 speed_ability;
+ u16 umv_space;
};
struct hclge_tm_info {
@@ -670,9 +668,6 @@ struct hclge_dev {
u32 pkt_buf_size; /* Total pf buf size for tx/rx */
u32 mps; /* Max packet size */
- enum hclge_mta_dmac_sel_type mta_mac_sel_type;
- bool enable_mta; /* Multicast filter enable */
-
struct hclge_vlan_type_cfg vlan_type_cfg;
unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)];
@@ -680,6 +675,15 @@ struct hclge_dev {
struct hclge_fd_cfg fd_cfg;
struct hlist_head fd_rule_list;
u16 hclge_fd_rule_num;
+
+ u16 wanted_umv_size;
+ /* max available unicast mac vlan space */
+ u16 max_umv_size;
+ /* private unicast mac vlan space, it is the same for the PF and its VFs */
+ u16 priv_umv_size;
+ /* unicast mac vlan space shared by PF and its VFs */
+ u16 share_umv_size;
+ struct mutex umv_mutex; /* protect share_umv_size */
};
/* VPort level vlan tag configuration for TX direction */
@@ -732,13 +736,12 @@ struct hclge_vport {
struct hclge_tx_vtag_cfg txvlan_cfg;
struct hclge_rx_vtag_cfg rxvlan_cfg;
+ u16 used_umv_num;
+
int vport_id;
struct hclge_dev *back; /* Back reference to associated dev */
struct hnae3_handle nic;
struct hnae3_handle roce;
-
- bool accept_mta_mc; /* whether to accept mta filter multicast */
- unsigned long mta_shadow[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)];
};
void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
@@ -753,15 +756,6 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
int hclge_rm_mc_addr_common(struct hclge_vport *vport,
const unsigned char *addr);
-int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
- u8 func_id,
- bool enable);
-int hclge_update_mta_status_common(struct hclge_vport *vport,
- unsigned long *status,
- u16 idx,
- u16 count,
- bool update_filter);
-
struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle);
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
int vector_id, bool en,
@@ -784,7 +778,7 @@ int hclge_rss_init_hw(struct hclge_dev *hdev);
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);
void hclge_mbx_handler(struct hclge_dev *hdev);
-void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
+int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id);
int hclge_cfg_flowctrl(struct hclge_dev *hdev);
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index f34851c91eb3..f890022938d9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -233,43 +233,6 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
return 0;
}
-static int hclge_set_vf_mc_mta_status(struct hclge_vport *vport,
- u8 *msg, u8 idx, bool is_end)
-{
-#define HCLGE_MTA_STATUS_MSG_SIZE 13
-#define HCLGE_MTA_STATUS_MSG_BITS \
- (HCLGE_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE)
-#define HCLGE_MTA_STATUS_MSG_END_BITS \
- (HCLGE_MTA_TBL_SIZE % HCLGE_MTA_STATUS_MSG_BITS)
- unsigned long status[BITS_TO_LONGS(HCLGE_MTA_STATUS_MSG_BITS)];
- u16 tbl_cnt;
- u16 tbl_idx;
- u8 msg_ofs;
- u8 msg_bit;
-
- tbl_cnt = is_end ? HCLGE_MTA_STATUS_MSG_END_BITS :
- HCLGE_MTA_STATUS_MSG_BITS;
-
- /* set msg field */
- msg_ofs = 0;
- msg_bit = 0;
- memset(status, 0, sizeof(status));
- for (tbl_idx = 0; tbl_idx < tbl_cnt; tbl_idx++) {
- if (msg[msg_ofs] & BIT(msg_bit))
- set_bit(tbl_idx, status);
-
- msg_bit++;
- if (msg_bit == BITS_PER_BYTE) {
- msg_bit = 0;
- msg_ofs++;
- }
- }
-
- return hclge_update_mta_status_common(vport,
- status, idx * HCLGE_MTA_STATUS_MSG_BITS,
- tbl_cnt, is_end);
-}
-
static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req,
bool gen_resp)
@@ -284,27 +247,6 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
status = hclge_add_mc_addr_common(vport, mac_addr);
} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
status = hclge_rm_mc_addr_common(vport, mac_addr);
- } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE) {
- u8 func_id = vport->vport_id;
- bool enable = mbx_req->msg[2];
-
- status = hclge_cfg_func_mta_filter(hdev, func_id, enable);
- } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ) {
- resp_data = hdev->mta_mac_sel_type;
- resp_len = sizeof(u8);
- gen_resp = true;
- status = 0;
- } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE) {
- /* mta status update msg format
- * msg[2.6 : 2.0] msg index
- * msg[2.7] msg is end
- * msg[15 : 3] mta status bits[103 : 0]
- */
- bool is_end = (mbx_req->msg[2] & 0x80) ? true : false;
-
- status = hclge_set_vf_mc_mta_status(vport, &mbx_req->msg[3],
- mbx_req->msg[2] & 0x7F,
- is_end);
} else {
dev_err(&hdev->pdev->dev,
"failed to set mcast mac addr, unknown subcode %d\n",
@@ -458,6 +400,12 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
/* handle all the mailbox requests in the queue */
while (!hclge_cmd_crq_empty(&hdev->hw)) {
+ if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
+ dev_warn(&hdev->pdev->dev,
+ "command queue needs re-initializing\n");
+ return;
+ }
+
desc = &crq->desc[crq->next_to_use];
req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 24b1f2a0c32a..03018638f701 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -52,7 +52,7 @@ static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum,
struct hclge_desc desc;
int ret;
- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state))
return 0;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, false);
@@ -90,7 +90,7 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
struct hclge_desc desc;
int ret;
- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state))
return 0;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, true);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
index 19b32860309c..bc294b0c8b62 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -89,6 +89,7 @@ enum hclgevf_opcode_type {
HCLGEVF_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
/* RSS cmd */
HCLGEVF_OPC_RSS_GENERIC_CONFIG = 0x0D01,
+ HCLGEVF_OPC_RSS_INPUT_TUPLE = 0x0D02,
HCLGEVF_OPC_RSS_INDIR_TABLE = 0x0D07,
HCLGEVF_OPC_RSS_TC_MODE = 0x0D08,
/* Mailbox cmd */
@@ -148,7 +149,8 @@ struct hclgevf_query_res_cmd {
__le16 rsv[7];
};
-#define HCLGEVF_RSS_HASH_KEY_OFFSET 4
+#define HCLGEVF_RSS_DEFAULT_OUTPORT_B 4
+#define HCLGEVF_RSS_HASH_KEY_OFFSET_B 4
#define HCLGEVF_RSS_HASH_KEY_NUM 16
struct hclgevf_rss_config_cmd {
u8 hash_config;
@@ -159,11 +161,11 @@ struct hclgevf_rss_config_cmd {
struct hclgevf_rss_input_tuple_cmd {
u8 ipv4_tcp_en;
u8 ipv4_udp_en;
- u8 ipv4_stcp_en;
+ u8 ipv4_sctp_en;
u8 ipv4_fragment_en;
u8 ipv6_tcp_en;
u8 ipv6_udp_en;
- u8 ipv6_stcp_en;
+ u8 ipv6_sctp_en;
u8 ipv6_fragment_en;
u8 rsv[16];
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 8f858cb2a67b..085edb945389 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -386,6 +386,47 @@ static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
return -EINVAL;
}
+static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
+ const u8 hfunc, const u8 *key)
+{
+ struct hclgevf_rss_config_cmd *req;
+ struct hclgevf_desc desc;
+ int key_offset;
+ int key_size;
+ int ret;
+
+ req = (struct hclgevf_rss_config_cmd *)desc.data;
+
+ for (key_offset = 0; key_offset < 3; key_offset++) {
+ hclgevf_cmd_setup_basic_desc(&desc,
+ HCLGEVF_OPC_RSS_GENERIC_CONFIG,
+ false);
+
+ req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
+ req->hash_config |=
+ (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);
+
+ if (key_offset == 2)
+ key_size =
+ HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
+ else
+ key_size = HCLGEVF_RSS_HASH_KEY_NUM;
+
+ memcpy(req->hash_key,
+ key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);
+
+ ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Configure RSS config fail, status = %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
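
hclgevf_set_rss_algo_key() above programs the hash key through three firmware descriptors, 16 bytes per descriptor with the tail carried by the last one. A standalone sketch of the slicing, assuming the usual 40-byte Toeplitz key (HCLGEVF_RSS_KEY_SIZE itself is not shown in this hunk), is:

#include <stdio.h>

#define RSS_KEY_SIZE		40	/* assumed value of HCLGEVF_RSS_KEY_SIZE */
#define RSS_HASH_KEY_NUM	16	/* key bytes carried per descriptor */

int main(void)
{
	int key_offset;

	for (key_offset = 0; key_offset < 3; key_offset++) {
		int key_size = (key_offset == 2) ?
			       RSS_KEY_SIZE - RSS_HASH_KEY_NUM * 2 :
			       RSS_HASH_KEY_NUM;

		printf("desc %d carries key bytes [%d, %d)\n", key_offset,
		       key_offset * RSS_HASH_KEY_NUM,
		       key_offset * RSS_HASH_KEY_NUM + key_size);
	}
	return 0;
}

With a 40-byte key the three descriptors carry bytes [0, 16), [16, 32) and [32, 40), matching the key_offset/key_size arithmetic in the helper.
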
static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
return HCLGEVF_RSS_KEY_SIZE;
@@ -466,68 +507,40 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
return status;
}
-static int hclgevf_get_rss_hw_cfg(struct hnae3_handle *handle, u8 *hash,
- u8 *key)
+static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
+ u8 *hfunc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hclgevf_rss_config_cmd *req;
- int lkup_times = key ? 3 : 1;
- struct hclgevf_desc desc;
- int key_offset;
- int key_size;
- int status;
-
- req = (struct hclgevf_rss_config_cmd *)desc.data;
- lkup_times = (lkup_times == 3) ? 3 : ((hash) ? 1 : 0);
-
- for (key_offset = 0; key_offset < lkup_times; key_offset++) {
- hclgevf_cmd_setup_basic_desc(&desc,
- HCLGEVF_OPC_RSS_GENERIC_CONFIG,
- true);
- req->hash_config |= (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET);
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ int i;
- status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
- if (status) {
- dev_err(&hdev->pdev->dev,
- "failed to get hardware RSS cfg, status = %d\n",
- status);
- return status;
+ if (handle->pdev->revision >= 0x21) {
+ /* Get hash algorithm */
+ if (hfunc) {
+ switch (rss_cfg->hash_algo) {
+ case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
+ *hfunc = ETH_RSS_HASH_TOP;
+ break;
+ case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
+ *hfunc = ETH_RSS_HASH_XOR;
+ break;
+ default:
+ *hfunc = ETH_RSS_HASH_UNKNOWN;
+ break;
+ }
}
- if (key_offset == 2)
- key_size =
- HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
- else
- key_size = HCLGEVF_RSS_HASH_KEY_NUM;
-
+ /* Get the RSS Key required by the user */
if (key)
- memcpy(key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM,
- req->hash_key,
- key_size);
- }
-
- if (hash) {
- if ((req->hash_config & 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ)
- *hash = ETH_RSS_HASH_TOP;
- else
- *hash = ETH_RSS_HASH_UNKNOWN;
+ memcpy(key, rss_cfg->rss_hash_key,
+ HCLGEVF_RSS_KEY_SIZE);
}
- return 0;
-}
-
-static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
- u8 *hfunc)
-{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
- int i;
-
if (indir)
for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
indir[i] = rss_cfg->rss_indirection_tbl[i];
- return hclgevf_get_rss_hw_cfg(handle, hfunc, key);
+ return 0;
}
static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
@@ -535,7 +548,36 @@ static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
- int i;
+ int ret, i;
+
+ if (handle->pdev->revision >= 0x21) {
+ /* Set the RSS Hash Key if specified by the user */
+ if (key) {
+ switch (hfunc) {
+ case ETH_RSS_HASH_TOP:
+ rss_cfg->hash_algo =
+ HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
+ break;
+ case ETH_RSS_HASH_XOR:
+ rss_cfg->hash_algo =
+ HCLGEVF_RSS_HASH_ALGO_SIMPLE;
+ break;
+ case ETH_RSS_HASH_NO_CHANGE:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
+ key);
+ if (ret)
+ return ret;
+
+ /* Update the shadow RSS key with the user specified key */
+ memcpy(rss_cfg->rss_hash_key, key,
+ HCLGEVF_RSS_KEY_SIZE);
+ }
+ }
/* update the shadow RSS table with user specified qids */
for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
@@ -545,6 +587,193 @@ static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
return hclgevf_set_rss_indir_table(hdev);
}
+static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
+{
+ u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;
+
+ if (nfc->data & RXH_L4_B_2_3)
+ hash_sets |= HCLGEVF_D_PORT_BIT;
+ else
+ hash_sets &= ~HCLGEVF_D_PORT_BIT;
+
+ if (nfc->data & RXH_IP_SRC)
+ hash_sets |= HCLGEVF_S_IP_BIT;
+ else
+ hash_sets &= ~HCLGEVF_S_IP_BIT;
+
+ if (nfc->data & RXH_IP_DST)
+ hash_sets |= HCLGEVF_D_IP_BIT;
+ else
+ hash_sets &= ~HCLGEVF_D_IP_BIT;
+
+ if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
+ hash_sets |= HCLGEVF_V_TAG_BIT;
+
+ return hash_sets;
+}
+
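
To make the flag translation in hclgevf_get_rss_hash_bits() concrete: each requested ethtool field sets one hardware tuple bit, and SCTP flow types additionally hash on the verification tag. The sketch below mirrors that mapping with plain booleans instead of the RXH_* flags, purely for illustration.

#include <stdio.h>

#define BIT(n)	(1U << (n))

/* Hardware tuple bits, as defined in hclgevf_main.h */
#define D_PORT_BIT	BIT(0)
#define S_PORT_BIT	BIT(1)
#define D_IP_BIT	BIT(2)
#define S_IP_BIT	BIT(3)
#define V_TAG_BIT	BIT(4)

static unsigned char get_rss_hash_bits(int ip_src, int ip_dst,
					int l4_b01, int l4_b23, int is_sctp)
{
	unsigned char hash_sets = 0;

	if (ip_src)
		hash_sets |= S_IP_BIT;
	if (ip_dst)
		hash_sets |= D_IP_BIT;
	if (l4_b01)
		hash_sets |= S_PORT_BIT;	/* RXH_L4_B_0_1: source port */
	if (l4_b23)
		hash_sets |= D_PORT_BIT;	/* RXH_L4_B_2_3: dest port   */
	if (is_sctp)
		hash_sets |= V_TAG_BIT;		/* SCTP also hashes the v-tag */

	return hash_sets;
}

int main(void)
{
	/* 4-tuple hashing for TCP: both IPs plus both port halves -> 0x0f */
	printf("tcp4:  0x%02x\n", get_rss_hash_bits(1, 1, 1, 1, 0));
	/* SCTP with IP-only hashing still sets the v-tag bit      -> 0x1c */
	printf("sctp4: 0x%02x\n", get_rss_hash_bits(1, 1, 0, 0, 1));
	return 0;
}
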
+static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ struct hclgevf_rss_input_tuple_cmd *req;
+ struct hclgevf_desc desc;
+ u8 tuple_sets;
+ int ret;
+
+ if (handle->pdev->revision == 0x20)
+ return -EOPNOTSUPP;
+
+ if (nfc->data &
+ ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);
+
+ req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
+ req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
+ req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
+ req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
+ req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
+ req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
+ req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
+ req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
+
+ tuple_sets = hclgevf_get_rss_hash_bits(nfc);
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ req->ipv4_tcp_en = tuple_sets;
+ break;
+ case TCP_V6_FLOW:
+ req->ipv6_tcp_en = tuple_sets;
+ break;
+ case UDP_V4_FLOW:
+ req->ipv4_udp_en = tuple_sets;
+ break;
+ case UDP_V6_FLOW:
+ req->ipv6_udp_en = tuple_sets;
+ break;
+ case SCTP_V4_FLOW:
+ req->ipv4_sctp_en = tuple_sets;
+ break;
+ case SCTP_V6_FLOW:
+ if ((nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+
+ req->ipv6_sctp_en = tuple_sets;
+ break;
+ case IPV4_FLOW:
+ req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ break;
+ case IPV6_FLOW:
+ req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Set rss tuple fail, status = %d\n", ret);
+ return ret;
+ }
+
+ rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
+ rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
+ rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
+ rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
+ rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
+ rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
+ rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
+ rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
+ return 0;
+}
+
+static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ u8 tuple_sets;
+
+ if (handle->pdev->revision == 0x20)
+ return -EOPNOTSUPP;
+
+ nfc->data = 0;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
+ break;
+ case UDP_V4_FLOW:
+ tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
+ break;
+ case TCP_V6_FLOW:
+ tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
+ break;
+ case UDP_V6_FLOW:
+ tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
+ break;
+ case SCTP_V4_FLOW:
+ tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
+ break;
+ case SCTP_V6_FLOW:
+ tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!tuple_sets)
+ return 0;
+
+ if (tuple_sets & HCLGEVF_D_PORT_BIT)
+ nfc->data |= RXH_L4_B_2_3;
+ if (tuple_sets & HCLGEVF_S_PORT_BIT)
+ nfc->data |= RXH_L4_B_0_1;
+ if (tuple_sets & HCLGEVF_D_IP_BIT)
+ nfc->data |= RXH_IP_DST;
+ if (tuple_sets & HCLGEVF_S_IP_BIT)
+ nfc->data |= RXH_IP_SRC;
+
+ return 0;
+}
+
+static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
+ struct hclgevf_rss_cfg *rss_cfg)
+{
+ struct hclgevf_rss_input_tuple_cmd *req;
+ struct hclgevf_desc desc;
+ int ret;
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);
+
+ req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
+
+ req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
+ req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
+ req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
+ req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
+ req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
+ req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
+ req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
+ req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
+
+ ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Configure rss input fail, status = %d\n", ret);
+ return ret;
+}
+
static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
@@ -696,12 +925,12 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
return status;
}
-static void hclgevf_set_promisc_mode(struct hnae3_handle *handle,
- bool en_uc_pmc, bool en_mc_pmc)
+static int hclgevf_set_promisc_mode(struct hnae3_handle *handle,
+ bool en_uc_pmc, bool en_mc_pmc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc);
+ return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc);
}
static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
@@ -746,126 +975,6 @@ static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
}
}
-static int hclgevf_cfg_func_mta_type(struct hclgevf_dev *hdev)
-{
- u8 resp_msg = HCLGEVF_MTA_TYPE_SEL_MAX;
- int ret;
-
- ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
- HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ,
- NULL, 0, true, &resp_msg, sizeof(u8));
-
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Read mta type fail, ret=%d.\n", ret);
- return ret;
- }
-
- if (resp_msg > HCLGEVF_MTA_TYPE_SEL_MAX) {
- dev_err(&hdev->pdev->dev,
- "Read mta type invalid, resp=%d.\n", resp_msg);
- return -EINVAL;
- }
-
- hdev->mta_mac_sel_type = resp_msg;
-
- return 0;
-}
-
-static u16 hclgevf_get_mac_addr_to_mta_index(struct hclgevf_dev *hdev,
- const u8 *addr)
-{
- u32 rsh = HCLGEVF_MTA_TYPE_SEL_MAX - hdev->mta_mac_sel_type;
- u16 high_val = addr[1] | (addr[0] << 8);
-
- return (high_val >> rsh) & 0xfff;
-}
-
-static int hclgevf_do_update_mta_status(struct hclgevf_dev *hdev,
- unsigned long *status)
-{
-#define HCLGEVF_MTA_STATUS_MSG_SIZE 13
-#define HCLGEVF_MTA_STATUS_MSG_BITS \
- (HCLGEVF_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE)
-#define HCLGEVF_MTA_STATUS_MSG_END_BITS \
- (HCLGEVF_MTA_TBL_SIZE % HCLGEVF_MTA_STATUS_MSG_BITS)
- u16 tbl_cnt;
- u16 tbl_idx;
- u8 msg_cnt;
- u8 msg_idx;
- int ret;
-
- msg_cnt = DIV_ROUND_UP(HCLGEVF_MTA_TBL_SIZE,
- HCLGEVF_MTA_STATUS_MSG_BITS);
- tbl_idx = 0;
- msg_idx = 0;
- while (msg_cnt--) {
- u8 msg[HCLGEVF_MTA_STATUS_MSG_SIZE + 1];
- u8 *p = &msg[1];
- u8 msg_ofs;
- u8 msg_bit;
-
- memset(msg, 0, sizeof(msg));
-
- /* set index field */
- msg[0] = 0x7F & msg_idx;
-
- /* set end flag field */
- if (msg_cnt == 0) {
- msg[0] |= 0x80;
- tbl_cnt = HCLGEVF_MTA_STATUS_MSG_END_BITS;
- } else {
- tbl_cnt = HCLGEVF_MTA_STATUS_MSG_BITS;
- }
-
- /* set status field */
- msg_ofs = 0;
- msg_bit = 0;
- while (tbl_cnt--) {
- if (test_bit(tbl_idx, status))
- p[msg_ofs] |= BIT(msg_bit);
-
- tbl_idx++;
-
- msg_bit++;
- if (msg_bit == BITS_PER_BYTE) {
- msg_bit = 0;
- msg_ofs++;
- }
- }
-
- ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
- HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE,
- msg, sizeof(msg), false, NULL, 0);
- if (ret)
- break;
-
- msg_idx++;
- }
-
- return ret;
-}
-
-static int hclgevf_update_mta_status(struct hnae3_handle *handle)
-{
- unsigned long mta_status[BITS_TO_LONGS(HCLGEVF_MTA_TBL_SIZE)];
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct net_device *netdev = hdev->nic.kinfo.netdev;
- struct netdev_hw_addr *ha;
- u16 tbl_idx;
-
- /* clear status */
- memset(mta_status, 0, sizeof(mta_status));
-
- /* update status from mc addr list */
- netdev_for_each_mc_addr(ha, netdev) {
- tbl_idx = hclgevf_get_mac_addr_to_mta_index(hdev, ha->addr);
- set_bit(tbl_idx, mta_status);
- }
-
- return hclgevf_do_update_mta_status(hdev, mta_status);
-}
-
static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
@@ -971,7 +1080,7 @@ static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
1, false, NULL, 0);
}
-static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
+static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
u8 msg_data[2];
@@ -982,10 +1091,10 @@ static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
/* disable vf queue before send queue reset msg to PF */
ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
if (ret)
- return;
+ return ret;
- hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
- 2, true, NULL, 0);
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
+ 2, true, NULL, 0);
}
static int hclgevf_notify_client(struct hclgevf_dev *hdev,
@@ -1061,6 +1170,8 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
/* bring down the nic to stop any ongoing TX/RX */
hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ rtnl_unlock();
+
/* check if VF could successfully fetch the hardware reset completion
* status from the hardware
*/
@@ -1072,12 +1183,15 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
ret);
dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n");
+ rtnl_lock();
hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
rtnl_unlock();
return ret;
}
+ rtnl_lock();
+
/* now, re-initialize the nic client and ae device */
ret = hclgevf_reset_stack(hdev);
if (ret)
@@ -1105,7 +1219,8 @@ static int hclgevf_do_reset(struct hclgevf_dev *hdev)
return status;
}
-static void hclgevf_reset_event(struct hnae3_handle *handle)
+static void hclgevf_reset_event(struct pci_dev *pdev,
+ struct hnae3_handle *handle)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
@@ -1396,6 +1511,39 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
rss_cfg->rss_size = hdev->rss_size_max;
+ if (hdev->pdev->revision >= 0x21) {
+ rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
+ netdev_rss_key_fill(rss_cfg->rss_hash_key,
+ HCLGEVF_RSS_KEY_SIZE);
+
+ ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
+ rss_cfg->rss_hash_key);
+ if (ret)
+ return ret;
+
+ rss_cfg->rss_tuple_sets.ipv4_tcp_en =
+ HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ rss_cfg->rss_tuple_sets.ipv4_udp_en =
+ HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ rss_cfg->rss_tuple_sets.ipv4_sctp_en =
+ HCLGEVF_RSS_INPUT_TUPLE_SCTP;
+ rss_cfg->rss_tuple_sets.ipv4_fragment_en =
+ HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ rss_cfg->rss_tuple_sets.ipv6_tcp_en =
+ HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ rss_cfg->rss_tuple_sets.ipv6_udp_en =
+ HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ rss_cfg->rss_tuple_sets.ipv6_sctp_en =
+ HCLGEVF_RSS_INPUT_TUPLE_SCTP;
+ rss_cfg->rss_tuple_sets.ipv6_fragment_en =
+ HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+
+ ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
+ if (ret)
+ return ret;
+
+ }
+
/* Initialize RSS indirect table for each vport */
for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;
@@ -1871,14 +2019,6 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
- /* Initialize mta type for this VF */
- ret = hclgevf_cfg_func_mta_type(hdev);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "failed(%d) to initialize MTA type\n", ret);
- goto err_config;
- }
-
/* Initialize RSS for this VF */
ret = hclgevf_rss_init_hw(hdev);
if (ret) {
@@ -2038,7 +2178,6 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.rm_uc_addr = hclgevf_rm_uc_addr,
.add_mc_addr = hclgevf_add_mc_addr,
.rm_mc_addr = hclgevf_rm_mc_addr,
- .update_mta_status = hclgevf_update_mta_status,
.get_stats = hclgevf_get_stats,
.update_stats = hclgevf_update_stats,
.get_strings = hclgevf_get_strings,
@@ -2047,6 +2186,8 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.get_rss_indir_size = hclgevf_get_rss_indir_size,
.get_rss = hclgevf_get_rss,
.set_rss = hclgevf_set_rss,
+ .get_rss_tuple = hclgevf_get_rss_tuple,
+ .set_rss_tuple = hclgevf_set_rss_tuple,
.get_tc_size = hclgevf_get_tc_size,
.get_fw_version = hclgevf_get_fw_version,
.set_vlan_filter = hclgevf_set_vlan_filter,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 2af01f107c63..aed241e8ffab 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -46,9 +46,13 @@
#define HCLGEVF_RSS_HASH_ALGO_MASK 0xf
#define HCLGEVF_RSS_CFG_TBL_NUM \
(HCLGEVF_RSS_IND_TBL_SIZE / HCLGEVF_RSS_CFG_TBL_SIZE)
-
-#define HCLGEVF_MTA_TBL_SIZE 4096
-#define HCLGEVF_MTA_TYPE_SEL_MAX 4
+#define HCLGEVF_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
+#define HCLGEVF_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
+#define HCLGEVF_D_PORT_BIT BIT(0)
+#define HCLGEVF_S_PORT_BIT BIT(1)
+#define HCLGEVF_D_IP_BIT BIT(2)
+#define HCLGEVF_S_IP_BIT BIT(3)
+#define HCLGEVF_V_TAG_BIT BIT(4)
/* states of hclgevf device & tasks */
enum hclgevf_states {
@@ -109,12 +113,24 @@ struct hclgevf_cfg {
u32 numa_node_map;
};
+struct hclgevf_rss_tuple_cfg {
+ u8 ipv4_tcp_en;
+ u8 ipv4_udp_en;
+ u8 ipv4_sctp_en;
+ u8 ipv4_fragment_en;
+ u8 ipv6_tcp_en;
+ u8 ipv6_udp_en;
+ u8 ipv6_sctp_en;
+ u8 ipv6_fragment_en;
+};
+
struct hclgevf_rss_cfg {
u8 rss_hash_key[HCLGEVF_RSS_KEY_SIZE]; /* user configured hash keys */
u32 hash_algo;
u32 rss_size;
u8 hw_tc_map;
u8 rss_indirection_tbl[HCLGEVF_RSS_IND_TBL_SIZE]; /* shadow table */
+ struct hclgevf_rss_tuple_cfg rss_tuple_sets;
};
struct hclgevf_misc_vector {
@@ -157,8 +173,6 @@ struct hclgevf_dev {
u16 *vector_status;
int *vector_irq;
- bool accept_mta_mc; /* whether to accept mta filter multicast */
- u8 mta_mac_sel_type;
bool mbx_event_pending;
struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
index 0f5563f3b779..097b5502603f 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
@@ -58,6 +58,8 @@ enum hinic_port_cmd {
HINIC_PORT_CMD_GET_GLOBAL_QPN = 102,
+ HINIC_PORT_CMD_SET_TSO = 112,
+
HINIC_PORT_CMD_GET_CAP = 170,
};
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
index cb239627770f..bbf9bdd0ee3e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
@@ -70,8 +70,6 @@
#define SQ_MASKED_IDX(sq, idx) ((idx) & (sq)->wq->mask)
#define RQ_MASKED_IDX(rq, idx) ((idx) & (rq)->wq->mask)
-#define TX_MAX_MSS_DEFAULT 0x3E00
-
enum sq_wqe_type {
SQ_NORMAL_WQE = 0,
};
@@ -494,33 +492,16 @@ static void sq_prepare_ctrl(struct hinic_sq_ctrl *ctrl, u16 prod_idx,
HINIC_SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
HINIC_SQ_CTRL_SET(ctrl_size, LEN);
- ctrl->queue_info = HINIC_SQ_CTRL_SET(TX_MAX_MSS_DEFAULT,
- QUEUE_INFO_MSS);
+ ctrl->queue_info = HINIC_SQ_CTRL_SET(HINIC_MSS_DEFAULT,
+ QUEUE_INFO_MSS) |
+ HINIC_SQ_CTRL_SET(1, QUEUE_INFO_UC);
}
static void sq_prepare_task(struct hinic_sq_task *task)
{
- task->pkt_info0 =
- HINIC_SQ_TASK_INFO0_SET(0, L2HDR_LEN) |
- HINIC_SQ_TASK_INFO0_SET(HINIC_L4_OFF_DISABLE, L4_OFFLOAD) |
- HINIC_SQ_TASK_INFO0_SET(HINIC_OUTER_L3TYPE_UNKNOWN,
- INNER_L3TYPE) |
- HINIC_SQ_TASK_INFO0_SET(HINIC_VLAN_OFF_DISABLE,
- VLAN_OFFLOAD) |
- HINIC_SQ_TASK_INFO0_SET(HINIC_PKT_NOT_PARSED, PARSE_FLAG);
-
- task->pkt_info1 =
- HINIC_SQ_TASK_INFO1_SET(HINIC_MEDIA_UNKNOWN, MEDIA_TYPE) |
- HINIC_SQ_TASK_INFO1_SET(0, INNER_L4_LEN) |
- HINIC_SQ_TASK_INFO1_SET(0, INNER_L3_LEN);
-
- task->pkt_info2 =
- HINIC_SQ_TASK_INFO2_SET(0, TUNNEL_L4_LEN) |
- HINIC_SQ_TASK_INFO2_SET(0, OUTER_L3_LEN) |
- HINIC_SQ_TASK_INFO2_SET(HINIC_TUNNEL_L4TYPE_UNKNOWN,
- TUNNEL_L4TYPE) |
- HINIC_SQ_TASK_INFO2_SET(HINIC_OUTER_L3TYPE_UNKNOWN,
- OUTER_L3TYPE);
+ task->pkt_info0 = 0;
+ task->pkt_info1 = 0;
+ task->pkt_info2 = 0;
task->ufo_v6_identify = 0;
@@ -529,6 +510,86 @@ static void sq_prepare_task(struct hinic_sq_task *task)
task->zero_pad = 0;
}
+void hinic_task_set_l2hdr(struct hinic_sq_task *task, u32 len)
+{
+ task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(len, L2HDR_LEN);
+}
+
+void hinic_task_set_outter_l3(struct hinic_sq_task *task,
+ enum hinic_l3_offload_type l3_type,
+ u32 network_len)
+{
+ task->pkt_info2 |= HINIC_SQ_TASK_INFO2_SET(l3_type, OUTER_L3TYPE) |
+ HINIC_SQ_TASK_INFO2_SET(network_len, OUTER_L3LEN);
+}
+
+void hinic_task_set_inner_l3(struct hinic_sq_task *task,
+ enum hinic_l3_offload_type l3_type,
+ u32 network_len)
+{
+ task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l3_type, INNER_L3TYPE);
+ task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(network_len, INNER_L3LEN);
+}
+
+void hinic_task_set_tunnel_l4(struct hinic_sq_task *task,
+ enum hinic_l4_tunnel_type l4_type,
+ u32 tunnel_len)
+{
+ task->pkt_info2 |= HINIC_SQ_TASK_INFO2_SET(l4_type, TUNNEL_L4TYPE) |
+ HINIC_SQ_TASK_INFO2_SET(tunnel_len, TUNNEL_L4LEN);
+}
+
+void hinic_set_cs_inner_l4(struct hinic_sq_task *task, u32 *queue_info,
+ enum hinic_l4_offload_type l4_offload,
+ u32 l4_len, u32 offset)
+{
+ u32 tcp_udp_cs = 0, sctp = 0;
+ u32 mss = HINIC_MSS_DEFAULT;
+
+ if (l4_offload == TCP_OFFLOAD_ENABLE ||
+ l4_offload == UDP_OFFLOAD_ENABLE)
+ tcp_udp_cs = 1;
+ else if (l4_offload == SCTP_OFFLOAD_ENABLE)
+ sctp = 1;
+
+ task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l4_offload, L4_OFFLOAD);
+ task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(l4_len, INNER_L4LEN);
+
+ *queue_info |= HINIC_SQ_CTRL_SET(offset, QUEUE_INFO_PLDOFF) |
+ HINIC_SQ_CTRL_SET(tcp_udp_cs, QUEUE_INFO_TCPUDP_CS) |
+ HINIC_SQ_CTRL_SET(sctp, QUEUE_INFO_SCTP);
+
+ *queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS);
+ *queue_info |= HINIC_SQ_CTRL_SET(mss, QUEUE_INFO_MSS);
+}
+
+void hinic_set_tso_inner_l4(struct hinic_sq_task *task, u32 *queue_info,
+ enum hinic_l4_offload_type l4_offload,
+ u32 l4_len, u32 offset, u32 ip_ident, u32 mss)
+{
+ u32 tso = 0, ufo = 0;
+
+ if (l4_offload == TCP_OFFLOAD_ENABLE)
+ tso = 1;
+ else if (l4_offload == UDP_OFFLOAD_ENABLE)
+ ufo = 1;
+
+ task->ufo_v6_identify = ip_ident;
+
+ task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l4_offload, L4_OFFLOAD);
+ task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(tso || ufo, TSO_FLAG);
+ task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(l4_len, INNER_L4LEN);
+
+ *queue_info |= HINIC_SQ_CTRL_SET(offset, QUEUE_INFO_PLDOFF) |
+ HINIC_SQ_CTRL_SET(tso, QUEUE_INFO_TSO) |
+ HINIC_SQ_CTRL_SET(ufo, QUEUE_INFO_UFO) |
+ HINIC_SQ_CTRL_SET(!!l4_offload, QUEUE_INFO_TCPUDP_CS);
+
+ /* set MSS value */
+ *queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS);
+ *queue_info |= HINIC_SQ_CTRL_SET(mss, QUEUE_INFO_MSS);
+}
+
/**
* hinic_sq_prepare_wqe - prepare wqe before insert to the queue
* @sq: send queue
@@ -613,6 +674,16 @@ struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq,
}
/**
+ * hinic_sq_return_wqe - return the wqe to the sq
+ * @sq: send queue
+ * @wqe_size: the size of the wqe
+ **/
+void hinic_sq_return_wqe(struct hinic_sq *sq, unsigned int wqe_size)
+{
+ hinic_return_wqe(sq->wq, wqe_size);
+}
+
+/**
* hinic_sq_write_wqe - write the wqe to the sq
* @sq: send queue
* @prod_idx: pi of the wqe
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
index 6c84f83ec283..038522e202b6 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
@@ -149,6 +149,31 @@ int hinic_get_sq_free_wqebbs(struct hinic_sq *sq);
int hinic_get_rq_free_wqebbs(struct hinic_rq *rq);
+void hinic_task_set_l2hdr(struct hinic_sq_task *task, u32 len);
+
+void hinic_task_set_outter_l3(struct hinic_sq_task *task,
+ enum hinic_l3_offload_type l3_type,
+ u32 network_len);
+
+void hinic_task_set_inner_l3(struct hinic_sq_task *task,
+ enum hinic_l3_offload_type l3_type,
+ u32 network_len);
+
+void hinic_task_set_tunnel_l4(struct hinic_sq_task *task,
+ enum hinic_l4_tunnel_type l4_type,
+ u32 tunnel_len);
+
+void hinic_set_cs_inner_l4(struct hinic_sq_task *task,
+ u32 *queue_info,
+ enum hinic_l4_offload_type l4_offload,
+ u32 l4_len, u32 offset);
+
+void hinic_set_tso_inner_l4(struct hinic_sq_task *task,
+ u32 *queue_info,
+ enum hinic_l4_offload_type l4_offload,
+ u32 l4_len,
+ u32 offset, u32 ip_ident, u32 mss);
+
void hinic_sq_prepare_wqe(struct hinic_sq *sq, u16 prod_idx,
struct hinic_sq_wqe *wqe, struct hinic_sge *sges,
int nr_sges);
@@ -159,6 +184,8 @@ void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size,
struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq,
unsigned int wqe_size, u16 *prod_idx);
+void hinic_sq_return_wqe(struct hinic_sq *sq, unsigned int wqe_size);
+
void hinic_sq_write_wqe(struct hinic_sq *sq, u16 prod_idx,
struct hinic_sq_wqe *wqe, struct sk_buff *skb,
unsigned int wqe_size);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
index 3e3181c089bd..f92f1bf3901a 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
@@ -775,6 +775,20 @@ struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
}
/**
+ * hinic_return_wqe - return the wqe when transmit failed
+ * @wq: wq to return wqe
+ * @wqe_size: wqe size
+ **/
+void hinic_return_wqe(struct hinic_wq *wq, unsigned int wqe_size)
+{
+ int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
+
+ atomic_sub(num_wqebbs, &wq->prod_idx);
+
+ atomic_add(num_wqebbs, &wq->delta);
+}
+
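
hinic_return_wqe() above gives the WQE building blocks claimed by hinic_get_wqe() back to the producer side when the transmit path bails out. The number of WQEBBs is the WQE size rounded up to a whole number of blocks; a quick standalone check of that rounding, with a hypothetical 64-byte WQEBB, is:

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned int wqebb_size = 64;	/* hypothetical WQEBB size */
	unsigned int wqe_sizes[] = { 32, 64, 96, 192 };
	unsigned int i;

	for (i = 0; i < sizeof(wqe_sizes) / sizeof(wqe_sizes[0]); i++) {
		unsigned int num_wqebbs =
			ALIGN(wqe_sizes[i], wqebb_size) / wqebb_size;

		printf("wqe_size=%3u -> num_wqebbs=%u\n",
		       wqe_sizes[i], num_wqebbs);
	}
	return 0;
}

That count is subtracted back from prod_idx and added to delta, mirroring in reverse the bookkeeping done when the WQE was claimed.
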
+/**
* hinic_put_wqe - return the wqe place to use for a new wqe
* @wq: wq to return wqe
* @wqe_size: wqe size
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
index 9c030a0f035e..9b66545ba563 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
@@ -104,6 +104,8 @@ void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq);
struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
u16 *prod_idx);
+void hinic_return_wqe(struct hinic_wq *wq, unsigned int wqe_size);
+
void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size);
struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
index bc73485483c5..9754d6ed5f4a 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
@@ -62,19 +62,33 @@
(((val) >> HINIC_CMDQ_WQE_HEADER_##member##_SHIFT) \
& HINIC_CMDQ_WQE_HEADER_##member##_MASK)
-#define HINIC_SQ_CTRL_BUFDESC_SECT_LEN_SHIFT 0
-#define HINIC_SQ_CTRL_TASKSECT_LEN_SHIFT 16
-#define HINIC_SQ_CTRL_DATA_FORMAT_SHIFT 22
-#define HINIC_SQ_CTRL_LEN_SHIFT 29
-
-#define HINIC_SQ_CTRL_BUFDESC_SECT_LEN_MASK 0xFF
-#define HINIC_SQ_CTRL_TASKSECT_LEN_MASK 0x1F
-#define HINIC_SQ_CTRL_DATA_FORMAT_MASK 0x1
-#define HINIC_SQ_CTRL_LEN_MASK 0x3
-
-#define HINIC_SQ_CTRL_QUEUE_INFO_MSS_SHIFT 13
-
-#define HINIC_SQ_CTRL_QUEUE_INFO_MSS_MASK 0x3FFF
+#define HINIC_SQ_CTRL_BUFDESC_SECT_LEN_SHIFT 0
+#define HINIC_SQ_CTRL_TASKSECT_LEN_SHIFT 16
+#define HINIC_SQ_CTRL_DATA_FORMAT_SHIFT 22
+#define HINIC_SQ_CTRL_LEN_SHIFT 29
+
+#define HINIC_SQ_CTRL_BUFDESC_SECT_LEN_MASK 0xFF
+#define HINIC_SQ_CTRL_TASKSECT_LEN_MASK 0x1F
+#define HINIC_SQ_CTRL_DATA_FORMAT_MASK 0x1
+#define HINIC_SQ_CTRL_LEN_MASK 0x3
+
+#define HINIC_SQ_CTRL_QUEUE_INFO_PLDOFF_SHIFT 2
+#define HINIC_SQ_CTRL_QUEUE_INFO_UFO_SHIFT 10
+#define HINIC_SQ_CTRL_QUEUE_INFO_TSO_SHIFT 11
+#define HINIC_SQ_CTRL_QUEUE_INFO_TCPUDP_CS_SHIFT 12
+#define HINIC_SQ_CTRL_QUEUE_INFO_MSS_SHIFT 13
+#define HINIC_SQ_CTRL_QUEUE_INFO_SCTP_SHIFT 27
+#define HINIC_SQ_CTRL_QUEUE_INFO_UC_SHIFT 28
+#define HINIC_SQ_CTRL_QUEUE_INFO_PRI_SHIFT 29
+
+#define HINIC_SQ_CTRL_QUEUE_INFO_PLDOFF_MASK 0xFF
+#define HINIC_SQ_CTRL_QUEUE_INFO_UFO_MASK 0x1
+#define HINIC_SQ_CTRL_QUEUE_INFO_TSO_MASK 0x1
+#define HINIC_SQ_CTRL_QUEUE_INFO_TCPUDP_CS_MASK 0x1
+#define HINIC_SQ_CTRL_QUEUE_INFO_MSS_MASK 0x3FFF
+#define HINIC_SQ_CTRL_QUEUE_INFO_SCTP_MASK 0x1
+#define HINIC_SQ_CTRL_QUEUE_INFO_UC_MASK 0x1
+#define HINIC_SQ_CTRL_QUEUE_INFO_PRI_MASK 0x7
#define HINIC_SQ_CTRL_SET(val, member) \
(((u32)(val) & HINIC_SQ_CTRL_##member##_MASK) \
@@ -84,6 +98,10 @@
(((val) >> HINIC_SQ_CTRL_##member##_SHIFT) \
& HINIC_SQ_CTRL_##member##_MASK)
+#define HINIC_SQ_CTRL_CLEAR(val, member) \
+ ((u32)(val) & (~(HINIC_SQ_CTRL_##member##_MASK \
+ << HINIC_SQ_CTRL_##member##_SHIFT)))
+
#define HINIC_SQ_TASK_INFO0_L2HDR_LEN_SHIFT 0
#define HINIC_SQ_TASK_INFO0_L4_OFFLOAD_SHIFT 8
#define HINIC_SQ_TASK_INFO0_INNER_L3TYPE_SHIFT 10
@@ -108,28 +126,28 @@
/* 8 bits reserved */
#define HINIC_SQ_TASK_INFO1_MEDIA_TYPE_SHIFT 8
-#define HINIC_SQ_TASK_INFO1_INNER_L4_LEN_SHIFT 16
-#define HINIC_SQ_TASK_INFO1_INNER_L3_LEN_SHIFT 24
+#define HINIC_SQ_TASK_INFO1_INNER_L4LEN_SHIFT 16
+#define HINIC_SQ_TASK_INFO1_INNER_L3LEN_SHIFT 24
/* 8 bits reserved */
#define HINIC_SQ_TASK_INFO1_MEDIA_TYPE_MASK 0xFF
-#define HINIC_SQ_TASK_INFO1_INNER_L4_LEN_MASK 0xFF
-#define HINIC_SQ_TASK_INFO1_INNER_L3_LEN_MASK 0xFF
+#define HINIC_SQ_TASK_INFO1_INNER_L4LEN_MASK 0xFF
+#define HINIC_SQ_TASK_INFO1_INNER_L3LEN_MASK 0xFF
#define HINIC_SQ_TASK_INFO1_SET(val, member) \
(((u32)(val) & HINIC_SQ_TASK_INFO1_##member##_MASK) << \
HINIC_SQ_TASK_INFO1_##member##_SHIFT)
-#define HINIC_SQ_TASK_INFO2_TUNNEL_L4_LEN_SHIFT 0
-#define HINIC_SQ_TASK_INFO2_OUTER_L3_LEN_SHIFT 12
-#define HINIC_SQ_TASK_INFO2_TUNNEL_L4TYPE_SHIFT 19
+#define HINIC_SQ_TASK_INFO2_TUNNEL_L4LEN_SHIFT 0
+#define HINIC_SQ_TASK_INFO2_OUTER_L3LEN_SHIFT 8
+#define HINIC_SQ_TASK_INFO2_TUNNEL_L4TYPE_SHIFT 16
/* 1 bit reserved */
-#define HINIC_SQ_TASK_INFO2_OUTER_L3TYPE_SHIFT 22
+#define HINIC_SQ_TASK_INFO2_OUTER_L3TYPE_SHIFT 24
/* 8 bits reserved */
-#define HINIC_SQ_TASK_INFO2_TUNNEL_L4_LEN_MASK 0xFFF
-#define HINIC_SQ_TASK_INFO2_OUTER_L3_LEN_MASK 0x7F
-#define HINIC_SQ_TASK_INFO2_TUNNEL_L4TYPE_MASK 0x3
+#define HINIC_SQ_TASK_INFO2_TUNNEL_L4LEN_MASK 0xFF
+#define HINIC_SQ_TASK_INFO2_OUTER_L3LEN_MASK 0xFF
+#define HINIC_SQ_TASK_INFO2_TUNNEL_L4TYPE_MASK 0x7
/* 1 bit reserved */
#define HINIC_SQ_TASK_INFO2_OUTER_L3TYPE_MASK 0x3
/* 8 bits reserved */
@@ -187,12 +205,15 @@
sizeof(struct hinic_sq_task) + \
(nr_sges) * sizeof(struct hinic_sq_bufdesc))
-#define HINIC_SCMD_DATA_LEN 16
+#define HINIC_SCMD_DATA_LEN 16
+
+#define HINIC_MAX_SQ_BUFDESCS 17
-#define HINIC_MAX_SQ_BUFDESCS 17
+#define HINIC_SQ_WQE_MAX_SIZE 320
+#define HINIC_RQ_WQE_SIZE 32
-#define HINIC_SQ_WQE_MAX_SIZE 320
-#define HINIC_RQ_WQE_SIZE 32
+#define HINIC_MSS_DEFAULT 0x3E00
+#define HINIC_MSS_MIN 0x50
enum hinic_l4offload_type {
HINIC_L4_OFF_DISABLE = 0,
@@ -211,6 +232,26 @@ enum hinic_pkt_parsed {
HINIC_PKT_PARSED = 1,
};
+enum hinic_l3_offload_type {
+ L3TYPE_UNKNOWN = 0,
+ IPV6_PKT = 1,
+ IPV4_PKT_NO_CHKSUM_OFFLOAD = 2,
+ IPV4_PKT_WITH_CHKSUM_OFFLOAD = 3,
+};
+
+enum hinic_l4_offload_type {
+ OFFLOAD_DISABLE = 0,
+ TCP_OFFLOAD_ENABLE = 1,
+ SCTP_OFFLOAD_ENABLE = 2,
+ UDP_OFFLOAD_ENABLE = 3,
+};
+
+enum hinic_l4_tunnel_type {
+ NOT_TUNNEL,
+ TUNNEL_UDP_NO_CSUM,
+ TUNNEL_UDP_CSUM,
+};
+
enum hinic_outer_l3type {
HINIC_OUTER_L3TYPE_UNKNOWN = 0,
HINIC_OUTER_L3TYPE_IPV6 = 1,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 09e9da10b786..fdf2bdb6b0d0 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -789,23 +789,6 @@ static void hinic_get_stats64(struct net_device *netdev,
stats->tx_errors = nic_tx_stats->tx_dropped;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void hinic_netpoll(struct net_device *netdev)
-{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- int i, num_qps;
-
- num_qps = hinic_hwdev_num_qps(nic_dev->hwdev);
- for (i = 0; i < num_qps; i++) {
- struct hinic_txq *txq = &nic_dev->txqs[i];
- struct hinic_rxq *rxq = &nic_dev->rxqs[i];
-
- napi_schedule(&txq->napi);
- napi_schedule(&rxq->napi);
- }
-}
-#endif
-
static const struct net_device_ops hinic_netdev_ops = {
.ndo_open = hinic_open,
.ndo_stop = hinic_close,
@@ -818,14 +801,12 @@ static const struct net_device_ops hinic_netdev_ops = {
.ndo_start_xmit = hinic_xmit_frame,
.ndo_tx_timeout = hinic_tx_timeout,
.ndo_get_stats64 = hinic_get_stats64,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = hinic_netpoll,
-#endif
};
static void netdev_features_init(struct net_device *netdev)
{
- netdev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA;
+ netdev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
netdev->vlan_features = netdev->hw_features;
@@ -883,6 +864,20 @@ static void link_status_event_handler(void *handle, void *buf_in, u16 in_size,
*out_size = sizeof(*ret_link_status);
}
+static int set_features(struct hinic_dev *nic_dev,
+ netdev_features_t pre_features,
+ netdev_features_t features, bool force_change)
+{
+ netdev_features_t changed = force_change ? ~0 : pre_features ^ features;
+ int err = 0;
+
+ if (changed & NETIF_F_TSO)
+ err = hinic_port_set_tso(nic_dev, (features & NETIF_F_TSO) ?
+ HINIC_TSO_ENABLE : HINIC_TSO_DISABLE);
+
+ return err;
+}
+
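With force_change set, changed becomes ~0, so every supported bit is (re)programmed; on a normal feature change only the XOR of the old and new masks matters, which is why nic_dev_init() below can reuse the same helper to program the initial TSO state. A small sketch of that selection logic, with a plain u64 standing in for netdev_features_t and an illustrative F_TSO bit:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define F_TSO  (1ULL << 0)   /* illustrative stand-in for NETIF_F_TSO */

static void apply_features(uint64_t old, uint64_t new, bool force_change)
{
	uint64_t changed = force_change ? ~0ULL : old ^ new;

	if (changed & F_TSO)
		printf("program TSO %s in hardware\n",
		       (new & F_TSO) ? "on" : "off");
	else
		printf("TSO unchanged, nothing to do\n");
}

int main(void)
{
	apply_features(0, F_TSO, true);      /* probe path: always programs HW     */
	apply_features(F_TSO, F_TSO, false); /* no change: skips the firmware call */
	return 0;
}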
/**
* nic_dev_init - Initialize the NIC device
* @pdev: the NIC pci device
@@ -983,7 +978,12 @@ static int nic_dev_init(struct pci_dev *pdev)
hinic_hwdev_cb_register(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS,
nic_dev, link_status_event_handler);
+ err = set_features(nic_dev, 0, nic_dev->netdev->features, true);
+ if (err)
+ goto err_set_features;
+
SET_NETDEV_DEV(netdev, &pdev->dev);
+
err = register_netdev(netdev);
if (err) {
dev_err(&pdev->dev, "Failed to register netdev\n");
@@ -993,6 +993,7 @@ static int nic_dev_init(struct pci_dev *pdev)
return 0;
err_reg_netdev:
+err_set_features:
hinic_hwdev_cb_unregister(nic_dev->hwdev,
HINIC_MGMT_MSG_CMD_LINK_STATUS);
cancel_work_sync(&rx_mode_work->work);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c
index 4d4e3f05fb5f..7575a7d3bd9f 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c
@@ -377,3 +377,35 @@ int hinic_port_get_cap(struct hinic_dev *nic_dev,
return 0;
}
+
+/**
+ * hinic_port_set_tso - set port tso configuration
+ * @nic_dev: nic device
+ * @state: the tso state to set
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_port_set_tso(struct hinic_dev *nic_dev, enum hinic_tso_state state)
+{
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct hinic_tso_config tso_cfg = {0};
+ struct pci_dev *pdev = hwif->pdev;
+ u16 out_size;
+ int err;
+
+ tso_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif);
+ tso_cfg.tso_en = state;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_TSO,
+ &tso_cfg, sizeof(tso_cfg),
+ &tso_cfg, &out_size);
+ if (err || out_size != sizeof(tso_cfg) || tso_cfg.status) {
+ dev_err(&pdev->dev,
+ "Failed to set port tso, ret = %d\n",
+ tso_cfg.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
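hinic_port_set_tso() is the firmware-facing half; the natural consumer is a .ndo_set_features handler that feeds the old and requested feature masks into set_features(). That handler is not part of this series of hunks, so the following is only a hypothetical sketch of how such a callback is typically wired up in a netdev driver:

/* Hypothetical sketch - hinic_set_features() is not defined in this patch. */
static int hinic_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);

	/* Compare against the currently active features; only program the
	 * hardware for bits that actually changed (force_change = false).
	 */
	return set_features(nic_dev, nic_dev->netdev->features,
			    features, false);
}

/* ... and in hinic_netdev_ops:
 *	.ndo_set_features = hinic_set_features,
 */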
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.h b/drivers/net/ethernet/huawei/hinic/hinic_port.h
index 9404365195dd..f6e3220fe28f 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.h
@@ -72,6 +72,11 @@ enum hinic_speed {
HINIC_SPEED_UNKNOWN = 0xFF,
};
+enum hinic_tso_state {
+ HINIC_TSO_DISABLE = 0,
+ HINIC_TSO_ENABLE = 1,
+};
+
struct hinic_port_mac_cmd {
u8 status;
u8 version;
@@ -167,6 +172,17 @@ struct hinic_port_cap {
u8 rsvd2[3];
};
+struct hinic_tso_config {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u8 tso_en;
+ u8 resv2[3];
+};
+
int hinic_port_add_mac(struct hinic_dev *nic_dev, const u8 *addr,
u16 vlan_id);
@@ -195,4 +211,6 @@ int hinic_port_set_func_state(struct hinic_dev *nic_dev,
int hinic_port_get_cap(struct hinic_dev *nic_dev,
struct hinic_port_cap *port_cap);
+int hinic_port_set_tso(struct hinic_dev *nic_dev, enum hinic_tso_state state);
+
#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index c5fca0356c9c..11e73e67358d 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -26,6 +26,13 @@
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <asm/byteorder.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sctp.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
#include "hinic_common.h"
#include "hinic_hw_if.h"
@@ -45,9 +52,31 @@
#define CI_UPDATE_NO_PENDING 0
#define CI_UPDATE_NO_COALESC 0
-#define HW_CONS_IDX(sq) be16_to_cpu(*(u16 *)((sq)->hw_ci_addr))
+#define HW_CONS_IDX(sq) be16_to_cpu(*(u16 *)((sq)->hw_ci_addr))
-#define MIN_SKB_LEN 64
+#define MIN_SKB_LEN 17
+
+#define MAX_PAYLOAD_OFFSET 221
+#define TRANSPORT_OFFSET(l4_hdr, skb) ((u32)((l4_hdr) - (skb)->data))
+
+union hinic_l3 {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+};
+
+union hinic_l4 {
+ struct tcphdr *tcp;
+ struct udphdr *udp;
+ unsigned char *hdr;
+};
+
+enum hinic_offload_type {
+ TX_OFFLOAD_TSO = BIT(0),
+ TX_OFFLOAD_CSUM = BIT(1),
+ TX_OFFLOAD_VLAN = BIT(2),
+ TX_OFFLOAD_INVALID = BIT(3),
+};
/**
* hinic_txq_clean_stats - Clean the statistics of specific queue
@@ -175,18 +204,263 @@ static void tx_unmap_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
DMA_TO_DEVICE);
}
+static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic_l3 *ip,
+ union hinic_l4 *l4,
+ enum hinic_offload_type offload_type,
+ enum hinic_l3_offload_type *l3_type,
+ u8 *l4_proto)
+{
+ u8 *exthdr;
+
+ if (ip->v4->version == 4) {
+ *l3_type = (offload_type == TX_OFFLOAD_CSUM) ?
+ IPV4_PKT_NO_CHKSUM_OFFLOAD :
+ IPV4_PKT_WITH_CHKSUM_OFFLOAD;
+ *l4_proto = ip->v4->protocol;
+ } else if (ip->v4->version == 6) {
+ *l3_type = IPV6_PKT;
+ exthdr = ip->hdr + sizeof(*ip->v6);
+ *l4_proto = ip->v6->nexthdr;
+ if (exthdr != l4->hdr) {
+ int start = exthdr - skb->data;
+ __be16 frag_off;
+
+ ipv6_skip_exthdr(skb, start, l4_proto, &frag_off);
+ }
+ } else {
+ *l3_type = L3TYPE_UNKNOWN;
+ *l4_proto = 0;
+ }
+}
+
+static void get_inner_l4_info(struct sk_buff *skb, union hinic_l4 *l4,
+ enum hinic_offload_type offload_type, u8 l4_proto,
+ enum hinic_l4_offload_type *l4_offload,
+ u32 *l4_len, u32 *offset)
+{
+ *l4_offload = OFFLOAD_DISABLE;
+ *offset = 0;
+ *l4_len = 0;
+
+ switch (l4_proto) {
+ case IPPROTO_TCP:
+ *l4_offload = TCP_OFFLOAD_ENABLE;
+ /* doff is in units of 4B */
+ *l4_len = l4->tcp->doff * 4;
+ *offset = *l4_len + TRANSPORT_OFFSET(l4->hdr, skb);
+ break;
+
+ case IPPROTO_UDP:
+ *l4_offload = UDP_OFFLOAD_ENABLE;
+ *l4_len = sizeof(struct udphdr);
+ *offset = TRANSPORT_OFFSET(l4->hdr, skb);
+ break;
+
+ case IPPROTO_SCTP:
+ /* only csum offload supports sctp */
+ if (offload_type != TX_OFFLOAD_CSUM)
+ break;
+
+ *l4_offload = SCTP_OFFLOAD_ENABLE;
+ *l4_len = sizeof(struct sctphdr);
+ *offset = TRANSPORT_OFFSET(l4->hdr, skb);
+ break;
+
+ default:
+ break;
+ }
+}
+
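For the common TCP case, the payload offset later written into QUEUE_INFO_PLDOFF is the transport-header offset plus the TCP header length. A tiny worked example, assuming an untagged Ethernet frame, an option-less IPv4 header, and skb->data pointing at the MAC header (as it does on the transmit path):

#include <stdio.h>

int main(void)
{
	int mac_hdr  = 14;   /* Ethernet header, no VLAN tag      */
	int ip_hdr   = 20;   /* IPv4 header without options       */
	int tcp_doff = 5;    /* TCP data offset, in 4-byte words  */

	int transport_offset = mac_hdr + ip_hdr;   /* l4.hdr - skb->data      */
	int l4_len = tcp_doff * 4;                 /* 20-byte TCP header      */
	int offset = l4_len + transport_offset;    /* payload offset = 54     */

	printf("l4_len=%d payload_offset=%d (limit 221)\n", l4_len, offset);
	return 0;
}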
+static __sum16 csum_magic(union hinic_l3 *ip, unsigned short proto)
+{
+ return (ip->v4->version == 4) ?
+ csum_tcpudp_magic(ip->v4->saddr, ip->v4->daddr, 0, proto, 0) :
+ csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0);
+}
+
+static int offload_tso(struct hinic_sq_task *task, u32 *queue_info,
+ struct sk_buff *skb)
+{
+ u32 offset, l4_len, ip_identify, network_hdr_len;
+ enum hinic_l3_offload_type l3_offload;
+ enum hinic_l4_offload_type l4_offload;
+ union hinic_l3 ip;
+ union hinic_l4 l4;
+ u8 l4_proto;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ if (skb_cow_head(skb, 0) < 0)
+ return -EPROTONOSUPPORT;
+
+ if (skb->encapsulation) {
+ u32 gso_type = skb_shinfo(skb)->gso_type;
+ u32 tunnel_type = 0;
+ u32 l4_tunnel_len;
+
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+ network_hdr_len = skb_inner_network_header_len(skb);
+
+ if (ip.v4->version == 4) {
+ ip.v4->tot_len = 0;
+ l3_offload = IPV4_PKT_WITH_CHKSUM_OFFLOAD;
+ } else if (ip.v4->version == 6) {
+ l3_offload = IPV6_PKT;
+ } else {
+ l3_offload = 0;
+ }
+
+ hinic_task_set_outter_l3(task, l3_offload,
+ skb_network_header_len(skb));
+
+ if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
+ l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP);
+ tunnel_type = TUNNEL_UDP_CSUM;
+ } else if (gso_type & SKB_GSO_UDP_TUNNEL) {
+ tunnel_type = TUNNEL_UDP_NO_CSUM;
+ }
+
+ l4_tunnel_len = skb_inner_network_offset(skb) -
+ skb_transport_offset(skb);
+ hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len);
+
+ ip.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_inner_transport_header(skb);
+ } else {
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+ network_hdr_len = skb_network_header_len(skb);
+ }
+
+ /* initialize inner IP header fields */
+ if (ip.v4->version == 4)
+ ip.v4->tot_len = 0;
+ else
+ ip.v6->payload_len = 0;
+
+ get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_TSO, &l3_offload,
+ &l4_proto);
+
+ hinic_task_set_inner_l3(task, l3_offload, network_hdr_len);
+
+ ip_identify = 0;
+ if (l4_proto == IPPROTO_TCP)
+ l4.tcp->check = ~csum_magic(&ip, IPPROTO_TCP);
+
+ get_inner_l4_info(skb, &l4, TX_OFFLOAD_TSO, l4_proto, &l4_offload,
+ &l4_len, &offset);
+
+ hinic_set_tso_inner_l4(task, queue_info, l4_offload, l4_len, offset,
+ ip_identify, skb_shinfo(skb)->gso_size);
+
+ return 1;
+}
+
+static int offload_csum(struct hinic_sq_task *task, u32 *queue_info,
+ struct sk_buff *skb)
+{
+ enum hinic_l4_offload_type l4_offload;
+ u32 offset, l4_len, network_hdr_len;
+ enum hinic_l3_offload_type l3_type;
+ union hinic_l3 ip;
+ union hinic_l4 l4;
+ u8 l4_proto;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (skb->encapsulation) {
+ u32 l4_tunnel_len;
+
+ ip.hdr = skb_network_header(skb);
+
+ if (ip.v4->version == 4)
+ l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD;
+ else if (ip.v4->version == 6)
+ l3_type = IPV6_PKT;
+ else
+ l3_type = L3TYPE_UNKNOWN;
+
+ hinic_task_set_outter_l3(task, l3_type,
+ skb_network_header_len(skb));
+
+ l4_tunnel_len = skb_inner_network_offset(skb) -
+ skb_transport_offset(skb);
+
+ hinic_task_set_tunnel_l4(task, TUNNEL_UDP_NO_CSUM,
+ l4_tunnel_len);
+
+ ip.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_inner_transport_header(skb);
+ network_hdr_len = skb_inner_network_header_len(skb);
+ } else {
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+ network_hdr_len = skb_network_header_len(skb);
+ }
+
+ get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_CSUM, &l3_type,
+ &l4_proto);
+
+ hinic_task_set_inner_l3(task, l3_type, network_hdr_len);
+
+ get_inner_l4_info(skb, &l4, TX_OFFLOAD_CSUM, l4_proto, &l4_offload,
+ &l4_len, &offset);
+
+ hinic_set_cs_inner_l4(task, queue_info, l4_offload, l4_len, offset);
+
+ return 1;
+}
+
+static int hinic_tx_offload(struct sk_buff *skb, struct hinic_sq_task *task,
+ u32 *queue_info)
+{
+ enum hinic_offload_type offload = 0;
+ int enabled;
+
+ enabled = offload_tso(task, queue_info, skb);
+ if (enabled > 0) {
+ offload |= TX_OFFLOAD_TSO;
+ } else if (enabled == 0) {
+ enabled = offload_csum(task, queue_info, skb);
+ if (enabled)
+ offload |= TX_OFFLOAD_CSUM;
+ } else {
+ return -EPROTONOSUPPORT;
+ }
+
+ if (offload)
+ hinic_task_set_l2hdr(task, skb_network_offset(skb));
+
+ /* payload offset should not be more than 221 */
+ if (HINIC_SQ_CTRL_GET(*queue_info, QUEUE_INFO_PLDOFF) >
+ MAX_PAYLOAD_OFFSET) {
+ return -EPROTONOSUPPORT;
+ }
+
+ /* mss should not be less than 80 */
+ if (HINIC_SQ_CTRL_GET(*queue_info, QUEUE_INFO_MSS) < HINIC_MSS_MIN) {
+ *queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS);
+ *queue_info |= HINIC_SQ_CTRL_SET(HINIC_MSS_MIN, QUEUE_INFO_MSS);
+ }
+
+ return 0;
+}
+
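HINIC_MSS_MIN is 0x50 (80 bytes), so a pathologically small gso_size is raised to the hardware minimum before the WQE is written. A trivial illustration of the clamp:

#include <stdio.h>

#define HINIC_MSS_MIN 0x50   /* 80 bytes, from hinic_hw_wqe.h above */

int main(void)
{
	unsigned int mss = 48;   /* unusually small gso_size from the stack */

	if (mss < HINIC_MSS_MIN)
		mss = HINIC_MSS_MIN;

	printf("mss programmed into the WQE: %u\n", mss);   /* 80 */
	return 0;
}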
netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
+ u16 prod_idx, q_id = skb->queue_mapping;
struct netdev_queue *netdev_txq;
int nr_sges, err = NETDEV_TX_OK;
struct hinic_sq_wqe *sq_wqe;
unsigned int wqe_size;
struct hinic_txq *txq;
struct hinic_qp *qp;
- u16 prod_idx;
- txq = &nic_dev->txqs[skb->queue_mapping];
+ txq = &nic_dev->txqs[q_id];
qp = container_of(txq->sq, struct hinic_qp, sq);
if (skb->len < MIN_SKB_LEN) {
@@ -236,15 +510,23 @@ netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
process_sq_wqe:
hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);
+ err = hinic_tx_offload(skb, &sq_wqe->task, &sq_wqe->ctrl.queue_info);
+ if (err)
+ goto offload_error;
+
hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);
flush_skbs:
- netdev_txq = netdev_get_tx_queue(netdev, skb->queue_mapping);
+ netdev_txq = netdev_get_tx_queue(netdev, q_id);
if ((!skb->xmit_more) || (netif_xmit_stopped(netdev_txq)))
hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);
return err;
+offload_error:
+ hinic_sq_return_wqe(txq->sq, wqe_size);
+ tx_unmap_skb(nic_dev, skb, txq->sges);
+
skb_error:
dev_kfree_skb_any(skb);
@@ -252,7 +534,8 @@ update_error_stats:
u64_stats_update_begin(&txq->txq_stats.syncp);
txq->txq_stats.tx_dropped++;
u64_stats_update_end(&txq->txq_stats.syncp);
- return err;
+
+ return NETDEV_TX_OK;
}
/**
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index aa0b89777e74..3baabdc89726 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -920,17 +920,6 @@ static int ehea_poll(struct napi_struct *napi, int budget)
return rx;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ehea_netpoll(struct net_device *dev)
-{
- struct ehea_port *port = netdev_priv(dev);
- int i;
-
- for (i = 0; i < port->num_def_qps; i++)
- napi_schedule(&port->port_res[i].napi);
-}
-#endif
-
static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
struct ehea_port_res *pr = param;
@@ -2952,9 +2941,6 @@ static const struct net_device_ops ehea_netdev_ops = {
.ndo_open = ehea_open,
.ndo_stop = ehea_stop,
.ndo_start_xmit = ehea_start_xmit,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ehea_netpoll,
-#endif
.ndo_get_stats64 = ehea_get_stats64,
.ndo_set_mac_address = ehea_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index ad898e8eaca1..7893beffcc71 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2207,19 +2207,6 @@ restart_poll:
return frames_processed;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ibmvnic_netpoll_controller(struct net_device *dev)
-{
- struct ibmvnic_adapter *adapter = netdev_priv(dev);
- int i;
-
- replenish_pools(netdev_priv(dev));
- for (i = 0; i < adapter->req_rx_queues; i++)
- ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
- adapter->rx_scrq[i]);
-}
-#endif
-
static int wait_for_reset(struct ibmvnic_adapter *adapter)
{
int rc, ret;
@@ -2292,9 +2279,6 @@ static const struct net_device_ops ibmvnic_netdev_ops = {
.ndo_set_mac_address = ibmvnic_set_mac,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = ibmvnic_tx_timeout,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ibmvnic_netpoll_controller,
-#endif
.ndo_change_mtu = ibmvnic_change_mtu,
.ndo_features_check = ibmvnic_features_check,
};
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index b542aba6f0e8..59e1bc0f609e 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -68,6 +68,9 @@ config E1000E
<http://support.intel.com>
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/e1000e.rst>.
+
To compile this driver as a module, choose M here. The module
will be called e1000e.
@@ -94,7 +97,7 @@ config IGB
<http://support.intel.com>
More specific information on configuring the driver is in
- <file:Documentation/networking/e1000.rst>.
+ <file:Documentation/networking/igb.rst>.
To compile this driver as a module, choose M here. The module
will be called igb.
@@ -130,7 +133,7 @@ config IGBVF
<http://support.intel.com>
More specific information on configuring the driver is in
- <file:Documentation/networking/e1000.rst>.
+ <file:Documentation/networking/igbvf.rst>.
To compile this driver as a module, choose M here. The module
will be called igbvf.
@@ -147,7 +150,7 @@ config IXGB
<http://support.intel.com>
More specific information on configuring the driver is in
- <file:Documentation/networking/ixgb.txt>.
+ <file:Documentation/networking/ixgb.rst>.
To compile this driver as a module, choose M here. The module
will be called ixgb.
@@ -164,6 +167,9 @@ config IXGBE
<http://support.intel.com>
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/ixgbe.rst>.
+
To compile this driver as a module, choose M here. The module
will be called ixgbe.
@@ -194,6 +200,15 @@ config IXGBE_DCB
If unsure, say N.
+config IXGBE_IPSEC
+ bool "IPSec XFRM cryptography-offload acceleration"
+ depends on IXGBE
+ depends on XFRM_OFFLOAD
+ default y
+ select XFRM_ALGO
+ ---help---
+ Enable support for IPSec offload in ixgbe.ko
+
config IXGBEVF
tristate "Intel(R) 10GbE PCI Express Virtual Function Ethernet support"
depends on PCI_MSI
@@ -205,12 +220,21 @@ config IXGBEVF
<http://support.intel.com>
More specific information on configuring the driver is in
- <file:Documentation/networking/ixgbevf.txt>.
+ <file:Documentation/networking/ixgbevf.rst>.
To compile this driver as a module, choose M here. The module
will be called ixgbevf. MSI-X interrupt support is required
for this driver to work correctly.
+config IXGBEVF_IPSEC
+ bool "IPSec XFRM cryptography-offload acceleration"
+ depends on IXGBEVF
+ depends on XFRM_OFFLOAD
+ default y
+ select XFRM_ALGO
+ ---help---
+ Enable support for IPSec offload in ixgbevf.ko
+
config I40E
tristate "Intel(R) Ethernet Controller XL710 Family support"
imply PTP_1588_CLOCK
@@ -222,6 +246,9 @@ config I40E
<http://support.intel.com>
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/i40e.rst>.
+
To compile this driver as a module, choose M here. The module
will be called i40e.
@@ -254,6 +281,9 @@ config I40EVF
This driver was formerly named i40evf.
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/iavf.rst>.
+
To compile this driver as a module, choose M here. The module
will be called iavf. MSI-X interrupt support is required
for this driver to work correctly.
@@ -269,6 +299,9 @@ config ICE
<http://support.intel.com>
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/ice.rst>.
+
To compile this driver as a module, choose M here. The module
will be called ice.
@@ -284,7 +317,26 @@ config FM10K
<http://support.intel.com>
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/fm10k.rst>.
+
To compile this driver as a module, choose M here. The module
will be called fm10k. MSI-X interrupt support is required
+config IGC
+ tristate "Intel(R) Ethernet Controller I225-LM/I225-V support"
+ default n
+ depends on PCI
+ ---help---
+ This driver supports Intel(R) Ethernet Controller I225-LM/I225-V
+ family of adapters.
+
+ For more information on how to identify your adapter, go
+ to the Adapter & Driver ID Guide that can be located at:
+
+ <http://support.intel.com>
+
+ To compile this driver as a module, choose M here. The module
+ will be called igc.
+
endif # NET_VENDOR_INTEL
diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile
index b91153df6ee8..3075290063f6 100644
--- a/drivers/net/ethernet/intel/Makefile
+++ b/drivers/net/ethernet/intel/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_E100) += e100.o
obj-$(CONFIG_E1000) += e1000/
obj-$(CONFIG_E1000E) += e1000e/
obj-$(CONFIG_IGB) += igb/
+obj-$(CONFIG_IGC) += igc/
obj-$(CONFIG_IGBVF) += igbvf/
obj-$(CONFIG_IXGBE) += ixgbe/
obj-$(CONFIG_IXGBEVF) += ixgbevf/
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index c0f9faca70c4..16a73bd9f4cb 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6854,8 +6854,6 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
result = PCI_ERS_RESULT_RECOVERED;
}
- pci_cleanup_aer_uncorrect_error_status(pdev);
-
return result;
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index e707d717012f..5d4f1761dc0c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -244,7 +244,8 @@ process_mbx:
}
/* guarantee we have free space in the SM mailbox */
- if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU)) {
+ if (hw->mbx.state == FM10K_STATE_OPEN &&
+ !hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU)) {
/* keep track of how many times this occurs */
interface->hw_sm_mbx_full++;
@@ -302,6 +303,28 @@ void fm10k_iov_suspend(struct pci_dev *pdev)
}
}
+static void fm10k_mask_aer_comp_abort(struct pci_dev *pdev)
+{
+ u32 err_mask;
+ int pos;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
+ if (!pos)
+ return;
+
+ /* Mask the completion abort bit in the ERR_UNCOR_MASK register,
+ * preventing the device from reporting these errors to the upstream
+ * PCIe root device. This avoids bringing down platforms which upgrade
+ * non-fatal completer aborts into machine check exceptions. Completer
+ * aborts can occur whenever a VF reads a queue it doesn't own.
+ */
+ pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, &err_mask);
+ err_mask |= PCI_ERR_UNC_COMP_ABORT;
+ pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, err_mask);
+
+ mmiowb();
+}
+
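The key difference from the removed fm10k_disable_aer_comp_abort() is which AER register is touched: clearing the bit in ERR_UNCOR_SEVER only downgrades completer aborts to non-fatal, while setting it in ERR_UNCOR_MASK (as above) keeps them from being reported upstream at all. A register-level model of the two operations, with plain integers and illustrative starting values rather than real config-space accesses:

#include <stdint.h>
#include <stdio.h>

/* Bit 15 (Completer Abort) of the AER uncorrectable-error registers. */
#define PCI_ERR_UNC_COMP_ABORT  0x00008000

int main(void)
{
	uint32_t err_sever = PCI_ERR_UNC_COMP_ABORT;  /* illustrative start */
	uint32_t err_mask  = 0;

	/* Old approach: still report the error, but as non-fatal. */
	err_sever &= ~PCI_ERR_UNC_COMP_ABORT;

	/* New approach: do not report completer aborts upstream at all. */
	err_mask |= PCI_ERR_UNC_COMP_ABORT;

	printf("SEVER=0x%08x MASK=0x%08x\n", err_sever, err_mask);
	return 0;
}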
int fm10k_iov_resume(struct pci_dev *pdev)
{
struct fm10k_intfc *interface = pci_get_drvdata(pdev);
@@ -317,6 +340,12 @@ int fm10k_iov_resume(struct pci_dev *pdev)
if (!iov_data)
return -ENOMEM;
+ /* Lower severity of completer abort error reporting as
+ * the VFs can trigger this any time they read a queue
+ * that they don't own.
+ */
+ fm10k_mask_aer_comp_abort(pdev);
+
/* allocate hardware resources for the VFs */
hw->iov.ops.assign_resources(hw, num_vfs, num_vfs);
@@ -460,20 +489,6 @@ void fm10k_iov_disable(struct pci_dev *pdev)
fm10k_iov_free_data(pdev);
}
-static void fm10k_disable_aer_comp_abort(struct pci_dev *pdev)
-{
- u32 err_sev;
- int pos;
-
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
- if (!pos)
- return;
-
- pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &err_sev);
- err_sev &= ~PCI_ERR_UNC_COMP_ABORT;
- pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, err_sev);
-}
-
int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs)
{
int current_vfs = pci_num_vf(pdev);
@@ -495,12 +510,6 @@ int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs)
/* allocate VFs if not already allocated */
if (num_vfs && num_vfs != current_vfs) {
- /* Disable completer abort error reporting as
- * the VFs can trigger this any time they read a queue
- * that they don't own.
- */
- fm10k_disable_aer_comp_abort(pdev);
-
err = pci_enable_sriov(pdev, num_vfs);
if (err) {
dev_err(&pdev->dev,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 503bbc017792..5b2a50e5798f 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -11,7 +11,7 @@
#include "fm10k.h"
-#define DRV_VERSION "0.23.4-k"
+#define DRV_VERSION "0.26.1-k"
#define DRV_SUMMARY "Intel(R) Ethernet Switch Host Interface Driver"
const char fm10k_driver_version[] = DRV_VERSION;
char fm10k_driver_name[] = "fm10k";
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index c859ababeed5..e49fb51d3613 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -23,6 +23,8 @@ static const struct fm10k_info *fm10k_info_tbl[] = {
*/
static const struct pci_device_id fm10k_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, FM10K_DEV_ID_PF), fm10k_device_pf },
+ { PCI_VDEVICE(INTEL, FM10K_DEV_ID_SDI_FM10420_QDA2), fm10k_device_pf },
+ { PCI_VDEVICE(INTEL, FM10K_DEV_ID_SDI_FM10420_DA2), fm10k_device_pf },
{ PCI_VDEVICE(INTEL, FM10K_DEV_ID_VF), fm10k_device_vf },
/* required last entry */
{ 0, }
@@ -2440,8 +2442,6 @@ static pci_ers_result_t fm10k_io_slot_reset(struct pci_dev *pdev)
result = PCI_ERS_RESULT_RECOVERED;
}
- pci_cleanup_aer_uncorrect_error_status(pdev);
-
return result;
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
index 3e608e493f9d..9fb9fca375e3 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -15,6 +15,8 @@ struct fm10k_hw;
#define FM10K_DEV_ID_PF 0x15A4
#define FM10K_DEV_ID_VF 0x15A5
+#define FM10K_DEV_ID_SDI_FM10420_QDA2 0x15D0
+#define FM10K_DEV_ID_SDI_FM10420_DA2 0x15D5
#define FM10K_MAX_QUEUES 256
#define FM10K_MAX_QUEUES_PF 128
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 56b911a5dd8b..a20d1cf058ad 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -132,8 +132,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
dev_info(&pf->pdev->dev, " vlan_features = 0x%08lx\n",
(unsigned long int)nd->vlan_features);
}
- dev_info(&pf->pdev->dev, " active_vlans is %s\n",
- vsi->active_vlans ? "<valid>" : "<null>");
dev_info(&pf->pdev->dev,
" flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
vsi->flags, vsi->netdev_registered, vsi->current_netdev_flags);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 330bafe6a689..bc71a21c1dc2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -14552,7 +14552,6 @@ static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);
pci_ers_result_t result;
- int err;
u32 reg;
dev_dbg(&pdev->dev, "%s\n", __func__);
@@ -14573,14 +14572,6 @@ static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
result = PCI_ERS_RESULT_DISCONNECT;
}
- err = pci_cleanup_aer_uncorrect_error_status(pdev);
- if (err) {
- dev_info(&pdev->dev,
- "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
- err);
- /* non-fatal, continue */
- }
-
return result;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 740ea58ba938..aef3c89ee79c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2,7 +2,6 @@
/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include <linux/prefetch.h>
-#include <net/busy_poll.h>
#include <linux/bpf_trace.h>
#include <net/xdp.h>
#include "i40e.h"
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index f4bb2779f03a..ac5698ed0b11 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -3674,7 +3674,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
local_vf_id, v_opcode, msglen);
switch (ret) {
- case VIRTCHNL_ERR_PARAM:
+ case VIRTCHNL_STATUS_ERR_PARAM:
return -EPERM;
default:
return -EINVAL;
@@ -4256,7 +4256,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
vf->link_forced = true;
vf->link_up = true;
pfe.event_data.link_event.link_status = true;
- pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
+ pfe.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
break;
case IFLA_VF_LINK_STATE_DISABLE:
vf->link_forced = true;
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index a512f7521841..272d76b733aa 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -342,7 +342,7 @@ struct iavf_adapter {
struct iavf_channel_config ch_config;
u8 num_tc;
struct list_head cloud_filter_list;
- /* lock to protest access to the cloud filter list */
+ /* lock to protect access to the cloud filter list */
spinlock_t cloud_filter_list_lock;
u16 num_cloud_filters;
};
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index edc349f49748..fb9bfad96daf 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -2,7 +2,6 @@
/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include <linux/prefetch.h>
-#include <net/busy_poll.h>
#include "iavf.h"
#include "iavf_trace.h"
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 45125bd074d9..e5d6f684437e 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -16,3 +16,4 @@ ice-y := ice_main.o \
ice_lib.o \
ice_txrx.o \
ice_ethtool.o
+ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 0b269c470343..4c4b5717a627 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -28,6 +28,7 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_bridge.h>
+#include <linux/avf/virtchnl.h>
#include <net/ipv6.h>
#include "ice_devids.h"
#include "ice_type.h"
@@ -35,6 +36,8 @@
#include "ice_switch.h"
#include "ice_common.h"
#include "ice_sched.h"
+#include "ice_virtchnl_pf.h"
+#include "ice_sriov.h"
extern const char ice_drv_ver[];
#define ICE_BAR0 0
@@ -46,6 +49,7 @@ extern const char ice_drv_ver[];
#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
#define ICE_ETHTOOL_FWVER_LEN 32
#define ICE_AQ_LEN 64
+#define ICE_MBXQ_LEN 64
#define ICE_MIN_MSIX 2
#define ICE_NO_VSI 0xffff
#define ICE_MAX_VSI_ALLOC 130
@@ -63,6 +67,14 @@ extern const char ice_drv_ver[];
#define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1)
#define ICE_INVAL_Q_INDEX 0xffff
#define ICE_INVAL_VFID 256
+#define ICE_MAX_VF_COUNT 256
+#define ICE_MAX_QS_PER_VF 256
+#define ICE_MIN_QS_PER_VF 1
+#define ICE_DFLT_QS_PER_VF 4
+#define ICE_MAX_BASE_QS_PER_VF 16
+#define ICE_MAX_INTR_PER_VF 65
+#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
+#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1)
#define ICE_VSIQF_HKEY_ARRAY_SIZE ((VSIQF_HKEY_MAX_INDEX + 1) * 4)
@@ -133,9 +145,21 @@ enum ice_state {
__ICE_EMPR_RECV, /* set by OICR handler */
__ICE_SUSPENDED, /* set on module remove path */
__ICE_RESET_FAILED, /* set by reset/rebuild */
+ /* When checking for the PF to be in a nominal operating state, the
+ * bits that are grouped at the beginning of the list need to be
+ * checked. Bits occurring before __ICE_STATE_NOMINAL_CHECK_BITS will
+ * be checked. If you need to add a bit into consideration for nominal
+ * operating state, it must be added before
+ * __ICE_STATE_NOMINAL_CHECK_BITS. Do not move this entry's position
+ * without appropriate consideration.
+ */
+ __ICE_STATE_NOMINAL_CHECK_BITS,
__ICE_ADMINQ_EVENT_PENDING,
+ __ICE_MAILBOXQ_EVENT_PENDING,
__ICE_MDD_EVENT_PENDING,
+ __ICE_VFLR_EVENT_PENDING,
__ICE_FLTR_OVERFLOW_PROMISC,
+ __ICE_VF_DIS,
__ICE_CFG_BUSY,
__ICE_SERVICE_SCHED,
__ICE_SERVICE_DIS,
@@ -181,6 +205,8 @@ struct ice_vsi {
/* Interrupt thresholds */
u16 work_lmt;
+ s16 vf_id; /* VF ID for SR-IOV VSIs */
+
/* RSS config */
u16 rss_table_size; /* HW RSS table size */
u16 rss_size; /* Allocated RSS queues */
@@ -240,6 +266,8 @@ enum ice_pf_flags {
ICE_FLAG_MSIX_ENA,
ICE_FLAG_FLTR_SYNC,
ICE_FLAG_RSS_ENA,
+ ICE_FLAG_SRIOV_ENA,
+ ICE_FLAG_SRIOV_CAPABLE,
ICE_PF_FLAGS_NBITS /* must be last */
};
@@ -255,6 +283,12 @@ struct ice_pf {
struct ice_vsi **vsi; /* VSIs created by the driver */
struct ice_sw *first_sw; /* first switch created by firmware */
+ /* Virtchnl/SR-IOV config info */
+ struct ice_vf *vf;
+ int num_alloc_vfs; /* actual number of VFs allocated */
+ u16 num_vfs_supported; /* num VFs supported for this PF */
+ u16 num_vf_qps; /* num queue pairs per VF */
+ u16 num_vf_msix; /* num vectors per VF */
DECLARE_BITMAP(state, __ICE_STATE_NBITS);
DECLARE_BITMAP(avail_txqs, ICE_MAX_TXQS);
DECLARE_BITMAP(avail_rxqs, ICE_MAX_RXQS);
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index c100b4bda195..6653555f55dd 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -87,6 +87,8 @@ struct ice_aqc_list_caps {
/* Device/Function buffer entry, repeated per reported capability */
struct ice_aqc_list_caps_elem {
__le16 cap;
+#define ICE_AQC_CAPS_SRIOV 0x0012
+#define ICE_AQC_CAPS_VF 0x0013
#define ICE_AQC_CAPS_VSI 0x0017
#define ICE_AQC_CAPS_RSS 0x0040
#define ICE_AQC_CAPS_RXQS 0x0041
@@ -1075,6 +1077,19 @@ struct ice_aqc_nvm {
__le32 addr_low;
};
+/**
+ * Send to PF command (indirect 0x0801) id is only used by PF
+ *
+ * Send to VF command (indirect 0x0802) id is only used by PF
+ *
+ */
+struct ice_aqc_pf_vf_msg {
+ __le32 id;
+ u32 reserved;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
/* Get/Set RSS key (indirect 0x0B04/0x0B02) */
struct ice_aqc_get_set_rss_key {
#define ICE_AQC_GSET_RSS_KEY_VSI_VALID BIT(15)
@@ -1332,6 +1347,7 @@ struct ice_aq_desc {
struct ice_aqc_query_txsched_res query_sched_res;
struct ice_aqc_add_move_delete_elem add_move_delete_elem;
struct ice_aqc_nvm nvm;
+ struct ice_aqc_pf_vf_msg virt;
struct ice_aqc_get_set_rss_lut get_set_rss_lut;
struct ice_aqc_get_set_rss_key get_set_rss_key;
struct ice_aqc_add_txqs add_txqs;
@@ -1429,6 +1445,10 @@ enum ice_adminq_opc {
/* NVM commands */
ice_aqc_opc_nvm_read = 0x0701,
+ /* PF/VF mailbox commands */
+ ice_mbx_opc_send_msg_to_pf = 0x0801,
+ ice_mbx_opc_send_msg_to_vf = 0x0802,
+
/* RSS commands */
ice_aqc_opc_set_rss_key = 0x0B02,
ice_aqc_opc_set_rss_lut = 0x0B03,
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 68fbbb92d504..8cd6a2401fd9 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -43,6 +43,23 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
}
/**
+ * ice_dev_onetime_setup - Temporary HW/FW workarounds
+ * @hw: pointer to the HW structure
+ *
+ * This function provides temporary workarounds for certain issues
+ * that are expected to be fixed in the HW/FW.
+ */
+void ice_dev_onetime_setup(struct ice_hw *hw)
+{
+ /* configure Rx - set non pxe mode */
+ wr32(hw, GLLAN_RCTL_0, 0x1);
+
+#define MBX_PF_VT_PFALLOC 0x00231E80
+ /* set VFs per PF */
+ wr32(hw, MBX_PF_VT_PFALLOC, rd32(hw, PF_VT_PFALLOC_HIF));
+}
+
+/**
* ice_clear_pf_cfg - Clear PF configuration
* @hw: pointer to the hardware structure
*
@@ -218,7 +235,7 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
*
* Get Link Status (0x607). Returns the link status of the adapter.
*/
-enum ice_status
+static enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_link_status *link, struct ice_sq_cd *cd)
{
@@ -740,6 +757,8 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_sched;
+ ice_dev_onetime_setup(hw);
+
/* Get MAC information */
/* A single port can report up to two (LAN and WoL) addresses */
mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
@@ -1406,6 +1425,28 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
u16 cap = le16_to_cpu(cap_resp->cap);
switch (cap) {
+ case ICE_AQC_CAPS_SRIOV:
+ caps->sr_iov_1_1 = (number == 1);
+ ice_debug(hw, ICE_DBG_INIT,
+ "HW caps: SR-IOV = %d\n", caps->sr_iov_1_1);
+ break;
+ case ICE_AQC_CAPS_VF:
+ if (dev_p) {
+ dev_p->num_vfs_exposed = number;
+ ice_debug(hw, ICE_DBG_INIT,
+ "HW caps: VFs exposed = %d\n",
+ dev_p->num_vfs_exposed);
+ } else if (func_p) {
+ func_p->num_allocd_vfs = number;
+ func_p->vf_base_id = logical_id;
+ ice_debug(hw, ICE_DBG_INIT,
+ "HW caps: VFs allocated = %d\n",
+ func_p->num_allocd_vfs);
+ ice_debug(hw, ICE_DBG_INIT,
+ "HW caps: VF base_id = %d\n",
+ func_p->vf_base_id);
+ }
+ break;
case ICE_AQC_CAPS_VSI:
if (dev_p) {
dev_p->num_vsi_allocd_to_host = number;
@@ -1509,9 +1550,7 @@ ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
if (!status)
ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM)
- *cap_count =
- DIV_ROUND_UP(le16_to_cpu(desc.datalen),
- sizeof(struct ice_aqc_list_caps_elem));
+ *cap_count = le32_to_cpu(cmd->count);
return status;
}
@@ -1966,33 +2005,6 @@ ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
}
/**
- * ice_aq_set_event_mask
- * @hw: pointer to the hw struct
- * @port_num: port number of the physical function
- * @mask: event mask to be set
- * @cd: pointer to command details structure or NULL
- *
- * Set event mask (0x0613)
- */
-enum ice_status
-ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
- struct ice_sq_cd *cd)
-{
- struct ice_aqc_set_event_mask *cmd;
- struct ice_aq_desc desc;
-
- cmd = &desc.params.set_event_mask;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
-
- cmd->lport_num = port_num;
-
- cmd->event_mask = cpu_to_le16(mask);
-
- return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
-}
-
-/**
* __ice_aq_get_set_rss_lut
* @hw: pointer to the hardware structure
* @vsi_id: VSI FW index
@@ -2265,6 +2277,8 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
* @num_qgrps: number of groups in the list
* @qg_list: the list of groups to disable
* @buf_size: the total size of the qg_list buffer in bytes
+ * @rst_src: if called due to reset, specifies the RST source
+ * @vmvf_num: the relative VM or VF number that is undergoing the reset
* @cd: pointer to command details structure or NULL
*
* Disable LAN Tx queue (0x0C31)
@@ -2272,6 +2286,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
static enum ice_status
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
+ enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cd)
{
struct ice_aqc_dis_txqs *cmd;
@@ -2281,14 +2296,45 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
cmd = &desc.params.dis_txqs;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
- if (!qg_list)
+ /* qg_list can be NULL only in VM/VF reset flow */
+ if (!qg_list && !rst_src)
return ICE_ERR_PARAM;
if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
return ICE_ERR_PARAM;
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
cmd->num_entries = num_qgrps;
+ cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
+ ICE_AQC_Q_DIS_TIMEOUT_M);
+
+ switch (rst_src) {
+ case ICE_VM_RESET:
+ cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
+ cmd->vmvf_and_timeout |=
+ cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
+ break;
+ case ICE_VF_RESET:
+ cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
+ /* In this case, FW expects vmvf_num to be absolute VF id */
+ cmd->vmvf_and_timeout |=
+ cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
+ ICE_AQC_Q_DIS_VMVF_NUM_M);
+ break;
+ case ICE_NO_RESET:
+ default:
+ break;
+ }
+
+ /* If no queue group info, we are in a reset flow. Issue the AQ */
+ if (!qg_list)
+ goto do_aq;
+
+ /* set RD bit to indicate that command buffer is provided by the driver
+ * and it needs to be read by the firmware
+ */
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
for (i = 0; i < num_qgrps; ++i) {
/* Calculate the size taken up by the queue IDs in this group */
sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);
@@ -2304,6 +2350,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
if (buf_size != sz)
return ICE_ERR_PARAM;
+do_aq:
return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
}
@@ -2610,13 +2657,16 @@ ena_txq_exit:
* @num_queues: number of queues
* @q_ids: pointer to the q_id array
* @q_teids: pointer to queue node teids
+ * @rst_src: if called due to reset, specifies the RST source
+ * @vmvf_num: the relative VM or VF number that is undergoing the reset
* @cd: pointer to command details structure or NULL
*
* This function removes queues and their corresponding nodes in SW DB
*/
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
- u32 *q_teids, struct ice_sq_cd *cd)
+ u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num,
+ struct ice_sq_cd *cd)
{
enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
struct ice_aqc_dis_txq_item qg_list;
@@ -2625,6 +2675,15 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
return ICE_ERR_CFG;
+ /* if the queues are already disabled but the disable queue command still
+ * has to be sent to complete the VF reset, call ice_aq_dis_lan_txq
+ * without any queue information
+ */
+
+ if (!num_queues && rst_src)
+ return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src, vmvf_num,
+ NULL);
+
mutex_lock(&pi->sched_lock);
for (i = 0; i < num_queues; i++) {
@@ -2637,7 +2696,8 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
qg_list.num_qs = 1;
qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
- sizeof(qg_list), cd);
+ sizeof(qg_list), rst_src, vmvf_num,
+ cd);
if (status)
break;
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 7b2a5bb2e550..cf760c24a6aa 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -7,6 +7,7 @@
#include "ice.h"
#include "ice_type.h"
#include "ice_switch.h"
+#include <linux/avf/virtchnl.h>
void ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf,
u16 buf_len);
@@ -33,6 +34,9 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_sq_cd *cd);
void ice_clear_pxe_mode(struct ice_hw *hw);
enum ice_status ice_get_caps(struct ice_hw *hw);
+
+void ice_dev_onetime_setup(struct ice_hw *hw);
+
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index);
@@ -82,14 +86,9 @@ enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd);
enum ice_status
-ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
- struct ice_link_status *link, struct ice_sq_cd *cd);
-enum ice_status
-ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
- struct ice_sq_cd *cd);
-enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
- u32 *q_teids, struct ice_sq_cd *cmd_details);
+ u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num,
+ struct ice_sq_cd *cmd_details);
enum ice_status
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
u16 *max_lanqs);
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index b25ce4f587f5..84c967294eaf 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -33,6 +33,36 @@ static void ice_adminq_init_regs(struct ice_hw *hw)
}
/**
+ * ice_mailbox_init_regs - Initialize Mailbox registers
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the alloc_sq and alloc_rq functions have already been called
+ */
+static void ice_mailbox_init_regs(struct ice_hw *hw)
+{
+ struct ice_ctl_q_info *cq = &hw->mailboxq;
+
+ /* set head and tail registers in our local struct */
+ cq->sq.head = PF_MBX_ATQH;
+ cq->sq.tail = PF_MBX_ATQT;
+ cq->sq.len = PF_MBX_ATQLEN;
+ cq->sq.bah = PF_MBX_ATQBAH;
+ cq->sq.bal = PF_MBX_ATQBAL;
+ cq->sq.len_mask = PF_MBX_ATQLEN_ATQLEN_M;
+ cq->sq.len_ena_mask = PF_MBX_ATQLEN_ATQENABLE_M;
+ cq->sq.head_mask = PF_MBX_ATQH_ATQH_M;
+
+ cq->rq.head = PF_MBX_ARQH;
+ cq->rq.tail = PF_MBX_ARQT;
+ cq->rq.len = PF_MBX_ARQLEN;
+ cq->rq.bah = PF_MBX_ARQBAH;
+ cq->rq.bal = PF_MBX_ARQBAL;
+ cq->rq.len_mask = PF_MBX_ARQLEN_ARQLEN_M;
+ cq->rq.len_ena_mask = PF_MBX_ARQLEN_ARQENABLE_M;
+ cq->rq.head_mask = PF_MBX_ARQH_ARQH_M;
+}
+
+/**
* ice_check_sq_alive
* @hw: pointer to the hw struct
* @cq: pointer to the specific Control queue
@@ -639,6 +669,10 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
ice_adminq_init_regs(hw);
cq = &hw->adminq;
break;
+ case ICE_CTL_Q_MAILBOX:
+ ice_mailbox_init_regs(hw);
+ cq = &hw->mailboxq;
+ break;
default:
return ICE_ERR_PARAM;
}
@@ -696,7 +730,12 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
if (ret_code)
return ret_code;
- return ice_init_check_adminq(hw);
+ ret_code = ice_init_check_adminq(hw);
+ if (ret_code)
+ return ret_code;
+
+ /* Init Mailbox queue */
+ return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
/**
@@ -714,6 +753,9 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
if (ice_check_sq_alive(hw, cq))
ice_aq_q_shutdown(hw, true);
break;
+ case ICE_CTL_Q_MAILBOX:
+ cq = &hw->mailboxq;
+ break;
default:
return;
}
@@ -736,6 +778,8 @@ void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
/* Shutdown FW admin queue */
ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
+ /* Shutdown PF-VF Mailbox */
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index ea02b89243e2..0038a4109c99 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -8,6 +8,7 @@
/* Maximum buffer lengths for all control queue types */
#define ICE_AQ_MAX_BUF_LEN 4096
+#define ICE_MBXQ_MAX_BUF_LEN 4096
#define ICE_CTL_Q_DESC(R, i) \
(&(((struct ice_aq_desc *)((R).desc_buf.va))[i]))
@@ -18,16 +19,16 @@
/* Defines that help manage the driver vs FW API checks.
* Take a look at ice_aq_ver_check in ice_controlq.c for actual usage.
- *
*/
#define EXP_FW_API_VER_BRANCH 0x00
-#define EXP_FW_API_VER_MAJOR 0x00
-#define EXP_FW_API_VER_MINOR 0x01
+#define EXP_FW_API_VER_MAJOR 0x01
+#define EXP_FW_API_VER_MINOR 0x03
/* Different control queue types: These are mainly for SW consumption. */
enum ice_ctl_q {
ICE_CTL_Q_UNKNOWN = 0,
ICE_CTL_Q_ADMIN,
+ ICE_CTL_Q_MAILBOX,
};
/* Control Queue default settings */
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
index a6f0a5c0c305..f8d5c661d0ba 100644
--- a/drivers/net/ethernet/intel/ice/ice_devids.h
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -6,10 +6,10 @@
/* Device IDs */
/* Intel(R) Ethernet Controller E810-C for backplane */
-#define ICE_DEV_ID_C810_BACKPLANE 0x1591
+#define ICE_DEV_ID_E810C_BACKPLANE 0x1591
/* Intel(R) Ethernet Controller E810-C for QSFP */
-#define ICE_DEV_ID_C810_QSFP 0x1592
+#define ICE_DEV_ID_E810C_QSFP 0x1592
/* Intel(R) Ethernet Controller E810-C for SFP */
-#define ICE_DEV_ID_C810_SFP 0x1593
+#define ICE_DEV_ID_E810C_SFP 0x1593
#endif /* _ICE_DEVIDS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 9a78d83eaa3e..5fdea6ec7675 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -29,6 +29,22 @@
#define PF_FW_ATQLEN_ATQCRIT_M BIT(30)
#define PF_FW_ATQLEN_ATQENABLE_M BIT(31)
#define PF_FW_ATQT 0x00080400
+#define PF_MBX_ARQBAH 0x0022E400
+#define PF_MBX_ARQBAL 0x0022E380
+#define PF_MBX_ARQH 0x0022E500
+#define PF_MBX_ARQH_ARQH_M ICE_M(0x3FF, 0)
+#define PF_MBX_ARQLEN 0x0022E480
+#define PF_MBX_ARQLEN_ARQLEN_M ICE_M(0x3FF, 0)
+#define PF_MBX_ARQLEN_ARQENABLE_M BIT(31)
+#define PF_MBX_ARQT 0x0022E580
+#define PF_MBX_ATQBAH 0x0022E180
+#define PF_MBX_ATQBAL 0x0022E100
+#define PF_MBX_ATQH 0x0022E280
+#define PF_MBX_ATQH_ATQH_M ICE_M(0x3FF, 0)
+#define PF_MBX_ATQLEN 0x0022E200
+#define PF_MBX_ATQLEN_ATQLEN_M ICE_M(0x3FF, 0)
+#define PF_MBX_ATQLEN_ATQENABLE_M BIT(31)
+#define PF_MBX_ATQT 0x0022E300
#define GLFLXP_RXDID_FLAGS(_i, _j) (0x0045D000 + ((_i) * 4 + (_j) * 256))
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S 0
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, 0)
@@ -74,10 +90,16 @@
#define GLGEN_RTRIG_CORER_M BIT(0)
#define GLGEN_RTRIG_GLOBR_M BIT(1)
#define GLGEN_STAT 0x000B612C
+#define GLGEN_VFLRSTAT(_i) (0x00093A04 + ((_i) * 4))
#define PFGEN_CTRL 0x00091000
#define PFGEN_CTRL_PFSWR_M BIT(0)
#define PFGEN_STATE 0x00088000
#define PRTGEN_STATUS 0x000B8100
+#define VFGEN_RSTAT(_VF) (0x00074000 + ((_VF) * 4))
+#define VPGEN_VFRSTAT(_VF) (0x00090800 + ((_VF) * 4))
+#define VPGEN_VFRSTAT_VFRD_M BIT(0)
+#define VPGEN_VFRTRIG(_VF) (0x00090000 + ((_VF) * 4))
+#define VPGEN_VFRTRIG_VFSWR_M BIT(0)
#define PFHMC_ERRORDATA 0x00520500
#define PFHMC_ERRORINFO 0x00520400
#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4))
@@ -90,11 +112,23 @@
#define GLINT_ITR(_i, _INT) (0x00154000 + ((_i) * 8192 + (_INT) * 4))
#define GLINT_RATE(_INT) (0x0015A000 + ((_INT) * 4))
#define GLINT_RATE_INTRL_ENA_M BIT(6)
+#define GLINT_VECT2FUNC(_INT) (0x00162000 + ((_INT) * 4))
+#define GLINT_VECT2FUNC_VF_NUM_S 0
+#define GLINT_VECT2FUNC_VF_NUM_M ICE_M(0xFF, 0)
+#define GLINT_VECT2FUNC_PF_NUM_S 12
+#define GLINT_VECT2FUNC_PF_NUM_M ICE_M(0x7, 12)
+#define GLINT_VECT2FUNC_IS_PF_S 16
+#define GLINT_VECT2FUNC_IS_PF_M BIT(16)
#define PFINT_FW_CTL 0x0016C800
#define PFINT_FW_CTL_MSIX_INDX_M ICE_M(0x7FF, 0)
#define PFINT_FW_CTL_ITR_INDX_S 11
#define PFINT_FW_CTL_ITR_INDX_M ICE_M(0x3, 11)
#define PFINT_FW_CTL_CAUSE_ENA_M BIT(30)
+#define PFINT_MBX_CTL 0x0016B280
+#define PFINT_MBX_CTL_MSIX_INDX_M ICE_M(0x7FF, 0)
+#define PFINT_MBX_CTL_ITR_INDX_S 11
+#define PFINT_MBX_CTL_ITR_INDX_M ICE_M(0x3, 11)
+#define PFINT_MBX_CTL_CAUSE_ENA_M BIT(30)
#define PFINT_OICR 0x0016CA00
#define PFINT_OICR_ECC_ERR_M BIT(16)
#define PFINT_OICR_MAL_DETECT_M BIT(19)
@@ -102,6 +136,7 @@
#define PFINT_OICR_PCI_EXCEPTION_M BIT(21)
#define PFINT_OICR_HMC_ERR_M BIT(26)
#define PFINT_OICR_PE_CRITERR_M BIT(28)
+#define PFINT_OICR_VFLR_M BIT(29)
#define PFINT_OICR_CTL 0x0016CA80
#define PFINT_OICR_CTL_MSIX_INDX_M ICE_M(0x7FF, 0)
#define PFINT_OICR_CTL_ITR_INDX_S 11
@@ -116,6 +151,19 @@
#define QINT_TQCTL_MSIX_INDX_S 0
#define QINT_TQCTL_ITR_INDX_S 11
#define QINT_TQCTL_CAUSE_ENA_M BIT(30)
+#define VPINT_ALLOC(_VF) (0x001D1000 + ((_VF) * 4))
+#define VPINT_ALLOC_FIRST_S 0
+#define VPINT_ALLOC_FIRST_M ICE_M(0x7FF, 0)
+#define VPINT_ALLOC_LAST_S 12
+#define VPINT_ALLOC_LAST_M ICE_M(0x7FF, 12)
+#define VPINT_ALLOC_VALID_M BIT(31)
+#define VPINT_ALLOC_PCI(_VF) (0x0009D000 + ((_VF) * 4))
+#define VPINT_ALLOC_PCI_FIRST_S 0
+#define VPINT_ALLOC_PCI_FIRST_M ICE_M(0x7FF, 0)
+#define VPINT_ALLOC_PCI_LAST_S 12
+#define VPINT_ALLOC_PCI_LAST_M ICE_M(0x7FF, 12)
+#define VPINT_ALLOC_PCI_VALID_M BIT(31)
+#define GLLAN_RCTL_0 0x002941F8
#define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4))
#define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4))
#define QRX_CTRL_MAX_INDEX 2047
@@ -128,6 +176,20 @@
#define QRX_TAIL_MAX_INDEX 2047
#define QRX_TAIL_TAIL_S 0
#define QRX_TAIL_TAIL_M ICE_M(0x1FFF, 0)
+#define VPLAN_RX_QBASE(_VF) (0x00072000 + ((_VF) * 4))
+#define VPLAN_RX_QBASE_VFFIRSTQ_S 0
+#define VPLAN_RX_QBASE_VFFIRSTQ_M ICE_M(0x7FF, 0)
+#define VPLAN_RX_QBASE_VFNUMQ_S 16
+#define VPLAN_RX_QBASE_VFNUMQ_M ICE_M(0xFF, 16)
+#define VPLAN_RXQ_MAPENA(_VF) (0x00073000 + ((_VF) * 4))
+#define VPLAN_RXQ_MAPENA_RX_ENA_M BIT(0)
+#define VPLAN_TX_QBASE(_VF) (0x001D1800 + ((_VF) * 4))
+#define VPLAN_TX_QBASE_VFFIRSTQ_S 0
+#define VPLAN_TX_QBASE_VFFIRSTQ_M ICE_M(0x3FFF, 0)
+#define VPLAN_TX_QBASE_VFNUMQ_S 16
+#define VPLAN_TX_QBASE_VFNUMQ_M ICE_M(0xFF, 16)
+#define VPLAN_TXQ_MAPENA(_VF) (0x00073800 + ((_VF) * 4))
+#define VPLAN_TXQ_MAPENA_TX_ENA_M BIT(0)
#define GL_MDET_RX 0x00294C00
#define GL_MDET_RX_QNUM_S 0
#define GL_MDET_RX_QNUM_M ICE_M(0x7FFF, 0)
@@ -164,6 +226,14 @@
#define PF_MDET_TX_PQM_VALID_M BIT(0)
#define PF_MDET_TX_TCLAN 0x000FC000
#define PF_MDET_TX_TCLAN_VALID_M BIT(0)
+#define VP_MDET_RX(_VF) (0x00294400 + ((_VF) * 4))
+#define VP_MDET_RX_VALID_M BIT(0)
+#define VP_MDET_TX_PQM(_VF) (0x002D2000 + ((_VF) * 4))
+#define VP_MDET_TX_PQM_VALID_M BIT(0)
+#define VP_MDET_TX_TCLAN(_VF) (0x000FB800 + ((_VF) * 4))
+#define VP_MDET_TX_TCLAN_VALID_M BIT(0)
+#define VP_MDET_TX_TDPU(_VF) (0x00040000 + ((_VF) * 4))
+#define VP_MDET_TX_TDPU_VALID_M BIT(0)
#define GLNVM_FLA 0x000B6108
#define GLNVM_FLA_LOCKED_M BIT(6)
#define GLNVM_GENS 0x000B6100
@@ -175,6 +245,9 @@
#define PF_FUNC_RID 0x0009E880
#define PF_FUNC_RID_FUNC_NUM_S 0
#define PF_FUNC_RID_FUNC_NUM_M ICE_M(0x7, 0)
+#define PF_PCI_CIAA 0x0009E580
+#define PF_PCI_CIAA_VF_NUM_S 12
+#define PF_PCI_CIAD 0x0009E500
#define GL_PWR_MODE_CTL 0x000B820C
#define GL_PWR_MODE_CTL_CAR_MAX_BW_S 30
#define GL_PWR_MODE_CTL_CAR_MAX_BW_M ICE_M(0x3, 30)
@@ -254,6 +327,10 @@
#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8))
#define GLV_UPTCH(_i) (0x0030A004 + ((_i) * 8))
#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8))
+#define PF_VT_PFALLOC_HIF 0x0009DD80
#define VSIQF_HKEY_MAX_INDEX 12
+#define VSIQF_HLUT_MAX_INDEX 15
+#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
+#define VFINT_DYN_CTLN_CLEARPBA_M BIT(1)
#endif /* _ICE_HW_AUTOGEN_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 94504023d86e..7d2a66739e3f 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -418,6 +418,7 @@ struct ice_tlan_ctx {
u8 pf_num;
u16 vmvf_num;
u8 vmvf_type;
+#define ICE_TLAN_CTX_VMVF_TYPE_VF 0
#define ICE_TLAN_CTX_VMVF_TYPE_VMQ 1
#define ICE_TLAN_CTX_VMVF_TYPE_PF 2
u16 src_vsi;
@@ -473,4 +474,16 @@ static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
{
return ice_ptype_lkup[ptype];
}
+
+#define ICE_LINK_SPEED_UNKNOWN 0
+#define ICE_LINK_SPEED_10MBPS 10
+#define ICE_LINK_SPEED_100MBPS 100
+#define ICE_LINK_SPEED_1000MBPS 1000
+#define ICE_LINK_SPEED_2500MBPS 2500
+#define ICE_LINK_SPEED_5000MBPS 5000
+#define ICE_LINK_SPEED_10000MBPS 10000
+#define ICE_LINK_SPEED_20000MBPS 20000
+#define ICE_LINK_SPEED_25000MBPS 25000
+#define ICE_LINK_SPEED_40000MBPS 40000
+
#endif /* _ICE_LAN_TX_RX_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index acf3478a3f3b..5bacad01f0c9 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -68,18 +68,20 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
/* Enable Flexible Descriptors in the queue context which
* allows this driver to select a specific receive descriptor format
*/
- regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
- regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
- QRXFLXP_CNTXT_RXDID_IDX_M;
-
- /* increasing context priority to pick up profile id;
- * default is 0x01; setting to 0x03 to ensure profile
- * is programming if prev context is of same priority
- */
- regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
- QRXFLXP_CNTXT_RXDID_PRIO_M;
+ if (vsi->type != ICE_VSI_VF) {
+ regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
+ regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
+ QRXFLXP_CNTXT_RXDID_IDX_M;
+
+ /* increasing context priority to pick up profile id;
+ * default is 0x01; setting to 0x03 to ensure profile
+		 * is programmed if prev context is of same priority
+ */
+ regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
+ QRXFLXP_CNTXT_RXDID_PRIO_M;
- wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
+ wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
+ }
/* Absolute queue number out of 2K needs to be passed */
err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
@@ -90,6 +92,9 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
return -EIO;
}
+ if (vsi->type == ICE_VSI_VF)
+ return 0;
+
/* init queue specific tail register */
ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
writel(0, ring->tail);
@@ -132,6 +137,11 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
case ICE_VSI_PF:
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
break;
+ case ICE_VSI_VF:
+ /* Firmware expects vmvf_num to be absolute VF id */
+ tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
+ tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
+ break;
default:
return;
}
@@ -285,6 +295,16 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
vsi->num_desc = ALIGN(ICE_DFLT_NUM_DESC, ICE_REQ_DESC_MULTIPLE);
vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx);
break;
+ case ICE_VSI_VF:
+ vsi->alloc_txq = pf->num_vf_qps;
+ vsi->alloc_rxq = pf->num_vf_qps;
+ /* pf->num_vf_msix includes (VF miscellaneous vector +
+ * data queue interrupts). Since vsi->num_q_vectors is number
+		 * of queue vectors, subtract 1 from the original vector
+ * count
+ */
+ vsi->num_q_vectors = pf->num_vf_msix - 1;
+ break;
default:
dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
vsi->type);
@@ -331,6 +351,8 @@ void ice_vsi_delete(struct ice_vsi *vsi)
struct ice_vsi_ctx ctxt;
enum ice_status status;
+ if (vsi->type == ICE_VSI_VF)
+ ctxt.vf_num = vsi->vf_id;
ctxt.vsi_num = vsi->vsi_num;
memcpy(&ctxt.info, &vsi->info, sizeof(struct ice_aqc_vsi_props));
@@ -411,7 +433,7 @@ int ice_vsi_clear(struct ice_vsi *vsi)
* @irq: interrupt number
* @data: pointer to a q_vector
*/
-irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
+static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
{
struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
@@ -466,6 +488,10 @@ static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type)
/* Setup default MSIX irq handler for VSI */
vsi->irq_handler = ice_msix_clean_rings;
break;
+ case ICE_VSI_VF:
+ if (ice_vsi_alloc_arrays(vsi, true))
+ goto err_rings;
+ break;
default:
dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
goto unlock_pf;
@@ -685,6 +711,15 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
BIT(cap->rss_table_entry_width));
vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
break;
+ case ICE_VSI_VF:
+		/* VF VSI will get a small RSS table
+ * For VSI_LUT, LUT size should be set to 64 bytes
+ */
+ vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
+ vsi->rss_size = min_t(int, num_online_cpus(),
+ BIT(cap->rss_table_entry_width));
+ vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
+ break;
default:
dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n",
vsi->type);
@@ -773,17 +808,17 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
* Setup number and offset of Rx queues for all TCs for the VSI
*/
+ qcount = numq_tc;
/* qcount will change if RSS is enabled */
if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
- if (vsi->type == ICE_VSI_PF)
- max_rss = ICE_MAX_LG_RSS_QS;
- else
- max_rss = ICE_MAX_SMALL_RSS_QS;
-
- qcount = min_t(int, numq_tc, max_rss);
- qcount = min_t(int, qcount, vsi->rss_size);
- } else {
- qcount = numq_tc;
+ if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) {
+ if (vsi->type == ICE_VSI_PF)
+ max_rss = ICE_MAX_LG_RSS_QS;
+ else
+ max_rss = ICE_MAX_SMALL_RSS_QS;
+ qcount = min_t(int, numq_tc, max_rss);
+ qcount = min_t(int, qcount, vsi->rss_size);
+ }
}
/* find the (rounded up) power-of-2 of qcount */
@@ -813,6 +848,14 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
vsi->num_txq = qcount_tx;
vsi->num_rxq = offset;
+ if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
+ dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
+ /* since there is a chance that num_rxq could have been changed
+ * in the above for loop, make num_txq equal to num_rxq.
+ */
+ vsi->num_txq = vsi->num_rxq;
+ }
+
/* Rx queue mapping */
ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
/* q_mapping buffer holds the info for the first queue allocated for
@@ -838,6 +881,11 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
break;
+ case ICE_VSI_VF:
+		/* VF VSI will get a small RSS table which is a VSI LUT type */
+ lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
+ hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
+ break;
default:
dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
vsi->type);
@@ -868,6 +916,11 @@ static int ice_vsi_init(struct ice_vsi *vsi)
case ICE_VSI_PF:
ctxt.flags = ICE_AQ_VSI_TYPE_PF;
break;
+ case ICE_VSI_VF:
+ ctxt.flags = ICE_AQ_VSI_TYPE_VF;
+ /* VF number here is the absolute VF number (0-255) */
+ ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
+ break;
default:
return -ENODEV;
}
@@ -961,6 +1014,8 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
q_vector->vsi = vsi;
q_vector->v_idx = v_idx;
+ if (vsi->type == ICE_VSI_VF)
+ goto out;
/* only set affinity_mask if the CPU is online */
if (cpu_online(v_idx))
cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
@@ -973,6 +1028,7 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
NAPI_POLL_WEIGHT);
+out:
/* tie q_vector and VSI together */
vsi->q_vectors[v_idx] = q_vector;
@@ -1067,6 +1123,13 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
vsi->hw_base_vector = ice_get_res(pf, pf->hw_irq_tracker,
num_q_vectors, vsi->idx);
break;
+ case ICE_VSI_VF:
+ /* take VF misc vector and data vectors into account */
+ num_q_vectors = pf->num_vf_msix;
+ /* For VF VSI, reserve slots only from HW interrupts */
+ vsi->hw_base_vector = ice_get_res(pf, pf->hw_irq_tracker,
+ num_q_vectors, vsi->idx);
+ break;
default:
dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
vsi->type);
@@ -1077,9 +1140,11 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
dev_err(&pf->pdev->dev,
"Failed to get tracking for %d HW vectors for VSI %d, err=%d\n",
num_q_vectors, vsi->vsi_num, vsi->hw_base_vector);
- ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector,
- vsi->idx);
- pf->num_avail_sw_msix += num_q_vectors;
+ if (vsi->type != ICE_VSI_VF) {
+ ice_free_res(vsi->back->sw_irq_tracker,
+ vsi->sw_base_vector, vsi->idx);
+ pf->num_avail_sw_msix += num_q_vectors;
+ }
return -ENOENT;
}
@@ -1139,7 +1204,6 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->vsi = vsi;
ring->dev = &pf->pdev->dev;
ring->count = vsi->num_desc;
- ring->itr_setting = ICE_DFLT_TX_ITR;
vsi->tx_rings[i] = ring;
}
@@ -1159,7 +1223,6 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->netdev = vsi->netdev;
ring->dev = &pf->pdev->dev;
ring->count = vsi->num_desc;
- ring->itr_setting = ICE_DFLT_RX_ITR;
vsi->rx_rings[i] = ring;
}
@@ -1196,6 +1259,7 @@ static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);
q_vector->num_ring_tx = tx_rings_per_v;
q_vector->tx.ring = NULL;
+ q_vector->tx.itr_idx = ICE_TX_ITR;
q_base = vsi->num_txq - tx_rings_rem;
for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
@@ -1211,6 +1275,7 @@ static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);
q_vector->num_ring_rx = rx_rings_per_v;
q_vector->rx.ring = NULL;
+ q_vector->rx.itr_idx = ICE_RX_ITR;
q_base = vsi->num_rxq - rx_rings_rem;
for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
@@ -1512,6 +1577,9 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
int err = 0;
u16 i;
+ if (vsi->type == ICE_VSI_VF)
+ goto setup_rings;
+
if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN)
vsi->max_frame = vsi->netdev->mtu +
ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
@@ -1519,6 +1587,7 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
vsi->max_frame = ICE_RXBUF_2048;
vsi->rx_buf_len = ICE_RXBUF_2048;
+setup_rings:
/* set up individual rings */
for (i = 0; i < vsi->num_rxq && !err; i++)
err = ice_setup_rx_ctx(vsi->rx_rings[i]);
@@ -1615,6 +1684,37 @@ static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
}
/**
+ * ice_cfg_itr - configure the initial interrupt throttle values
+ * @hw: pointer to the HW structure
+ * @q_vector: interrupt vector that's being configured
+ * @vector: HW vector index to apply the interrupt throttling to
+ *
+ * Configure interrupt throttling values for the ring containers that are
+ * associated with the interrupt vector passed in.
+ */
+static void
+ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector)
+{
+ u8 itr_gran = hw->itr_gran;
+
+ if (q_vector->num_ring_rx) {
+ struct ice_ring_container *rc = &q_vector->rx;
+
+ rc->itr = ITR_TO_REG(ICE_DFLT_RX_ITR, itr_gran);
+ rc->latency_range = ICE_LOW_LATENCY;
+ wr32(hw, GLINT_ITR(rc->itr_idx, vector), rc->itr);
+ }
+
+ if (q_vector->num_ring_tx) {
+ struct ice_ring_container *rc = &q_vector->tx;
+
+ rc->itr = ITR_TO_REG(ICE_DFLT_TX_ITR, itr_gran);
+ rc->latency_range = ICE_LOW_LATENCY;
+ wr32(hw, GLINT_ITR(rc->itr_idx, vector), rc->itr);
+ }
+}
+
+/**
* ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
* @vsi: the VSI being configured
*/
@@ -1624,31 +1724,13 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi)
u16 vector = vsi->hw_base_vector;
struct ice_hw *hw = &pf->hw;
u32 txq = 0, rxq = 0;
- int i, q, itr;
- u8 itr_gran;
+ int i, q;
for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
struct ice_q_vector *q_vector = vsi->q_vectors[i];
- itr_gran = hw->itr_gran;
-
- q_vector->intrl = ICE_DFLT_INTRL;
-
- if (q_vector->num_ring_rx) {
- q_vector->rx.itr =
- ITR_TO_REG(vsi->rx_rings[rxq]->itr_setting,
- itr_gran);
- q_vector->rx.latency_range = ICE_LOW_LATENCY;
- }
+ ice_cfg_itr(hw, q_vector, vector);
- if (q_vector->num_ring_tx) {
- q_vector->tx.itr =
- ITR_TO_REG(vsi->tx_rings[txq]->itr_setting,
- itr_gran);
- q_vector->tx.latency_range = ICE_LOW_LATENCY;
- }
- wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), q_vector->rx.itr);
- wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), q_vector->tx.itr);
wr32(hw, GLINT_RATE(vector),
ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
@@ -1664,23 +1746,33 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi)
* tracked for this PF.
*/
for (q = 0; q < q_vector->num_ring_tx; q++) {
+ int itr_idx = q_vector->tx.itr_idx;
u32 val;
- itr = ICE_ITR_NONE;
- val = QINT_TQCTL_CAUSE_ENA_M |
- (itr << QINT_TQCTL_ITR_INDX_S) |
- (vector << QINT_TQCTL_MSIX_INDX_S);
+ if (vsi->type == ICE_VSI_VF)
+ val = QINT_TQCTL_CAUSE_ENA_M |
+ (itr_idx << QINT_TQCTL_ITR_INDX_S) |
+ ((i + 1) << QINT_TQCTL_MSIX_INDX_S);
+ else
+ val = QINT_TQCTL_CAUSE_ENA_M |
+ (itr_idx << QINT_TQCTL_ITR_INDX_S) |
+ (vector << QINT_TQCTL_MSIX_INDX_S);
wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
txq++;
}
for (q = 0; q < q_vector->num_ring_rx; q++) {
+ int itr_idx = q_vector->rx.itr_idx;
u32 val;
- itr = ICE_ITR_NONE;
- val = QINT_RQCTL_CAUSE_ENA_M |
- (itr << QINT_RQCTL_ITR_INDX_S) |
- (vector << QINT_RQCTL_MSIX_INDX_S);
+ if (vsi->type == ICE_VSI_VF)
+ val = QINT_RQCTL_CAUSE_ENA_M |
+ (itr_idx << QINT_RQCTL_ITR_INDX_S) |
+ ((i + 1) << QINT_RQCTL_MSIX_INDX_S);
+ else
+ val = QINT_RQCTL_CAUSE_ENA_M |
+ (itr_idx << QINT_RQCTL_ITR_INDX_S) |
+ (vector << QINT_RQCTL_MSIX_INDX_S);
wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
rxq++;
}
@@ -1784,8 +1876,11 @@ int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
/**
* ice_vsi_stop_tx_rings - Disable Tx rings
* @vsi: the VSI being configured
+ * @rst_src: reset source
+ * @rel_vmvf_num: Relative id of VF/VM
*/
-int ice_vsi_stop_tx_rings(struct ice_vsi *vsi)
+int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+ u16 rel_vmvf_num)
{
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
@@ -1837,7 +1932,7 @@ int ice_vsi_stop_tx_rings(struct ice_vsi *vsi)
GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M);
}
status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids,
- NULL);
+ rst_src, rel_vmvf_num, NULL);
/* if the disable queue command was exercised during an active reset
* flow, ICE_ERR_RESET_ONGOING is returned. This is not an error as
* the reset operation disables queues at the hardware level anyway.
@@ -1934,7 +2029,7 @@ err_out:
*/
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
- enum ice_vsi_type type, u16 __always_unused vf_id)
+ enum ice_vsi_type type, u16 vf_id)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct device *dev = &pf->pdev->dev;
@@ -1949,6 +2044,8 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
vsi->port_info = pi;
vsi->vsw = pf->first_sw;
+ if (vsi->type == ICE_VSI_VF)
+ vsi->vf_id = vf_id;
if (ice_vsi_get_qs(vsi)) {
dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
@@ -1987,6 +2084,34 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
ice_vsi_cfg_rss_lut_key(vsi);
break;
+ case ICE_VSI_VF:
+		/* VF driver will take care of creating netdev for this type and
+		 * map queues to vectors through Virtchnl; PF driver only
+		 * creates a VSI and corresponding structures for bookkeeping
+		 * purposes
+ */
+ ret = ice_vsi_alloc_q_vectors(vsi);
+ if (ret)
+ goto unroll_vsi_init;
+
+ ret = ice_vsi_alloc_rings(vsi);
+ if (ret)
+ goto unroll_alloc_q_vector;
+
+ /* Setup Vector base only during VF init phase or when VF asks
+ * for more vectors than assigned number. In all other cases,
+ * assign hw_base_vector to the value given earlier.
+ */
+ if (test_bit(ICE_VF_STATE_CFG_INTR, pf->vf[vf_id].vf_states)) {
+ ret = ice_vsi_setup_vector_base(vsi);
+ if (ret)
+ goto unroll_vector_base;
+ } else {
+ vsi->hw_base_vector = pf->vf[vf_id].first_vector_idx;
+ }
+ pf->q_left_tx -= vsi->alloc_txq;
+ pf->q_left_rx -= vsi->alloc_rxq;
+ break;
default:
/* if VSI type is not recognized, clean up the resources and
* exit
@@ -2045,8 +2170,8 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi)
for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
struct ice_q_vector *q_vector = vsi->q_vectors[i];
- wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), 0);
- wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), 0);
+ wr32(hw, GLINT_ITR(ICE_IDX_ITR0, vector), 0);
+ wr32(hw, GLINT_ITR(ICE_IDX_ITR1, vector), 0);
for (q = 0; q < q_vector->num_ring_tx; q++) {
wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
txq++;
@@ -2077,6 +2202,8 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
return;
ice_vsi_release_msix(vsi);
+ if (vsi->type == ICE_VSI_VF)
+ return;
vsi->irqs_ready = false;
for (i = 0; i < vsi->num_q_vectors; i++) {
@@ -2317,10 +2444,12 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
int ice_vsi_release(struct ice_vsi *vsi)
{
struct ice_pf *pf;
+ struct ice_vf *vf;
if (!vsi->back)
return -ENODEV;
pf = vsi->back;
+ vf = &pf->vf[vsi->vf_id];
/* do not unregister and free netdevs while driver is in the reset
* recovery pending state. Since reset/rebuild happens through PF
* service task workqueue, its not a good idea to unregister netdev
@@ -2342,10 +2471,23 @@ int ice_vsi_release(struct ice_vsi *vsi)
ice_vsi_close(vsi);
/* reclaim interrupt vectors back to PF */
- ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
- pf->num_avail_sw_msix += vsi->num_q_vectors;
- ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
- pf->num_avail_hw_msix += vsi->num_q_vectors;
+ if (vsi->type != ICE_VSI_VF) {
+ /* reclaim SW interrupts back to the common pool */
+ ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector,
+ vsi->idx);
+ pf->num_avail_sw_msix += vsi->num_q_vectors;
+ /* reclaim HW interrupts back to the common pool */
+ ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector,
+ vsi->idx);
+ pf->num_avail_hw_msix += vsi->num_q_vectors;
+ } else if (test_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states)) {
+		/* Reclaim VF resources only when freeing all VFs or when
+		 * vector reassignment is requested
+ */
+ ice_free_res(vsi->back->hw_irq_tracker, vf->first_vector_idx,
+ vsi->idx);
+ pf->num_avail_hw_msix += pf->num_vf_msix;
+ }
ice_remove_vsi_fltr(&pf->hw, vsi->idx);
ice_vsi_delete(vsi);
@@ -2387,6 +2529,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
vsi->hw_base_vector = 0;
ice_vsi_clear_rings(vsi);
ice_vsi_free_arrays(vsi, false);
+ ice_dev_onetime_setup(&vsi->back->hw);
ice_vsi_set_num_qs(vsi);
/* Initialize VSI struct elements and create VSI in FW */
@@ -2414,6 +2557,22 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
ice_vsi_map_rings_to_vectors(vsi);
break;
+ case ICE_VSI_VF:
+ ret = ice_vsi_alloc_q_vectors(vsi);
+ if (ret)
+ goto err_rings;
+
+ ret = ice_vsi_setup_vector_base(vsi);
+ if (ret)
+ goto err_vectors;
+
+ ret = ice_vsi_alloc_rings(vsi);
+ if (ret)
+ goto err_vectors;
+
+ vsi->back->q_left_tx -= vsi->alloc_txq;
+ vsi->back->q_left_rx -= vsi->alloc_rxq;
+ break;
default:
break;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 2617afe01c82..3831b4f0960a 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -31,7 +31,8 @@ int ice_vsi_start_rx_rings(struct ice_vsi *vsi);
int ice_vsi_stop_rx_rings(struct ice_vsi *vsi);
-int ice_vsi_stop_tx_rings(struct ice_vsi *vsi);
+int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+ u16 rel_vmvf_num);
int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena);
@@ -72,5 +73,4 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);
-irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data);
#endif /* !_ICE_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 46ccf265c218..05993451147a 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -8,7 +8,7 @@
#include "ice.h"
#include "ice_lib.h"
-#define DRV_VERSION "0.7.1-k"
+#define DRV_VERSION "0.7.2-k"
#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
const char ice_drv_ver[] = DRV_VERSION;
static const char ice_driver_string[] = DRV_SUMMARY;
@@ -342,6 +342,10 @@ ice_prepare_for_reset(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
+ /* Notify VFs of impending reset */
+ if (ice_check_sq_alive(hw, &hw->mailboxq))
+ ice_vc_notify_reset(pf);
+
/* disable the VSIs and their queues that are not already DOWN */
ice_pf_dis_all_vsi(pf);
@@ -453,35 +457,6 @@ static void ice_reset_subtask(struct ice_pf *pf)
}
/**
- * ice_watchdog_subtask - periodic tasks not using event driven scheduling
- * @pf: board private structure
- */
-static void ice_watchdog_subtask(struct ice_pf *pf)
-{
- int i;
-
- /* if interface is down do nothing */
- if (test_bit(__ICE_DOWN, pf->state) ||
- test_bit(__ICE_CFG_BUSY, pf->state))
- return;
-
- /* make sure we don't do these things too often */
- if (time_before(jiffies,
- pf->serv_tmr_prev + pf->serv_tmr_period))
- return;
-
- pf->serv_tmr_prev = jiffies;
-
- /* Update the stats for active netdevs so the network stack
- * can look at updated numbers whenever it cares to
- */
- ice_update_pf_stats(pf);
- for (i = 0; i < pf->num_alloc_vsi; i++)
- if (pf->vsi[i] && pf->vsi[i]->netdev)
- ice_update_vsi_stats(pf->vsi[i]);
-}
-
-/**
* ice_print_link_msg - print link up or down message
* @vsi: the VSI whose link status is being queried
* @isup: boolean for if the link is now up or down
@@ -551,36 +526,6 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
}
/**
- * ice_init_link_events - enable/initialize link events
- * @pi: pointer to the port_info instance
- *
- * Returns -EIO on failure, 0 on success
- */
-static int ice_init_link_events(struct ice_port_info *pi)
-{
- u16 mask;
-
- mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
- ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));
-
- if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
- dev_dbg(ice_hw_to_dev(pi->hw),
- "Failed to set link event mask for port %d\n",
- pi->lport);
- return -EIO;
- }
-
- if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
- dev_dbg(ice_hw_to_dev(pi->hw),
- "Failed to enable link events for port %d\n",
- pi->lport);
- return -EIO;
- }
-
- return 0;
-}
-
-/**
* ice_vsi_link_event - update the vsi's netdev
* @vsi: the vsi on which the link event occurred
* @link_up: whether or not the vsi needs to be set up or down
@@ -661,31 +606,41 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi)
}
}
+ ice_vc_notify_link_state(pf);
+
return 0;
}
/**
- * ice_handle_link_event - handle link event via ARQ
- * @pf: pf that the link event is associated with
- *
- * Return -EINVAL if port_info is null
- * Return status on succes
+ * ice_watchdog_subtask - periodic tasks not using event driven scheduling
+ * @pf: board private structure
*/
-static int ice_handle_link_event(struct ice_pf *pf)
+static void ice_watchdog_subtask(struct ice_pf *pf)
{
- struct ice_port_info *port_info;
- int status;
+ int i;
- port_info = pf->hw.port_info;
- if (!port_info)
- return -EINVAL;
+ /* if interface is down do nothing */
+ if (test_bit(__ICE_DOWN, pf->state) ||
+ test_bit(__ICE_CFG_BUSY, pf->state))
+ return;
- status = ice_link_event(pf, port_info);
- if (status)
- dev_dbg(&pf->pdev->dev,
- "Could not process link event, error %d\n", status);
+ /* make sure we don't do these things too often */
+ if (time_before(jiffies,
+ pf->serv_tmr_prev + pf->serv_tmr_period))
+ return;
- return status;
+ pf->serv_tmr_prev = jiffies;
+
+ if (ice_link_event(pf, pf->hw.port_info))
+ dev_dbg(&pf->pdev->dev, "ice_link_event failed\n");
+
+ /* Update the stats for active netdevs so the network stack
+ * can look at updated numbers whenever it cares to
+ */
+ ice_update_pf_stats(pf);
+ for (i = 0; i < pf->num_alloc_vsi; i++)
+ if (pf->vsi[i] && pf->vsi[i]->netdev)
+ ice_update_vsi_stats(pf->vsi[i]);
}
/**
@@ -711,6 +666,10 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
cq = &hw->adminq;
qtype = "Admin";
break;
+ case ICE_CTL_Q_MAILBOX:
+ cq = &hw->mailboxq;
+ qtype = "Mailbox";
+ break;
default:
dev_warn(&pf->pdev->dev, "Unknown control queue type 0x%x\n",
q_type);
@@ -787,10 +746,8 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
opcode = le16_to_cpu(event.desc.opcode);
switch (opcode) {
- case ice_aqc_opc_get_link_status:
- if (ice_handle_link_event(pf))
- dev_err(&pf->pdev->dev,
- "Could not handle link event\n");
+ case ice_mbx_opc_send_msg_to_pf:
+ ice_vc_process_vf_msg(pf, &event);
break;
case ice_aqc_opc_fw_logging:
ice_output_fw_log(hw, &event.desc, event.msg_buf);
@@ -851,6 +808,28 @@ static void ice_clean_adminq_subtask(struct ice_pf *pf)
}
/**
+ * ice_clean_mailboxq_subtask - clean the MailboxQ rings
+ * @pf: board private structure
+ */
+static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+
+ if (!test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state))
+ return;
+
+ if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
+ return;
+
+ clear_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);
+
+ if (ice_ctrlq_pending(hw, &hw->mailboxq))
+ __ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);
+
+ ice_flush(hw);
+}
+
+/**
* ice_service_task_schedule - schedule the service task to wake up
* @pf: board private structure
*
@@ -916,6 +895,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
struct ice_hw *hw = &pf->hw;
bool mdd_detected = false;
u32 reg;
+ int i;
if (!test_bit(__ICE_MDD_EVENT_PENDING, pf->state))
return;
@@ -1005,6 +985,51 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
}
}
+ /* see if one of the VFs needs to be reset */
+ for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
+ struct ice_vf *vf = &pf->vf[i];
+
+ reg = rd32(hw, VP_MDET_TX_PQM(i));
+ if (reg & VP_MDET_TX_PQM_VALID_M) {
+ wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
+ vf->num_mdd_events++;
+ dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
+ i);
+ }
+
+ reg = rd32(hw, VP_MDET_TX_TCLAN(i));
+ if (reg & VP_MDET_TX_TCLAN_VALID_M) {
+ wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
+ vf->num_mdd_events++;
+ dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
+ i);
+ }
+
+ reg = rd32(hw, VP_MDET_TX_TDPU(i));
+ if (reg & VP_MDET_TX_TDPU_VALID_M) {
+ wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
+ vf->num_mdd_events++;
+ dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
+ i);
+ }
+
+ reg = rd32(hw, VP_MDET_RX(i));
+ if (reg & VP_MDET_RX_VALID_M) {
+ wr32(hw, VP_MDET_RX(i), 0xFFFF);
+ vf->num_mdd_events++;
+ dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
+ i);
+ }
+
+ if (vf->num_mdd_events > ICE_DFLT_NUM_MDD_EVENTS_ALLOWED) {
+ dev_info(&pf->pdev->dev,
+ "Too many MDD events on VF %d, disabled\n", i);
+ dev_info(&pf->pdev->dev,
+ "Use PF Control I/F to re-enable the VF\n");
+ set_bit(ICE_VF_STATE_DIS, vf->vf_states);
+ }
+ }
+
/* re-enable MDD interrupt cause */
clear_bit(__ICE_MDD_EVENT_PENDING, pf->state);
reg = rd32(hw, PFINT_OICR_ENA);
@@ -1038,8 +1063,10 @@ static void ice_service_task(struct work_struct *work)
ice_check_for_hang_subtask(pf);
ice_sync_fltr_subtask(pf);
ice_handle_mdd_event(pf);
+ ice_process_vflr_event(pf);
ice_watchdog_subtask(pf);
ice_clean_adminq_subtask(pf);
+ ice_clean_mailboxq_subtask(pf);
/* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */
ice_service_task_complete(pf);
@@ -1050,6 +1077,8 @@ static void ice_service_task(struct work_struct *work)
*/
if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
test_bit(__ICE_MDD_EVENT_PENDING, pf->state) ||
+ test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
+ test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
mod_timer(&pf->serv_tmr, jiffies);
}
@@ -1064,6 +1093,10 @@ static void ice_set_ctrlq_len(struct ice_hw *hw)
hw->adminq.num_sq_entries = ICE_AQ_LEN;
hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
+ hw->mailboxq.num_rq_entries = ICE_MBXQ_LEN;
+ hw->mailboxq.num_sq_entries = ICE_MBXQ_LEN;
+ hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
+ hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
}
/**
@@ -1197,6 +1230,7 @@ static void ice_ena_misc_vector(struct ice_pf *pf)
PFINT_OICR_MAL_DETECT_M |
PFINT_OICR_GRST_M |
PFINT_OICR_PCI_EXCEPTION_M |
+ PFINT_OICR_VFLR_M |
PFINT_OICR_HMC_ERR_M |
PFINT_OICR_PE_CRITERR_M);
@@ -1220,6 +1254,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
u32 oicr, ena_mask;
set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
+ set_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);
oicr = rd32(hw, PFINT_OICR);
ena_mask = rd32(hw, PFINT_OICR_ENA);
@@ -1228,6 +1263,10 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
set_bit(__ICE_MDD_EVENT_PENDING, pf->state);
}
+ if (oicr & PFINT_OICR_VFLR_M) {
+ ena_mask &= ~PFINT_OICR_VFLR_M;
+ set_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
+ }
if (oicr & PFINT_OICR_GRST_M) {
u32 reset;
@@ -1406,6 +1445,11 @@ skip_req_irq:
PFINT_FW_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_FW_CTL, val);
+ /* This enables Mailbox queue Interrupt causes */
+ val = ((pf->hw_oicr_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
+ PFINT_MBX_CTL_CAUSE_ENA_M);
+ wr32(hw, PFINT_MBX_CTL, val);
+
itr_gran = hw->itr_gran;
wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->hw_oicr_idx),
@@ -1775,6 +1819,15 @@ static void ice_init_pf(struct ice_pf *pf)
{
bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS);
set_bit(ICE_FLAG_MSIX_ENA, pf->flags);
+#ifdef CONFIG_PCI_IOV
+ if (pf->hw.func_caps.common_cap.sr_iov_1_1) {
+ struct ice_hw *hw = &pf->hw;
+
+ set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
+ pf->num_vfs_supported = min_t(int, hw->func_caps.num_allocd_vfs,
+ ICE_MAX_VF_COUNT);
+ }
+#endif /* CONFIG_PCI_IOV */
mutex_init(&pf->sw_mutex);
mutex_init(&pf->avail_q_mutex);
@@ -2098,12 +2151,6 @@ static int ice_probe(struct pci_dev *pdev,
/* since everything is good, start the service timer */
mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
- err = ice_init_link_events(pf->hw.port_info);
- if (err) {
- dev_err(&pdev->dev, "ice_init_link_events failed: %d\n", err);
- goto err_alloc_sw_unroll;
- }
-
return 0;
err_alloc_sw_unroll:
@@ -2138,6 +2185,8 @@ static void ice_remove(struct pci_dev *pdev)
set_bit(__ICE_DOWN, pf->state);
ice_service_task_stop(pf);
+ if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags))
+ ice_free_vfs(pf);
ice_vsi_release_all(pf);
ice_free_irq_msix_misc(pf);
ice_for_each_vsi(pf, i) {
@@ -2160,9 +2209,9 @@ static void ice_remove(struct pci_dev *pdev)
* Class, Class Mask, private data (not used) }
*/
static const struct pci_device_id ice_pci_tbl[] = {
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_BACKPLANE), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_QSFP), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_SFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
/* required last entry */
{ 0, }
};
@@ -2173,6 +2222,7 @@ static struct pci_driver ice_driver = {
.id_table = ice_pci_tbl,
.probe = ice_probe,
.remove = ice_remove,
+ .sriov_configure = ice_sriov_configure,
};
/**
@@ -2908,7 +2958,7 @@ int ice_down(struct ice_vsi *vsi)
}
ice_vsi_dis_irq(vsi);
- tx_err = ice_vsi_stop_tx_rings(vsi);
+ tx_err = ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0);
if (tx_err)
netdev_err(vsi->netdev,
"Failed stop Tx rings, VSI %d error %d\n",
@@ -3102,13 +3152,14 @@ static void ice_dis_vsi(struct ice_vsi *vsi)
set_bit(__ICE_NEEDS_RESTART, vsi->state);
- if (vsi->netdev && netif_running(vsi->netdev) &&
- vsi->type == ICE_VSI_PF) {
- rtnl_lock();
- vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
- rtnl_unlock();
- } else {
- ice_vsi_close(vsi);
+ if (vsi->type == ICE_VSI_PF && vsi->netdev) {
+ if (netif_running(vsi->netdev)) {
+ rtnl_lock();
+ vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+ rtnl_unlock();
+ } else {
+ ice_vsi_close(vsi);
+ }
}
}
@@ -3120,12 +3171,16 @@ static int ice_ena_vsi(struct ice_vsi *vsi)
{
int err = 0;
- if (test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state))
- if (vsi->netdev && netif_running(vsi->netdev)) {
+ if (test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state) &&
+ vsi->netdev) {
+ if (netif_running(vsi->netdev)) {
rtnl_lock();
err = vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
rtnl_unlock();
+ } else {
+ err = ice_vsi_open(vsi);
}
+ }
return err;
}
@@ -3174,6 +3229,10 @@ static int ice_vsi_rebuild_all(struct ice_pf *pf)
if (!pf->vsi[i])
continue;
+ /* VF VSI rebuild isn't supported yet */
+ if (pf->vsi[i]->type == ICE_VSI_VF)
+ continue;
+
err = ice_vsi_rebuild(pf->vsi[i]);
if (err) {
dev_err(&pf->pdev->dev,
@@ -3310,6 +3369,7 @@ static void ice_rebuild(struct ice_pf *pf)
goto err_vsi_rebuild;
}
+ ice_reset_all_vfs(pf, true);
/* if we get here, reset flow is successful */
clear_bit(__ICE_RESET_FAILED, pf->state);
return;
@@ -3818,6 +3878,12 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = ice_change_mtu,
.ndo_get_stats64 = ice_get_stats64,
+ .ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
+ .ndo_set_vf_mac = ice_set_vf_mac,
+ .ndo_get_vf_config = ice_get_vf_cfg,
+ .ndo_set_vf_trust = ice_set_vf_trust,
+ .ndo_set_vf_vlan = ice_set_vf_port_vlan,
+ .ndo_set_vf_link_state = ice_set_vf_link_state,
.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
.ndo_set_features = ice_set_features,
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
new file mode 100644
index 000000000000..027eba4e13f8
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, Intel Corporation. */
+
+#include "ice_common.h"
+#include "ice_adminq_cmd.h"
+#include "ice_sriov.h"
+
+/**
+ * ice_aq_send_msg_to_vf
+ * @hw: pointer to the hardware structure
+ * @vfid: VF ID to send msg
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cd: pointer to command details
+ *
+ * Send a message to the VF driver (0x0802) using the mailbox
+ * queue; the message is sent asynchronously via the
+ * ice_sq_send_cmd() function
+ */
+enum ice_status
+ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
+ u8 *msg, u16 msglen, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_pf_vf_msg *cmd;
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_vf);
+
+ cmd = &desc.params.virt;
+ cmd->id = cpu_to_le32(vfid);
+
+ desc.cookie_high = cpu_to_le32(v_opcode);
+ desc.cookie_low = cpu_to_le32(v_retval);
+
+ if (msglen)
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
+}
+
+/**
+ * ice_conv_link_speed_to_virtchnl
+ * @adv_link_support: determines the format of the returned link speed
+ * @link_speed: variable containing the link_speed to be converted
+ *
+ * Convert link speed supported by HW to link speed supported by virtchnl.
+ * If adv_link_support is true, then return link speed in Mbps. Else return
+ * link speed as a VIRTCHNL_LINK_SPEED_* casted to a u32. Note that the caller
+ * needs to cast back to an enum virtchnl_link_speed in the case where
+ * adv_link_support is false, but when adv_link_support is true the caller can
+ * expect the speed in Mbps.
+ */
+u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
+{
+ u32 speed;
+
+ if (adv_link_support)
+ switch (link_speed) {
+ case ICE_AQ_LINK_SPEED_10MB:
+ speed = ICE_LINK_SPEED_10MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_100MB:
+ speed = ICE_LINK_SPEED_100MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_1000MB:
+ speed = ICE_LINK_SPEED_1000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_2500MB:
+ speed = ICE_LINK_SPEED_2500MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_5GB:
+ speed = ICE_LINK_SPEED_5000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_10GB:
+ speed = ICE_LINK_SPEED_10000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_20GB:
+ speed = ICE_LINK_SPEED_20000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_25GB:
+ speed = ICE_LINK_SPEED_25000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_40GB:
+ speed = ICE_LINK_SPEED_40000MBPS;
+ break;
+ default:
+ speed = ICE_LINK_SPEED_UNKNOWN;
+ break;
+ }
+ else
+ /* Virtchnl speeds are not defined for every speed supported in
+ * the hardware. To maintain compatibility with older AVF
+		 * drivers, the new speed values are resolved to the closest
+		 * known virtchnl speeds when the speed is reported
+ */
+ switch (link_speed) {
+ case ICE_AQ_LINK_SPEED_10MB:
+ case ICE_AQ_LINK_SPEED_100MB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_100MB;
+ break;
+ case ICE_AQ_LINK_SPEED_1000MB:
+ case ICE_AQ_LINK_SPEED_2500MB:
+ case ICE_AQ_LINK_SPEED_5GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_1GB;
+ break;
+ case ICE_AQ_LINK_SPEED_10GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_10GB;
+ break;
+ case ICE_AQ_LINK_SPEED_20GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_20GB;
+ break;
+ case ICE_AQ_LINK_SPEED_25GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_25GB;
+ break;
+ case ICE_AQ_LINK_SPEED_40GB:
+ /* fall through */
+ speed = (u32)VIRTCHNL_LINK_SPEED_40GB;
+ break;
+ default:
+ speed = (u32)VIRTCHNL_LINK_SPEED_UNKNOWN;
+ break;
+ }
+
+ return speed;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h
new file mode 100644
index 000000000000..3d78a0795138
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Intel Corporation. */
+
+#ifndef _ICE_SRIOV_H_
+#define _ICE_SRIOV_H_
+
+#include "ice_common.h"
+
+#ifdef CONFIG_PCI_IOV
+enum ice_status
+ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
+ u8 *msg, u16 msglen, struct ice_sq_cd *cd);
+
+u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed);
+#else /* CONFIG_PCI_IOV */
+static inline enum ice_status
+ice_aq_send_msg_to_vf(struct ice_hw __always_unused *hw,
+ u16 __always_unused vfid, u32 __always_unused v_opcode,
+ u32 __always_unused v_retval, u8 __always_unused *msg,
+ u16 __always_unused msglen,
+ struct ice_sq_cd __always_unused *cd)
+{
+ return 0;
+}
+
+static inline u32
+ice_conv_link_speed_to_virtchnl(bool __always_unused adv_link_support,
+ u16 __always_unused link_speed)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PCI_IOV */
+#endif /* _ICE_SRIOV_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_status.h b/drivers/net/ethernet/intel/ice/ice_status.h
index d2dae913d81e..f49f299ddf2c 100644
--- a/drivers/net/ethernet/intel/ice/ice_status.h
+++ b/drivers/net/ethernet/intel/ice/ice_status.h
@@ -6,6 +6,9 @@
/* Error Codes */
enum ice_status {
+ ICE_SUCCESS = 0,
+
+ /* Generic codes : Range -1..-49 */
ICE_ERR_PARAM = -1,
ICE_ERR_NOT_IMPL = -2,
ICE_ERR_NOT_READY = -3,
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index e949224b5282..33403f39f1b3 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -187,6 +187,7 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
if (!vsi_ctx->alloc_from_pool)
cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
ICE_AQ_VSI_IS_VALID);
+ cmd->vf_id = vsi_ctx->vf_num;
cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
@@ -655,6 +656,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
u8 *eth_hdr;
u32 act = 0;
__be16 *off;
+ u8 q_rgn;
if (opc == ice_aqc_opc_remove_sw_rules) {
s_rule->pdata.lkup_tx_rx.act = 0;
@@ -693,14 +695,19 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
ICE_SINGLE_ACT_Q_INDEX_M;
break;
+ case ICE_DROP_PACKET:
+ act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
+ ICE_SINGLE_ACT_VALID_BIT;
+ break;
case ICE_FWD_TO_QGRP:
+ q_rgn = f_info->qgrp_size > 0 ?
+ (u8)ilog2(f_info->qgrp_size) : 0;
act |= ICE_SINGLE_ACT_TO_Q;
- act |= (f_info->qgrp_size << ICE_SINGLE_ACT_Q_REGION_S) &
+ act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
+ ICE_SINGLE_ACT_Q_INDEX_M;
+ act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
ICE_SINGLE_ACT_Q_REGION_M;
break;
- case ICE_DROP_PACKET:
- act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP;
- break;
default:
return;
}
@@ -1415,8 +1422,8 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
fm_list->vsi_count--;
clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
- if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
- (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
+ if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
+ struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
struct ice_vsi_list_map_info *vsi_list_info =
fm_list->vsi_list_info;
u16 rem_vsi_handle;
@@ -1425,6 +1432,8 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
ICE_MAX_VSI);
if (!ice_is_vsi_valid(hw, rem_vsi_handle))
return ICE_ERR_OUT_OF_RANGE;
+
+ /* Make sure VSI list is empty before removing it below */
status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
vsi_list_id, true,
ice_aqc_opc_update_sw_rules,
@@ -1432,16 +1441,34 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
if (status)
return status;
+ tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
+ tmp_fltr_info.fwd_id.hw_vsi_id =
+ ice_get_hw_vsi_num(hw, rem_vsi_handle);
+ tmp_fltr_info.vsi_handle = rem_vsi_handle;
+ status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
+ if (status) {
+ ice_debug(hw, ICE_DBG_SW,
+ "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
+ tmp_fltr_info.fwd_id.hw_vsi_id, status);
+ return status;
+ }
+
+ fm_list->fltr_info = tmp_fltr_info;
+ }
+
+ if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
+ (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
+ struct ice_vsi_list_map_info *vsi_list_info =
+ fm_list->vsi_list_info;
+
/* Remove the VSI list since it is no longer used */
status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
- if (status)
+ if (status) {
+ ice_debug(hw, ICE_DBG_SW,
+ "Failed to remove VSI list %d, error %d\n",
+ vsi_list_id, status);
return status;
-
- /* Change the list entry action from VSI_LIST to VSI */
- fm_list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
- fm_list->fltr_info.fwd_id.hw_vsi_id =
- ice_get_hw_vsi_num(hw, rem_vsi_handle);
- fm_list->fltr_info.vsi_handle = rem_vsi_handle;
+ }
list_del(&vsi_list_info->list_entry);
devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
@@ -1983,12 +2010,12 @@ out:
enum ice_status
ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
{
- struct ice_fltr_list_entry *list_itr;
+ struct ice_fltr_list_entry *list_itr, *tmp;
if (!m_list)
return ICE_ERR_PARAM;
- list_for_each_entry(list_itr, m_list, list_entry) {
+ list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
if (l_type != ICE_SW_LKUP_MAC)
@@ -2010,12 +2037,12 @@ ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
enum ice_status
ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
{
- struct ice_fltr_list_entry *v_list_itr;
+ struct ice_fltr_list_entry *v_list_itr, *tmp;
if (!v_list || !hw)
return ICE_ERR_PARAM;
- list_for_each_entry(v_list_itr, v_list, list_entry) {
+ list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
if (l_type != ICE_SW_LKUP_VLAN)
@@ -2115,7 +2142,7 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
struct ice_fltr_info *fi;
fi = &fm_entry->fltr_info;
- if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
+ if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
continue;
status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
@@ -2232,7 +2259,8 @@ ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
goto end;
continue;
}
- if (!test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
+ if (!itr->vsi_list_info ||
+ !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
continue;
/* Clearing it so that the logic can add it back */
clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index 7706e9b6003c..b88d96a1ef69 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -19,6 +19,7 @@ struct ice_vsi_ctx {
struct ice_aqc_vsi_props info;
struct ice_sched_vsi_info sched;
u8 alloc_from_pool;
+ u8 vf_num;
};
enum ice_sw_fwd_act_type {
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index a9b92974e041..1d0f58bd389b 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -105,8 +105,9 @@ enum ice_rx_dtype {
#define ICE_TX_ITR ICE_IDX_ITR1
#define ICE_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
#define ICE_ITR_8K 125
-#define ICE_DFLT_TX_ITR ICE_ITR_8K
-#define ICE_DFLT_RX_ITR ICE_ITR_8K
+#define ICE_ITR_20K 50
+#define ICE_DFLT_TX_ITR ICE_ITR_20K
+#define ICE_DFLT_RX_ITR ICE_ITR_20K
/* apply ITR granularity translation to program the register. itr_gran is either
* 2 or 4 usecs so we need to divide by 2 first then shift by that value
*/
@@ -135,13 +136,6 @@ struct ice_ring {
u16 q_index; /* Queue number of ring */
u32 txq_teid; /* Added Tx queue TEID */
- /* high bit set means dynamic, use accessor routines to read/write.
- * hardware supports 4us/2us resolution for the ITR registers.
- * these values always store the USER setting, and must be converted
- * before programming to a register.
- */
- u16 itr_setting;
-
u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */
@@ -178,6 +172,7 @@ struct ice_ring_container {
unsigned int total_bytes; /* total bytes processed this int */
unsigned int total_pkts; /* total packets processed this int */
enum ice_latency_range latency_range;
+ int itr_idx; /* index in the interrupt vector */
u16 itr;
};
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index f5c8de0ed0eb..12f9432abf11 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -84,6 +84,7 @@ enum ice_media_type {
enum ice_vsi_type {
ICE_VSI_PF = 0,
+ ICE_VSI_VF,
};
struct ice_link_status {
@@ -103,6 +104,15 @@ struct ice_link_status {
u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE];
};
+/* Different reset sources for which a disable queue AQ call has to be made in
+ * order to clean the TX scheduler as a part of the reset
+ */
+enum ice_disq_rst_src {
+ ICE_NO_RESET = 0,
+ ICE_VM_RESET,
+ ICE_VF_RESET,
+};
+
/* PHY info such as phy_type, etc... */
struct ice_phy_info {
struct ice_link_status link_info;
@@ -127,6 +137,9 @@ struct ice_hw_common_caps {
/* Max MTU for function or device */
u16 max_mtu;
+ /* Virtualization support */
+ u8 sr_iov_1_1; /* SR-IOV enabled */
+
/* RSS related capabilities */
u16 rss_table_size; /* 512 for PFs and 64 for VFs */
u8 rss_table_entry_width; /* RSS Entry width in bits */
@@ -135,12 +148,15 @@ struct ice_hw_common_caps {
/* Function specific capabilities */
struct ice_hw_func_caps {
struct ice_hw_common_caps common_cap;
+ u32 num_allocd_vfs; /* Number of allocated VFs */
+ u32 vf_base_id; /* Logical ID of the first VF */
u32 guaranteed_num_vsi;
};
/* Device wide capabilities */
struct ice_hw_dev_caps {
struct ice_hw_common_caps common_cap;
+ u32 num_vfs_exposed; /* Total number of VFs exposed */
u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */
};
@@ -321,6 +337,7 @@ struct ice_hw {
/* Control Queue info */
struct ice_ctl_q_info adminq;
+ struct ice_ctl_q_info mailboxq;
u8 api_branch; /* API branch version */
u8 api_maj_ver; /* API major version */
@@ -426,4 +443,7 @@ struct ice_hw_port_stats {
#define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800
#define ICE_SR_WORDS_IN_1KB 512
+/* Hash redirection LUT for VSI - maximum array size */
+#define ICE_VSIQF_HLUT_ARRAY_SIZE ((VSIQF_HLUT_MAX_INDEX + 1) * 4)
+
#endif /* _ICE_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
new file mode 100644
index 000000000000..45f10f8f01dc
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -0,0 +1,2675 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_lib.h"
+
+/**
+ * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
+ * @pf: pointer to the PF structure
+ * @v_opcode: operation code
+ * @v_retval: return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ */
+static void
+ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
+ enum ice_status v_retval, u8 *msg, u16 msglen)
+{
+ struct ice_hw *hw = &pf->hw;
+ struct ice_vf *vf = pf->vf;
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
+		/* Not all VFs are enabled so skip the ones that are not */
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
+ !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ continue;
+
+ /* Ignore return value on purpose - a given VF may fail, but
+ * we need to keep going and send to all of them
+ */
+ ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
+ msglen, NULL);
+ }
+}
+
+/**
+ * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
+ * @vf: pointer to the VF structure
+ * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
+ * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
+ * @link_up: whether or not to set the link up/down
+ */
+static void
+ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
+ int ice_link_speed, bool link_up)
+{
+ if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
+ pfe->event_data.link_event_adv.link_status = link_up;
+ /* Speed in Mbps */
+ pfe->event_data.link_event_adv.link_speed =
+ ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
+ } else {
+ pfe->event_data.link_event.link_status = link_up;
+ /* Legacy method for virtchnl link speeds */
+ pfe->event_data.link_event.link_speed =
+ (enum virtchnl_link_speed)
+ ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
+ }
+}
+
+/**
+ * ice_set_pfe_link_forced - Force the virtchnl_pf_event link speed/status
+ * @vf: pointer to the VF structure
+ * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
+ * @link_up: whether or not to set the link up/down
+ */
+static void
+ice_set_pfe_link_forced(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
+ bool link_up)
+{
+ u16 link_speed;
+
+ if (link_up)
+ link_speed = ICE_AQ_LINK_SPEED_40GB;
+ else
+ link_speed = ICE_AQ_LINK_SPEED_UNKNOWN;
+
+ ice_set_pfe_link(vf, pfe, link_speed, link_up);
+}
+
+/**
+ * ice_vc_notify_vf_link_state - Inform a VF of link status
+ * @vf: pointer to the VF structure
+ *
+ * send a link status message to a single VF
+ */
+static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
+{
+ struct virtchnl_pf_event pfe = { 0 };
+ struct ice_link_status *ls;
+ struct ice_pf *pf = vf->pf;
+ struct ice_hw *hw;
+
+ hw = &pf->hw;
+ ls = &hw->port_info->phy.link_info;
+
+ pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
+ pfe.severity = PF_EVENT_SEVERITY_INFO;
+
+ if (vf->link_forced)
+ ice_set_pfe_link_forced(vf, &pfe, vf->link_up);
+ else
+ ice_set_pfe_link(vf, &pfe, ls->link_speed, ls->link_info &
+ ICE_AQ_LINK_UP);
+
+ ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe,
+ sizeof(pfe), NULL);
+}
+
+/**
+ * ice_get_vf_vector - get VF interrupt vector register offset
+ * @vf_msix: number of MSIx vectors per VF on a PF
+ * @vf_id: VF identifier
+ * @i: index of MSIx vector
+ */
+static u32 ice_get_vf_vector(int vf_msix, int vf_id, int i)
+{
+ return ((i == 0) ? VFINT_DYN_CTLN(vf_id) :
+ VFINT_DYN_CTLN(((vf_msix - 1) * (vf_id)) + (i - 1)));
+}
+
+/**
+ * ice_free_vf_res - Free a VF's resources
+ * @vf: pointer to the VF info
+ */
+static void ice_free_vf_res(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ int i, pf_vf_msix;
+
+ /* First, disable VF's configuration API to prevent OS from
+ * accessing the VF's VSI after it's freed or invalidated.
+ */
+ clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
+
+ /* free vsi & disconnect it from the parent uplink */
+ if (vf->lan_vsi_idx) {
+ ice_vsi_release(pf->vsi[vf->lan_vsi_idx]);
+ vf->lan_vsi_idx = 0;
+ vf->lan_vsi_num = 0;
+ vf->num_mac = 0;
+ }
+
+ pf_vf_msix = pf->num_vf_msix;
+ /* Disable interrupts so that VF starts in a known state */
+ for (i = 0; i < pf_vf_msix; i++) {
+ u32 reg_idx;
+
+ reg_idx = ice_get_vf_vector(pf_vf_msix, vf->vf_id, i);
+ wr32(&pf->hw, reg_idx, VFINT_DYN_CTLN_CLEARPBA_M);
+ ice_flush(&pf->hw);
+ }
+ /* reset some of the state variables keeping track of the resources */
+ clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
+ clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
+}
+
+/***********************enable_vf routines*****************************/
+
+/**
+ * ice_dis_vf_mappings
+ * @vf: pointer to the VF structure
+ */
+static void ice_dis_vf_mappings(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ int first, last, v;
+ struct ice_hw *hw;
+
+ hw = &pf->hw;
+ vsi = pf->vsi[vf->lan_vsi_idx];
+
+ wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
+ wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
+
+ first = vf->first_vector_idx;
+ last = first + pf->num_vf_msix - 1;
+ for (v = first; v <= last; v++) {
+ u32 reg;
+
+ reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
+ GLINT_VECT2FUNC_IS_PF_M) |
+ ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
+ GLINT_VECT2FUNC_PF_NUM_M));
+ wr32(hw, GLINT_VECT2FUNC(v), reg);
+ }
+
+ if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
+ wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
+ else
+ dev_err(&pf->pdev->dev,
+ "Scattered mode for VF Tx queues is not yet implemented\n");
+
+ if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
+ wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
+ else
+ dev_err(&pf->pdev->dev,
+ "Scattered mode for VF Rx queues is not yet implemented\n");
+}
+
+/**
+ * ice_free_vfs - Free all VFs
+ * @pf: pointer to the PF structure
+ */
+void ice_free_vfs(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ int tmp, i;
+
+ if (!pf->vf)
+ return;
+
+ while (test_and_set_bit(__ICE_VF_DIS, pf->state))
+ usleep_range(1000, 2000);
+
+ /* Avoid wait time by stopping all VFs at the same time */
+ for (i = 0; i < pf->num_alloc_vfs; i++) {
+ if (!test_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states))
+ continue;
+
+ /* stop rings without wait time */
+ ice_vsi_stop_tx_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
+ ICE_NO_RESET, i);
+ ice_vsi_stop_rx_rings(pf->vsi[pf->vf[i].lan_vsi_idx]);
+
+ clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states);
+ }
+
+ /* Disable IOV before freeing resources. This lets any VF drivers
+ * running in the host get themselves cleaned up before we yank
+ * the carpet out from underneath their feet.
+ */
+ if (!pci_vfs_assigned(pf->pdev))
+ pci_disable_sriov(pf->pdev);
+ else
+ dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
+
+ tmp = pf->num_alloc_vfs;
+ pf->num_vf_qps = 0;
+ pf->num_alloc_vfs = 0;
+ for (i = 0; i < tmp; i++) {
+ if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
+ /* disable VF qp mappings */
+ ice_dis_vf_mappings(&pf->vf[i]);
+
+ /* Set this state so that assigned VF vectors can be
+ * reclaimed by PF for reuse in ice_vsi_release(). No
+ * need to clear this bit since pf->vf array is being
+ * freed anyway after this for loop
+ */
+ set_bit(ICE_VF_STATE_CFG_INTR, pf->vf[i].vf_states);
+ ice_free_vf_res(&pf->vf[i]);
+ }
+ }
+
+ devm_kfree(&pf->pdev->dev, pf->vf);
+ pf->vf = NULL;
+
+ /* This check is for when the driver is unloaded while VFs are
+ * assigned. Setting the number of VFs to 0 through sysfs is caught
+ * before this function ever gets called.
+ */
+ if (!pci_vfs_assigned(pf->pdev)) {
+ int vf_id;
+
+ /* Acknowledge VFLR for all VFs. Without this, VFs will fail to
+ * work correctly when SR-IOV gets re-enabled.
+ */
+ for (vf_id = 0; vf_id < tmp; vf_id++) {
+ u32 reg_idx, bit_idx;
+
+ reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
+ bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
+ wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
+ }
+ }
+ clear_bit(__ICE_VF_DIS, pf->state);
+ clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
+}
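+
+/* Illustrative example of the VFLR acknowledge indexing above (the
+ * vf_base_id value is assumed): with func_caps.vf_base_id = 64 and
+ * vf_id = 5, the absolute VF id is 69, so reg_idx = 69 / 32 = 2 and
+ * bit_idx = 69 % 32 = 5, i.e. bit 5 of GLGEN_VFLRSTAT(2) is written.
+ */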
+
+/**
+ * ice_trigger_vf_reset - Reset a VF on HW
+ * @vf: pointer to the VF structure
+ * @is_vflr: true if VFLR was issued, false if not
+ *
+ * Trigger hardware to start a reset for a particular VF. Expects the caller
+ * to wait the proper amount of time to allow hardware to reset the VF before
+ * it cleans up and restores VF functionality.
+ */
+static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr)
+{
+ struct ice_pf *pf = vf->pf;
+ u32 reg, reg_idx, bit_idx;
+ struct ice_hw *hw;
+ int vf_abs_id, i;
+
+ hw = &pf->hw;
+ vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+ /* Inform VF that it is no longer active, as a warning */
+ clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
+
+ /* Disable VF's configuration API during reset. The flag is re-enabled
+ * in ice_alloc_vf_res(), when it's safe again to access VF's VSI.
+ * It's normally disabled in ice_free_vf_res(), but it's safer
+ * to do it earlier to give any VF configuration functions that may
+ * still be running time to finish.
+ */
+ clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
+
+ /* In the case of a VFLR, the HW has already reset the VF and we
+ * just need to clean up, so don't hit the VFRTRIG register.
+ */
+ if (!is_vflr) {
+ /* reset VF using VPGEN_VFRTRIG reg */
+ reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
+ reg |= VPGEN_VFRTRIG_VFSWR_M;
+ wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
+ }
+ /* clear the VFLR bit in GLGEN_VFLRSTAT */
+ reg_idx = (vf_abs_id) / 32;
+ bit_idx = (vf_abs_id) % 32;
+ wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
+ ice_flush(hw);
+
+ wr32(hw, PF_PCI_CIAA,
+ VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
+ for (i = 0; i < 100; i++) {
+ reg = rd32(hw, PF_PCI_CIAD);
+ if ((reg & VF_TRANS_PENDING_M) != 0)
+ dev_err(&pf->pdev->dev,
+ "VF %d PCI transactions stuck\n", vf->vf_id);
+ udelay(1);
+ }
+}
+
+/**
+ * ice_vsi_set_pvid - Set port VLAN id for the VSI
+ * @vsi: the VSI being changed
+ * @vid: the VLAN id to set as a PVID
+ */
+static int ice_vsi_set_pvid(struct ice_vsi *vsi, u16 vid)
+{
+ struct device *dev = &vsi->back->pdev->dev;
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx ctxt = { 0 };
+ enum ice_status status;
+
+ ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_TAGGED |
+ ICE_AQ_VSI_PVLAN_INSERT_PVID |
+ ICE_AQ_VSI_VLAN_EMOD_STR;
+ ctxt.info.pvid = cpu_to_le16(vid);
+ ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
+
+ status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
+ if (status) {
+ dev_info(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
+ status, hw->adminq.sq_last_status);
+ return -EIO;
+ }
+
+ vsi->info.pvid = ctxt.info.pvid;
+ vsi->info.vlan_flags = ctxt.info.vlan_flags;
+ return 0;
+}
+
+/**
+ * ice_vsi_kill_pvid - Remove port VLAN id from the VSI
+ * @vsi: the VSI being changed
+ */
+static int ice_vsi_kill_pvid(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+
+ if (ice_vsi_manage_vlan_stripping(vsi, false)) {
+ dev_err(&pf->pdev->dev, "Error removing Port VLAN on VSI %i\n",
+ vsi->vsi_num);
+ return -ENODEV;
+ }
+
+ vsi->info.pvid = 0;
+ return 0;
+}
+
+/**
+ * ice_vf_vsi_setup - Set up a VF VSI
+ * @pf: board private structure
+ * @pi: pointer to the port_info instance
+ * @vf_id: defines VF id to which this VSI connects.
+ *
+ * Returns pointer to the successfully allocated VSI struct on success,
+ * otherwise returns NULL on failure.
+ */
+static struct ice_vsi *
+ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id)
+{
+ return ice_vsi_setup(pf, pi, ICE_VSI_VF, vf_id);
+}
+
+/**
+ * ice_alloc_vsi_res - Setup VF VSI and its resources
+ * @vf: pointer to the VF structure
+ *
+ * Returns 0 on success, negative value on failure
+ */
+static int ice_alloc_vsi_res(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ LIST_HEAD(tmp_add_list);
+ u8 broadcast[ETH_ALEN];
+ struct ice_vsi *vsi;
+ int status = 0;
+
+ vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id);
+
+ if (!vsi) {
+ dev_err(&pf->pdev->dev, "Failed to create VF VSI\n");
+ return -ENOMEM;
+ }
+
+ vf->lan_vsi_idx = vsi->idx;
+ vf->lan_vsi_num = vsi->vsi_num;
+
+ /* first vector index is the VF's OICR index */
+ vf->first_vector_idx = vsi->hw_base_vector;
+ /* Since hw_base_vector holds the vector where data queue interrupts
+ * start, increment by 1 because the VF's allocated vectors include the
+ * OICR interrupt as well.
+ */
+ vsi->hw_base_vector += 1;
+
+ /* Check if a port VLAN existed before, and restore it accordingly */
+ if (vf->port_vlan_id)
+ ice_vsi_set_pvid(vsi, vf->port_vlan_id);
+
+ eth_broadcast_addr(broadcast);
+
+ status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
+ if (status)
+ goto ice_alloc_vsi_res_exit;
+
+ if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) {
+ status = ice_add_mac_to_list(vsi, &tmp_add_list,
+ vf->dflt_lan_addr.addr);
+ if (status)
+ goto ice_alloc_vsi_res_exit;
+ }
+
+ status = ice_add_mac(&pf->hw, &tmp_add_list);
+ if (status)
+ dev_err(&pf->pdev->dev, "could not add mac filters\n");
+
+ /* Clear this bit after VF initialization since we shouldn't reclaim
+ * and reassign interrupts for synchronous or asynchronous VFR events.
+ * We don't want to reconfigure interrupts since AVF driver doesn't
+ * expect vector assignment to be changed unless there is a request for
+ * more vectors.
+ */
+ clear_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states);
+ice_alloc_vsi_res_exit:
+ ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+ return status;
+}
+
+/**
+ * ice_alloc_vf_res - Allocate VF resources
+ * @vf: pointer to the VF structure
+ */
+static int ice_alloc_vf_res(struct ice_vf *vf)
+{
+ int status;
+
+ /* setup VF VSI and necessary resources */
+ status = ice_alloc_vsi_res(vf);
+ if (status)
+ goto ice_alloc_vf_res_exit;
+
+ if (vf->trusted)
+ set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+ else
+ clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+
+ /* VF is now completely initialized */
+ set_bit(ICE_VF_STATE_INIT, vf->vf_states);
+
+ return status;
+
+ice_alloc_vf_res_exit:
+ ice_free_vf_res(vf);
+ return status;
+}
+
+/**
+ * ice_ena_vf_mappings
+ * @vf: pointer to the VF structure
+ *
+ * Enable VF vectors and queues allocation by writing the details into
+ * respective registers.
+ */
+static void ice_ena_vf_mappings(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ int first, last, v;
+ struct ice_hw *hw;
+ int abs_vf_id;
+ u32 reg;
+
+ hw = &pf->hw;
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ first = vf->first_vector_idx;
+ last = (first + pf->num_vf_msix) - 1;
+ abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+ /* VF Vector allocation */
+ reg = (((first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) |
+ ((last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) |
+ VPINT_ALLOC_VALID_M);
+ wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
+
+ reg = (((first << VPINT_ALLOC_PCI_FIRST_S) & VPINT_ALLOC_PCI_FIRST_M) |
+ ((last << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) |
+ VPINT_ALLOC_PCI_VALID_M);
+ wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
+ /* map the interrupts to its functions */
+ for (v = first; v <= last; v++) {
+ reg = (((abs_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
+ GLINT_VECT2FUNC_VF_NUM_M) |
+ ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
+ GLINT_VECT2FUNC_PF_NUM_M));
+ wr32(hw, GLINT_VECT2FUNC(v), reg);
+ }
+
+ /* set regardless of mapping mode */
+ wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
+
+ /* VF Tx queues allocation */
+ if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
+ /* set the VF PF Tx queue range
+ * VFNUMQ value should be set to (number of queues - 1). A value
+ * of 0 means 1 queue and a value of 255 means 256 queues
+ */
+ reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
+ VPLAN_TX_QBASE_VFFIRSTQ_M) |
+ (((vsi->alloc_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
+ VPLAN_TX_QBASE_VFNUMQ_M));
+ wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
+ } else {
+ dev_err(&pf->pdev->dev,
+ "Scattered mode for VF Tx queues is not yet implemented\n");
+ }
+
+ /* set regardless of mapping mode */
+ wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);
+
+ /* VF Rx queues allocation */
+ if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
+ /* set the VF PF Rx queue range
+ * VFNUMQ value should be set to (number of queues - 1). A value
+ * of 0 means 1 queue and a value of 255 means 256 queues
+ */
+ reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
+ VPLAN_RX_QBASE_VFFIRSTQ_M) |
+ (((vsi->alloc_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
+ VPLAN_RX_QBASE_VFNUMQ_M));
+ wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
+ } else {
+ dev_err(&pf->pdev->dev,
+ "Scattered mode for VF Rx queues is not yet implemented\n");
+ }
+}
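+
+/* Illustrative example of the queue-range encoding above (queue numbers
+ * assumed): for a VF VSI with txq_map[0] = 16 and alloc_txq = 4,
+ * VPLAN_TX_QBASE is written with VFFIRSTQ = 16 and VFNUMQ = 3, since a
+ * VFNUMQ value of N describes N + 1 queues.
+ */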
+
+/**
+ * ice_determine_res
+ * @pf: pointer to the PF structure
+ * @avail_res: available resources in the PF structure
+ * @max_res: maximum resources that can be given per VF
+ * @min_res: minimum resources that can be given per VF
+ *
+ * Returns a non-zero value if resources (queues/vectors) are available,
+ * or zero if the PF cannot accommodate all num_alloc_vfs.
+ */
+static int
+ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
+{
+ bool checked_min_res = false;
+ int res;
+
+ /* start by checking if PF can assign max number of resources for
+ * all num_alloc_vfs.
+ * If yes, return that number per VF.
+ * If no, halve it (rounding up) and check again.
+ * Repeat until even the minimum resources are not available; in that
+ * case return 0.
+ */
+ res = max_res;
+ while ((res >= min_res) && !checked_min_res) {
+ int num_all_res;
+
+ num_all_res = pf->num_alloc_vfs * res;
+ if (num_all_res <= avail_res)
+ return res;
+
+ if (res == min_res)
+ checked_min_res = true;
+
+ res = DIV_ROUND_UP(res, 2);
+ }
+ return 0;
+}
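+
+/* Illustrative walk-through of the halving loop above (numbers assumed):
+ * with 8 VFs, avail_res = 200, max_res = 65 and min_res = 1, the loop
+ * checks 8 * 65 = 520, then 8 * 33 = 264, then 8 * 17 = 136 <= 200 and
+ * returns 17 resources per VF.
+ */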
+
+/**
+ * ice_check_avail_res - check if vectors and queues are available
+ * @pf: pointer to the PF structure
+ *
+ * This function is where we calculate actual number of resources for VF VSIs,
+ * we don't reserve ahead of time during probe. Returns success if vectors and
+ * queues resources are available, otherwise returns error code
+ */
+static int ice_check_avail_res(struct ice_pf *pf)
+{
+ u16 num_msix, num_txq, num_rxq;
+
+ if (!pf->num_alloc_vfs)
+ return -EINVAL;
+
+ /* Grab from HW interrupts common pool
+ * Note: By the time the user decides it needs more vectors in a VF
+ * it's already too late, since one must decide this prior to creating
+ * the VF interface. So the best we can do is take a guess as to what
+ * the user might want.
+ *
+ * We have two policies for vector allocation:
+ * 1. If num_alloc_vfs is from 1 to 16, we consider this a small number
+ * of VFs used for NFV appliances. Since this is a special case, we try
+ * to assign the maximum number of vectors per VF (65) as far as
+ * possible, based on the determine_resources algorithm.
+ * 2. If num_alloc_vfs is from 17 to 256, it's a large number of regular
+ * VFs which are not used for any special purpose. Hence try to grab the
+ * default number of interrupt vectors (5, as supported by the AVF
+ * driver).
+ */
+ if (pf->num_alloc_vfs <= 16) {
+ num_msix = ice_determine_res(pf, pf->num_avail_hw_msix,
+ ICE_MAX_INTR_PER_VF,
+ ICE_MIN_INTR_PER_VF);
+ } else if (pf->num_alloc_vfs <= ICE_MAX_VF_COUNT) {
+ num_msix = ice_determine_res(pf, pf->num_avail_hw_msix,
+ ICE_DFLT_INTR_PER_VF,
+ ICE_MIN_INTR_PER_VF);
+ } else {
+ dev_err(&pf->pdev->dev,
+ "Number of VFs %d exceeds max VF count %d\n",
+ pf->num_alloc_vfs, ICE_MAX_VF_COUNT);
+ return -EIO;
+ }
+
+ if (!num_msix)
+ return -EIO;
+
+ /* Grab from the common pool
+ * start by requesting the default number of queues (4, as supported by
+ * the AVF driver). Note that the main difference between queues and
+ * vectors is that the latter can only be reserved at init time, while
+ * queues can be requested by the VF at runtime through virtchnl; that
+ * is why we start by reserving only a few queues.
+ */
+ num_txq = ice_determine_res(pf, pf->q_left_tx, ICE_DFLT_QS_PER_VF,
+ ICE_MIN_QS_PER_VF);
+
+ num_rxq = ice_determine_res(pf, pf->q_left_rx, ICE_DFLT_QS_PER_VF,
+ ICE_MIN_QS_PER_VF);
+
+ if (!num_txq || !num_rxq)
+ return -EIO;
+
+ /* Since the AVF driver works only with queue pairs, it expects to have
+ * an equal number of Rx and Tx queues; take the minimum of the
+ * available Tx or Rx queues
+ */
+ pf->num_vf_qps = min_t(int, num_txq, num_rxq);
+ pf->num_vf_msix = num_msix;
+
+ return 0;
+}
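+
+/* Illustrative example of the vector policy above (pool sizes assumed):
+ * with 8 VFs the driver aims for up to ICE_MAX_INTR_PER_VF vectors per
+ * VF, while with 64 VFs it only aims for ICE_DFLT_INTR_PER_VF, letting
+ * ice_determine_res() scale either target down to what num_avail_hw_msix
+ * and the q_left_tx/q_left_rx pools can actually accommodate.
+ */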
+
+/**
+ * ice_cleanup_and_realloc_vf - Clean up VF and reallocate resources after reset
+ * @vf: pointer to the VF structure
+ *
+ * Cleanup a VF after the hardware reset is finished. Expects the caller to
+ * have verified whether the reset is finished properly, and ensure the
+ * minimum amount of wait time has passed. Reallocate VF resources back to make
+ * VF state active
+ */
+static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_hw *hw;
+ u32 reg;
+
+ hw = &pf->hw;
+
+ /* PF software completes the flow by notifying VF that reset flow is
+ * completed. This is done by enabling hardware by clearing the reset
+ * bit in the VPGEN_VFRTRIG reg and setting VFR_STATE in the VFGEN_RSTAT
+ * register to VFR completed (done at the end of this function)
+ * By doing this we allow HW to access VF memory at any point. If we
+ * did it any sooner, HW could access memory while it was being freed
+ * in ice_free_vf_res(), causing an IOMMU fault.
+ *
+ * On the other hand, this needs to be done ASAP, because the VF driver
+ * is waiting for this to happen and may report a timeout. It's
+ * harmless, but it gets logged into Guest OS kernel log, so best avoid
+ * it.
+ */
+ reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
+ reg &= ~VPGEN_VFRTRIG_VFSWR_M;
+ wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
+
+ /* reallocate VF resources to finish resetting the VSI state */
+ if (!ice_alloc_vf_res(vf)) {
+ ice_ena_vf_mappings(vf);
+ set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
+ clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
+ vf->num_vlan = 0;
+ }
+
+ /* Tell the VF driver the reset is done. This needs to be done only
+ * after VF has been fully initialized, because the VF driver may
+ * request resources immediately after setting this flag.
+ */
+ wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
+}
+
+/**
+ * ice_reset_all_vfs - reset all allocated VFs in one go
+ * @pf: pointer to the PF structure
+ * @is_vflr: true if VFLR was issued, false if not
+ *
+ * First, tell the hardware to reset each VF, then do all the waiting in one
+ * chunk, and finally finish restoring each VF after the wait. This is useful
+ * during PF routines which need to reset all VFs, as otherwise it must perform
+ * these resets in a serialized fashion.
+ *
+ * Returns true if any VFs were reset, and false otherwise.
+ */
+bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
+{
+ struct ice_hw *hw = &pf->hw;
+ int v, i;
+
+ /* If we don't have any VFs, then there is nothing to reset */
+ if (!pf->num_alloc_vfs)
+ return false;
+
+ /* If VFs have been disabled, there is no need to reset */
+ if (test_and_set_bit(__ICE_VF_DIS, pf->state))
+ return false;
+
+ /* Begin reset on all VFs at once */
+ for (v = 0; v < pf->num_alloc_vfs; v++)
+ ice_trigger_vf_reset(&pf->vf[v], is_vflr);
+
+ /* Issue the Disable LAN Tx queue AQ call with the VFR bit set and 0
+ * queues to inform firmware about the VF reset.
+ */
+ for (v = 0; v < pf->num_alloc_vfs; v++)
+ ice_dis_vsi_txq(pf->vsi[0]->port_info, 0, NULL, NULL,
+ ICE_VF_RESET, v, NULL);
+
+ /* HW requires some time to make sure it can flush the FIFO for a VF
+ * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
+ * sequence to make sure that it has completed. We'll keep track of
+ * the VFs using a simple iterator that increments once that VF has
+ * finished resetting.
+ */
+ for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
+ usleep_range(10000, 20000);
+
+ /* Check each VF in sequence */
+ while (v < pf->num_alloc_vfs) {
+ struct ice_vf *vf = &pf->vf[v];
+ u32 reg;
+
+ reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
+ if (!(reg & VPGEN_VFRSTAT_VFRD_M))
+ break;
+
+ /* If the current VF has finished resetting, move on
+ * to the next VF in sequence.
+ */
+ v++;
+ }
+ }
+
+ /* Display a warning if at least one VF didn't manage to reset in
+ * time, but continue on with the operation.
+ */
+ if (v < pf->num_alloc_vfs)
+ dev_warn(&pf->pdev->dev, "VF reset check timeout\n");
+ usleep_range(10000, 20000);
+
+ /* free VF resources to begin resetting the VSI state */
+ for (v = 0; v < pf->num_alloc_vfs; v++)
+ ice_free_vf_res(&pf->vf[v]);
+
+ if (ice_check_avail_res(pf)) {
+ dev_err(&pf->pdev->dev,
+ "Cannot allocate VF resources, try with fewer number of VFs\n");
+ return false;
+ }
+
+ /* Finish the reset on each VF */
+ for (v = 0; v < pf->num_alloc_vfs; v++)
+ ice_cleanup_and_realloc_vf(&pf->vf[v]);
+
+ ice_flush(hw);
+ clear_bit(__ICE_VF_DIS, pf->state);
+
+ return true;
+}
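+
+/* Note on the polling loop above: the outer loop bounds the total wait
+ * time (at most 10 sleeps of 10-20 ms), while the inner loop advances v
+ * only when VPGEN_VFRSTAT reports a VF done. E.g. if all 4 of 4 VFs
+ * (counts assumed) complete during the second sleep, v reaches 4 and the
+ * outer loop exits early.
+ */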
+
+/**
+ * ice_reset_vf - Reset a particular VF
+ * @vf: pointer to the VF structure
+ * @is_vflr: true if VFLR was issued, false if not
+ *
+ * Returns true if the VF is reset, false otherwise.
+ */
+static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_hw *hw = &pf->hw;
+ bool rsd = false;
+ u32 reg;
+ int i;
+
+ /* If the VFs have been disabled, this means something else is
+ * resetting the VF, so we shouldn't continue.
+ */
+ if (test_and_set_bit(__ICE_VF_DIS, pf->state))
+ return false;
+
+ ice_trigger_vf_reset(vf, is_vflr);
+
+ if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
+ ice_vsi_stop_tx_rings(pf->vsi[vf->lan_vsi_idx], ICE_VF_RESET,
+ vf->vf_id);
+ ice_vsi_stop_rx_rings(pf->vsi[vf->lan_vsi_idx]);
+ clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
+ } else {
+ /* Issue the Disable LAN Tx queue AQ call even when queues are not
+ * enabled. This is needed for successful completion of a VFR
+ */
+ ice_dis_vsi_txq(pf->vsi[vf->lan_vsi_idx]->port_info, 0,
+ NULL, NULL, ICE_VF_RESET, vf->vf_id, NULL);
+ }
+
+ /* poll VPGEN_VFRSTAT reg to make sure
+ * that reset is complete
+ */
+ for (i = 0; i < 10; i++) {
+ /* VF reset requires driver to first reset the VF and then
+ * poll the status register to make sure that the reset
+ * completed successfully.
+ */
+ usleep_range(10000, 20000);
+ reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
+ if (reg & VPGEN_VFRSTAT_VFRD_M) {
+ rsd = true;
+ break;
+ }
+ }
+
+ /* Display a warning if the VF didn't manage to reset in time, but
+ * continue on with the operation.
+ */
+ if (!rsd)
+ dev_warn(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
+ vf->vf_id);
+
+ usleep_range(10000, 20000);
+
+ /* free VF resources to begin resetting the VSI state */
+ ice_free_vf_res(vf);
+
+ ice_cleanup_and_realloc_vf(vf);
+
+ ice_flush(hw);
+ clear_bit(__ICE_VF_DIS, pf->state);
+
+ return true;
+}
+
+/**
+ * ice_vc_notify_link_state - Inform all VFs on a PF of link status
+ * @pf: pointer to the PF structure
+ */
+void ice_vc_notify_link_state(struct ice_pf *pf)
+{
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vfs; i++)
+ ice_vc_notify_vf_link_state(&pf->vf[i]);
+}
+
+/**
+ * ice_vc_notify_reset - Send pending reset message to all VFs
+ * @pf: pointer to the PF structure
+ *
+ * indicate a pending reset to all VFs on a given PF
+ */
+void ice_vc_notify_reset(struct ice_pf *pf)
+{
+ struct virtchnl_pf_event pfe;
+
+ if (!pf->num_alloc_vfs)
+ return;
+
+ pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
+ pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
+ ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, ICE_SUCCESS,
+ (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
+}
+
+/**
+ * ice_vc_notify_vf_reset - Notify VF of a reset event
+ * @vf: pointer to the VF structure
+ */
+static void ice_vc_notify_vf_reset(struct ice_vf *vf)
+{
+ struct virtchnl_pf_event pfe;
+
+ /* validate the request */
+ if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
+ return;
+
+ /* verify if the VF is in either init or active before proceeding */
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
+ !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ return;
+
+ pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
+ pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
+ ice_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0,
+ (u8 *)&pfe, sizeof(pfe), NULL);
+}
+
+/**
+ * ice_alloc_vfs - Allocate and set up VFs resources
+ * @pf: pointer to the PF structure
+ * @num_alloc_vfs: number of VFs to allocate
+ */
+static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
+{
+ struct ice_hw *hw = &pf->hw;
+ struct ice_vf *vfs;
+ int i, ret;
+
+ /* Disable global interrupt 0 so we don't try to handle the VFLR. */
+ wr32(hw, GLINT_DYN_CTL(pf->hw_oicr_idx),
+ ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
+
+ ice_flush(hw);
+
+ ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
+ if (ret) {
+ pf->num_alloc_vfs = 0;
+ goto err_unroll_intr;
+ }
+ /* allocate memory */
+ vfs = devm_kcalloc(&pf->pdev->dev, num_alloc_vfs, sizeof(*vfs),
+ GFP_KERNEL);
+ if (!vfs) {
+ ret = -ENOMEM;
+ goto err_unroll_sriov;
+ }
+ pf->vf = vfs;
+
+ /* apply default profile */
+ for (i = 0; i < num_alloc_vfs; i++) {
+ vfs[i].pf = pf;
+ vfs[i].vf_sw_id = pf->first_sw;
+ vfs[i].vf_id = i;
+
+ /* assign default capabilities */
+ set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
+ vfs[i].spoofchk = true;
+
+ /* Set this state so that PF driver does VF vector assignment */
+ set_bit(ICE_VF_STATE_CFG_INTR, vfs[i].vf_states);
+ }
+ pf->num_alloc_vfs = num_alloc_vfs;
+
+ /* VF resources get allocated during reset */
+ if (!ice_reset_all_vfs(pf, false)) {
+ ret = -EIO;
+ goto err_unroll_sriov;
+ }
+
+ goto err_unroll_intr;
+
+err_unroll_sriov:
+ pci_disable_sriov(pf->pdev);
+err_unroll_intr:
+ /* rearm interrupts here */
+ ice_irq_dynamic_ena(hw, NULL, NULL);
+ return ret;
+}
+
+/**
+ * ice_pf_state_is_nominal - checks the pf for nominal state
+ * @pf: pointer to pf to check
+ *
+ * Check the PF's state for a collection of bits that would indicate
+ * the PF is in a state that would inhibit normal operation for
+ * driver functionality.
+ *
+ * Returns true if PF is in a nominal state.
+ * Returns false otherwise
+ */
+static bool ice_pf_state_is_nominal(struct ice_pf *pf)
+{
+ DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };
+
+ if (!pf)
+ return false;
+
+ bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);
+ if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS))
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_pci_sriov_ena - Enable or change number of VFs
+ * @pf: pointer to the PF structure
+ * @num_vfs: number of VFs to allocate
+ */
+static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
+{
+ int pre_existing_vfs = pci_num_vf(pf->pdev);
+ struct device *dev = &pf->pdev->dev;
+ int err;
+
+ if (!ice_pf_state_is_nominal(pf)) {
+ dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
+ return -EBUSY;
+ }
+
+ if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
+ dev_err(dev, "This device is not capable of SR-IOV\n");
+ return -ENODEV;
+ }
+
+ if (pre_existing_vfs && pre_existing_vfs != num_vfs)
+ ice_free_vfs(pf);
+ else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
+ return num_vfs;
+
+ if (num_vfs > pf->num_vfs_supported) {
+ dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
+ num_vfs, pf->num_vfs_supported);
+ return -ENOTSUPP;
+ }
+
+ dev_info(dev, "Allocating %d VFs\n", num_vfs);
+ err = ice_alloc_vfs(pf, num_vfs);
+ if (err) {
+ dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
+ return err;
+ }
+
+ set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
+ return num_vfs;
+}
+
+/**
+ * ice_sriov_configure - Enable or change number of VFs via sysfs
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of VFs to allocate
+ *
+ * This function is called when the user updates the number of VFs in sysfs.
+ */
+int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct ice_pf *pf = pci_get_drvdata(pdev);
+
+ if (num_vfs)
+ return ice_pci_sriov_ena(pf, num_vfs);
+
+ if (!pci_vfs_assigned(pdev)) {
+ ice_free_vfs(pf);
+ } else {
+ dev_err(&pf->pdev->dev,
+ "can't free VFs because some are assigned to VMs.\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_process_vflr_event - Free VF resources via IRQ calls
+ * @pf: pointer to the PF structure
+ *
+ * called from the VFLR IRQ handler to
+ * free up VF resources and state variables
+ */
+void ice_process_vflr_event(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ int vf_id;
+ u32 reg;
+
+ if (!test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
+ !pf->num_alloc_vfs)
+ return;
+
+ /* Re-enable the VFLR interrupt cause here, before looking for which
+ * VF got reset. Otherwise, if another VF gets a reset while the
+ * first one is being processed, that interrupt will be lost, and
+ * that VF will be stuck in reset forever.
+ */
+ reg = rd32(hw, PFINT_OICR_ENA);
+ reg |= PFINT_OICR_VFLR_M;
+ wr32(hw, PFINT_OICR_ENA, reg);
+ ice_flush(hw);
+
+ clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
+ for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
+ struct ice_vf *vf = &pf->vf[vf_id];
+ u32 reg_idx, bit_idx;
+
+ reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
+ bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
+ /* read GLGEN_VFLRSTAT register to find out the flr VFs */
+ reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
+ if (reg & BIT(bit_idx))
+ /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
+ ice_reset_vf(vf, true);
+ }
+}
+
+/**
+ * ice_vc_dis_vf - Disable a given VF via SW reset
+ * @vf: pointer to the VF info
+ *
+ * Disable the VF through a SW reset
+ */
+static void ice_vc_dis_vf(struct ice_vf *vf)
+{
+ ice_vc_notify_vf_reset(vf);
+ ice_reset_vf(vf, false);
+}
+
+/**
+ * ice_vc_send_msg_to_vf - Send message to VF
+ * @vf: pointer to the VF info
+ * @v_opcode: virtual channel opcode
+ * @v_retval: virtual channel return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * send msg to VF
+ */
+static int ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
+ enum ice_status v_retval, u8 *msg, u16 msglen)
+{
+ enum ice_status aq_ret;
+ struct ice_pf *pf;
+
+ /* validate the request */
+ if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
+ return -EINVAL;
+
+ pf = vf->pf;
+
+ /* single place to detect unsuccessful return values */
+ if (v_retval) {
+ vf->num_inval_msgs++;
+ dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
+ vf->vf_id, v_opcode, v_retval);
+ if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
+ dev_err(&pf->pdev->dev,
+ "Number of invalid messages exceeded for VF %d\n",
+ vf->vf_id);
+ dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
+ set_bit(ICE_VF_STATE_DIS, vf->vf_states);
+ return -EIO;
+ }
+ } else {
+ vf->num_valid_msgs++;
+ /* reset the invalid counter, if a valid message is received. */
+ vf->num_inval_msgs = 0;
+ }
+
+ aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
+ msg, msglen, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "Unable to send the message to VF %d aq_err %d\n",
+ vf->vf_id, pf->hw.mailboxq.sq_last_status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vc_get_ver_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to request the API version used by the PF
+ */
+static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_version_info info = {
+ VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
+ };
+
+ vf->vf_ver = *(struct virtchnl_version_info *)msg;
+ /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
+ if (VF_IS_V10(&vf->vf_ver))
+ info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
+
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION, ICE_SUCCESS,
+ (u8 *)&info,
+ sizeof(struct virtchnl_version_info));
+}
+
+/**
+ * ice_vc_get_vf_res_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to request its resources
+ */
+static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_vf_resource *vfres = NULL;
+ enum ice_status aq_ret = 0;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ int len = 0;
+ int ret;
+
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto err;
+ }
+
+ len = sizeof(struct virtchnl_vf_resource);
+
+ vfres = devm_kzalloc(&pf->pdev->dev, len, GFP_KERNEL);
+ if (!vfres) {
+ aq_ret = ICE_ERR_NO_MEMORY;
+ len = 0;
+ goto err;
+ }
+ if (VF_IS_V11(&vf->vf_ver))
+ vf->driver_caps = *(u32 *)msg;
+ else
+ vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
+ VIRTCHNL_VF_OFFLOAD_RSS_REG |
+ VIRTCHNL_VF_OFFLOAD_VLAN;
+
+ vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi->info.pvid)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
+ } else {
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
+ else
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
+ }
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
+
+ if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
+
+ vfres->num_vsis = 1;
+ /* Tx and Rx queue are equal for VF */
+ vfres->num_queue_pairs = vsi->num_txq;
+ vfres->max_vectors = pf->num_vf_msix;
+ vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
+ vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
+
+ vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
+ vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
+ vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
+ ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
+ vf->dflt_lan_addr.addr);
+
+ set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
+
+err:
+ /* send the response back to the VF */
+ ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, aq_ret,
+ (u8 *)vfres, len);
+
+ devm_kfree(&pf->pdev->dev, vfres);
+ return ret;
+}
+
+/**
+ * ice_vc_reset_vf_msg
+ * @vf: pointer to the VF info
+ *
+ * called from the VF to reset itself,
+ * unlike other virtchnl messages, PF driver
+ * doesn't send the response back to the VF
+ */
+static void ice_vc_reset_vf_msg(struct ice_vf *vf)
+{
+ if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ ice_reset_vf(vf, false);
+}
+
+/**
+ * ice_find_vsi_from_id
+ * @pf: the pf structure to search for the VSI
+ * @id: id of the VSI it is searching for
+ *
+ * searches for the VSI with the given id
+ */
+static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
+{
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vsi; i++)
+ if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
+ return pf->vsi[i];
+
+ return NULL;
+}
+
+/**
+ * ice_vc_isvalid_vsi_id
+ * @vf: pointer to the VF info
+ * @vsi_id: VF relative VSI id
+ *
+ * check for the valid VSI id
+ */
+static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+
+ vsi = ice_find_vsi_from_id(pf, vsi_id);
+
+ return (vsi && (vsi->vf_id == vf->vf_id));
+}
+
+/**
+ * ice_vc_isvalid_q_id
+ * @vf: pointer to the VF info
+ * @vsi_id: VSI id
+ * @qid: VSI relative queue id
+ *
+ * check for the valid queue id
+ */
+static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
+{
+ struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
+ /* allocated Tx and Rx queues should always be equal for a VF VSI */
+ return (vsi && (qid < vsi->alloc_txq));
+}
+
+/**
+ * ice_vc_config_rss_key
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS key
+ */
+static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_rss_key *vrk =
+ (struct virtchnl_rss_key *)msg;
+ struct ice_vsi *vsi = NULL;
+ enum ice_status aq_ret;
+ int ret;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_find_vsi_from_id(vf->pf, vrk->vsi_id);
+ if (!vsi) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ ret = ice_set_rss(vsi, vrk->key, NULL, 0);
+ aq_ret = ret ? ICE_ERR_PARAM : ICE_SUCCESS;
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, aq_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_config_rss_lut
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS LUT
+ */
+static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
+ struct ice_vsi *vsi = NULL;
+ enum ice_status aq_ret;
+ int ret;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_find_vsi_from_id(vf->pf, vrl->vsi_id);
+ if (!vsi) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ ret = ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE);
+ aq_ret = ret ? ICE_ERR_PARAM : ICE_SUCCESS;
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, aq_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_get_stats_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to get VSI stats
+ */
+static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_queue_select *vqs =
+ (struct virtchnl_queue_select *)msg;
+ enum ice_status aq_ret = 0;
+ struct ice_eth_stats stats;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id);
+ if (!vsi) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ memset(&stats, 0, sizeof(struct ice_eth_stats));
+ ice_update_eth_stats(vsi);
+
+ stats = vsi->eth_stats;
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
+ (u8 *)&stats, sizeof(stats));
+}
+
+/**
+ * ice_vc_ena_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to enable all or specific queue(s)
+ */
+static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_queue_select *vqs =
+ (struct virtchnl_queue_select *)msg;
+ enum ice_status aq_ret = 0;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!vqs->rx_queues && !vqs->tx_queues) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id);
+ if (!vsi) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Enable only Rx rings, Tx rings were enabled by the FW when the
+ * Tx queue group list was configured and the context bits were
+ * programmed using ice_vsi_cfg_txqs
+ */
+ if (ice_vsi_start_rx_rings(vsi))
+ aq_ret = ICE_ERR_PARAM;
+
+ /* Set flag to indicate that queues are enabled */
+ if (!aq_ret)
+ set_bit(ICE_VF_STATE_ENA, vf->vf_states);
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, aq_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_dis_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to disable all or specific
+ * queue(s)
+ */
+static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_queue_select *vqs =
+ (struct virtchnl_queue_select *)msg;
+ enum ice_status aq_ret = 0;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
+ !test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!vqs->rx_queues && !vqs->tx_queues) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id);
+ if (!vsi) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, vf->vf_id)) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to stop tx rings on VSI %d\n",
+ vsi->vsi_num);
+ aq_ret = ICE_ERR_PARAM;
+ }
+
+ if (ice_vsi_stop_rx_rings(vsi)) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to stop rx rings on VSI %d\n",
+ vsi->vsi_num);
+ aq_ret = ICE_ERR_PARAM;
+ }
+
+ /* Clear enabled queues flag */
+ if (!aq_ret)
+ clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, aq_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_cfg_irq_map_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to configure the IRQ to queue map
+ */
+static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_irq_map_info *irqmap_info =
+ (struct virtchnl_irq_map_info *)msg;
+ u16 vsi_id, vsi_q_id, vector_id;
+ struct virtchnl_vector_map *map;
+ struct ice_vsi *vsi = NULL;
+ struct ice_pf *pf = vf->pf;
+ enum ice_status aq_ret = 0;
+ unsigned long qmap;
+ int i;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ for (i = 0; i < irqmap_info->num_vectors; i++) {
+ map = &irqmap_info->vecmap[i];
+
+ vector_id = map->vector_id;
+ vsi_id = map->vsi_id;
+ /* validate msg params */
+ if (!(vector_id < pf->hw.func_caps.common_cap
+ .num_msix_vectors) || !ice_vc_isvalid_vsi_id(vf, vsi_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
+ if (!vsi) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* look out for an invalid queue index */
+ qmap = map->rxq_map;
+ for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
+ struct ice_q_vector *q_vector;
+
+ if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+ q_vector = vsi->q_vectors[i];
+ q_vector->num_ring_rx++;
+ q_vector->rx.itr_idx = map->rxitr_idx;
+ vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
+ }
+
+ qmap = map->txq_map;
+ for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
+ struct ice_q_vector *q_vector;
+
+ if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+ q_vector = vsi->q_vectors[i];
+ q_vector->num_ring_tx++;
+ q_vector->tx.itr_idx = map->txitr_idx;
+ vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
+ }
+ }
+
+ if (vsi)
+ ice_vsi_cfg_msix(vsi);
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, aq_ret,
+ NULL, 0);
+}
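+
+/* Illustrative example of the queue-map handling above (map value
+ * assumed): a vecmap entry with rxq_map = 0x5 marks VSI queues 0 and 2,
+ * so both Rx rings get pointed at the same q_vector and num_ring_rx is
+ * incremented twice for it.
+ */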
+
+/**
+ * ice_vc_cfg_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to configure the Rx/Tx queues
+ */
+static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_vsi_queue_config_info *qci =
+ (struct virtchnl_vsi_queue_config_info *)msg;
+ struct virtchnl_queue_pair_info *qpi;
+ enum ice_status aq_ret = 0;
+ struct ice_vsi *vsi;
+ int i;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_find_vsi_from_id(vf->pf, qci->vsi_id);
+ if (!vsi) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ for (i = 0; i < qci->num_queue_pairs; i++) {
+ qpi = &qci->qpair[i];
+ if (qpi->txq.vsi_id != qci->vsi_id ||
+ qpi->rxq.vsi_id != qci->vsi_id ||
+ qpi->rxq.queue_id != qpi->txq.queue_id ||
+ !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+ /* copy Tx queue info from VF into VSI */
+ vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
+ vsi->tx_rings[i]->count = qpi->txq.ring_len;
+ /* copy Rx queue info from VF into VSI */
+ vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
+ vsi->rx_rings[i]->count = qpi->rxq.ring_len;
+ if (qpi->rxq.databuffer_size > ((16 * 1024) - 128)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+ vsi->rx_buf_len = qpi->rxq.databuffer_size;
+ if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
+ qpi->rxq.max_pkt_size < 64) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+ vsi->max_frame = qpi->rxq.max_pkt_size;
+ }
+
+ /* The VF can request to configure fewer queues than were allocated to
+ * it by default, so update the VSI with the new number
+ */
+ vsi->num_txq = qci->num_queue_pairs;
+ vsi->num_rxq = qci->num_queue_pairs;
+
+ if (!ice_vsi_cfg_txqs(vsi) && !ice_vsi_cfg_rxqs(vsi))
+ aq_ret = 0;
+ else
+ aq_ret = ICE_ERR_PARAM;
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, aq_ret,
+ NULL, 0);
+}
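+
+/* Illustrative example of the Rx sanity checks above (sizes assumed):
+ * a queue pair asking for databuffer_size = 2048 and max_pkt_size = 1522
+ * passes both bounds, whereas databuffer_size = 16384 (> 16K - 128) or
+ * max_pkt_size = 32 (< 64) would fail the request with ICE_ERR_PARAM.
+ */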
+
+/**
+ * ice_is_vf_trusted
+ * @vf: pointer to the VF info
+ */
+static bool ice_is_vf_trusted(struct ice_vf *vf)
+{
+ return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+}
+
+/**
+ * ice_can_vf_change_mac
+ * @vf: pointer to the VF info
+ *
+ * Return true if the VF is allowed to change its MAC filters, false otherwise
+ */
+static bool ice_can_vf_change_mac(struct ice_vf *vf)
+{
+ /* If the VF MAC address has been set administratively (via the
+ * ndo_set_vf_mac command), then deny permission to the VF to
+ * add/delete unicast MAC addresses, unless the VF is trusted
+ */
+ if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_vc_handle_mac_addr_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @set: true if mac filters are being set, false otherwise
+ *
+ * add or remove guest MAC address filters
+ */
+static int
+ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
+{
+ struct virtchnl_ether_addr_list *al =
+ (struct virtchnl_ether_addr_list *)msg;
+ struct ice_pf *pf = vf->pf;
+ enum virtchnl_ops vc_op;
+ enum ice_status ret;
+ LIST_HEAD(mac_list);
+ struct ice_vsi *vsi;
+ int mac_count = 0;
+ int i;
+
+ if (set)
+ vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
+ else
+ vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
+ !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
+ ret = ICE_ERR_PARAM;
+ goto handle_mac_exit;
+ }
+
+ if (set && !ice_is_vf_trusted(vf) &&
+ (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
+ dev_err(&pf->pdev->dev,
+ "Can't add more MAC addresses, because VF is not trusted, switch the VF to trusted mode in order to add more functionalities\n");
+ ret = ICE_ERR_PARAM;
+ goto handle_mac_exit;
+ }
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+
+ for (i = 0; i < al->num_elements; i++) {
+ u8 *maddr = al->list[i].addr;
+
+ if (ether_addr_equal(maddr, vf->dflt_lan_addr.addr) ||
+ is_broadcast_ether_addr(maddr)) {
+ if (set) {
+ /* VF is trying to add filters that the PF
+ * already added. Just continue.
+ */
+ dev_info(&pf->pdev->dev,
+ "mac %pM already set for VF %d\n",
+ maddr, vf->vf_id);
+ continue;
+ } else {
+ /* VF can't remove dflt_lan_addr/bcast mac */
+ dev_err(&pf->pdev->dev,
+ "can't remove mac %pM for VF %d\n",
+ maddr, vf->vf_id);
+ ret = ICE_ERR_PARAM;
+ goto handle_mac_exit;
+ }
+ }
+
+ /* check for the invalid cases and bail if necessary */
+ if (is_zero_ether_addr(maddr)) {
+ dev_err(&pf->pdev->dev,
+ "invalid mac %pM provided for VF %d\n",
+ maddr, vf->vf_id);
+ ret = ICE_ERR_PARAM;
+ goto handle_mac_exit;
+ }
+
+ if (is_unicast_ether_addr(maddr) &&
+ !ice_can_vf_change_mac(vf)) {
+ dev_err(&pf->pdev->dev,
+ "can't change unicast mac for untrusted VF %d\n",
+ vf->vf_id);
+ ret = ICE_ERR_PARAM;
+ goto handle_mac_exit;
+ }
+
+ /* get here if maddr is multicast or if VF can change mac */
+ if (ice_add_mac_to_list(vsi, &mac_list, al->list[i].addr)) {
+ ret = ICE_ERR_NO_MEMORY;
+ goto handle_mac_exit;
+ }
+ mac_count++;
+ }
+
+ /* program the updated filter list */
+ if (set)
+ ret = ice_add_mac(&pf->hw, &mac_list);
+ else
+ ret = ice_remove_mac(&pf->hw, &mac_list);
+
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "can't update mac filters for VF %d, error %d\n",
+ vf->vf_id, ret);
+ } else {
+ if (set)
+ vf->num_mac += mac_count;
+ else
+ vf->num_mac -= mac_count;
+ }
+
+handle_mac_exit:
+ ice_free_fltr_list(&pf->pdev->dev, &mac_list);
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, vc_op, ret, NULL, 0);
+}
+
+/**
+ * ice_vc_add_mac_addr_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * add guest MAC address filter
+ */
+static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
+{
+ return ice_vc_handle_mac_addr_msg(vf, msg, true);
+}
+
+/**
+ * ice_vc_del_mac_addr_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * remove guest MAC address filter
+ */
+static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
+{
+ return ice_vc_handle_mac_addr_msg(vf, msg, false);
+}
+
+/**
+ * ice_vc_request_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * VFs get a default number of queues but can use this message to request a
+ * different number. If the request is successful, PF will reset the VF and
+ * return 0. If unsuccessful, PF will send message informing VF of number of
+ * available queue pairs via virtchnl message response to VF.
+ */
+static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_vf_res_request *vfres =
+ (struct virtchnl_vf_res_request *)msg;
+ int req_queues = vfres->num_queue_pairs;
+ enum ice_status aq_ret = 0;
+ struct ice_pf *pf = vf->pf;
+ int tx_rx_queue_left;
+ int cur_queues;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ cur_queues = pf->num_vf_qps;
+ tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx);
+ if (req_queues <= 0) {
+ dev_err(&pf->pdev->dev,
+ "VF %d tried to request %d queues. Ignoring.\n",
+ vf->vf_id, req_queues);
+ } else if (req_queues > ICE_MAX_QS_PER_VF) {
+ dev_err(&pf->pdev->dev,
+ "VF %d tried to request more than %d queues.\n",
+ vf->vf_id, ICE_MAX_QS_PER_VF);
+ vfres->num_queue_pairs = ICE_MAX_QS_PER_VF;
+ } else if (req_queues - cur_queues > tx_rx_queue_left) {
+ dev_warn(&pf->pdev->dev,
+ "VF %d requested %d more queues, but only %d left.\n",
+ vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
+ vfres->num_queue_pairs = tx_rx_queue_left + cur_queues;
+ } else {
+ /* request is successful, so reset the VF */
+ vf->num_req_qs = req_queues;
+ ice_vc_dis_vf(vf);
+ dev_info(&pf->pdev->dev,
+ "VF %d granted request of %d queues.\n",
+ vf->vf_id, req_queues);
+ return 0;
+ }
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
+ aq_ret, (u8 *)vfres, sizeof(*vfres));
+}
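+
+/* Illustrative example of the request arbitration above (numbers assumed
+ * and taken to be within ICE_MAX_QS_PER_VF): with cur_queues = 4 and
+ * only 8 Tx/Rx queue pairs left in the PF, a VF request for 16 pairs
+ * (16 - 4 = 12 > 8) is not granted; the reply advertises 8 + 4 = 12
+ * available pairs instead, and no VF reset is triggered.
+ */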
+
+/**
+ * ice_set_vf_port_vlan
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @vlan_id: VLAN id being set
+ * @qos: priority setting
+ * @vlan_proto: VLAN protocol
+ *
+ * program VF Port VLAN id and/or qos
+ */
+int
+ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
+ __be16 vlan_proto)
+{
+ u16 vlanprio = vlan_id | (qos << ICE_VLAN_PRIORITY_S);
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+ struct ice_vsi *vsi;
+ struct ice_vf *vf;
+ int ret = 0;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev, "invalid VF id: %d\n", vf_id);
+ return -EINVAL;
+ }
+
+ if (vlan_id > ICE_MAX_VLANID || qos > 7) {
+ dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
+ return -EINVAL;
+ }
+
+ if (vlan_proto != htons(ETH_P_8021Q)) {
+ dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
+ return -EPROTONOSUPPORT;
+ }
+
+ vf = &pf->vf[vf_id];
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
+ return -EBUSY;
+ }
+
+ if (le16_to_cpu(vsi->info.pvid) == vlanprio) {
+ /* duplicate request, so just return success */
+ dev_info(&pf->pdev->dev,
+ "Duplicate pvid %d request\n", vlanprio);
+ return ret;
+ }
+
+ /* If pvid, then remove all filters on the old VLAN */
+ if (vsi->info.pvid)
+ ice_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
+ VLAN_VID_MASK));
+
+ if (vlan_id || qos) {
+ ret = ice_vsi_set_pvid(vsi, vlanprio);
+ if (ret)
+ goto error_set_pvid;
+ } else {
+ ice_vsi_kill_pvid(vsi);
+ }
+
+ if (vlan_id) {
+ dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
+ vlan_id, qos, vf_id);
+
+ /* add new VLAN filter for each MAC */
+ ret = ice_vsi_add_vlan(vsi, vlan_id);
+ if (ret)
+ goto error_set_pvid;
+ }
+
+ /* The Port VLAN needs to be saved across resets the same as the
+ * default LAN MAC address.
+ */
+ vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
+
+error_set_pvid:
+ return ret;
+}
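+
+/* Illustrative example of the vlanprio encoding above, assuming
+ * ICE_VLAN_PRIORITY_S places the priority in the 802.1Q PCP position
+ * (bits 15:13): with vlan_id = 100 and qos = 5,
+ * vlanprio = 100 | (5 << 13) = 0xa064, which is the combined value saved
+ * as the port VLAN across resets.
+ */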
+
+/**
+ * ice_vc_process_vlan_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @add_v: Add VLAN if true, otherwise delete VLAN
+ *
+ * Process virtchnl op to add or remove programmed guest VLAN id
+ */
+static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
+{
+ struct virtchnl_vlan_filter_list *vfl =
+ (struct virtchnl_vlan_filter_list *)msg;
+ enum ice_status aq_ret = 0;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ int i;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (add_v && !ice_is_vf_trusted(vf) &&
+ vf->num_vlan >= ICE_MAX_VLAN_PER_VF) {
+ dev_info(&pf->pdev->dev,
+ "VF is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n");
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ for (i = 0; i < vfl->num_elements; i++) {
+ if (vfl->vlan_id[i] > ICE_MAX_VLANID) {
+ aq_ret = ICE_ERR_PARAM;
+ dev_err(&pf->pdev->dev,
+ "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
+ goto error_param;
+ }
+ }
+
+ vsi = ice_find_vsi_from_id(vf->pf, vfl->vsi_id);
+ if (!vsi) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vsi->info.pvid) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (ice_vsi_manage_vlan_stripping(vsi, add_v)) {
+ dev_err(&pf->pdev->dev,
+ "%sable VLAN stripping failed for VSI %i\n",
+ add_v ? "en" : "dis", vsi->vsi_num);
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (add_v) {
+ for (i = 0; i < vfl->num_elements; i++) {
+ u16 vid = vfl->vlan_id[i];
+
+ if (!ice_vsi_add_vlan(vsi, vid)) {
+ vf->num_vlan++;
+ set_bit(vid, vsi->active_vlans);
+
+ /* Enable VLAN pruning when VLAN 0 is added */
+ if (unlikely(!vid))
+ if (ice_cfg_vlan_pruning(vsi, true))
+ aq_ret = ICE_ERR_PARAM;
+ } else {
+ aq_ret = ICE_ERR_PARAM;
+ }
+ }
+ } else {
+ for (i = 0; i < vfl->num_elements; i++) {
+ u16 vid = vfl->vlan_id[i];
+
+ /* Make sure ice_vsi_kill_vlan is successful before
+ * updating VLAN information
+ */
+ if (!ice_vsi_kill_vlan(vsi, vid)) {
+ vf->num_vlan--;
+ clear_bit(vid, vsi->active_vlans);
+
+ /* Disable VLAN pruning when removing VLAN 0 */
+ if (unlikely(!vid))
+ ice_cfg_vlan_pruning(vsi, false);
+ }
+ }
+ }
+
+error_param:
+ /* send the response to the VF */
+ if (add_v)
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret,
+ NULL, 0);
+ else
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_add_vlan_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Add and program guest VLAN id
+ */
+static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
+{
+ return ice_vc_process_vlan_msg(vf, msg, true);
+}
+
+/**
+ * ice_vc_remove_vlan_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * remove programmed guest VLAN id
+ */
+static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
+{
+ return ice_vc_process_vlan_msg(vf, msg, false);
+}
+
+/**
+ * ice_vc_ena_vlan_stripping
+ * @vf: pointer to the VF info
+ *
+ * Enable VLAN header stripping for a given VF
+ */
+static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
+{
+ enum ice_status aq_ret = 0;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (ice_vsi_manage_vlan_stripping(vsi, true))
+ aq_ret = ICE_ERR_AQ_ERROR;
+
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
+ aq_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_dis_vlan_stripping
+ * @vf: pointer to the VF info
+ *
+ * Disable VLAN header stripping for a given VF
+ */
+static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
+{
+ enum ice_status aq_ret = 0;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (ice_vsi_manage_vlan_stripping(vsi, false))
+ aq_ret = ICE_ERR_AQ_ERROR;
+
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
+ aq_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_process_vf_msg - Process request from VF
+ * @pf: pointer to the PF structure
+ * @event: pointer to the AQ event
+ *
+ * called from the common asq/arq handler to
+ * process request from VF
+ */
+void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
+{
+ u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
+ s16 vf_id = le16_to_cpu(event->desc.retval);
+ u16 msglen = event->msg_len;
+ u8 *msg = event->msg_buf;
+ struct ice_vf *vf = NULL;
+ int err = 0;
+
+ if (vf_id >= pf->num_alloc_vfs) {
+ err = -EINVAL;
+ goto error_handler;
+ }
+
+ vf = &pf->vf[vf_id];
+
+ /* Check if VF is disabled. */
+ if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
+ err = -EPERM;
+ goto error_handler;
+ }
+
+ /* Perform basic checks on the msg */
+ err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
+ if (err) {
+ if (err == VIRTCHNL_ERR_PARAM)
+ err = -EPERM;
+ else
+ err = -EINVAL;
+ goto error_handler;
+ }
+
+ /* Perform additional checks specific to RSS and Virtchnl */
+ if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) {
+ struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg;
+
+ if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE)
+ err = -EINVAL;
+ } else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) {
+ struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
+
+ if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE)
+ err = -EINVAL;
+ }
+
+error_handler:
+ if (err) {
+ ice_vc_send_msg_to_vf(vf, v_opcode, ICE_ERR_PARAM, NULL, 0);
+ dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
+ vf_id, v_opcode, msglen, err);
+ return;
+ }
+
+ switch (v_opcode) {
+ case VIRTCHNL_OP_VERSION:
+ err = ice_vc_get_ver_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
+ err = ice_vc_get_vf_res_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_RESET_VF:
+ ice_vc_reset_vf_msg(vf);
+ break;
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
+ err = ice_vc_add_mac_addr_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_DEL_ETH_ADDR:
+ err = ice_vc_del_mac_addr_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ err = ice_vc_cfg_qs_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_ENABLE_QUEUES:
+ err = ice_vc_ena_qs_msg(vf, msg);
+ ice_vc_notify_vf_link_state(vf);
+ break;
+ case VIRTCHNL_OP_DISABLE_QUEUES:
+ err = ice_vc_dis_qs_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_REQUEST_QUEUES:
+ err = ice_vc_request_qs_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ err = ice_vc_cfg_irq_map_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_KEY:
+ err = ice_vc_config_rss_key(vf, msg);
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_LUT:
+ err = ice_vc_config_rss_lut(vf, msg);
+ break;
+ case VIRTCHNL_OP_GET_STATS:
+ err = ice_vc_get_stats_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_ADD_VLAN:
+ err = ice_vc_add_vlan_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_DEL_VLAN:
+ err = ice_vc_remove_vlan_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ err = ice_vc_ena_vlan_stripping(vf);
+ break;
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+ err = ice_vc_dis_vlan_stripping(vf);
+ break;
+ case VIRTCHNL_OP_UNKNOWN:
+ default:
+ dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
+ v_opcode, vf_id);
+ err = ice_vc_send_msg_to_vf(vf, v_opcode, ICE_ERR_NOT_IMPL,
+ NULL, 0);
+ break;
+ }
+ if (err) {
+ /* The error is only logged here; the caller is busy with
+ * pending work and does not act on the opcode handlers'
+ * return values.
+ */
+ dev_info(&pf->pdev->dev,
+ "PF failed to honor VF %d, opcode %d, error %d\n",
+ vf_id, v_opcode, err);
+ }
+}
+
+/**
+ * ice_get_vf_cfg
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @ivi: VF configuration structure
+ *
+ * return VF configuration
+ */
+int ice_get_vf_cfg(struct net_device *netdev, int vf_id,
+ struct ifla_vf_info *ivi)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ struct ice_vf *vf;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ netdev_err(netdev, "invalid VF id: %d\n", vf_id);
+ return -EINVAL;
+ }
+
+ vf = &pf->vf[vf_id];
+ vsi = pf->vsi[vf->lan_vsi_idx];
+
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
+ return -EBUSY;
+ }
+
+ ivi->vf = vf_id;
+ ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr);
+
+ /* VF configuration for VLAN and applicable QoS */
+ ivi->vlan = le16_to_cpu(vsi->info.pvid) & ICE_VLAN_M;
+ ivi->qos = (le16_to_cpu(vsi->info.pvid) & ICE_PRIORITY_M) >>
+ ICE_VLAN_PRIORITY_S;
+
+ ivi->trusted = vf->trusted;
+ ivi->spoofchk = vf->spoofchk;
+ if (!vf->link_forced)
+ ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+ else if (vf->link_up)
+ ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+ else
+ ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+ ivi->max_tx_rate = vf->tx_rate;
+ ivi->min_tx_rate = 0;
+ return 0;
+}
+
+/**
+ * ice_set_vf_spoofchk
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @ena: flag to enable or disable feature
+ *
+ * Enable or disable VF spoof checking
+ */
+int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi_ctx ctx = { 0 };
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ struct ice_vf *vf;
+ int status;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ netdev_err(netdev, "invalid VF id: %d\n", vf_id);
+ return -EINVAL;
+ }
+
+ vf = &pf->vf[vf_id];
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
+ return -EBUSY;
+ }
+
+ if (ena == vf->spoofchk) {
+ dev_dbg(&pf->pdev->dev, "VF spoofchk already %s\n",
+ ena ? "ON" : "OFF");
+ return 0;
+ }
+
+ ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
+
+ if (ena) {
+ ctx.info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
+ ctx.info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M;
+ }
+
+ status = ice_update_vsi(&pf->hw, vsi->idx, &ctx, NULL);
+ if (status) {
+ dev_dbg(&pf->pdev->dev,
+ "Error %d, failed to update VSI* parameters\n", status);
+ return -EIO;
+ }
+
+ vf->spoofchk = ena;
+ vsi->info.sec_flags = ctx.info.sec_flags;
+ vsi->info.sw_flags2 = ctx.info.sw_flags2;
+
+ return status;
+}
+
+/**
+ * ice_set_vf_mac
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @mac: MAC address
+ *
+ * program VF MAC address
+ */
+int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ struct ice_vf *vf;
+ int ret = 0;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ netdev_err(netdev, "invalid VF id: %d\n", vf_id);
+ return -EINVAL;
+ }
+
+ vf = &pf->vf[vf_id];
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
+ return -EBUSY;
+ }
+
+ if (is_zero_ether_addr(mac) || is_multicast_ether_addr(mac)) {
+ netdev_err(netdev, "%pM not a valid unicast address\n", mac);
+ return -EINVAL;
+ }
+
+ /* copy mac into dflt_lan_addr and trigger a VF reset. The reset
+ * flow will use the updated dflt_lan_addr and add a MAC filter
+ * using ice_add_mac. Also set pf_set_mac to indicate that the PF has
+ * set the MAC address for this VF.
+ */
+ ether_addr_copy(vf->dflt_lan_addr.addr, mac);
+ vf->pf_set_mac = true;
+ netdev_info(netdev,
+ "mac on VF %d set to %pM\n. VF driver will be reinitialized\n",
+ vf_id, mac);
+
+ ice_vc_dis_vf(vf);
+ return ret;
+}
+
+/**
+ * ice_set_vf_trust
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @trusted: Boolean value to enable/disable trusted VF
+ *
+ * Enable or disable a given VF as trusted
+ */
+int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ struct ice_vf *vf;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev, "invalid VF id: %d\n", vf_id);
+ return -EINVAL;
+ }
+
+ vf = &pf->vf[vf_id];
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
+ return -EBUSY;
+ }
+
+ /* Check if already trusted */
+ if (trusted == vf->trusted)
+ return 0;
+
+ vf->trusted = trusted;
+ ice_vc_dis_vf(vf);
+ dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
+ vf_id, trusted ? "" : "un");
+
+ return 0;
+}
+
+/**
+ * ice_set_vf_link_state
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @link_state: required link state
+ *
+ * Set VF's link state, irrespective of physical link state status
+ */
+int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+ struct virtchnl_pf_event pfe = { 0 };
+ struct ice_link_status *ls;
+ struct ice_vf *vf;
+ struct ice_hw *hw;
+
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+ return -EINVAL;
+ }
+
+ vf = &pf->vf[vf_id];
+ hw = &pf->hw;
+ ls = &pf->hw.port_info->phy.link_info;
+
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ dev_err(&pf->pdev->dev, "vf %d in reset. Try again.\n", vf_id);
+ return -EBUSY;
+ }
+
+ pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
+ pfe.severity = PF_EVENT_SEVERITY_INFO;
+
+ switch (link_state) {
+ case IFLA_VF_LINK_STATE_AUTO:
+ vf->link_forced = false;
+ vf->link_up = ls->link_info & ICE_AQ_LINK_UP;
+ break;
+ case IFLA_VF_LINK_STATE_ENABLE:
+ vf->link_forced = true;
+ vf->link_up = true;
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ vf->link_forced = true;
+ vf->link_up = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (vf->link_forced)
+ ice_set_pfe_link_forced(vf, &pfe, vf->link_up);
+ else
+ ice_set_pfe_link(vf, &pfe, ls->link_speed, vf->link_up);
+
+ /* Notify the VF of its new link state */
+ ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe,
+ sizeof(pfe), NULL);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
new file mode 100644
index 000000000000..10131e0180f9
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Intel Corporation. */
+
+#ifndef _ICE_VIRTCHNL_PF_H_
+#define _ICE_VIRTCHNL_PF_H_
+#include "ice.h"
+
+#define ICE_MAX_VLANID 4095
+#define ICE_VLAN_PRIORITY_S 12
+#define ICE_VLAN_M 0xFFF
+#define ICE_PRIORITY_M 0x7000
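+
+/* A VSI PVID packs the VLAN ID in bits 0-11 and the 802.1p priority in
+ * bits 12-14. For example, a hypothetical PVID of 0x3064 decodes to VLAN ID
+ * 100 (0x3064 & ICE_VLAN_M) and priority 3
+ * ((0x3064 & ICE_PRIORITY_M) >> ICE_VLAN_PRIORITY_S), which is how
+ * ice_get_vf_cfg() reports VLAN and QoS to the stack.
+ */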
+
+/* Restrict the number of MAC addresses and VLANs that a non-trusted VF can program */
+#define ICE_MAX_VLAN_PER_VF 8
+#define ICE_MAX_MACADDR_PER_VF 12
+
+/* Malicious Driver Detection */
+#define ICE_DFLT_NUM_MDD_EVENTS_ALLOWED 3
+#define ICE_DFLT_NUM_INVAL_MSGS_ALLOWED 10
+
+/* Static VF transaction/status register def */
+#define VF_DEVICE_STATUS 0xAA
+#define VF_TRANS_PENDING_M 0x20
+
+/* Specific VF states */
+enum ice_vf_states {
+ ICE_VF_STATE_INIT = 0,
+ ICE_VF_STATE_ACTIVE,
+ ICE_VF_STATE_ENA,
+ ICE_VF_STATE_DIS,
+ ICE_VF_STATE_MC_PROMISC,
+ ICE_VF_STATE_UC_PROMISC,
+ /* state to indicate that the PF needs to do vector assignment for
+ * the VF. This needs to be set during initial VF bring-up or later
+ * when the VF asks for more vectors through a virtchnl op.
+ */
+ ICE_VF_STATE_CFG_INTR,
+ ICE_VF_STATES_NBITS
+};
+
+/* VF capabilities */
+enum ice_virtchnl_cap {
+ ICE_VIRTCHNL_VF_CAP_L2 = 0,
+ ICE_VIRTCHNL_VF_CAP_PRIVILEGE,
+};
+
+/* VF information structure */
+struct ice_vf {
+ struct ice_pf *pf;
+
+ s16 vf_id; /* VF id in the PF space */
+ u32 driver_caps; /* reported by VF driver */
+ int first_vector_idx; /* first vector index of this VF */
+ struct ice_sw *vf_sw_id; /* switch id the VF VSIs connect to */
+ struct virtchnl_version_info vf_ver;
+ struct virtchnl_ether_addr dflt_lan_addr;
+ u16 port_vlan_id;
+ u8 pf_set_mac; /* VF MAC address set by VMM admin */
+ u8 trusted;
+ u16 lan_vsi_idx; /* index into PF struct */
+ u16 lan_vsi_num; /* ID as used by firmware */
+ u64 num_mdd_events; /* number of mdd events detected */
+ u64 num_inval_msgs; /* number of continuous invalid msgs */
+ u64 num_valid_msgs; /* number of valid msgs detected */
+ unsigned long vf_caps; /* VF's advanced capabilities */
+ DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
+ unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
+ u8 link_forced;
+ u8 link_up; /* only valid if VF link is forced */
+ u8 spoofchk;
+ u16 num_mac;
+ u16 num_vlan;
+ u8 num_req_qs; /* num of queue pairs requested by VF */
+};
+
+#ifdef CONFIG_PCI_IOV
+void ice_process_vflr_event(struct ice_pf *pf);
+int ice_sriov_configure(struct pci_dev *pdev, int num_vfs);
+int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
+int ice_get_vf_cfg(struct net_device *netdev, int vf_id,
+ struct ifla_vf_info *ivi);
+
+void ice_free_vfs(struct ice_pf *pf);
+void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event);
+void ice_vc_notify_link_state(struct ice_pf *pf);
+void ice_vc_notify_reset(struct ice_pf *pf);
+bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr);
+
+int ice_set_vf_port_vlan(struct net_device *netdev, int vf_id,
+ u16 vlan_id, u8 qos, __be16 vlan_proto);
+
+int ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+ int max_tx_rate);
+
+int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted);
+
+int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state);
+
+int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena);
+#else /* CONFIG_PCI_IOV */
+#define ice_process_vflr_event(pf) do {} while (0)
+#define ice_free_vfs(pf) do {} while (0)
+#define ice_vc_process_vf_msg(pf, event) do {} while (0)
+#define ice_vc_notify_link_state(pf) do {} while (0)
+#define ice_vc_notify_reset(pf) do {} while (0)
+
+static inline bool
+ice_reset_all_vfs(struct ice_pf __always_unused *pf,
+ bool __always_unused is_vflr)
+{
+ return true;
+}
+
+static inline int
+ice_sriov_configure(struct pci_dev __always_unused *pdev,
+ int __always_unused num_vfs)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_mac(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, u8 __always_unused *mac)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_get_vf_cfg(struct net_device __always_unused *netdev,
+ int __always_unused vf_id,
+ struct ifla_vf_info __always_unused *ivi)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_trust(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, bool __always_unused trusted)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_port_vlan(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, u16 __always_unused vid,
+ u8 __always_unused qos, __be16 __always_unused v_proto)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_spoofchk(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, bool __always_unused ena)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_link_state(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, int __always_unused link_state)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_bw(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, int __always_unused min_tx_rate,
+ int __always_unused max_tx_rate)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_PCI_IOV */
+#endif /* _ICE_VIRTCHNL_PF_H_ */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 0d29df8accd8..5df88ad8ac81 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -9086,7 +9086,6 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
pci_ers_result_t result;
- int err;
if (pci_enable_device_mem(pdev)) {
dev_err(&pdev->dev,
@@ -9110,14 +9109,6 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
result = PCI_ERS_RESULT_RECOVERED;
}
- err = pci_cleanup_aer_uncorrect_error_status(pdev);
- if (err) {
- dev_err(&pdev->dev,
- "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
- err);
- /* non-fatal, continue */
- }
-
return result;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 9f4d700e09df..29ced6b74d36 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -51,9 +51,15 @@
*
* The 40 bit 82580 SYSTIM overflows every
* 2^40 * 10^-9 / 60 = 18.3 minutes.
+ *
+ * SYSTIM is converted to real time using a timecounter. As
+ * timecounter_cyc2time() allows old timestamps, the timecounter
+ * needs to be updated at least once per half of the SYSTIM interval.
+ * Scheduling of delayed work is not very accurate, so we aim for 8
+ * minutes to be sure the actual interval is shorter than 9.16 minutes.
*/
-#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9)
+#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 8)
#define IGB_PTP_TX_TIMEOUT (HZ * 15)
#define INCPERIOD_82576 BIT(E1000_TIMINCA_16NS_SHIFT)
#define INCVALUE_82576_MASK GENMASK(E1000_TIMINCA_16NS_SHIFT - 1, 0)
diff --git a/drivers/net/ethernet/intel/igc/Makefile b/drivers/net/ethernet/intel/igc/Makefile
new file mode 100644
index 000000000000..4387f6ba8e67
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2018 Intel Corporation
+
+#
+# Intel(R) I225-LM/I225-V 2.5G Ethernet Controller
+#
+
+obj-$(CONFIG_IGC) += igc.o
+
+igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
new file mode 100644
index 000000000000..cdf18a5d9e08
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -0,0 +1,443 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018 Intel Corporation */
+
+#ifndef _IGC_H_
+#define _IGC_H_
+
+#include <linux/kobject.h>
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+
+#include <linux/ethtool.h>
+
+#include <linux/sctp.h>
+
+#define IGC_ERR(args...) pr_err("igc: " args)
+
+#define PFX "igc: "
+
+#include <linux/timecounter.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+
+#include "igc_hw.h"
+
+/* main */
+extern char igc_driver_name[];
+extern char igc_driver_version[];
+
+/* Interrupt defines */
+#define IGC_START_ITR 648 /* ~6000 ints/sec */
+#define IGC_FLAG_HAS_MSI BIT(0)
+#define IGC_FLAG_QUEUE_PAIRS BIT(4)
+#define IGC_FLAG_NEED_LINK_UPDATE BIT(9)
+#define IGC_FLAG_MEDIA_RESET BIT(10)
+#define IGC_FLAG_MAS_ENABLE BIT(12)
+#define IGC_FLAG_HAS_MSIX BIT(13)
+#define IGC_FLAG_VLAN_PROMISC BIT(15)
+
+#define IGC_4K_ITR 980
+#define IGC_20K_ITR 196
+#define IGC_70K_ITR 56
+
+#define IGC_DEFAULT_ITR 3 /* dynamic */
+#define IGC_MAX_ITR_USECS 10000
+#define IGC_MIN_ITR_USECS 10
+#define NON_Q_VECTORS 1
+#define MAX_MSIX_ENTRIES 10
+
+/* TX/RX descriptor defines */
+#define IGC_DEFAULT_TXD 256
+#define IGC_DEFAULT_TX_WORK 128
+#define IGC_MIN_TXD 80
+#define IGC_MAX_TXD 4096
+
+#define IGC_DEFAULT_RXD 256
+#define IGC_MIN_RXD 80
+#define IGC_MAX_RXD 4096
+
+/* Transmit and receive queues */
+#define IGC_MAX_RX_QUEUES 4
+#define IGC_MAX_TX_QUEUES 4
+
+#define MAX_Q_VECTORS 8
+#define MAX_STD_JUMBO_FRAME_SIZE 9216
+
+/* Supported Rx Buffer Sizes */
+#define IGC_RXBUFFER_256 256
+#define IGC_RXBUFFER_2048 2048
+#define IGC_RXBUFFER_3072 3072
+
+#define IGC_RX_HDR_LEN IGC_RXBUFFER_256
+
+/* RX and TX descriptor control thresholds.
+ * PTHRESH - MAC will consider prefetch if it has fewer than this number of
+ * descriptors available in its onboard memory.
+ * Setting this to 0 disables RX descriptor prefetch.
+ * HTHRESH - MAC will only prefetch if there are at least this many descriptors
+ * available in host memory.
+ * If PTHRESH is 0, this should also be 0.
+ * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
+ * descriptors until either it has this many to write back, or the
+ * ITR timer expires.
+ */
+#define IGC_RX_PTHRESH 8
+#define IGC_RX_HTHRESH 8
+#define IGC_TX_PTHRESH 8
+#define IGC_TX_HTHRESH 1
+#define IGC_RX_WTHRESH 4
+#define IGC_TX_WTHRESH 16
+
+#define IGC_RX_DMA_ATTR \
+ (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
+#define IGC_TS_HDR_LEN 16
+
+#define IGC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+
+#if (PAGE_SIZE < 8192)
+#define IGC_MAX_FRAME_BUILD_SKB \
+ (SKB_WITH_OVERHEAD(IGC_RXBUFFER_2048) - IGC_SKB_PAD - IGC_TS_HDR_LEN)
+#else
+#define IGC_MAX_FRAME_BUILD_SKB (IGC_RXBUFFER_2048 - IGC_TS_HDR_LEN)
+#endif
+
+/* How many Rx buffers do we bundle into one write to the hardware? */
+#define IGC_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+
+/* igc_test_staterr - tests bits within Rx descriptor status and error fields */
+static inline __le32 igc_test_staterr(union igc_adv_rx_desc *rx_desc,
+ const u32 stat_err_bits)
+{
+ return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
+}
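+
+/* Illustrative use (not taken from this patch): a receive error check could
+ * look like
+ *   if (igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))
+ *           ... drop the frame ...
+ * The helper masks the little-endian status_error word directly, avoiding a
+ * byte swap of the whole field.
+ */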
+
+enum igc_state_t {
+ __IGC_TESTING,
+ __IGC_RESETTING,
+ __IGC_DOWN,
+ __IGC_PTP_TX_IN_PROGRESS,
+};
+
+enum igc_tx_flags {
+ /* cmd_type flags */
+ IGC_TX_FLAGS_VLAN = 0x01,
+ IGC_TX_FLAGS_TSO = 0x02,
+ IGC_TX_FLAGS_TSTAMP = 0x04,
+
+ /* olinfo flags */
+ IGC_TX_FLAGS_IPV4 = 0x10,
+ IGC_TX_FLAGS_CSUM = 0x20,
+};
+
+enum igc_boards {
+ board_base,
+};
+
+/* The largest size we can write to the descriptor is 65535. In order to
+ * maintain a power of two alignment we have to limit ourselves to 32K.
+ */
+#define IGC_MAX_TXD_PWR 15
+#define IGC_MAX_DATA_PER_TXD BIT(IGC_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGC_MAX_DATA_PER_TXD)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
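+
+/* Worked example with a hypothetical fragment size: a 60000-byte fragment
+ * needs TXD_USE_COUNT(60000) = DIV_ROUND_UP(60000, 32768) = 2 data
+ * descriptors, since IGC_MAX_DATA_PER_TXD is BIT(15) = 32768 bytes.
+ */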
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer
+ */
+struct igc_tx_buffer {
+ union igc_adv_tx_desc *next_to_watch;
+ unsigned long time_stamp;
+ struct sk_buff *skb;
+ unsigned int bytecount;
+ u16 gso_segs;
+ __be16 protocol;
+
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+ u32 tx_flags;
+};
+
+struct igc_rx_buffer {
+ dma_addr_t dma;
+ struct page *page;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+ __u32 page_offset;
+#else
+ __u16 page_offset;
+#endif
+ __u16 pagecnt_bias;
+};
+
+struct igc_tx_queue_stats {
+ u64 packets;
+ u64 bytes;
+ u64 restart_queue;
+ u64 restart_queue2;
+};
+
+struct igc_rx_queue_stats {
+ u64 packets;
+ u64 bytes;
+ u64 drops;
+ u64 csum_err;
+ u64 alloc_failed;
+};
+
+struct igc_rx_packet_stats {
+ u64 ipv4_packets; /* IPv4 headers processed */
+ u64 ipv4e_packets; /* IPv4E headers with extensions processed */
+ u64 ipv6_packets; /* IPv6 headers processed */
+ u64 ipv6e_packets; /* IPv6E headers with extensions processed */
+ u64 tcp_packets; /* TCP headers processed */
+ u64 udp_packets; /* UDP headers processed */
+ u64 sctp_packets; /* SCTP headers processed */
+ u64 nfs_packets; /* NFS headers processed */
+ u64 other_packets;
+};
+
+struct igc_ring_container {
+ struct igc_ring *ring; /* pointer to linked list of rings */
+ unsigned int total_bytes; /* total bytes processed this int */
+ unsigned int total_packets; /* total packets processed this int */
+ u16 work_limit; /* total work allowed per interrupt */
+ u8 count; /* total number of rings in vector */
+ u8 itr; /* current ITR setting for ring */
+};
+
+struct igc_ring {
+ struct igc_q_vector *q_vector; /* backlink to q_vector */
+ struct net_device *netdev; /* back pointer to net_device */
+ struct device *dev; /* device for dma mapping */
+ union { /* array of buffer info structs */
+ struct igc_tx_buffer *tx_buffer_info;
+ struct igc_rx_buffer *rx_buffer_info;
+ };
+ void *desc; /* descriptor ring memory */
+ unsigned long flags; /* ring specific flags */
+ void __iomem *tail; /* pointer to ring tail register */
+ dma_addr_t dma; /* phys address of the ring */
+ unsigned int size; /* length of desc. ring in bytes */
+
+ u16 count; /* number of desc. in the ring */
+ u8 queue_index; /* logical index of the ring */
+ u8 reg_idx; /* physical index of the ring */
+
+ /* everything past this point are written often */
+ u16 next_to_clean;
+ u16 next_to_use;
+ u16 next_to_alloc;
+
+ union {
+ /* TX */
+ struct {
+ struct igc_tx_queue_stats tx_stats;
+ struct u64_stats_sync tx_syncp;
+ struct u64_stats_sync tx_syncp2;
+ };
+ /* RX */
+ struct {
+ struct igc_rx_queue_stats rx_stats;
+ struct igc_rx_packet_stats pkt_stats;
+ struct u64_stats_sync rx_syncp;
+ struct sk_buff *skb;
+ };
+ };
+} ____cacheline_internodealigned_in_smp;
+
+struct igc_q_vector {
+ struct igc_adapter *adapter; /* backlink */
+ void __iomem *itr_register;
+ u32 eims_value; /* EIMS mask value */
+
+ u16 itr_val;
+ u8 set_itr;
+
+ struct igc_ring_container rx, tx;
+
+ struct napi_struct napi;
+
+ struct rcu_head rcu; /* to avoid race with update stats on free */
+ char name[IFNAMSIZ + 9];
+ struct net_device poll_dev;
+
+ /* for dynamic allocation of rings associated with this q_vector */
+ struct igc_ring ring[0] ____cacheline_internodealigned_in_smp;
+};
+
+struct igc_mac_addr {
+ u8 addr[ETH_ALEN];
+ u8 queue;
+ u8 state; /* bitmask */
+};
+
+#define IGC_MAC_STATE_DEFAULT 0x1
+#define IGC_MAC_STATE_MODIFIED 0x2
+#define IGC_MAC_STATE_IN_USE 0x4
+
+/* Board specific private data structure */
+struct igc_adapter {
+ struct net_device *netdev;
+
+ unsigned long state;
+ unsigned int flags;
+ unsigned int num_q_vectors;
+
+ struct msix_entry *msix_entries;
+
+ /* TX */
+ u16 tx_work_limit;
+ u32 tx_timeout_count;
+ int num_tx_queues;
+ struct igc_ring *tx_ring[IGC_MAX_TX_QUEUES];
+
+ /* RX */
+ int num_rx_queues;
+ struct igc_ring *rx_ring[IGC_MAX_RX_QUEUES];
+
+ struct timer_list watchdog_timer;
+ struct timer_list dma_err_timer;
+ struct timer_list phy_info_timer;
+
+ u16 link_speed;
+ u16 link_duplex;
+
+ u8 port_num;
+
+ u8 __iomem *io_addr;
+ /* Interrupt Throttle Rate */
+ u32 rx_itr_setting;
+ u32 tx_itr_setting;
+
+ struct work_struct reset_task;
+ struct work_struct watchdog_task;
+ struct work_struct dma_err_task;
+ bool fc_autoneg;
+
+ u8 tx_timeout_factor;
+
+ int msg_enable;
+ u32 max_frame_size;
+ u32 min_frame_size;
+
+ /* OS defined structs */
+ struct pci_dev *pdev;
+ /* lock for statistics */
+ spinlock_t stats64_lock;
+ struct rtnl_link_stats64 stats64;
+
+ /* structs defined in igc_hw.h */
+ struct igc_hw hw;
+ struct igc_hw_stats stats;
+
+ struct igc_q_vector *q_vector[MAX_Q_VECTORS];
+ u32 eims_enable_mask;
+ u32 eims_other;
+
+ u16 tx_ring_count;
+ u16 rx_ring_count;
+
+ u32 *shadow_vfta;
+
+ u32 rss_queues;
+
+ /* lock for RX network flow classification filter */
+ spinlock_t nfc_lock;
+
+ struct igc_mac_addr *mac_table;
+
+ unsigned long link_check_timeout;
+ struct igc_info ei;
+};
+
+/* igc_desc_unused - calculate if we have unused descriptors */
+static inline u16 igc_desc_unused(const struct igc_ring *ring)
+{
+ u16 ntc = ring->next_to_clean;
+ u16 ntu = ring->next_to_use;
+
+ return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
+}
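+
+/* Example with a hypothetical ring state: with count = 256,
+ * next_to_clean = 10 and next_to_use = 200, there are 200 - 10 = 190
+ * descriptors in flight, so igc_desc_unused() returns
+ * 256 + 10 - 200 - 1 = 65 free slots; one slot is always kept unused to
+ * distinguish a full ring from an empty one.
+ */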
+
+static inline s32 igc_get_phy_info(struct igc_hw *hw)
+{
+ if (hw->phy.ops.get_phy_info)
+ return hw->phy.ops.get_phy_info(hw);
+
+ return 0;
+}
+
+static inline s32 igc_reset_phy(struct igc_hw *hw)
+{
+ if (hw->phy.ops.reset)
+ return hw->phy.ops.reset(hw);
+
+ return 0;
+}
+
+static inline struct netdev_queue *txring_txq(const struct igc_ring *tx_ring)
+{
+ return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
+}
+
+enum igc_ring_flags_t {
+ IGC_RING_FLAG_RX_3K_BUFFER,
+ IGC_RING_FLAG_RX_BUILD_SKB_ENABLED,
+ IGC_RING_FLAG_RX_SCTP_CSUM,
+ IGC_RING_FLAG_RX_LB_VLAN_BSWAP,
+ IGC_RING_FLAG_TX_CTX_IDX,
+ IGC_RING_FLAG_TX_DETECT_HANG
+};
+
+#define ring_uses_large_buffer(ring) \
+ test_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
+
+#define ring_uses_build_skb(ring) \
+ test_bit(IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
+
+static inline unsigned int igc_rx_bufsz(struct igc_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+ if (ring_uses_large_buffer(ring))
+ return IGC_RXBUFFER_3072;
+
+ if (ring_uses_build_skb(ring))
+ return IGC_MAX_FRAME_BUILD_SKB + IGC_TS_HDR_LEN;
+#endif
+ return IGC_RXBUFFER_2048;
+}
+
+static inline unsigned int igc_rx_pg_order(struct igc_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+ if (ring_uses_large_buffer(ring))
+ return 1;
+#endif
+ return 0;
+}
+
+static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data)
+{
+ if (hw->phy.ops.read_reg)
+ return hw->phy.ops.read_reg(hw, offset, data);
+
+ return 0;
+}
+
+#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring))
+
+#define IGC_TXD_DCMD (IGC_ADVTXD_DCMD_EOP | IGC_ADVTXD_DCMD_RS)
+
+#define IGC_RX_DESC(R, i) \
+ (&(((union igc_adv_rx_desc *)((R)->desc))[i]))
+#define IGC_TX_DESC(R, i) \
+ (&(((union igc_adv_tx_desc *)((R)->desc))[i]))
+#define IGC_TX_CTXTDESC(R, i) \
+ (&(((struct igc_adv_tx_context_desc *)((R)->desc))[i]))
+
+#endif /* _IGC_H_ */
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
new file mode 100644
index 000000000000..832da609d9a7
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -0,0 +1,541 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 Intel Corporation */
+
+#include <linux/delay.h>
+
+#include "igc_hw.h"
+#include "igc_i225.h"
+#include "igc_mac.h"
+#include "igc_base.h"
+#include "igc.h"
+
+/**
+ * igc_set_pcie_completion_timeout - set pci-e completion timeout
+ * @hw: pointer to the HW structure
+ */
+static s32 igc_set_pcie_completion_timeout(struct igc_hw *hw)
+{
+ u32 gcr = rd32(IGC_GCR);
+ u16 pcie_devctl2;
+ s32 ret_val = 0;
+
+ /* only take action if timeout value is defaulted to 0 */
+ if (gcr & IGC_GCR_CMPL_TMOUT_MASK)
+ goto out;
+
+ /* if the capabilities version is type 1, we can set a
+ * timeout in the 10ms to 200ms range through the GCR register
+ */
+ if (!(gcr & IGC_GCR_CAP_VER2)) {
+ gcr |= IGC_GCR_CMPL_TMOUT_10ms;
+ goto out;
+ }
+
+ /* for version 2 capabilities we need to write the config space
+ * directly in order to set the completion timeout value to the
+ * 16ms to 55ms range
+ */
+ ret_val = igc_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
+ &pcie_devctl2);
+ if (ret_val)
+ goto out;
+
+ pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
+
+ ret_val = igc_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
+ &pcie_devctl2);
+out:
+ /* disable completion timeout resend */
+ gcr &= ~IGC_GCR_CMPL_TMOUT_RESEND;
+
+ wr32(IGC_GCR, gcr);
+
+ return ret_val;
+}
+
+/**
+ * igc_check_for_link_base - Check for link
+ * @hw: pointer to the HW structure
+ *
+ * If SGMII is enabled, use the PCS register to determine link; otherwise
+ * use the generic interface for determining link.
+ */
+static s32 igc_check_for_link_base(struct igc_hw *hw)
+{
+ s32 ret_val = 0;
+
+ ret_val = igc_check_for_copper_link(hw);
+
+ return ret_val;
+}
+
+/**
+ * igc_reset_hw_base - Reset hardware
+ * @hw: pointer to the HW structure
+ *
+ * This resets the hardware into a known state. This is a
+ * function pointer entry point called by the api module.
+ */
+static s32 igc_reset_hw_base(struct igc_hw *hw)
+{
+ s32 ret_val;
+ u32 ctrl;
+
+ /* Prevent the PCI-E bus from sticking if there is no TLP connection
+ * on the last TLP read/write transaction when MAC is reset.
+ */
+ ret_val = igc_disable_pcie_master(hw);
+ if (ret_val)
+ hw_dbg("PCI-E Master disable polling has failed.\n");
+
+ /* set the completion timeout for interface */
+ ret_val = igc_set_pcie_completion_timeout(hw);
+ if (ret_val)
+ hw_dbg("PCI-E Set completion timeout has failed.\n");
+
+ hw_dbg("Masking off all interrupts\n");
+ wr32(IGC_IMC, 0xffffffff);
+
+ wr32(IGC_RCTL, 0);
+ wr32(IGC_TCTL, IGC_TCTL_PSP);
+ wrfl();
+
+ usleep_range(10000, 20000);
+
+ ctrl = rd32(IGC_CTRL);
+
+ hw_dbg("Issuing a global reset to MAC\n");
+ wr32(IGC_CTRL, ctrl | IGC_CTRL_RST);
+
+ ret_val = igc_get_auto_rd_done(hw);
+ if (ret_val) {
+ /* When auto config read does not complete, do not
+ * return with an error. This can happen in situations
+ * where there is no eeprom and prevents getting link.
+ */
+ hw_dbg("Auto Read Done did not complete\n");
+ }
+
+ /* Clear any pending interrupt events. */
+ wr32(IGC_IMC, 0xffffffff);
+ rd32(IGC_ICR);
+
+ return ret_val;
+}
+
+/**
+ * igc_get_phy_id_base - Retrieve PHY addr and id
+ * @hw: pointer to the HW structure
+ *
+ * Retrieves the PHY address and ID for PHYs that do and do not use the
+ * SGMII interface.
+ */
+static s32 igc_get_phy_id_base(struct igc_hw *hw)
+{
+ s32 ret_val = 0;
+
+ ret_val = igc_get_phy_id(hw);
+
+ return ret_val;
+}
+
+/**
+ * igc_init_nvm_params_base - Init NVM func ptrs.
+ * @hw: pointer to the HW structure
+ */
+static s32 igc_init_nvm_params_base(struct igc_hw *hw)
+{
+ struct igc_nvm_info *nvm = &hw->nvm;
+ u32 eecd = rd32(IGC_EECD);
+ u16 size;
+
+ size = (u16)((eecd & IGC_EECD_SIZE_EX_MASK) >>
+ IGC_EECD_SIZE_EX_SHIFT);
+
+ /* Added to a constant, "size" becomes the left-shift value
+ * for setting word_size.
+ */
+ size += NVM_WORD_SIZE_BASE_SHIFT;
+
+ /* Just in case size is out of range, cap it to the largest
+ * EEPROM size supported
+ */
+ if (size > 15)
+ size = 15;
+
+ nvm->word_size = BIT(size);
+ nvm->opcode_bits = 8;
+ nvm->delay_usec = 1;
+
+ nvm->page_size = eecd & IGC_EECD_ADDR_BITS ? 32 : 8;
+ nvm->address_bits = eecd & IGC_EECD_ADDR_BITS ?
+ 16 : 8;
+
+ if (nvm->word_size == BIT(15))
+ nvm->page_size = 128;
+
+ return 0;
+}
+
+/**
+ * igc_setup_copper_link_base - Configure copper link settings
+ * @hw: pointer to the HW structure
+ *
+ * Configures the link for auto-neg or forced speed and duplex. Then we check
+ * for link, once link is established calls to configure collision distance
+ * and flow control are called.
+ */
+static s32 igc_setup_copper_link_base(struct igc_hw *hw)
+{
+ s32 ret_val = 0;
+ u32 ctrl;
+
+ ctrl = rd32(IGC_CTRL);
+ ctrl |= IGC_CTRL_SLU;
+ ctrl &= ~(IGC_CTRL_FRCSPD | IGC_CTRL_FRCDPX);
+ wr32(IGC_CTRL, ctrl);
+
+ ret_val = igc_setup_copper_link(hw);
+
+ return ret_val;
+}
+
+/**
+ * igc_init_mac_params_base - Init MAC func ptrs.
+ * @hw: pointer to the HW structure
+ */
+static s32 igc_init_mac_params_base(struct igc_hw *hw)
+{
+ struct igc_dev_spec_base *dev_spec = &hw->dev_spec._base;
+ struct igc_mac_info *mac = &hw->mac;
+
+ /* Set mta register count */
+ mac->mta_reg_count = 128;
+ mac->rar_entry_count = IGC_RAR_ENTRIES;
+
+ /* reset */
+ mac->ops.reset_hw = igc_reset_hw_base;
+
+ mac->ops.acquire_swfw_sync = igc_acquire_swfw_sync_i225;
+ mac->ops.release_swfw_sync = igc_release_swfw_sync_i225;
+
+ /* Allow a single clear of the SW semaphore on I225 */
+ if (mac->type == igc_i225)
+ dev_spec->clear_semaphore_once = true;
+
+ /* physical interface link setup */
+ mac->ops.setup_physical_interface = igc_setup_copper_link_base;
+
+ return 0;
+}
+
+/**
+ * igc_init_phy_params_base - Init PHY func ptrs.
+ * @hw: pointer to the HW structure
+ */
+static s32 igc_init_phy_params_base(struct igc_hw *hw)
+{
+ struct igc_phy_info *phy = &hw->phy;
+ s32 ret_val = 0;
+ u32 ctrl_ext;
+
+ if (hw->phy.media_type != igc_media_type_copper) {
+ phy->type = igc_phy_none;
+ goto out;
+ }
+
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT_2500;
+ phy->reset_delay_us = 100;
+
+ ctrl_ext = rd32(IGC_CTRL_EXT);
+
+ /* set lan id */
+ hw->bus.func = (rd32(IGC_STATUS) & IGC_STATUS_FUNC_MASK) >>
+ IGC_STATUS_FUNC_SHIFT;
+
+ /* Make sure the PHY is in a good state. Several people have reported
+ * firmware leaving the PHY's page select register set to something
+ * other than the default of zero, which causes the PHY ID read to
+ * access something other than the intended register.
+ */
+ ret_val = hw->phy.ops.reset(hw);
+ if (ret_val) {
+ hw_dbg("Error resetting the PHY.\n");
+ goto out;
+ }
+
+ ret_val = igc_get_phy_id_base(hw);
+ if (ret_val)
+ return ret_val;
+
+ igc_check_for_link_base(hw);
+
+ /* Verify phy id and set remaining function pointers */
+ switch (phy->id) {
+ case I225_I_PHY_ID:
+ phy->type = igc_phy_i225;
+ break;
+ default:
+ ret_val = -IGC_ERR_PHY;
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+static s32 igc_get_invariants_base(struct igc_hw *hw)
+{
+ struct igc_mac_info *mac = &hw->mac;
+ u32 link_mode = 0;
+ u32 ctrl_ext = 0;
+ s32 ret_val = 0;
+
+ switch (hw->device_id) {
+ case IGC_DEV_ID_I225_LM:
+ case IGC_DEV_ID_I225_V:
+ mac->type = igc_i225;
+ break;
+ default:
+ return -IGC_ERR_MAC_INIT;
+ }
+
+ hw->phy.media_type = igc_media_type_copper;
+
+ ctrl_ext = rd32(IGC_CTRL_EXT);
+ link_mode = ctrl_ext & IGC_CTRL_EXT_LINK_MODE_MASK;
+
+ /* mac initialization and operations */
+ ret_val = igc_init_mac_params_base(hw);
+ if (ret_val)
+ goto out;
+
+ /* NVM initialization */
+ ret_val = igc_init_nvm_params_base(hw);
+ switch (hw->mac.type) {
+ case igc_i225:
+ ret_val = igc_init_nvm_params_i225(hw);
+ break;
+ default:
+ break;
+ }
+
+ /* setup PHY parameters */
+ ret_val = igc_init_phy_params_base(hw);
+ if (ret_val)
+ goto out;
+
+out:
+ return ret_val;
+}
+
+/**
+ * igc_acquire_phy_base - Acquire rights to access PHY
+ * @hw: pointer to the HW structure
+ *
+ * Acquire access rights to the correct PHY. This is a
+ * function pointer entry point called by the api module.
+ */
+static s32 igc_acquire_phy_base(struct igc_hw *hw)
+{
+ u16 mask = IGC_SWFW_PHY0_SM;
+
+ return hw->mac.ops.acquire_swfw_sync(hw, mask);
+}
+
+/**
+ * igc_release_phy_base - Release rights to access PHY
+ * @hw: pointer to the HW structure
+ *
+ * A wrapper to release access rights to the correct PHY. This is a
+ * function pointer entry point called by the api module.
+ */
+static void igc_release_phy_base(struct igc_hw *hw)
+{
+ u16 mask = IGC_SWFW_PHY0_SM;
+
+ hw->mac.ops.release_swfw_sync(hw, mask);
+}
+
+/**
+ * igc_get_link_up_info_base - Get link speed/duplex info
+ * @hw: pointer to the HW structure
+ * @speed: stores the current speed
+ * @duplex: stores the current duplex
+ *
+ * This is a wrapper function. If using the serial gigabit media independent
+ * interface, use the PCS to retrieve the link speed and duplex information.
+ * Otherwise, use the generic function to get the link speed and duplex info.
+ */
+static s32 igc_get_link_up_info_base(struct igc_hw *hw, u16 *speed,
+ u16 *duplex)
+{
+ s32 ret_val;
+
+ ret_val = igc_get_speed_and_duplex_copper(hw, speed, duplex);
+
+ return ret_val;
+}
+
+/**
+ * igc_init_hw_base - Initialize hardware
+ * @hw: pointer to the HW structure
+ *
+ * This inits the hardware readying it for operation.
+ */
+static s32 igc_init_hw_base(struct igc_hw *hw)
+{
+ struct igc_mac_info *mac = &hw->mac;
+ u16 i, rar_count = mac->rar_entry_count;
+ s32 ret_val = 0;
+
+ /* Setup the receive address */
+ igc_init_rx_addrs(hw, rar_count);
+
+ /* Zero out the Multicast HASH table */
+ hw_dbg("Zeroing the MTA\n");
+ for (i = 0; i < mac->mta_reg_count; i++)
+ array_wr32(IGC_MTA, i, 0);
+
+ /* Zero out the Unicast HASH table */
+ hw_dbg("Zeroing the UTA\n");
+ for (i = 0; i < mac->uta_reg_count; i++)
+ array_wr32(IGC_UTA, i, 0);
+
+ /* Setup link and flow control */
+ ret_val = igc_setup_link(hw);
+
+ /* Clear all of the statistics registers (clear on read). It is
+ * important that we do this after we have tried to establish link
+ * because the symbol error count will increment wildly if there
+ * is no link.
+ */
+ igc_clear_hw_cntrs_base(hw);
+
+ return ret_val;
+}
+
+/**
+ * igc_read_mac_addr_base - Read device MAC address
+ * @hw: pointer to the HW structure
+ */
+static s32 igc_read_mac_addr_base(struct igc_hw *hw)
+{
+ s32 ret_val = 0;
+
+ ret_val = igc_read_mac_addr(hw);
+
+ return ret_val;
+}
+
+/**
+ * igc_power_down_phy_copper_base - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * Remove the link when the PHY is powered down to save power, when link is
+ * turned off during a driver unload, or when Wake-on-LAN is not enabled.
+ */
+void igc_power_down_phy_copper_base(struct igc_hw *hw)
+{
+ /* If the management interface is not enabled, then power down */
+ if (!(igc_enable_mng_pass_thru(hw) || igc_check_reset_block(hw)))
+ igc_power_down_phy_copper(hw);
+}
+
+/**
+ * igc_rx_fifo_flush_base - Clean rx fifo after Rx enable
+ * @hw: pointer to the HW structure
+ *
+ * After Rx enable, if manageability is enabled then there is likely some
+ * bad data at the start of the FIFO and possibly in the DMA FIFO. This
+ * function clears the FIFOs and flushes any packets that came in while Rx
+ * was being enabled.
+ */
+void igc_rx_fifo_flush_base(struct igc_hw *hw)
+{
+ u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
+ int i, ms_wait;
+
+ /* disable IPv6 options as per hardware errata */
+ rfctl = rd32(IGC_RFCTL);
+ rfctl |= IGC_RFCTL_IPV6_EX_DIS;
+ wr32(IGC_RFCTL, rfctl);
+
+ if (!(rd32(IGC_MANC) & IGC_MANC_RCV_TCO_EN))
+ return;
+
+ /* Disable all Rx queues */
+ for (i = 0; i < 4; i++) {
+ rxdctl[i] = rd32(IGC_RXDCTL(i));
+ wr32(IGC_RXDCTL(i),
+ rxdctl[i] & ~IGC_RXDCTL_QUEUE_ENABLE);
+ }
+ /* Poll all queues to verify they have shut down */
+ for (ms_wait = 0; ms_wait < 10; ms_wait++) {
+ usleep_range(1000, 2000);
+ rx_enabled = 0;
+ for (i = 0; i < 4; i++)
+ rx_enabled |= rd32(IGC_RXDCTL(i));
+ if (!(rx_enabled & IGC_RXDCTL_QUEUE_ENABLE))
+ break;
+ }
+
+ if (ms_wait == 10)
+ pr_debug("Queue disable timed out after 10ms\n");
+
+ /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
+ * incoming packets are rejected. Set enable and wait 2ms so that
+ * any packet that was in flight when RCTL.EN was set is flushed
+ */
+ wr32(IGC_RFCTL, rfctl & ~IGC_RFCTL_LEF);
+
+ rlpml = rd32(IGC_RLPML);
+ wr32(IGC_RLPML, 0);
+
+ rctl = rd32(IGC_RCTL);
+ temp_rctl = rctl & ~(IGC_RCTL_EN | IGC_RCTL_SBP);
+ temp_rctl |= IGC_RCTL_LPE;
+
+ wr32(IGC_RCTL, temp_rctl);
+ wr32(IGC_RCTL, temp_rctl | IGC_RCTL_EN);
+ wrfl();
+ usleep_range(2000, 3000);
+
+ /* Enable Rx queues that were previously enabled and restore our
+ * previous state
+ */
+ for (i = 0; i < 4; i++)
+ wr32(IGC_RXDCTL(i), rxdctl[i]);
+ wr32(IGC_RCTL, rctl);
+ wrfl();
+
+ wr32(IGC_RLPML, rlpml);
+ wr32(IGC_RFCTL, rfctl);
+
+ /* Flush receive errors generated by workaround */
+ rd32(IGC_ROC);
+ rd32(IGC_RNBC);
+ rd32(IGC_MPC);
+}
+
+static struct igc_mac_operations igc_mac_ops_base = {
+ .init_hw = igc_init_hw_base,
+ .check_for_link = igc_check_for_link_base,
+ .rar_set = igc_rar_set,
+ .read_mac_addr = igc_read_mac_addr_base,
+ .get_speed_and_duplex = igc_get_link_up_info_base,
+};
+
+static const struct igc_phy_operations igc_phy_ops_base = {
+ .acquire = igc_acquire_phy_base,
+ .release = igc_release_phy_base,
+ .reset = igc_phy_hw_reset,
+ .read_reg = igc_read_phy_reg_gpy,
+ .write_reg = igc_write_phy_reg_gpy,
+};
+
+const struct igc_info igc_base_info = {
+ .get_invariants = igc_get_invariants_base,
+ .mac_ops = &igc_mac_ops_base,
+ .phy_ops = &igc_phy_ops_base,
+};
diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h
new file mode 100644
index 000000000000..35588fa7b8c5
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_base.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018 Intel Corporation */
+
+#ifndef _IGC_BASE_H
+#define _IGC_BASE_H
+
+/* forward declaration */
+void igc_rx_fifo_flush_base(struct igc_hw *hw);
+void igc_power_down_phy_copper_base(struct igc_hw *hw);
+
+/* Transmit Descriptor - Advanced */
+union igc_adv_tx_desc {
+ struct {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le32 cmd_type_len;
+ __le32 olinfo_status;
+ } read;
+ struct {
+ __le64 rsvd; /* Reserved */
+ __le32 nxtseq_seed;
+ __le32 status;
+ } wb;
+};
+
+/* Adv Transmit Descriptor Config Masks */
+#define IGC_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */
+#define IGC_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
+#define IGC_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
+#define IGC_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */
+#define IGC_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define IGC_ADVTXD_DCMD_RS 0x08000000 /* Report Status */
+#define IGC_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
+#define IGC_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */
+#define IGC_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
+#define IGC_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
+
+#define IGC_RAR_ENTRIES 16
+
+struct igc_adv_data_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ union {
+ u32 data;
+ struct {
+ u32 datalen:16; /* Data buffer length */
+ u32 rsvd:4;
+ u32 dtyp:4; /* Descriptor type */
+ u32 dcmd:8; /* Descriptor command */
+ } config;
+ } lower;
+ union {
+ u32 data;
+ struct {
+ u32 status:4; /* Descriptor status */
+ u32 idx:4;
+ u32 popts:6; /* Packet Options */
+ u32 paylen:18; /* Payload length */
+ } options;
+ } upper;
+};
+
+/* Receive Descriptor - Advanced */
+union igc_adv_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ union {
+ __le32 data;
+ struct {
+ __le16 pkt_info; /*RSS type, Pkt type*/
+ /* Split Header, header buffer len */
+ __le16 hdr_info;
+ } hs_rss;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length; /* Packet length */
+ __le16 vlan; /* VLAN tag */
+ } upper;
+ } wb; /* writeback */
+};
+
+/* Adv Transmit Descriptor Config Masks */
+#define IGC_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
+
+/* Additional Transmit Descriptor Control definitions */
+#define IGC_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */
+
+/* Additional Receive Descriptor Control definitions */
+#define IGC_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */
+
+/* SRRCTL bit definitions */
+#define IGC_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
+#define IGC_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */
+#define IGC_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
+
+#endif /* _IGC_BASE_H */
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
new file mode 100644
index 000000000000..8740754ea1fd
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -0,0 +1,389 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018 Intel Corporation */
+
+#ifndef _IGC_DEFINES_H_
+#define _IGC_DEFINES_H_
+
+#define IGC_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */
+
+/* PCI Bus Info */
+#define PCIE_DEVICE_CONTROL2 0x28
+#define PCIE_DEVICE_CONTROL2_16ms 0x0005
+
+/* Physical Func Reset Done Indication */
+#define IGC_CTRL_EXT_LINK_MODE_MASK 0x00C00000
+
+/* Loop limit on how long we wait for auto-negotiation to complete */
+#define COPPER_LINK_UP_LIMIT 10
+#define PHY_AUTO_NEG_LIMIT 45
+#define PHY_FORCE_LIMIT 20
+
+/* Number of 100 microseconds we wait for PCI Express master disable */
+#define MASTER_DISABLE_TIMEOUT 800
+/* Blocks new Master requests */
+#define IGC_CTRL_GIO_MASTER_DISABLE 0x00000004
+/* Status of Master requests. */
+#define IGC_STATUS_GIO_MASTER_ENABLE 0x00080000
+
+/* PCI Express Control */
+#define IGC_GCR_CMPL_TMOUT_MASK 0x0000F000
+#define IGC_GCR_CMPL_TMOUT_10ms 0x00001000
+#define IGC_GCR_CMPL_TMOUT_RESEND 0x00010000
+#define IGC_GCR_CAP_VER2 0x00040000
+
+/* Receive Address
+ * Number of high/low register pairs in the RAR. The RAR (Receive Address
+ * Registers) holds the directed and multicast addresses that we monitor.
+ * Technically, we have 16 spots. However, we reserve one of these spots
+ * (RAR[15]) for our directed address used by controllers with
+ * manageability enabled, allowing us room for 15 multicast addresses.
+ */
+#define IGC_RAH_AV 0x80000000 /* Receive descriptor valid */
+#define IGC_RAH_POOL_1 0x00040000
+#define IGC_RAL_MAC_ADDR_LEN 4
+#define IGC_RAH_MAC_ADDR_LEN 2
+
+/* Error Codes */
+#define IGC_SUCCESS 0
+#define IGC_ERR_NVM 1
+#define IGC_ERR_PHY 2
+#define IGC_ERR_CONFIG 3
+#define IGC_ERR_PARAM 4
+#define IGC_ERR_MAC_INIT 5
+#define IGC_ERR_RESET 9
+#define IGC_ERR_MASTER_REQUESTS_PENDING 10
+#define IGC_ERR_BLK_PHY_RESET 12
+#define IGC_ERR_SWFW_SYNC 13
+
+/* Device Control */
+#define IGC_CTRL_RST 0x04000000 /* Global reset */
+
+#define IGC_CTRL_PHY_RST 0x80000000 /* PHY Reset */
+#define IGC_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
+#define IGC_CTRL_FRCSPD 0x00000800 /* Force Speed */
+#define IGC_CTRL_FRCDPX 0x00001000 /* Force Duplex */
+
+#define IGC_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
+#define IGC_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
+
+#define IGC_CONNSW_AUTOSENSE_EN 0x1
+
+/* PBA constants */
+#define IGC_PBA_34K 0x0022
+
+/* SW Semaphore Register */
+#define IGC_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
+#define IGC_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
+
+/* SWFW_SYNC Definitions */
+#define IGC_SWFW_EEP_SM 0x1
+#define IGC_SWFW_PHY0_SM 0x2
+
+/* Autoneg Advertisement Register */
+#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
+#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
+#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
+#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
+#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
+#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
+
+/* Link Partner Ability Register (Base Page) */
+#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
+#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
+
+/* 1000BASE-T Control Register */
+#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */
+#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
+#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
+
+/* 1000BASE-T Status Register */
+#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
+#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
+
+/* PHY GPY 211 registers */
+#define STANDARD_AN_REG_MASK 0x0007 /* MMD */
+#define ANEG_MULTIGBT_AN_CTRL 0x0020 /* MULTI GBT AN Control Register */
+#define MMD_DEVADDR_SHIFT 16 /* Shift MMD to higher bits */
+#define CR_2500T_FD_CAPS 0x0080 /* Advertise 2500T FD capability */
+
+/* NVM Control */
+/* Number of milliseconds for NVM auto read done after MAC reset. */
+#define AUTO_READ_DONE_TIMEOUT 10
+#define IGC_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */
+#define IGC_EECD_REQ 0x00000040 /* NVM Access Request */
+#define IGC_EECD_GNT 0x00000080 /* NVM Access Grant */
+/* NVM Addressing bits based on type 0=small, 1=large */
+#define IGC_EECD_ADDR_BITS 0x00000400
+#define IGC_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */
+#define IGC_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */
+#define IGC_EECD_SIZE_EX_SHIFT 11
+#define IGC_EECD_FLUPD_I225 0x00800000 /* Update FLASH */
+#define IGC_EECD_FLUDONE_I225 0x04000000 /* Update FLASH done*/
+#define IGC_EECD_FLASH_DETECTED_I225 0x00080000 /* FLASH detected */
+#define IGC_FLUDONE_ATTEMPTS 20000
+#define IGC_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */
+
+/* Offset to data in NVM read/write registers */
+#define IGC_NVM_RW_REG_DATA 16
+#define IGC_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
+#define IGC_NVM_RW_REG_START 1 /* Start operation */
+#define IGC_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
+#define IGC_NVM_POLL_READ 0 /* Flag for polling for read complete */
+
+/* NVM Word Offsets */
+#define NVM_CHECKSUM_REG 0x003F
+
+/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
+#define NVM_SUM 0xBABA
+
+#define NVM_PBA_OFFSET_0 8
+#define NVM_PBA_OFFSET_1 9
+#define NVM_RESERVED_WORD 0xFFFF
+#define NVM_PBA_PTR_GUARD 0xFAFA
+#define NVM_WORD_SIZE_BASE_SHIFT 6
+
+/* Collision related configuration parameters */
+#define IGC_COLLISION_THRESHOLD 15
+#define IGC_CT_SHIFT 4
+#define IGC_COLLISION_DISTANCE 63
+#define IGC_COLD_SHIFT 12
+
+/* Device Status */
+#define IGC_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
+#define IGC_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
+#define IGC_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
+#define IGC_STATUS_FUNC_SHIFT 2
+#define IGC_STATUS_FUNC_1 0x00000004 /* Function 1 */
+#define IGC_STATUS_TXOFF 0x00000010 /* transmission paused */
+#define IGC_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
+#define IGC_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
+#define IGC_STATUS_SPEED_2500 0x00400000 /* Speed 2.5Gb/s */
+
+#define SPEED_10 10
+#define SPEED_100 100
+#define SPEED_1000 1000
+#define SPEED_2500 2500
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
+
+/* 1Gbps and 2.5Gbps half duplex are not supported, nor spec-compliant. */
+#define ADVERTISE_10_HALF 0x0001
+#define ADVERTISE_10_FULL 0x0002
+#define ADVERTISE_100_HALF 0x0004
+#define ADVERTISE_100_FULL 0x0008
+#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */
+#define ADVERTISE_1000_FULL 0x0020
+#define ADVERTISE_2500_HALF 0x0040 /* Not used, just FYI */
+#define ADVERTISE_2500_FULL 0x0080
+
+#define IGC_ALL_SPEED_DUPLEX_2500 ( \
+ ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \
+ ADVERTISE_100_FULL | ADVERTISE_1000_FULL | ADVERTISE_2500_FULL)
+
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT_2500 IGC_ALL_SPEED_DUPLEX_2500
+
+/* Interrupt Cause Read */
+#define IGC_ICR_TXDW BIT(0) /* Transmit desc written back */
+#define IGC_ICR_TXQE BIT(1) /* Transmit Queue empty */
+#define IGC_ICR_LSC BIT(2) /* Link Status Change */
+#define IGC_ICR_RXSEQ BIT(3) /* Rx sequence error */
+#define IGC_ICR_RXDMT0 BIT(4) /* Rx desc min. threshold (0) */
+#define IGC_ICR_RXO BIT(6) /* Rx overrun */
+#define IGC_ICR_RXT0 BIT(7) /* Rx timer intr (ring 0) */
+#define IGC_ICR_DRSTA BIT(30) /* Device Reset Asserted */
+
+/* If this bit asserted, the driver should claim the interrupt */
+#define IGC_ICR_INT_ASSERTED BIT(31)
+
+#define IGC_ICS_RXT0 IGC_ICR_RXT0 /* Rx timer intr */
+
+#define IMS_ENABLE_MASK ( \
+ IGC_IMS_RXT0 | \
+ IGC_IMS_TXDW | \
+ IGC_IMS_RXDMT0 | \
+ IGC_IMS_RXSEQ | \
+ IGC_IMS_LSC)
+
+/* Interrupt Mask Set */
+#define IGC_IMS_TXDW IGC_ICR_TXDW /* Tx desc written back */
+#define IGC_IMS_RXSEQ IGC_ICR_RXSEQ /* Rx sequence error */
+#define IGC_IMS_LSC IGC_ICR_LSC /* Link Status Change */
+#define IGC_IMS_DOUTSYNC IGC_ICR_DOUTSYNC /* NIC DMA out of sync */
+#define IGC_IMS_DRSTA IGC_ICR_DRSTA /* Device Reset Asserted */
+#define IGC_IMS_RXT0 IGC_ICR_RXT0 /* Rx timer intr */
+#define IGC_IMS_RXDMT0 IGC_ICR_RXDMT0 /* Rx desc min. threshold */
+
+#define IGC_QVECTOR_MASK 0x7FFC /* Q-vector mask */
+#define IGC_ITR_VAL_MASK 0x04 /* ITR value mask */
+
+/* Interrupt Cause Set */
+#define IGC_ICS_LSC IGC_ICR_LSC /* Link Status Change */
+#define IGC_ICS_RXDMT0 IGC_ICR_RXDMT0 /* rx desc min. threshold */
+#define IGC_ICS_DRSTA IGC_ICR_DRSTA /* Device Reset Asserted */
+
+#define IGC_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */
+#define IGC_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */
+#define IGC_IVAR_VALID 0x80
+#define IGC_GPIE_NSICR 0x00000001
+#define IGC_GPIE_MSIX_MODE 0x00000010
+#define IGC_GPIE_EIAME 0x40000000
+#define IGC_GPIE_PBA 0x80000000
+
+/* Transmit Descriptor bit definitions */
+#define IGC_TXD_DTYP_D 0x00100000 /* Data Descriptor */
+#define IGC_TXD_DTYP_C 0x00000000 /* Context Descriptor */
+#define IGC_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
+#define IGC_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
+#define IGC_TXD_CMD_EOP 0x01000000 /* End of Packet */
+#define IGC_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define IGC_TXD_CMD_IC 0x04000000 /* Insert Checksum */
+#define IGC_TXD_CMD_RS 0x08000000 /* Report Status */
+#define IGC_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */
+#define IGC_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */
+#define IGC_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
+#define IGC_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */
+#define IGC_TXD_STAT_DD 0x00000001 /* Descriptor Done */
+#define IGC_TXD_STAT_EC 0x00000002 /* Excess Collisions */
+#define IGC_TXD_STAT_LC 0x00000004 /* Late Collisions */
+#define IGC_TXD_STAT_TU 0x00000008 /* Transmit underrun */
+#define IGC_TXD_CMD_TCP 0x01000000 /* TCP packet */
+#define IGC_TXD_CMD_IP 0x02000000 /* IP packet */
+#define IGC_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
+#define IGC_TXD_STAT_TC 0x00000004 /* Tx Underrun */
+#define IGC_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */
+
+/* Transmit Control */
+#define IGC_TCTL_EN 0x00000002 /* enable Tx */
+#define IGC_TCTL_PSP 0x00000008 /* pad short packets */
+#define IGC_TCTL_CT 0x00000ff0 /* collision threshold */
+#define IGC_TCTL_COLD 0x003ff000 /* collision distance */
+#define IGC_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
+#define IGC_TCTL_MULR 0x10000000 /* Multiple request support */
+
+#define IGC_CT_SHIFT 4
+#define IGC_COLLISION_THRESHOLD 15
+
+/* Flow Control Constants */
+#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
+#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
+#define FLOW_CONTROL_TYPE 0x8808
+/* Enable XON frame transmission */
+#define IGC_FCRTL_XONE 0x80000000
+
+/* Management Control */
+#define IGC_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
+#define IGC_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
+
+/* Receive Control */
+#define IGC_RCTL_RST 0x00000001 /* Software reset */
+#define IGC_RCTL_EN 0x00000002 /* enable */
+#define IGC_RCTL_SBP 0x00000004 /* store bad packet */
+#define IGC_RCTL_UPE 0x00000008 /* unicast promisc enable */
+#define IGC_RCTL_MPE 0x00000010 /* multicast promisc enable */
+#define IGC_RCTL_LPE 0x00000020 /* long packet enable */
+#define IGC_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
+#define IGC_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
+
+#define IGC_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */
+#define IGC_RCTL_BAM 0x00008000 /* broadcast enable */
+
+/* Receive Descriptor bit definitions */
+#define IGC_RXD_STAT_EOP 0x02 /* End of Packet */
+
+#define IGC_RXDEXT_STATERR_CE 0x01000000
+#define IGC_RXDEXT_STATERR_SE 0x02000000
+#define IGC_RXDEXT_STATERR_SEQ 0x04000000
+#define IGC_RXDEXT_STATERR_CXE 0x10000000
+#define IGC_RXDEXT_STATERR_TCPE 0x20000000
+#define IGC_RXDEXT_STATERR_IPE 0x40000000
+#define IGC_RXDEXT_STATERR_RXE 0x80000000
+
+/* Same mask, but for extended and packet split descriptors */
+#define IGC_RXDEXT_ERR_FRAME_ERR_MASK ( \
+ IGC_RXDEXT_STATERR_CE | \
+ IGC_RXDEXT_STATERR_SE | \
+ IGC_RXDEXT_STATERR_SEQ | \
+ IGC_RXDEXT_STATERR_CXE | \
+ IGC_RXDEXT_STATERR_RXE)
+
+/* Header split receive */
+#define IGC_RFCTL_IPV6_EX_DIS 0x00010000
+#define IGC_RFCTL_LEF 0x00040000
+
+#define IGC_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */
+
+#define IGC_RCTL_MO_SHIFT 12 /* multicast offset shift */
+#define IGC_RCTL_CFIEN 0x00080000 /* canonical form enable */
+#define IGC_RCTL_DPF 0x00400000 /* discard pause frames */
+#define IGC_RCTL_PMCF 0x00800000 /* pass MAC control frames */
+#define IGC_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
+
+#define I225_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */
+#define I225_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */
+
+/* GPY211 - I225 defines */
+#define GPY_MMD_MASK 0xFFFF0000
+#define GPY_MMD_SHIFT 16
+#define GPY_REG_MASK 0x0000FFFF
+
+#define IGC_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */
+
+/* MAC definitions */
+#define IGC_FACTPS_MNGCG 0x20000000
+#define IGC_FWSM_MODE_MASK 0xE
+#define IGC_FWSM_MODE_SHIFT 1
+
+/* Management Control */
+#define IGC_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
+#define IGC_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
+
+/* PHY */
+#define PHY_REVISION_MASK 0xFFFFFFF0
+#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
+#define IGC_GEN_POLL_TIMEOUT 1920
+
+/* PHY Control Register */
+#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
+#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
+#define MII_CR_POWER_DOWN 0x0800 /* Power down */
+#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
+#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
+#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
+#define MII_CR_SPEED_1000 0x0040
+#define MII_CR_SPEED_100 0x2000
+#define MII_CR_SPEED_10 0x0000
+
+/* PHY Status Register */
+#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
+#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
+
+/* PHY 1000 MII Register/Bit Definitions */
+/* PHY Registers defined by IEEE */
+#define PHY_CONTROL 0x00 /* Control Register */
+#define PHY_STATUS 0x01 /* Status Register */
+#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
+#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
+#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
+#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
+#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
+#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
+
+/* Bit definitions for valid PHY IDs. I = Integrated E = External */
+#define I225_I_PHY_ID 0x67C9DC00
+
+/* MDI Control */
+#define IGC_MDIC_DATA_MASK 0x0000FFFF
+#define IGC_MDIC_REG_MASK 0x001F0000
+#define IGC_MDIC_REG_SHIFT 16
+#define IGC_MDIC_PHY_MASK 0x03E00000
+#define IGC_MDIC_PHY_SHIFT 21
+#define IGC_MDIC_OP_WRITE 0x04000000
+#define IGC_MDIC_OP_READ 0x08000000
+#define IGC_MDIC_READY 0x10000000
+#define IGC_MDIC_INT_EN 0x20000000
+#define IGC_MDIC_ERROR 0x40000000
+#define IGC_MDIC_DEST 0x80000000
+
+#define IGC_N0_QUEUE -1
+
+#endif /* _IGC_DEFINES_H_ */
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
new file mode 100644
index 000000000000..c50414f48f0d
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -0,0 +1,321 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018 Intel Corporation */
+
+#ifndef _IGC_HW_H_
+#define _IGC_HW_H_
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+
+#include "igc_regs.h"
+#include "igc_defines.h"
+#include "igc_mac.h"
+#include "igc_phy.h"
+#include "igc_nvm.h"
+#include "igc_i225.h"
+#include "igc_base.h"
+
+#define IGC_DEV_ID_I225_LM 0x15F2
+#define IGC_DEV_ID_I225_V 0x15F3
+
+#define IGC_FUNC_0 0
+
+/* Function pointers for the MAC. */
+struct igc_mac_operations {
+ s32 (*check_for_link)(struct igc_hw *hw);
+ s32 (*reset_hw)(struct igc_hw *hw);
+ s32 (*init_hw)(struct igc_hw *hw);
+ s32 (*setup_physical_interface)(struct igc_hw *hw);
+ void (*rar_set)(struct igc_hw *hw, u8 *address, u32 index);
+ s32 (*read_mac_addr)(struct igc_hw *hw);
+ s32 (*get_speed_and_duplex)(struct igc_hw *hw, u16 *speed,
+ u16 *duplex);
+ s32 (*acquire_swfw_sync)(struct igc_hw *hw, u16 mask);
+ void (*release_swfw_sync)(struct igc_hw *hw, u16 mask);
+};
+
+enum igc_mac_type {
+ igc_undefined = 0,
+ igc_i225,
+ igc_num_macs /* List is 1-based, so subtract 1 for true count. */
+};
+
+enum igc_phy_type {
+ igc_phy_unknown = 0,
+ igc_phy_none,
+ igc_phy_i225,
+};
+
+enum igc_media_type {
+ igc_media_type_unknown = 0,
+ igc_media_type_copper = 1,
+ igc_num_media_types
+};
+
+enum igc_nvm_type {
+ igc_nvm_unknown = 0,
+ igc_nvm_flash_hw,
+ igc_nvm_invm,
+};
+
+struct igc_info {
+ s32 (*get_invariants)(struct igc_hw *hw);
+ struct igc_mac_operations *mac_ops;
+ const struct igc_phy_operations *phy_ops;
+ struct igc_nvm_operations *nvm_ops;
+};
+
+extern const struct igc_info igc_base_info;
+
+struct igc_mac_info {
+ struct igc_mac_operations ops;
+
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+
+ enum igc_mac_type type;
+
+ u32 collision_delta;
+ u32 ledctl_default;
+ u32 ledctl_mode1;
+ u32 ledctl_mode2;
+ u32 mc_filter_type;
+ u32 tx_packet_delta;
+ u32 txcw;
+
+ u16 mta_reg_count;
+ u16 uta_reg_count;
+
+ u16 rar_entry_count;
+
+ u8 forced_speed_duplex;
+
+ bool adaptive_ifs;
+ bool has_fwsm;
+ bool asf_firmware_present;
+ bool arc_subsystem_valid;
+
+ bool autoneg;
+ bool autoneg_failed;
+ bool get_link_status;
+};
+
+struct igc_nvm_operations {
+ s32 (*acquire)(struct igc_hw *hw);
+ s32 (*read)(struct igc_hw *hw, u16 offset, u16 i, u16 *data);
+ void (*release)(struct igc_hw *hw);
+ s32 (*write)(struct igc_hw *hw, u16 offset, u16 i, u16 *data);
+ s32 (*update)(struct igc_hw *hw);
+ s32 (*validate)(struct igc_hw *hw);
+ s32 (*valid_led_default)(struct igc_hw *hw, u16 *data);
+};
+
+struct igc_phy_operations {
+ s32 (*acquire)(struct igc_hw *hw);
+ s32 (*check_polarity)(struct igc_hw *hw);
+ s32 (*check_reset_block)(struct igc_hw *hw);
+ s32 (*force_speed_duplex)(struct igc_hw *hw);
+ s32 (*get_cfg_done)(struct igc_hw *hw);
+ s32 (*get_cable_length)(struct igc_hw *hw);
+ s32 (*get_phy_info)(struct igc_hw *hw);
+ s32 (*read_reg)(struct igc_hw *hw, u32 address, u16 *data);
+ void (*release)(struct igc_hw *hw);
+ s32 (*reset)(struct igc_hw *hw);
+ s32 (*write_reg)(struct igc_hw *hw, u32 address, u16 data);
+};
+
+struct igc_nvm_info {
+ struct igc_nvm_operations ops;
+ enum igc_nvm_type type;
+
+ u32 flash_bank_size;
+ u32 flash_base_addr;
+
+ u16 word_size;
+ u16 delay_usec;
+ u16 address_bits;
+ u16 opcode_bits;
+ u16 page_size;
+};
+
+struct igc_phy_info {
+ struct igc_phy_operations ops;
+
+ enum igc_phy_type type;
+
+ u32 addr;
+ u32 id;
+ u32 reset_delay_us; /* in usec */
+ u32 revision;
+
+ enum igc_media_type media_type;
+
+ u16 autoneg_advertised;
+ u16 autoneg_mask;
+ u16 cable_length;
+ u16 max_cable_length;
+ u16 min_cable_length;
+ u16 pair_length[4];
+
+ u8 mdix;
+
+ bool disable_polarity_correction;
+ bool is_mdix;
+ bool polarity_correction;
+ bool reset_disable;
+ bool speed_downgraded;
+ bool autoneg_wait_to_complete;
+};
+
+struct igc_bus_info {
+ u16 func;
+ u16 pci_cmd_word;
+};
+
+enum igc_fc_mode {
+ igc_fc_none = 0,
+ igc_fc_rx_pause,
+ igc_fc_tx_pause,
+ igc_fc_full,
+ igc_fc_default = 0xFF
+};
+
+struct igc_fc_info {
+ u32 high_water; /* Flow control high-water mark */
+ u32 low_water; /* Flow control low-water mark */
+ u16 pause_time; /* Flow control pause timer */
+ bool send_xon; /* Flow control send XON */
+ bool strict_ieee; /* Strict IEEE mode */
+ enum igc_fc_mode current_mode; /* Type of flow control */
+ enum igc_fc_mode requested_mode;
+};
+
+struct igc_dev_spec_base {
+ bool global_device_reset;
+ bool eee_disable;
+ bool clear_semaphore_once;
+ bool module_plugged;
+ u8 media_port;
+ bool mas_capable;
+};
+
+struct igc_hw {
+ void *back;
+
+ u8 __iomem *hw_addr;
+ unsigned long io_base;
+
+ struct igc_mac_info mac;
+ struct igc_fc_info fc;
+ struct igc_nvm_info nvm;
+ struct igc_phy_info phy;
+
+ struct igc_bus_info bus;
+
+ union {
+ struct igc_dev_spec_base _base;
+ } dev_spec;
+
+ u16 device_id;
+ u16 subsystem_vendor_id;
+ u16 subsystem_device_id;
+ u16 vendor_id;
+
+ u8 revision_id;
+};
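+
+/* Note: the 'back' pointer above is assumed to reference the owning
+ * adapter private structure; igc_get_hw_dev() below resolves it to the
+ * net_device used by the hw_dbg() macro.
+ */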
+
+/* Statistics counters collected by the MAC */
+struct igc_hw_stats {
+ u64 crcerrs;
+ u64 algnerrc;
+ u64 symerrs;
+ u64 rxerrc;
+ u64 mpc;
+ u64 scc;
+ u64 ecol;
+ u64 mcc;
+ u64 latecol;
+ u64 colc;
+ u64 dc;
+ u64 tncrs;
+ u64 sec;
+ u64 cexterr;
+ u64 rlec;
+ u64 xonrxc;
+ u64 xontxc;
+ u64 xoffrxc;
+ u64 xofftxc;
+ u64 fcruc;
+ u64 prc64;
+ u64 prc127;
+ u64 prc255;
+ u64 prc511;
+ u64 prc1023;
+ u64 prc1522;
+ u64 gprc;
+ u64 bprc;
+ u64 mprc;
+ u64 gptc;
+ u64 gorc;
+ u64 gotc;
+ u64 rnbc;
+ u64 ruc;
+ u64 rfc;
+ u64 roc;
+ u64 rjc;
+ u64 mgprc;
+ u64 mgpdc;
+ u64 mgptc;
+ u64 tor;
+ u64 tot;
+ u64 tpr;
+ u64 tpt;
+ u64 ptc64;
+ u64 ptc127;
+ u64 ptc255;
+ u64 ptc511;
+ u64 ptc1023;
+ u64 ptc1522;
+ u64 mptc;
+ u64 bptc;
+ u64 tsctc;
+ u64 tsctfc;
+ u64 iac;
+ u64 icrxptc;
+ u64 icrxatc;
+ u64 ictxptc;
+ u64 ictxatc;
+ u64 ictxqec;
+ u64 ictxqmtc;
+ u64 icrxdmtc;
+ u64 icrxoc;
+ u64 cbtmpc;
+ u64 htdpmc;
+ u64 cbrdpc;
+ u64 cbrmpc;
+ u64 rpthc;
+ u64 hgptc;
+ u64 htcbdpc;
+ u64 hgorc;
+ u64 hgotc;
+ u64 lenerrs;
+ u64 scvpc;
+ u64 hrmpc;
+ u64 doosync;
+ u64 o2bgptc;
+ u64 o2bspc;
+ u64 b2ospc;
+ u64 b2ogprc;
+};
+
+struct net_device *igc_get_hw_dev(struct igc_hw *hw);
+#define hw_dbg(format, arg...) \
+ netdev_dbg(igc_get_hw_dev(hw), format, ##arg)
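+/* Usage sketch: hw_dbg("NVM read failed\n"); resolves to netdev_dbg() on
+ * the adapter's net_device, so output is typically gated by the kernel's
+ * dynamic debug facility.
+ */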
+
+s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value);
+s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value);
+void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value);
+void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value);
+
+#endif /* _IGC_HW_H_ */
diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c
new file mode 100644
index 000000000000..c25f555aaf82
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_i225.c
@@ -0,0 +1,490 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 Intel Corporation */
+
+#include <linux/delay.h>
+
+#include "igc_hw.h"
+
+/**
+ * igc_acquire_nvm_i225 - Acquire exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the necessary semaphores for exclusive access to the EEPROM by
+ * taking the SW/FW EEPROM semaphore. Returns 0 on success, otherwise a
+ * negative error code.
+ */
+static s32 igc_acquire_nvm_i225(struct igc_hw *hw)
+{
+ return igc_acquire_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
+}
+
+/**
+ * igc_release_nvm_i225 - Release exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Stop any current commands to the EEPROM and clear the EEPROM request bit,
+ * then release the semaphores acquired.
+ */
+static void igc_release_nvm_i225(struct igc_hw *hw)
+{
+ igc_release_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
+}
+
+/**
+ * igc_get_hw_semaphore_i225 - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore to access the PHY or NVM
+ */
+static s32 igc_get_hw_semaphore_i225(struct igc_hw *hw)
+{
+ s32 timeout = hw->nvm.word_size + 1;
+ s32 i = 0;
+ u32 swsm;
+
+ /* Get the SW semaphore */
+ while (i < timeout) {
+ swsm = rd32(IGC_SWSM);
+ if (!(swsm & IGC_SWSM_SMBI))
+ break;
+
+ usleep_range(500, 600);
+ i++;
+ }
+
+ if (i == timeout) {
+ /* In rare circumstances, the SW semaphore may already be held
+ * unintentionally. Clear the semaphore once before giving up.
+ */
+ if (hw->dev_spec._base.clear_semaphore_once) {
+ hw->dev_spec._base.clear_semaphore_once = false;
+ igc_put_hw_semaphore(hw);
+ for (i = 0; i < timeout; i++) {
+ swsm = rd32(IGC_SWSM);
+ if (!(swsm & IGC_SWSM_SMBI))
+ break;
+
+ usleep_range(500, 600);
+ }
+ }
+
+ /* If we do not have the semaphore here, we have to give up. */
+ if (i == timeout) {
+ hw_dbg("Driver can't access device - SMBI bit is set.\n");
+ return -IGC_ERR_NVM;
+ }
+ }
+
+ /* Get the FW semaphore. */
+ for (i = 0; i < timeout; i++) {
+ swsm = rd32(IGC_SWSM);
+ wr32(IGC_SWSM, swsm | IGC_SWSM_SWESMBI);
+
+ /* Semaphore acquired if bit latched */
+ if (rd32(IGC_SWSM) & IGC_SWSM_SWESMBI)
+ break;
+
+ usleep_range(500, 600);
+ }
+
+ if (i == timeout) {
+ /* Release semaphores */
+ igc_put_hw_semaphore(hw);
+ hw_dbg("Driver can't access the NVM\n");
+ return -IGC_ERR_NVM;
+ }
+
+ return 0;
+}
+
+/**
+ * igc_acquire_swfw_sync_i225 - Acquire SW/FW semaphore
+ * @hw: pointer to the HW structure
+ * @mask: specifies which semaphore to acquire
+ *
+ * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
+ * will also specify which port we're acquiring the lock for.
+ */
+s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask)
+{
+ s32 i = 0, timeout = 200;
+ u32 fwmask = mask << 16;
+ u32 swmask = mask;
+ s32 ret_val = 0;
+ u32 swfw_sync;
+
+ while (i < timeout) {
+ if (igc_get_hw_semaphore_i225(hw)) {
+ ret_val = -IGC_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ swfw_sync = rd32(IGC_SW_FW_SYNC);
+ if (!(swfw_sync & (fwmask | swmask)))
+ break;
+
+ /* Firmware currently using resource (fwmask) */
+ igc_put_hw_semaphore(hw);
+ mdelay(5);
+ i++;
+ }
+
+ if (i == timeout) {
+ hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
+ ret_val = -IGC_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ swfw_sync |= swmask;
+ wr32(IGC_SW_FW_SYNC, swfw_sync);
+
+ igc_put_hw_semaphore(hw);
+out:
+ return ret_val;
+}
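+
+/* Typical usage (sketch) pairs acquire with release around the protected
+ * resource, for example:
+ *
+ * if (!igc_acquire_swfw_sync_i225(hw, IGC_SWFW_EEP_SM)) {
+ *         ... access the NVM or PHY ...
+ *         igc_release_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
+ * }
+ */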
+
+/**
+ * igc_release_swfw_sync_i225 - Release SW/FW semaphore
+ * @hw: pointer to the HW structure
+ * @mask: specifies which semaphore to acquire
+ *
+ * Release the SW/FW semaphore used to access the PHY or NVM. The mask
+ * will also specify which port we're releasing the lock for.
+ */
+void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+
+ while (igc_get_hw_semaphore_i225(hw))
+ ; /* Empty */
+
+ swfw_sync = rd32(IGC_SW_FW_SYNC);
+ swfw_sync &= ~mask;
+ wr32(IGC_SW_FW_SYNC, swfw_sync);
+
+ igc_put_hw_semaphore(hw);
+}
+
+/**
+ * igc_read_nvm_srrd_i225 - Reads Shadow Ram using EERD register
+ * @hw: pointer to the HW structure
+ * @offset: offset of word in the Shadow Ram to read
+ * @words: number of words to read
+ * @data: word read from the Shadow Ram
+ *
+ * Reads a 16 bit word from the Shadow Ram using the EERD register.
+ * Uses necessary synchronization semaphores.
+ */
+static s32 igc_read_nvm_srrd_i225(struct igc_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ s32 status = 0;
+ u16 i, count;
+
+ /* We cannot hold the synchronization semaphores for too long,
+ * because of the forceful takeover procedure. However, it is more
+ * efficient to read in bursts than to synchronize access for each word.
+ */
+ for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) {
+ count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ?
+ IGC_EERD_EEWR_MAX_COUNT : (words - i);
+
+ status = hw->nvm.ops.acquire(hw);
+ if (status)
+ break;
+
+ status = igc_read_nvm_eerd(hw, offset, count, data + i);
+ hw->nvm.ops.release(hw);
+ if (status)
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * igc_write_nvm_srwr - Write to Shadow Ram using EEWR
+ * @hw: pointer to the HW structure
+ * @offset: offset within the Shadow Ram to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the Shadow Ram
+ *
+ * Writes data to Shadow Ram at offset using EEWR register.
+ *
+ * If igc_update_nvm_checksum is not called after this function, the
+ * Shadow Ram will most likely contain an invalid checksum.
+ */
+static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ struct igc_nvm_info *nvm = &hw->nvm;
+ u32 attempts = 100000;
+ u32 i, k, eewr = 0;
+ s32 ret_val = 0;
+
+ /* A check for invalid values: offset too large, too many words,
+ * too many words for the offset, and not enough words.
+ */
+ if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) ||
+ words == 0) {
+ hw_dbg("nvm parameter(s) out of bounds\n");
+ ret_val = -IGC_ERR_NVM;
+ goto out;
+ }
+
+ for (i = 0; i < words; i++) {
+ eewr = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) |
+ (data[i] << IGC_NVM_RW_REG_DATA) |
+ IGC_NVM_RW_REG_START;
+
+ wr32(IGC_SRWR, eewr);
+
+ for (k = 0; k < attempts; k++) {
+ if (IGC_NVM_RW_REG_DONE &
+ rd32(IGC_SRWR)) {
+ ret_val = 0;
+ break;
+ }
+ udelay(5);
+ }
+
+ if (ret_val) {
+ hw_dbg("Shadow RAM write EEWR timed out\n");
+ break;
+ }
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igc_write_nvm_srwr_i225 - Write to Shadow RAM using EEWR
+ * @hw: pointer to the HW structure
+ * @offset: offset within the Shadow RAM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the Shadow RAM
+ *
+ * Writes data to Shadow RAM at offset using EEWR register.
+ *
+ * If igc_update_nvm_checksum is not called after this function, the
+ * data will not be committed to FLASH and the Shadow RAM will most likely
+ * contain an invalid checksum.
+ *
+ * If error code is returned, data and Shadow RAM may be inconsistent - buffer
+ * partially written.
+ */
+static s32 igc_write_nvm_srwr_i225(struct igc_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ s32 status = 0;
+ u16 i, count;
+
+ /* We cannot hold the synchronization semaphores for too long,
+ * because of the forceful takeover procedure. However, it is more
+ * efficient to write in bursts than to synchronize access for each word.
+ */
+ for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) {
+ count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ?
+ IGC_EERD_EEWR_MAX_COUNT : (words - i);
+
+ status = hw->nvm.ops.acquire(hw);
+ if (status)
+ break;
+
+ status = igc_write_nvm_srwr(hw, offset, count, data + i);
+ hw->nvm.ops.release(hw);
+ if (status)
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * igc_validate_nvm_checksum_i225 - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ * and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ */
+static s32 igc_validate_nvm_checksum_i225(struct igc_hw *hw)
+{
+ s32 (*read_op_ptr)(struct igc_hw *hw, u16 offset, u16 count,
+ u16 *data);
+ s32 status = 0;
+
+ status = hw->nvm.ops.acquire(hw);
+ if (status)
+ goto out;
+
+ /* Replace the read function that grabs the semaphore with one
+ * that skips it, since the semaphore is already held at this
+ * point.
+ */
+ read_op_ptr = hw->nvm.ops.read;
+ hw->nvm.ops.read = igc_read_nvm_eerd;
+
+ status = igc_validate_nvm_checksum(hw);
+
+ /* Revert original read operation. */
+ hw->nvm.ops.read = read_op_ptr;
+
+ hw->nvm.ops.release(hw);
+
+out:
+ return status;
+}
+
+/**
+ * igc_pool_flash_update_done_i225 - Poll FLUDONE status
+ * @hw: pointer to the HW structure
+ */
+static s32 igc_pool_flash_update_done_i225(struct igc_hw *hw)
+{
+ s32 ret_val = -IGC_ERR_NVM;
+ u32 i, reg;
+
+ for (i = 0; i < IGC_FLUDONE_ATTEMPTS; i++) {
+ reg = rd32(IGC_EECD);
+ if (reg & IGC_EECD_FLUDONE_I225) {
+ ret_val = 0;
+ break;
+ }
+ udelay(5);
+ }
+
+ return ret_val;
+}
+
+/**
+ * igc_update_flash_i225 - Commit EEPROM to the flash
+ * @hw: pointer to the HW structure
+ */
+static s32 igc_update_flash_i225(struct igc_hw *hw)
+{
+ s32 ret_val = 0;
+ u32 flup;
+
+ ret_val = igc_pool_flash_update_done_i225(hw);
+ if (ret_val == -IGC_ERR_NVM) {
+ hw_dbg("Flash update time out\n");
+ goto out;
+ }
+
+ flup = rd32(IGC_EECD) | IGC_EECD_FLUPD_I225;
+ wr32(IGC_EECD, flup);
+
+ ret_val = igc_pool_flash_update_done_i225(hw);
+ if (ret_val)
+ hw_dbg("Flash update time out\n");
+ else
+ hw_dbg("Flash update complete\n");
+
+out:
+ return ret_val;
+}
+
+/**
+ * igc_update_nvm_checksum_i225 - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ * up to the checksum. Then calculates the EEPROM checksum and writes the
+ * value to the EEPROM. Next commit EEPROM data onto the Flash.
+ */
+static s32 igc_update_nvm_checksum_i225(struct igc_hw *hw)
+{
+ u16 checksum = 0;
+ s32 ret_val = 0;
+ u16 i, nvm_data;
+
+ /* Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ ret_val = igc_read_nvm_eerd(hw, 0, 1, &nvm_data);
+ if (ret_val) {
+ hw_dbg("EEPROM read failed\n");
+ goto out;
+ }
+
+ ret_val = hw->nvm.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ /* Do not use hw->nvm.ops.write, hw->nvm.ops.read
+ * because we do not want to take the synchronization
+ * semaphores twice here.
+ */
+
+ for (i = 0; i < NVM_CHECKSUM_REG; i++) {
+ ret_val = igc_read_nvm_eerd(hw, i, 1, &nvm_data);
+ if (ret_val) {
+ hw->nvm.ops.release(hw);
+ hw_dbg("NVM Read Error while updating checksum.\n");
+ goto out;
+ }
+ checksum += nvm_data;
+ }
+ checksum = (u16)NVM_SUM - checksum;
+ ret_val = igc_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
+ &checksum);
+ if (ret_val) {
+ hw->nvm.ops.release(hw);
+ hw_dbg("NVM Write Error while updating checksum.\n");
+ goto out;
+ }
+
+ hw->nvm.ops.release(hw);
+
+ ret_val = igc_update_flash_i225(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * igc_get_flash_presence_i225 - Check if flash device is detected
+ * @hw: pointer to the HW structure
+ */
+bool igc_get_flash_presence_i225(struct igc_hw *hw)
+{
+ bool ret_val = false;
+ u32 eec = 0;
+
+ eec = rd32(IGC_EECD);
+ if (eec & IGC_EECD_FLASH_DETECTED_I225)
+ ret_val = true;
+
+ return ret_val;
+}
+
+/**
+ * igc_init_nvm_params_i225 - Init NVM func ptrs.
+ * @hw: pointer to the HW structure
+ */
+s32 igc_init_nvm_params_i225(struct igc_hw *hw)
+{
+ struct igc_nvm_info *nvm = &hw->nvm;
+
+ nvm->ops.acquire = igc_acquire_nvm_i225;
+ nvm->ops.release = igc_release_nvm_i225;
+
+ /* NVM Function Pointers */
+ if (igc_get_flash_presence_i225(hw)) {
+ hw->nvm.type = igc_nvm_flash_hw;
+ nvm->ops.read = igc_read_nvm_srrd_i225;
+ nvm->ops.write = igc_write_nvm_srwr_i225;
+ nvm->ops.validate = igc_validate_nvm_checksum_i225;
+ nvm->ops.update = igc_update_nvm_checksum_i225;
+ } else {
+ hw->nvm.type = igc_nvm_invm;
+ nvm->ops.read = igc_read_nvm_eerd;
+ nvm->ops.write = NULL;
+ nvm->ops.validate = NULL;
+ nvm->ops.update = NULL;
+ }
+ return 0;
+}
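+
+/* Sketch of expected use: once initialized, callers go through the ops
+ * table rather than the *_i225 helpers directly, e.g.
+ * hw->nvm.ops.read(hw, NVM_CHECKSUM_REG, 1, &word); the flash and iNVM
+ * back ends are then selected transparently.
+ */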
diff --git a/drivers/net/ethernet/intel/igc/igc_i225.h b/drivers/net/ethernet/intel/igc/igc_i225.h
new file mode 100644
index 000000000000..7b66e1f9c0e6
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_i225.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018 Intel Corporation */
+
+#ifndef _IGC_I225_H_
+#define _IGC_I225_H_
+
+s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask);
+void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask);
+
+s32 igc_init_nvm_params_i225(struct igc_hw *hw);
+bool igc_get_flash_presence_i225(struct igc_hw *hw);
+
+#endif
diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c
new file mode 100644
index 000000000000..f7683d3ae47c
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_mac.c
@@ -0,0 +1,806 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 Intel Corporation */
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#include "igc_mac.h"
+#include "igc_hw.h"
+
+/* forward declaration */
+static s32 igc_set_default_fc(struct igc_hw *hw);
+static s32 igc_set_fc_watermarks(struct igc_hw *hw);
+
+/**
+ * igc_disable_pcie_master - Disables PCI-express master access
+ * @hw: pointer to the HW structure
+ *
+ * Returns 0 if successful, else returns -IGC_ERR_MASTER_REQUESTS_PENDING
+ * (-10) if the master disable bit has not caused the master requests to be
+ * disabled.
+ *
+ * Disables PCI-Express master access and verifies there are no pending
+ * requests.
+ */
+s32 igc_disable_pcie_master(struct igc_hw *hw)
+{
+ s32 timeout = MASTER_DISABLE_TIMEOUT;
+ s32 ret_val = 0;
+ u32 ctrl;
+
+ ctrl = rd32(IGC_CTRL);
+ ctrl |= IGC_CTRL_GIO_MASTER_DISABLE;
+ wr32(IGC_CTRL, ctrl);
+
+ while (timeout) {
+ if (!(rd32(IGC_STATUS) &
+ IGC_STATUS_GIO_MASTER_ENABLE))
+ break;
+ usleep_range(2000, 3000);
+ timeout--;
+ }
+
+ if (!timeout) {
+ hw_dbg("Master requests are pending.\n");
+ ret_val = -IGC_ERR_MASTER_REQUESTS_PENDING;
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igc_init_rx_addrs - Initialize receive addresses
+ * @hw: pointer to the HW structure
+ * @rar_count: receive address registers
+ *
+ * Setup the receive address registers by setting the base receive address
+ * register to the devices MAC address and clearing all the other receive
+ * address registers to 0.
+ */
+void igc_init_rx_addrs(struct igc_hw *hw, u16 rar_count)
+{
+ u8 mac_addr[ETH_ALEN] = {0};
+ u32 i;
+
+ /* Setup the receive address */
+ hw_dbg("Programming MAC Address into RAR[0]\n");
+
+ hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
+
+ /* Zero out the other (rar_entry_count - 1) receive addresses */
+ hw_dbg("Clearing RAR[1-%u]\n", rar_count - 1);
+ for (i = 1; i < rar_count; i++)
+ hw->mac.ops.rar_set(hw, mac_addr, i);
+}
+
+/**
+ * igc_setup_link - Setup flow control and link settings
+ * @hw: pointer to the HW structure
+ *
+ * Determines which flow control settings to use, then configures flow
+ * control. Calls the appropriate media-specific link configuration
+ * function. Assuming the adapter has a valid link partner, a valid link
+ * should be established. Assumes the hardware has previously been reset
+ * and the transmitter and receiver are not enabled.
+ */
+s32 igc_setup_link(struct igc_hw *hw)
+{
+ s32 ret_val = 0;
+
+ /* In the case of the phy reset being blocked, we already have a link.
+ * We do not need to set it up again.
+ */
+ if (igc_check_reset_block(hw))
+ goto out;
+
+ /* If requested flow control is set to default, set flow control
+ * based on the EEPROM flow control settings.
+ */
+ if (hw->fc.requested_mode == igc_fc_default) {
+ ret_val = igc_set_default_fc(hw);
+ if (ret_val)
+ goto out;
+ }
+
+ /* We want to save off the original Flow Control configuration just
+ * in case we get disconnected and then reconnected into a different
+ * hub or switch with different Flow Control capabilities.
+ */
+ hw->fc.current_mode = hw->fc.requested_mode;
+
+ hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);
+
+ /* Call the necessary media_type subroutine to configure the link. */
+ ret_val = hw->mac.ops.setup_physical_interface(hw);
+ if (ret_val)
+ goto out;
+
+ /* Initialize the flow control address, type, and PAUSE timer
+ * registers to their default values. This is done even if flow
+ * control is disabled, because it does not hurt anything to
+ * initialize these registers.
+ */
+ hw_dbg("Initializing the Flow Control address, type and timer regs\n");
+ wr32(IGC_FCT, FLOW_CONTROL_TYPE);
+ wr32(IGC_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+ wr32(IGC_FCAL, FLOW_CONTROL_ADDRESS_LOW);
+
+ wr32(IGC_FCTTV, hw->fc.pause_time);
+
+ ret_val = igc_set_fc_watermarks(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * igc_set_default_fc - Set flow control default values
+ * @hw: pointer to the HW structure
+ *
+ * Set the default flow control settings. The defaults are not read from
+ * the EEPROM here; full flow control is simply requested.
+ */
+static s32 igc_set_default_fc(struct igc_hw *hw)
+{
+ hw->fc.requested_mode = igc_fc_full;
+ return 0;
+}
+
+/**
+ * igc_force_mac_fc - Force the MAC's flow control settings
+ * @hw: pointer to the HW structure
+ *
+ * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the
+ * device control register to reflect the adapter settings. TFCE and RFCE
+ * need to be explicitly set by software when a copper PHY is used because
+ * autonegotiation is managed by the PHY rather than the MAC. Software must
+ * also configure these bits when link is forced on a fiber connection.
+ */
+s32 igc_force_mac_fc(struct igc_hw *hw)
+{
+ s32 ret_val = 0;
+ u32 ctrl;
+
+ ctrl = rd32(IGC_CTRL);
+
+ /* Because we didn't get link via the internal auto-negotiation
+ * mechanism (we either forced link or we got link via PHY
+ * auto-neg), we have to manually enable/disable transmit and
+ * receive flow control.
+ *
+ * The "Case" statement below enables/disables flow control
+ * according to the "hw->fc.current_mode" parameter.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause
+ * frames but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames
+ * but we do not receive pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ * other: No other values should be possible at this point.
+ */
+ hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);
+
+ switch (hw->fc.current_mode) {
+ case igc_fc_none:
+ ctrl &= (~(IGC_CTRL_TFCE | IGC_CTRL_RFCE));
+ break;
+ case igc_fc_rx_pause:
+ ctrl &= (~IGC_CTRL_TFCE);
+ ctrl |= IGC_CTRL_RFCE;
+ break;
+ case igc_fc_tx_pause:
+ ctrl &= (~IGC_CTRL_RFCE);
+ ctrl |= IGC_CTRL_TFCE;
+ break;
+ case igc_fc_full:
+ ctrl |= (IGC_CTRL_TFCE | IGC_CTRL_RFCE);
+ break;
+ default:
+ hw_dbg("Flow control param set incorrectly\n");
+ ret_val = -IGC_ERR_CONFIG;
+ goto out;
+ }
+
+ wr32(IGC_CTRL, ctrl);
+
+out:
+ return ret_val;
+}
+
+/**
+ * igc_set_fc_watermarks - Set flow control high/low watermarks
+ * @hw: pointer to the HW structure
+ *
+ * Sets the flow control high/low threshold (watermark) registers. If
+ * flow control XON frame transmission is enabled, then set XON frame
+ * transmission as well.
+ */
+static s32 igc_set_fc_watermarks(struct igc_hw *hw)
+{
+ u32 fcrtl = 0, fcrth = 0;
+
+ /* Set the flow control receive threshold registers. Normally,
+ * these registers will be set to a default threshold that may be
+ * adjusted later by the driver's runtime code. However, if the
+ * ability to transmit pause frames is not enabled, then these
+ * registers will be set to 0.
+ */
+ if (hw->fc.current_mode & igc_fc_tx_pause) {
+ /* We need to set up the Receive Threshold high and low water
+ * marks as well as (optionally) enabling the transmission of
+ * XON frames.
+ */
+ fcrtl = hw->fc.low_water;
+ if (hw->fc.send_xon)
+ fcrtl |= IGC_FCRTL_XONE;
+
+ fcrth = hw->fc.high_water;
+ }
+ wr32(IGC_FCRTL, fcrtl);
+ wr32(IGC_FCRTH, fcrth);
+
+ return 0;
+}
+
+/**
+ * igc_clear_hw_cntrs_base - Clear base hardware counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears the base hardware counters by reading the counter registers.
+ */
+void igc_clear_hw_cntrs_base(struct igc_hw *hw)
+{
+ rd32(IGC_CRCERRS);
+ rd32(IGC_SYMERRS);
+ rd32(IGC_MPC);
+ rd32(IGC_SCC);
+ rd32(IGC_ECOL);
+ rd32(IGC_MCC);
+ rd32(IGC_LATECOL);
+ rd32(IGC_COLC);
+ rd32(IGC_DC);
+ rd32(IGC_SEC);
+ rd32(IGC_RLEC);
+ rd32(IGC_XONRXC);
+ rd32(IGC_XONTXC);
+ rd32(IGC_XOFFRXC);
+ rd32(IGC_XOFFTXC);
+ rd32(IGC_FCRUC);
+ rd32(IGC_GPRC);
+ rd32(IGC_BPRC);
+ rd32(IGC_MPRC);
+ rd32(IGC_GPTC);
+ rd32(IGC_GORCL);
+ rd32(IGC_GORCH);
+ rd32(IGC_GOTCL);
+ rd32(IGC_GOTCH);
+ rd32(IGC_RNBC);
+ rd32(IGC_RUC);
+ rd32(IGC_RFC);
+ rd32(IGC_ROC);
+ rd32(IGC_RJC);
+ rd32(IGC_TORL);
+ rd32(IGC_TORH);
+ rd32(IGC_TOTL);
+ rd32(IGC_TOTH);
+ rd32(IGC_TPR);
+ rd32(IGC_TPT);
+ rd32(IGC_MPTC);
+ rd32(IGC_BPTC);
+
+ rd32(IGC_PRC64);
+ rd32(IGC_PRC127);
+ rd32(IGC_PRC255);
+ rd32(IGC_PRC511);
+ rd32(IGC_PRC1023);
+ rd32(IGC_PRC1522);
+ rd32(IGC_PTC64);
+ rd32(IGC_PTC127);
+ rd32(IGC_PTC255);
+ rd32(IGC_PTC511);
+ rd32(IGC_PTC1023);
+ rd32(IGC_PTC1522);
+
+ rd32(IGC_ALGNERRC);
+ rd32(IGC_RXERRC);
+ rd32(IGC_TNCRS);
+ rd32(IGC_CEXTERR);
+ rd32(IGC_TSCTC);
+ rd32(IGC_TSCTFC);
+
+ rd32(IGC_MGTPRC);
+ rd32(IGC_MGTPDC);
+ rd32(IGC_MGTPTC);
+
+ rd32(IGC_IAC);
+ rd32(IGC_ICRXOC);
+
+ rd32(IGC_ICRXPTC);
+ rd32(IGC_ICRXATC);
+ rd32(IGC_ICTXPTC);
+ rd32(IGC_ICTXATC);
+ rd32(IGC_ICTXQEC);
+ rd32(IGC_ICTXQMTC);
+ rd32(IGC_ICRXDMTC);
+
+ rd32(IGC_CBTMPC);
+ rd32(IGC_HTDPMC);
+ rd32(IGC_CBRMPC);
+ rd32(IGC_RPTHC);
+ rd32(IGC_HGPTC);
+ rd32(IGC_HTCBDPC);
+ rd32(IGC_HGORCL);
+ rd32(IGC_HGORCH);
+ rd32(IGC_HGOTCL);
+ rd32(IGC_HGOTCH);
+ rd32(IGC_LENERRS);
+}
+
+/**
+ * igc_rar_set - Set receive address register
+ * @hw: pointer to the HW structure
+ * @addr: pointer to the receive address
+ * @index: receive address array register
+ *
+ * Sets the receive address array register at index to the address passed
+ * in by addr.
+ */
+void igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index)
+{
+ u32 rar_low, rar_high;
+
+ /* HW expects these in little endian so we reverse the byte order
+ * from network order (big endian) to little endian
+ */
+ rar_low = ((u32)addr[0] |
+ ((u32)addr[1] << 8) |
+ ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
+
+ rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
+
+ /* If MAC address zero, no need to set the AV bit */
+ if (rar_low || rar_high)
+ rar_high |= IGC_RAH_AV;
+
+ /* Some bridges will combine consecutive 32-bit writes into
+ * a single burst write, which will malfunction on some parts.
+ * The flushes avoid this.
+ */
+ wr32(IGC_RAL(index), rar_low);
+ wrfl();
+ wr32(IGC_RAH(index), rar_high);
+ wrfl();
+}
+
+/**
+ * igc_check_for_copper_link - Check for link (Copper)
+ * @hw: pointer to the HW structure
+ *
+ * Checks to see if the link status of the hardware has changed. If a
+ * change in link status has been detected, then we read the PHY registers
+ * to get the current speed/duplex if link exists.
+ */
+s32 igc_check_for_copper_link(struct igc_hw *hw)
+{
+ struct igc_mac_info *mac = &hw->mac;
+ s32 ret_val;
+ bool link;
+
+ /* We only want to go out to the PHY registers to see if Auto-Neg
+ * has completed and/or if our link status has changed. The
+ * get_link_status flag is set upon receiving a Link Status
+ * Change or Rx Sequence Error interrupt.
+ */
+ if (!mac->get_link_status) {
+ ret_val = 0;
+ goto out;
+ }
+
+ /* First we want to see if the MII Status Register reports
+ * link. If so, then we want to get the current speed/duplex
+ * of the PHY.
+ */
+ ret_val = igc_phy_has_link(hw, 1, 0, &link);
+ if (ret_val)
+ goto out;
+
+ if (!link)
+ goto out; /* No link detected */
+
+ mac->get_link_status = false;
+
+ /* Check if there was DownShift, must be checked
+ * immediately after link-up
+ */
+ igc_check_downshift(hw);
+
+ /* If we are forcing speed/duplex, then we simply return since
+ * we have already determined whether we have link or not.
+ */
+ if (!mac->autoneg) {
+ ret_val = -IGC_ERR_CONFIG;
+ goto out;
+ }
+
+ /* Auto-Neg is enabled. Auto Speed Detection takes care
+ * of MAC speed/duplex configuration. So we only need to
+ * configure Collision Distance in the MAC.
+ */
+ igc_config_collision_dist(hw);
+
+ /* Configure Flow Control now that Auto-Neg has completed.
+ * First, we need to restore the desired flow control
+ * settings because we may have had to re-autoneg with a
+ * different link partner.
+ */
+ ret_val = igc_config_fc_after_link_up(hw);
+ if (ret_val)
+ hw_dbg("Error configuring flow control\n");
+
+out:
+ return ret_val;
+}
+
+/**
+ * igc_config_collision_dist - Configure collision distance
+ * @hw: pointer to the HW structure
+ *
+ * Configures the collision distance to the default value and is used
+ * during link setup. Currently no func pointer exists and all
+ * implementations are handled in the generic version of this function.
+ */
+void igc_config_collision_dist(struct igc_hw *hw)
+{
+ u32 tctl;
+
+ tctl = rd32(IGC_TCTL);
+
+ tctl &= ~IGC_TCTL_COLD;
+ tctl |= IGC_COLLISION_DISTANCE << IGC_COLD_SHIFT;
+
+ wr32(IGC_TCTL, tctl);
+ wrfl();
+}
+
+/**
+ * igc_config_fc_after_link_up - Configures flow control after link
+ * @hw: pointer to the HW structure
+ *
+ * Checks the status of auto-negotiation after link up to ensure that the
+ * speed and duplex were not forced. If the link needed to be forced, then
+ * flow control needs to be forced also. If auto-negotiation is enabled
+ * and did not fail, then we configure flow control based on our link
+ * partner.
+ */
+s32 igc_config_fc_after_link_up(struct igc_hw *hw)
+{
+ u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
+ struct igc_mac_info *mac = &hw->mac;
+ u16 speed, duplex;
+ s32 ret_val = 0;
+
+ /* Check for the case where we have fiber media and auto-neg failed
+ * so we had to force link. In this case, we need to force the
+ * configuration of the MAC to match the "fc" parameter.
+ */
+ if (mac->autoneg_failed) {
+ if (hw->phy.media_type == igc_media_type_copper)
+ ret_val = igc_force_mac_fc(hw);
+ }
+
+ if (ret_val) {
+ hw_dbg("Error forcing flow control settings\n");
+ goto out;
+ }
+
+ /* Check for the case where we have copper media and auto-neg is
+ * enabled. In this case, we need to check and see if Auto-Neg
+ * has completed, and if so, how the PHY and link partner has
+ * flow control configured.
+ */
+ if (hw->phy.media_type == igc_media_type_copper && mac->autoneg) {
+ /* Read the MII Status Register and check to see if AutoNeg
+ * has completed. We read this twice because this reg has
+ * some "sticky" (latched) bits.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
+ &mii_status_reg);
+ if (ret_val)
+ goto out;
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
+ &mii_status_reg);
+ if (ret_val)
+ goto out;
+
+ if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
+ hw_dbg("Copper PHY and Auto Neg has not completed.\n");
+ goto out;
+ }
+
+ /* The AutoNeg process has completed, so we now need to
+ * read both the Auto Negotiation Advertisement
+ * Register (Address 4) and the Auto Negotiation Base
+ * Page Ability Register (Address 5) to determine how
+ * flow control was negotiated.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
+ &mii_nway_adv_reg);
+ if (ret_val)
+ goto out;
+ ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
+ &mii_nway_lp_ability_reg);
+ if (ret_val)
+ goto out;
+ /* Two bits in the Auto Negotiation Advertisement Register
+ * (Address 4) and two bits in the Auto Negotiation Base
+ * Page Ability Register (Address 5) determine flow control
+ * for both the PHY and the link partner. The following
+ * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+ * 1999, describes these PAUSE resolution bits and how flow
+ * control is determined based upon these settings.
+ * NOTE: DC = Don't Care
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+ *-------|---------|-------|---------|--------------------
+ * 0 | 0 | DC | DC | igc_fc_none
+ * 0 | 1 | 0 | DC | igc_fc_none
+ * 0 | 1 | 1 | 0 | igc_fc_none
+ * 0 | 1 | 1 | 1 | igc_fc_tx_pause
+ * 1 | 0 | 0 | DC | igc_fc_none
+ * 1 | DC | 1 | DC | igc_fc_full
+ * 1 | 1 | 0 | 0 | igc_fc_none
+ * 1 | 1 | 0 | 1 | igc_fc_rx_pause
+ *
+ * Are both PAUSE bits set to 1? If so, this implies
+ * Symmetric Flow Control is enabled at both ends. The
+ * ASM_DIR bits are irrelevant per the spec.
+ *
+ * For Symmetric Flow Control:
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | DC | 1 | DC | IGC_fc_full
+ *
+ */
+ if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+ /* Now we need to check if the user selected RX ONLY
+ * of pause frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise RX
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
+ */
+ if (hw->fc.requested_mode == igc_fc_full) {
+ hw->fc.current_mode = igc_fc_full;
+ hw_dbg("Flow Control = FULL.\n");
+ } else {
+ hw->fc.current_mode = igc_fc_rx_pause;
+ hw_dbg("Flow Control = RX PAUSE frames only.\n");
+ }
+ }
+
+ /* For receiving PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 0 | 1 | 1 | 1 | igc_fc_tx_pause
+ */
+ else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ hw->fc.current_mode = igc_fc_tx_pause;
+ hw_dbg("Flow Control = TX PAUSE frames only.\n");
+ }
+ /* For transmitting PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | 1 | 0 | 1 | igc_fc_rx_pause
+ */
+ else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+ !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ hw->fc.current_mode = igc_fc_rx_pause;
+ hw_dbg("Flow Control = RX PAUSE frames only.\n");
+ }
+ /* Per the IEEE spec, at this point flow control should be
+ * disabled. However, we want to consider that we could
+ * be connected to a legacy switch that doesn't advertise
+ * desired flow control, but can be forced on the link
+ * partner. So if we advertised no flow control, that is
+ * what we will resolve to. If we advertised some kind of
+ * receive capability (Rx Pause Only or Full Flow Control)
+ * and the link partner advertised none, we will configure
+ * ourselves to enable Rx Flow Control only. We can do
+ * this safely for two reasons: If the link partner really
+ * didn't want flow control enabled, and we enable Rx, no
+ * harm done since we won't be receiving any PAUSE frames
+ * anyway. If the intent on the link partner was to have
+ * flow control enabled, then by us enabling RX only, we
+ * can at least receive pause frames and process them.
+ * This is a good idea because in most cases, since we are
+ * predominantly a server NIC, more often than not we will
+ * be asked to delay transmission of packets rather than asking
+ * our link partner to pause transmission of frames.
+ */
+ else if ((hw->fc.requested_mode == igc_fc_none) ||
+ (hw->fc.requested_mode == igc_fc_tx_pause) ||
+ (hw->fc.strict_ieee)) {
+ hw->fc.current_mode = igc_fc_none;
+ hw_dbg("Flow Control = NONE.\n");
+ } else {
+ hw->fc.current_mode = igc_fc_rx_pause;
+ hw_dbg("Flow Control = RX PAUSE frames only.\n");
+ }
+
+ /* Now we need to do one last check... If we auto-
+ * negotiated to HALF DUPLEX, flow control should not be
+ * enabled per IEEE 802.3 spec.
+ */
+ ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
+ if (ret_val) {
+ hw_dbg("Error getting link speed and duplex\n");
+ goto out;
+ }
+
+ if (duplex == HALF_DUPLEX)
+ hw->fc.current_mode = igc_fc_none;
+
+ /* Now we call a subroutine to actually force the MAC
+ * controller to use the correct flow control settings.
+ */
+ ret_val = igc_force_mac_fc(hw);
+ if (ret_val) {
+ hw_dbg("Error forcing flow control settings\n");
+ goto out;
+ }
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igc_get_auto_rd_done - Check for auto read completion
+ * @hw: pointer to the HW structure
+ *
+ * Check EEPROM for Auto Read done bit.
+ */
+s32 igc_get_auto_rd_done(struct igc_hw *hw)
+{
+ s32 ret_val = 0;
+ s32 i = 0;
+
+ while (i < AUTO_READ_DONE_TIMEOUT) {
+ if (rd32(IGC_EECD) & IGC_EECD_AUTO_RD)
+ break;
+ usleep_range(1000, 2000);
+ i++;
+ }
+
+ if (i == AUTO_READ_DONE_TIMEOUT) {
+ hw_dbg("Auto read by HW from NVM has not completed.\n");
+ ret_val = -IGC_ERR_RESET;
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igc_get_speed_and_duplex_copper - Retrieve current speed/duplex
+ * @hw: pointer to the HW structure
+ * @speed: stores the current speed
+ * @duplex: stores the current duplex
+ *
+ * Read the status register for the current speed/duplex and store the current
+ * speed and duplex for copper connections.
+ */
+s32 igc_get_speed_and_duplex_copper(struct igc_hw *hw, u16 *speed,
+ u16 *duplex)
+{
+ u32 status;
+
+ status = rd32(IGC_STATUS);
+ if (status & IGC_STATUS_SPEED_1000) {
+ /* For I225, STATUS will indicate 1G speed in both 1 Gbps
+ * and 2.5 Gbps link modes. An additional bit is used
+ * to differentiate between 1 Gbps and 2.5 Gbps.
+ */
+ if (hw->mac.type == igc_i225 &&
+ (status & IGC_STATUS_SPEED_2500)) {
+ *speed = SPEED_2500;
+ hw_dbg("2500 Mbps, ");
+ } else {
+ *speed = SPEED_1000;
+ hw_dbg("1000 Mbps, ");
+ }
+ } else if (status & IGC_STATUS_SPEED_100) {
+ *speed = SPEED_100;
+ hw_dbg("100 Mbps, ");
+ } else {
+ *speed = SPEED_10;
+ hw_dbg("10 Mbps, ");
+ }
+
+ if (status & IGC_STATUS_FD) {
+ *duplex = FULL_DUPLEX;
+ hw_dbg("Full Duplex\n");
+ } else {
+ *duplex = HALF_DUPLEX;
+ hw_dbg("Half Duplex\n");
+ }
+
+ return 0;
+}
+
+/**
+ * igc_put_hw_semaphore - Release hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Release hardware semaphore used to access the PHY or NVM
+ */
+void igc_put_hw_semaphore(struct igc_hw *hw)
+{
+ u32 swsm;
+
+ swsm = rd32(IGC_SWSM);
+
+ swsm &= ~(IGC_SWSM_SMBI | IGC_SWSM_SWESMBI);
+
+ wr32(IGC_SWSM, swsm);
+}
+
+/**
+ * igc_enable_mng_pass_thru - Enable processing of ARPs
+ * @hw: pointer to the HW structure
+ *
+ * Verifies that the hardware needs to leave the interface enabled so that frames can
+ * be directed to and from the management interface.
+ */
+bool igc_enable_mng_pass_thru(struct igc_hw *hw)
+{
+ bool ret_val = false;
+ u32 fwsm, factps;
+ u32 manc;
+
+ if (!hw->mac.asf_firmware_present)
+ goto out;
+
+ manc = rd32(IGC_MANC);
+
+ if (!(manc & IGC_MANC_RCV_TCO_EN))
+ goto out;
+
+ if (hw->mac.arc_subsystem_valid) {
+ fwsm = rd32(IGC_FWSM);
+ factps = rd32(IGC_FACTPS);
+
+ if (!(factps & IGC_FACTPS_MNGCG) &&
+ ((fwsm & IGC_FWSM_MODE_MASK) ==
+ (igc_mng_mode_pt << IGC_FWSM_MODE_SHIFT))) {
+ ret_val = true;
+ goto out;
+ }
+ } else {
+ if ((manc & IGC_MANC_SMBUS_EN) &&
+ !(manc & IGC_MANC_ASF_EN)) {
+ ret_val = true;
+ goto out;
+ }
+ }
+
+out:
+ return ret_val;
+}
diff --git a/drivers/net/ethernet/intel/igc/igc_mac.h b/drivers/net/ethernet/intel/igc/igc_mac.h
new file mode 100644
index 000000000000..782bc995badc
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_mac.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018 Intel Corporation */
+
+#ifndef _IGC_MAC_H_
+#define _IGC_MAC_H_
+
+#include "igc_hw.h"
+#include "igc_phy.h"
+#include "igc_defines.h"
+
+#ifndef IGC_REMOVED
+#define IGC_REMOVED(a) (0)
+#endif /* IGC_REMOVED */
+
+/* forward declaration */
+s32 igc_disable_pcie_master(struct igc_hw *hw);
+s32 igc_check_for_copper_link(struct igc_hw *hw);
+s32 igc_config_fc_after_link_up(struct igc_hw *hw);
+s32 igc_force_mac_fc(struct igc_hw *hw);
+void igc_init_rx_addrs(struct igc_hw *hw, u16 rar_count);
+s32 igc_setup_link(struct igc_hw *hw);
+void igc_clear_hw_cntrs_base(struct igc_hw *hw);
+s32 igc_get_auto_rd_done(struct igc_hw *hw);
+void igc_put_hw_semaphore(struct igc_hw *hw);
+void igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index);
+void igc_config_collision_dist(struct igc_hw *hw);
+
+s32 igc_get_speed_and_duplex_copper(struct igc_hw *hw, u16 *speed,
+ u16 *duplex);
+
+bool igc_enable_mng_pass_thru(struct igc_hw *hw);
+
+enum igc_mng_mode {
+ igc_mng_mode_none = 0,
+ igc_mng_mode_asf,
+ igc_mng_mode_pt,
+ igc_mng_mode_ipmi,
+ igc_mng_mode_host_if_only
+};
+
+#endif
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
new file mode 100644
index 000000000000..9d85707e8a81
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -0,0 +1,3901 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 Intel Corporation */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/if_vlan.h>
+#include <linux/aer.h>
+
+#include "igc.h"
+#include "igc_hw.h"
+
+#define DRV_VERSION "0.0.1-k"
+#define DRV_SUMMARY "Intel(R) 2.5G Ethernet Linux Driver"
+
+static int debug = -1;
+
+MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
+MODULE_DESCRIPTION(DRV_SUMMARY);
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+char igc_driver_name[] = "igc";
+char igc_driver_version[] = DRV_VERSION;
+static const char igc_driver_string[] = DRV_SUMMARY;
+static const char igc_copyright[] =
+ "Copyright(c) 2018 Intel Corporation.";
+
+static const struct igc_info *igc_info_tbl[] = {
+ [board_base] = &igc_base_info,
+};
+
+static const struct pci_device_id igc_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base },
+ /* required last entry */
+ {0, }
+};
+
+MODULE_DEVICE_TABLE(pci, igc_pci_tbl);
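+
+/* Exporting the ID table lets userspace (udev/modprobe) autoload this
+ * module when a matching I225 device is discovered on the PCI bus.
+ */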
+
+/* forward declaration */
+static void igc_clean_tx_ring(struct igc_ring *tx_ring);
+static int igc_sw_init(struct igc_adapter *);
+static void igc_configure(struct igc_adapter *adapter);
+static void igc_power_down_link(struct igc_adapter *adapter);
+static void igc_set_default_mac_filter(struct igc_adapter *adapter);
+static void igc_set_rx_mode(struct net_device *netdev);
+static void igc_write_itr(struct igc_q_vector *q_vector);
+static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector);
+static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx);
+static void igc_set_interrupt_capability(struct igc_adapter *adapter,
+ bool msix);
+static void igc_free_q_vectors(struct igc_adapter *adapter);
+static void igc_irq_disable(struct igc_adapter *adapter);
+static void igc_irq_enable(struct igc_adapter *adapter);
+static void igc_configure_msix(struct igc_adapter *adapter);
+static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
+ struct igc_rx_buffer *bi);
+
+enum latency_range {
+ lowest_latency = 0,
+ low_latency = 1,
+ bulk_latency = 2,
+ latency_invalid = 255
+};
+
+static void igc_reset(struct igc_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct igc_hw *hw = &adapter->hw;
+
+ hw->mac.ops.reset_hw(hw);
+
+ if (hw->mac.ops.init_hw(hw))
+ dev_err(&pdev->dev, "Hardware Error\n");
+
+ if (!netif_running(adapter->netdev))
+ igc_power_down_link(adapter);
+
+ igc_get_phy_info(hw);
+}
+
+/**
+ * igc_power_up_link - Power up the phy/serdes link
+ * @adapter: address of board private structure
+ */
+static void igc_power_up_link(struct igc_adapter *adapter)
+{
+ igc_reset_phy(&adapter->hw);
+
+ if (adapter->hw.phy.media_type == igc_media_type_copper)
+ igc_power_up_phy_copper(&adapter->hw);
+
+ igc_setup_link(&adapter->hw);
+}
+
+/**
+ * igc_power_down_link - Power down the phy/serdes link
+ * @adapter: address of board private structure
+ */
+static void igc_power_down_link(struct igc_adapter *adapter)
+{
+ if (adapter->hw.phy.media_type == igc_media_type_copper)
+ igc_power_down_phy_copper_base(&adapter->hw);
+}
+
+/**
+ * igc_release_hw_control - release control of the h/w to f/w
+ * @adapter: address of board private structure
+ *
+ * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that the
+ * driver is no longer loaded.
+ */
+static void igc_release_hw_control(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 ctrl_ext;
+
+ /* Let firmware take over control of h/w */
+ ctrl_ext = rd32(IGC_CTRL_EXT);
+ wr32(IGC_CTRL_EXT,
+ ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
+}
+
+/**
+ * igc_get_hw_control - get control of the h/w from f/w
+ * @adapter: address of board private structure
+ *
+ * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that
+ * the driver is loaded.
+ */
+static void igc_get_hw_control(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 ctrl_ext;
+
+ /* Let firmware know the driver has taken over */
+ ctrl_ext = rd32(IGC_CTRL_EXT);
+ wr32(IGC_CTRL_EXT,
+ ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
+}
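+
+/* Note: the get/release pair above brackets driver ownership via
+ * CTRL_EXT.DRV_LOAD; it is assumed to be taken while the interface is in
+ * use and released again so firmware can manage the part when the driver
+ * is not active.
+ */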
+
+/**
+ * igc_free_tx_resources - Free Tx Resources per Queue
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ */
+static void igc_free_tx_resources(struct igc_ring *tx_ring)
+{
+ igc_clean_tx_ring(tx_ring);
+
+ vfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+
+ /* if not set, then don't free */
+ if (!tx_ring->desc)
+ return;
+
+ dma_free_coherent(tx_ring->dev, tx_ring->size,
+ tx_ring->desc, tx_ring->dma);
+
+ tx_ring->desc = NULL;
+}
+
+/**
+ * igc_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ */
+static void igc_free_all_tx_resources(struct igc_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ igc_free_tx_resources(adapter->tx_ring[i]);
+}
+
+/**
+ * igc_clean_tx_ring - Free Tx Buffers
+ * @tx_ring: ring to be cleaned
+ */
+static void igc_clean_tx_ring(struct igc_ring *tx_ring)
+{
+ u16 i = tx_ring->next_to_clean;
+ struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
+
+ while (i != tx_ring->next_to_use) {
+ union igc_adv_tx_desc *eop_desc, *tx_desc;
+
+ /* Free all the Tx ring sk_buffs */
+ dev_kfree_skb_any(tx_buffer->skb);
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+
+ /* check for eop_desc to determine the end of the packet */
+ eop_desc = tx_buffer->next_to_watch;
+ tx_desc = IGC_TX_DESC(tx_ring, i);
+
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(i == tx_ring->count)) {
+ i = 0;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = IGC_TX_DESC(tx_ring, 0);
+ }
+
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ }
+
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buffer++;
+ i++;
+ if (unlikely(i == tx_ring->count)) {
+ i = 0;
+ tx_buffer = tx_ring->tx_buffer_info;
+ }
+ }
+
+ /* reset BQL for queue */
+ netdev_tx_reset_queue(txring_txq(tx_ring));
+
+ /* reset next_to_use and next_to_clean */
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+}
+
+/**
+ * igc_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ */
+static void igc_clean_all_tx_rings(struct igc_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ if (adapter->tx_ring[i])
+ igc_clean_tx_ring(adapter->tx_ring[i]);
+}
+
+/**
+ * igc_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @tx_ring: tx descriptor ring (for a specific queue) to setup
+ *
+ * Return 0 on success, negative on failure
+ */
+static int igc_setup_tx_resources(struct igc_ring *tx_ring)
+{
+ struct device *dev = tx_ring->dev;
+ int size = 0;
+
+ size = sizeof(struct igc_tx_buffer) * tx_ring->count;
+ tx_ring->tx_buffer_info = vzalloc(size);
+ if (!tx_ring->tx_buffer_info)
+ goto err;
+
+ /* round up to nearest 4K */
+ tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
+
+ tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
+
+ if (!tx_ring->desc)
+ goto err;
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+
+ return 0;
+
+err:
+ vfree(tx_ring->tx_buffer_info);
+ dev_err(dev,
+ "Unable to allocate memory for the transmit descriptor ring\n");
+ return -ENOMEM;
+}
+
+/**
+ * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ */
+static int igc_setup_all_tx_resources(struct igc_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ err = igc_setup_tx_resources(adapter->tx_ring[i]);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Allocation for Tx Queue %u failed\n", i);
+ for (i--; i >= 0; i--)
+ igc_free_tx_resources(adapter->tx_ring[i]);
+ break;
+ }
+ }
+
+ return err;
+}
+
+/**
+ * igc_clean_rx_ring - Free Rx Buffers per Queue
+ * @rx_ring: ring to free buffers from
+ */
+static void igc_clean_rx_ring(struct igc_ring *rx_ring)
+{
+ u16 i = rx_ring->next_to_clean;
+
+ if (rx_ring->skb)
+ dev_kfree_skb(rx_ring->skb);
+ rx_ring->skb = NULL;
+
+ /* Free all the Rx ring sk_buffs */
+ while (i != rx_ring->next_to_alloc) {
+ struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
+
+ /* Invalidate cache lines that may have been written to by
+ * device so that we avoid corrupting memory.
+ */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ buffer_info->dma,
+ buffer_info->page_offset,
+ igc_rx_bufsz(rx_ring),
+ DMA_FROM_DEVICE);
+
+ /* free resources associated with mapping */
+ dma_unmap_page_attrs(rx_ring->dev,
+ buffer_info->dma,
+ igc_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ IGC_RX_DMA_ATTR);
+ __page_frag_cache_drain(buffer_info->page,
+ buffer_info->pagecnt_bias);
+
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ }
+
+ rx_ring->next_to_alloc = 0;
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+}
+
+/**
+ * igc_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ */
+static void igc_clean_all_rx_rings(struct igc_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ if (adapter->rx_ring[i])
+ igc_clean_rx_ring(adapter->rx_ring[i]);
+}
+
+/**
+ * igc_free_rx_resources - Free Rx Resources
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ */
+static void igc_free_rx_resources(struct igc_ring *rx_ring)
+{
+ igc_clean_rx_ring(rx_ring);
+
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+
+ /* if not set, then don't free */
+ if (!rx_ring->desc)
+ return;
+
+ dma_free_coherent(rx_ring->dev, rx_ring->size,
+ rx_ring->desc, rx_ring->dma);
+
+ rx_ring->desc = NULL;
+}
+
+/**
+ * igc_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ */
+static void igc_free_all_rx_resources(struct igc_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ igc_free_rx_resources(adapter->rx_ring[i]);
+}
+
+/**
+ * igc_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @rx_ring: rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int igc_setup_rx_resources(struct igc_ring *rx_ring)
+{
+ struct device *dev = rx_ring->dev;
+ int size, desc_len;
+
+ size = sizeof(struct igc_rx_buffer) * rx_ring->count;
+ rx_ring->rx_buffer_info = vzalloc(size);
+ if (!rx_ring->rx_buffer_info)
+ goto err;
+
+ desc_len = sizeof(union igc_adv_rx_desc);
+
+ /* Round up to nearest 4K */
+ rx_ring->size = rx_ring->count * desc_len;
+ rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+
+ if (!rx_ring->desc)
+ goto err;
+
+ rx_ring->next_to_alloc = 0;
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ return 0;
+
+err:
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+ dev_err(dev,
+ "Unable to allocate memory for the receive descriptor ring\n");
+ return -ENOMEM;
+}
+
+/**
+ * igc_setup_all_rx_resources - wrapper to allocate Rx resources
+ * (Descriptors) for all queues
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ */
+static int igc_setup_all_rx_resources(struct igc_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ err = igc_setup_rx_resources(adapter->rx_ring[i]);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Allocation for Rx Queue %u failed\n", i);
+ for (i--; i >= 0; i--)
+ igc_free_rx_resources(adapter->rx_ring[i]);
+ break;
+ }
+ }
+
+ return err;
+}
+
+/**
+ * igc_configure_rx_ring - Configure a receive ring after Reset
+ * @adapter: board private structure
+ * @ring: receive ring to be configured
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ */
+static void igc_configure_rx_ring(struct igc_adapter *adapter,
+ struct igc_ring *ring)
+{
+ struct igc_hw *hw = &adapter->hw;
+ union igc_adv_rx_desc *rx_desc;
+ int reg_idx = ring->reg_idx;
+ u32 srrctl = 0, rxdctl = 0;
+ u64 rdba = ring->dma;
+
+ /* disable the queue */
+ wr32(IGC_RXDCTL(reg_idx), 0);
+
+ /* Set DMA base address registers */
+ wr32(IGC_RDBAL(reg_idx),
+ rdba & 0x00000000ffffffffULL);
+ wr32(IGC_RDBAH(reg_idx), rdba >> 32);
+ wr32(IGC_RDLEN(reg_idx),
+ ring->count * sizeof(union igc_adv_rx_desc));
+
+ /* initialize head and tail */
+ ring->tail = adapter->io_addr + IGC_RDT(reg_idx);
+ wr32(IGC_RDH(reg_idx), 0);
+ writel(0, ring->tail);
+
+ /* reset next-to-use/clean to place SW in sync with hardware */
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
+
+ /* set descriptor configuration */
+ srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT;
+ if (ring_uses_large_buffer(ring))
+ srrctl |= IGC_RXBUFFER_3072 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
+ else
+ srrctl |= IGC_RXBUFFER_2048 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
+ srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;
+
+ wr32(IGC_SRRCTL(reg_idx), srrctl);
+
+ rxdctl |= IGC_RX_PTHRESH;
+ rxdctl |= IGC_RX_HTHRESH << 8;
+ rxdctl |= IGC_RX_WTHRESH << 16;
+
+ /* initialize rx_buffer_info */
+ memset(ring->rx_buffer_info, 0,
+ sizeof(struct igc_rx_buffer) * ring->count);
+
+ /* initialize Rx descriptor 0 */
+ rx_desc = IGC_RX_DESC(ring, 0);
+ rx_desc->wb.upper.length = 0;
+
+ /* enable receive descriptor fetching */
+ rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;
+
+ wr32(IGC_RXDCTL(reg_idx), rxdctl);
+}
+
+/**
+ * igc_configure_rx - Configure receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ */
+static void igc_configure_rx(struct igc_adapter *adapter)
+{
+ int i;
+
+ /* Setup the HW Rx Head and Tail Descriptor Pointers and
+ * the Base and Length of the Rx Descriptor Ring
+ */
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ igc_configure_rx_ring(adapter, adapter->rx_ring[i]);
+}
+
+/**
+ * igc_configure_tx_ring - Configure transmit ring after Reset
+ * @adapter: board private structure
+ * @ring: tx ring to configure
+ *
+ * Configure a transmit ring after a reset.
+ */
+static void igc_configure_tx_ring(struct igc_adapter *adapter,
+ struct igc_ring *ring)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int reg_idx = ring->reg_idx;
+ u64 tdba = ring->dma;
+ u32 txdctl = 0;
+
+ /* disable the queue */
+ wr32(IGC_TXDCTL(reg_idx), 0);
+ wrfl();
+ mdelay(10);
+
+ wr32(IGC_TDLEN(reg_idx),
+ ring->count * sizeof(union igc_adv_tx_desc));
+ wr32(IGC_TDBAL(reg_idx),
+ tdba & 0x00000000ffffffffULL);
+ wr32(IGC_TDBAH(reg_idx), tdba >> 32);
+
+ ring->tail = adapter->io_addr + IGC_TDT(reg_idx);
+ wr32(IGC_TDH(reg_idx), 0);
+ writel(0, ring->tail);
+
+ txdctl |= IGC_TX_PTHRESH;
+ txdctl |= IGC_TX_HTHRESH << 8;
+ txdctl |= IGC_TX_WTHRESH << 16;
+
+ txdctl |= IGC_TXDCTL_QUEUE_ENABLE;
+ wr32(IGC_TXDCTL(reg_idx), txdctl);
+}
+
+/**
+ * igc_configure_tx - Configure transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ */
+static void igc_configure_tx(struct igc_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ igc_configure_tx_ring(adapter, adapter->tx_ring[i]);
+}
+
+/**
+ * igc_setup_mrqc - configure the multiple receive queue control registers
+ * @adapter: Board private structure
+ */
+static void igc_setup_mrqc(struct igc_adapter *adapter)
+{
+}
+
+/**
+ * igc_setup_rctl - configure the receive control registers
+ * @adapter: Board private structure
+ */
+static void igc_setup_rctl(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 rctl;
+
+ rctl = rd32(IGC_RCTL);
+
+ rctl &= ~(3 << IGC_RCTL_MO_SHIFT);
+ rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC);
+
+ rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF |
+ (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);
+
+ /* enable stripping of CRC. Newer features require
+ * that the HW strips the CRC.
+ */
+ rctl |= IGC_RCTL_SECRC;
+
+ /* disable store bad packets and clear size bits. */
+ rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256);
+
+ /* enable LPE to allow for reception of jumbo frames */
+ rctl |= IGC_RCTL_LPE;
+
+ /* disable queue 0 to prevent tail write w/o re-config */
+ wr32(IGC_RXDCTL(0), 0);
+
+ /* This is useful for sniffing bad packets. */
+ if (adapter->netdev->features & NETIF_F_RXALL) {
+ /* UPE and MPE will be handled by normal PROMISC logic
+ * in set_rx_mode
+ */
+ rctl |= (IGC_RCTL_SBP | /* Receive bad packets */
+ IGC_RCTL_BAM | /* RX All Bcast Pkts */
+ IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
+
+ rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */
+ IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */
+ }
+
+ wr32(IGC_RCTL, rctl);
+}
+
+/**
+ * igc_setup_tctl - configure the transmit control registers
+ * @adapter: Board private structure
+ */
+static void igc_setup_tctl(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 tctl;
+
+ /* disable queue 0 which could be enabled by default */
+ wr32(IGC_TXDCTL(0), 0);
+
+ /* Program the Transmit Control Register */
+ tctl = rd32(IGC_TCTL);
+ tctl &= ~IGC_TCTL_CT;
+ tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC |
+ (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT);
+
+ /* Enable transmits */
+ tctl |= IGC_TCTL_EN;
+
+ wr32(IGC_TCTL, tctl);
+}
+
+/**
+ * igc_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int igc_set_mac(struct net_device *netdev, void *p)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
+
+ /* set the correct pool for the new PF MAC address in entry 0 */
+ igc_set_default_mac_filter(adapter);
+
+ return 0;
+}
+
+static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first)
+{
+}
+
+static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
+{
+ struct net_device *netdev = tx_ring->netdev;
+
+ netif_stop_subqueue(netdev, tx_ring->queue_index);
+
+ /* Memory barrier: make sure the stopped-queue state is visible
+ * before we re-check the free descriptor count below.
+ */
+ smp_mb();
+
+ /* We need to check again in case another CPU has just
+ * made room available.
+ */
+ if (igc_desc_unused(tx_ring) < size)
+ return -EBUSY;
+
+ /* A reprieve! */
+ netif_wake_subqueue(netdev, tx_ring->queue_index);
+
+ u64_stats_update_begin(&tx_ring->tx_syncp2);
+ tx_ring->tx_stats.restart_queue2++;
+ u64_stats_update_end(&tx_ring->tx_syncp2);
+
+ return 0;
+}
+
+static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
+{
+ if (igc_desc_unused(tx_ring) >= size)
+ return 0;
+ return __igc_maybe_stop_tx(tx_ring, size);
+}
+
+static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
+{
+ /* set type for advanced descriptor with frame checksum insertion */
+ u32 cmd_type = IGC_ADVTXD_DTYP_DATA |
+ IGC_ADVTXD_DCMD_DEXT |
+ IGC_ADVTXD_DCMD_IFCS;
+
+ return cmd_type;
+}
+
+static void igc_tx_olinfo_status(struct igc_ring *tx_ring,
+ union igc_adv_tx_desc *tx_desc,
+ u32 tx_flags, unsigned int paylen)
+{
+ u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT;
+
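+ /* The multiply/divide construct below sets the POPTS bits without a
+ * branch: (tx_flags & FLAG) is either 0 or FLAG, so the product is
+ * either 0 or exactly the desired POPTS value shifted into place.
+ */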
+ /* insert L4 checksum */
+ olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) *
+ ((IGC_TXD_POPTS_TXSM << 8) /
+ IGC_TX_FLAGS_CSUM);
+
+ /* insert IPv4 checksum */
+ olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) *
+ (((IGC_TXD_POPTS_IXSM << 8)) /
+ IGC_TX_FLAGS_IPV4);
+
+ tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+}
+
+static int igc_tx_map(struct igc_ring *tx_ring,
+ struct igc_tx_buffer *first,
+ const u8 hdr_len)
+{
+ struct sk_buff *skb = first->skb;
+ struct igc_tx_buffer *tx_buffer;
+ union igc_adv_tx_desc *tx_desc;
+ u32 tx_flags = first->tx_flags;
+ struct skb_frag_struct *frag;
+ u16 i = tx_ring->next_to_use;
+ unsigned int data_len, size;
+ dma_addr_t dma;
+ u32 cmd_type = igc_tx_cmd_type(skb, tx_flags);
+
+ tx_desc = IGC_TX_DESC(tx_ring, i);
+
+ igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
+
+ size = skb_headlen(skb);
+ data_len = skb->data_len;
+
+ dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+
+ tx_buffer = first;
+
+ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buffer, len, size);
+ dma_unmap_addr_set(tx_buffer, dma, dma);
+
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
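+ /* a buffer larger than IGC_MAX_DATA_PER_TXD is split across several
+ * descriptors; XORing the length into cmd_type behaves like an OR
+ * here because the length bits of cmd_type start out clear
+ */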
+ while (unlikely(size > IGC_MAX_DATA_PER_TXD)) {
+ tx_desc->read.cmd_type_len =
+ cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD);
+
+ i++;
+ tx_desc++;
+ if (i == tx_ring->count) {
+ tx_desc = IGC_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
+ tx_desc->read.olinfo_status = 0;
+
+ dma += IGC_MAX_DATA_PER_TXD;
+ size -= IGC_MAX_DATA_PER_TXD;
+
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ }
+
+ if (likely(!data_len))
+ break;
+
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
+
+ i++;
+ tx_desc++;
+ if (i == tx_ring->count) {
+ tx_desc = IGC_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
+ tx_desc->read.olinfo_status = 0;
+
+ size = skb_frag_size(frag);
+ data_len -= size;
+
+ dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
+ size, DMA_TO_DEVICE);
+
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ }
+
+ /* write last descriptor with RS and EOP bits */
+ cmd_type |= size | IGC_TXD_DCMD;
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+
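+ /* account the bytes queued with BQL so the stack can limit the
+ * amount of data outstanding on this Tx queue
+ */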
+ netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
+
+ /* set the timestamp */
+ first->time_stamp = jiffies;
+
+ /* Force memory writes to complete before letting h/w know there
+ * are new descriptors to fetch. (Only applicable for weak-ordered
+ * memory model archs, such as IA-64).
+ *
+ * We also need this memory barrier to make certain all of the
+ * status bits have been updated before next_to_watch is written.
+ */
+ wmb();
+
+ /* set next_to_watch value indicating a packet is present */
+ first->next_to_watch = tx_desc;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ tx_ring->next_to_use = i;
+
+ /* Make sure there is space in the ring for the next send. */
+ igc_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+ writel(i, tx_ring->tail);
+
+ /* we need this if more than one processor can write to our tail
+ * at a time; it synchronizes IO on IA64/Altix systems
+ */
+ mmiowb();
+ }
+
+ return 0;
+dma_error:
+ dev_err(tx_ring->dev, "TX DMA map failed\n");
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+
+ /* clear dma mappings for failed tx_buffer_info map */
+ while (tx_buffer != first) {
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+
+ if (i-- == 0)
+ i += tx_ring->count;
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ }
+
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+
+ dev_kfree_skb_any(tx_buffer->skb);
+ tx_buffer->skb = NULL;
+
+ tx_ring->next_to_use = i;
+
+ return -1;
+}
+
+static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
+ struct igc_ring *tx_ring)
+{
+ u16 count = TXD_USE_COUNT(skb_headlen(skb));
+ __be16 protocol = vlan_get_protocol(skb);
+ struct igc_tx_buffer *first;
+ u32 tx_flags = 0;
+ unsigned short f;
+ u8 hdr_len = 0;
+
+ /* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD,
+ * + 2 desc gap to keep tail from touching head,
+ * + 1 desc for context descriptor,
+ * otherwise try next time
+ */
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+
+ if (igc_maybe_stop_tx(tx_ring, count + 3)) {
+ /* this is a hard error */
+ return NETDEV_TX_BUSY;
+ }
+
+ /* record the location of the first descriptor for this packet */
+ first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ first->skb = skb;
+ first->bytecount = skb->len;
+ first->gso_segs = 1;
+
+ skb_tx_timestamp(skb);
+
+ /* record initial flags and protocol */
+ first->tx_flags = tx_flags;
+ first->protocol = protocol;
+
+ igc_tx_csum(tx_ring, first);
+
+ igc_tx_map(tx_ring, first, hdr_len);
+
+ return NETDEV_TX_OK;
+}
+
+static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter,
+ struct sk_buff *skb)
+{
+ unsigned int r_idx = skb->queue_mapping;
+
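+ /* queue_mapping may exceed the number of Tx rings configured, so
+ * fold it back into range before indexing the ring array
+ */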
+ if (r_idx >= adapter->num_tx_queues)
+ r_idx = r_idx % adapter->num_tx_queues;
+
+ return adapter->tx_ring[r_idx];
+}
+
+static netdev_tx_t igc_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ /* The minimum packet size with TCTL.PSP set is 17 so pad the skb
+ * in order to meet this minimum size requirement.
+ */
+ if (skb->len < 17) {
+ if (skb_padto(skb, 17))
+ return NETDEV_TX_OK;
+ skb->len = 17;
+ }
+
+ return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb));
+}
+
+static inline void igc_rx_hash(struct igc_ring *ring,
+ union igc_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ if (ring->netdev->features & NETIF_F_RXHASH)
+ skb_set_hash(skb,
+ le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
+ PKT_HASH_TYPE_L3);
+}
+
+/**
+ * igc_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ *
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, timestamp, protocol, and
+ * other fields within the skb.
+ */
+static void igc_process_skb_fields(struct igc_ring *rx_ring,
+ union igc_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ igc_rx_hash(rx_ring, rx_desc, skb);
+
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+}
+
+static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
+ const unsigned int size)
+{
+ struct igc_rx_buffer *rx_buffer;
+
+ rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+ prefetchw(rx_buffer->page);
+
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ size,
+ DMA_FROM_DEVICE);
+
+ rx_buffer->pagecnt_bias--;
+
+ return rx_buffer;
+}
+
+/**
+ * igc_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: buffer containing page to add
+ * @skb: sk_buff to place the data into
+ * @size: size of buffer to be added
+ *
+ * This function will add the data contained in rx_buffer->page to the skb.
+ */
+static void igc_add_rx_frag(struct igc_ring *rx_ring,
+ struct igc_rx_buffer *rx_buffer,
+ struct sk_buff *skb,
+ unsigned int size)
+{
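+ /* On 4K pages the buffer uses half the page and the offset is
+ * ping-ponged between the two halves with an XOR; on larger pages
+ * the offset simply advances so one page can back several buffers.
+ */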
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
+ rx_buffer->page_offset, size, truesize);
+ rx_buffer->page_offset ^= truesize;
+#else
+ unsigned int truesize = ring_uses_build_skb(rx_ring) ?
+ SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
+ SKB_DATA_ALIGN(size);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
+ rx_buffer->page_offset, size, truesize);
+ rx_buffer->page_offset += truesize;
+#endif
+}
+
+static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
+ struct igc_rx_buffer *rx_buffer,
+ union igc_adv_rx_desc *rx_desc,
+ unsigned int size)
+{
+ void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+ SKB_DATA_ALIGN(IGC_SKB_PAD + size);
+#endif
+ struct sk_buff *skb;
+
+ /* prefetch first cache line of first page */
+ prefetch(va);
+#if L1_CACHE_BYTES < 128
+ prefetch(va + L1_CACHE_BYTES);
+#endif
+
+ /* build an skb around the page buffer */
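+ /* The hardware wrote the frame IGC_SKB_PAD bytes into the buffer
+ * (see igc_rx_offset()), so step back to the start of the buffer and
+ * let skb_reserve() re-expose that padding as skb headroom.
+ */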
+ skb = build_skb(va - IGC_SKB_PAD, truesize);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* update pointers within the skb to store the data */
+ skb_reserve(skb, IGC_SKB_PAD);
+ __skb_put(skb, size);
+
+ /* update buffer offset */
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
+
+ return skb;
+}
+
+static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
+ struct igc_rx_buffer *rx_buffer,
+ union igc_adv_rx_desc *rx_desc,
+ unsigned int size)
+{
+ void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(size);
+#endif
+ unsigned int headlen;
+ struct sk_buff *skb;
+
+ /* prefetch first cache line of first page */
+ prefetch(va);
+#if L1_CACHE_BYTES < 128
+ prefetch(va + L1_CACHE_BYTES);
+#endif
+
+ /* allocate a skb to store the frags */
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* Determine available headroom for copy */
+ headlen = size;
+ if (headlen > IGC_RX_HDR_LEN)
+ headlen = eth_get_headlen(va, IGC_RX_HDR_LEN);
+
+ /* align pull length to size of long to optimize memcpy performance */
+ memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
+
+ /* update all of the pointers */
+ size -= headlen;
+ if (size) {
+ skb_add_rx_frag(skb, 0, rx_buffer->page,
+ (va + headlen) - page_address(rx_buffer->page),
+ size, truesize);
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
+ } else {
+ rx_buffer->pagecnt_bias++;
+ }
+
+ return skb;
+}
+
+/**
+ * igc_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ */
+static void igc_reuse_rx_page(struct igc_ring *rx_ring,
+ struct igc_rx_buffer *old_buff)
+{
+ u16 nta = rx_ring->next_to_alloc;
+ struct igc_rx_buffer *new_buff;
+
+ new_buff = &rx_ring->rx_buffer_info[nta];
+
+ /* update, and store next to alloc */
+ nta++;
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+ /* Transfer page from old buffer to new buffer.
+ * Move each member individually to avoid possible store
+ * forwarding stalls.
+ */
+ new_buff->dma = old_buff->dma;
+ new_buff->page = old_buff->page;
+ new_buff->page_offset = old_buff->page_offset;
+ new_buff->pagecnt_bias = old_buff->pagecnt_bias;
+}
+
+static inline bool igc_page_is_reserved(struct page *page)
+{
+ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+}
+
+static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer)
+{
+ unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
+ struct page *page = rx_buffer->page;
+
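+ /* pagecnt_bias tracks the page references held by the driver itself;
+ * page_ref_count() minus this bias is the number of references held
+ * elsewhere, i.e. by frames still in flight up the stack.
+ */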
+ /* avoid re-using remote pages */
+ if (unlikely(igc_page_is_reserved(page)))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+ /* if we are only owner of page we can reuse it */
+ if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
+ return false;
+#else
+#define IGC_LAST_OFFSET \
+ (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048)
+
+ if (rx_buffer->page_offset > IGC_LAST_OFFSET)
+ return false;
+#endif
+
+ /* If we have drained the page fragment pool we need to update
+ * the pagecnt_bias and page count so that we fully restock the
+ * number of references the driver holds.
+ */
+ if (unlikely(!pagecnt_bias)) {
+ page_ref_add(page, USHRT_MAX);
+ rx_buffer->pagecnt_bias = USHRT_MAX;
+ }
+
+ return true;
+}
+
+/**
+ * igc_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ *
+ * This function updates next_to_clean. If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it returns true to
+ * indicate that more buffers belonging to the same frame are pending.
+ */
+static bool igc_is_non_eop(struct igc_ring *rx_ring,
+ union igc_adv_rx_desc *rx_desc)
+{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ /* fetch, update, and store next to clean */
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+
+ prefetch(IGC_RX_DESC(rx_ring, ntc));
+
+ if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP)))
+ return false;
+
+ return true;
+}
+
+/**
+ * igc_cleanup_headers - Correct corrupted or empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being fixed
+ *
+ * Address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition if skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
+ */
+static bool igc_cleanup_headers(struct igc_ring *rx_ring,
+ union igc_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ if (unlikely((igc_test_staterr(rx_desc,
+ IGC_RXDEXT_ERR_FRAME_ERR_MASK)))) {
+ struct net_device *netdev = rx_ring->netdev;
+
+ if (!(netdev->features & NETIF_F_RXALL)) {
+ dev_kfree_skb_any(skb);
+ return true;
+ }
+ }
+
+ /* if eth_skb_pad returns an error the skb was freed */
+ if (eth_skb_pad(skb))
+ return true;
+
+ return false;
+}
+
+static void igc_put_rx_buffer(struct igc_ring *rx_ring,
+ struct igc_rx_buffer *rx_buffer)
+{
+ if (igc_can_reuse_rx_page(rx_buffer)) {
+ /* hand second half of page back to the ring */
+ igc_reuse_rx_page(rx_ring, rx_buffer);
+ } else {
+ /* We are not reusing the buffer so unmap it and free
+ * any references we are holding to it
+ */
+ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+ igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
+ IGC_RX_DMA_ATTR);
+ __page_frag_cache_drain(rx_buffer->page,
+ rx_buffer->pagecnt_bias);
+ }
+
+ /* clear contents of rx_buffer */
+ rx_buffer->page = NULL;
+}
+
+/**
+ * igc_alloc_rx_buffers - Replace used receive buffers; packet split
+ * @rx_ring: rx descriptor ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ */
+static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)
+{
+ union igc_adv_rx_desc *rx_desc;
+ u16 i = rx_ring->next_to_use;
+ struct igc_rx_buffer *bi;
+ u16 bufsz;
+
+ /* nothing to do */
+ if (!cleaned_count)
+ return;
+
+ rx_desc = IGC_RX_DESC(rx_ring, i);
+ bi = &rx_ring->rx_buffer_info[i];
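+ /* bias the index by -count so the ring wrap check in the loop below
+ * reduces to a simple !i test
+ */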
+ i -= rx_ring->count;
+
+ bufsz = igc_rx_bufsz(rx_ring);
+
+ do {
+ if (!igc_alloc_mapped_page(rx_ring, bi))
+ break;
+
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+ bi->page_offset, bufsz,
+ DMA_FROM_DEVICE);
+
+ /* Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info.
+ */
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
+
+ rx_desc++;
+ bi++;
+ i++;
+ if (unlikely(!i)) {
+ rx_desc = IGC_RX_DESC(rx_ring, 0);
+ bi = rx_ring->rx_buffer_info;
+ i -= rx_ring->count;
+ }
+
+ /* clear the length for the next_to_use descriptor */
+ rx_desc->wb.upper.length = 0;
+
+ cleaned_count--;
+ } while (cleaned_count);
+
+ i += rx_ring->count;
+
+ if (rx_ring->next_to_use != i) {
+ /* record the next descriptor to use */
+ rx_ring->next_to_use = i;
+
+ /* update next to alloc since we have filled the ring */
+ rx_ring->next_to_alloc = i;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(i, rx_ring->tail);
+ }
+}
+
+static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
+{
+ unsigned int total_bytes = 0, total_packets = 0;
+ struct igc_ring *rx_ring = q_vector->rx.ring;
+ struct sk_buff *skb = rx_ring->skb;
+ u16 cleaned_count = igc_desc_unused(rx_ring);
+
+ while (likely(total_packets < budget)) {
+ union igc_adv_rx_desc *rx_desc;
+ struct igc_rx_buffer *rx_buffer;
+ unsigned int size;
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= IGC_RX_BUFFER_WRITE) {
+ igc_alloc_rx_buffers(rx_ring, cleaned_count);
+ cleaned_count = 0;
+ }
+
+ rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean);
+ size = le16_to_cpu(rx_desc->wb.upper.length);
+ if (!size)
+ break;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * descriptor has been written back
+ */
+ dma_rmb();
+
+ rx_buffer = igc_get_rx_buffer(rx_ring, size);
+
+ /* retrieve a buffer from the ring */
+ if (skb)
+ igc_add_rx_frag(rx_ring, rx_buffer, skb, size);
+ else if (ring_uses_build_skb(rx_ring))
+ skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size);
+ else
+ skb = igc_construct_skb(rx_ring, rx_buffer,
+ rx_desc, size);
+
+ /* exit if we failed to retrieve a buffer */
+ if (!skb) {
+ rx_ring->rx_stats.alloc_failed++;
+ rx_buffer->pagecnt_bias++;
+ break;
+ }
+
+ igc_put_rx_buffer(rx_ring, rx_buffer);
+ cleaned_count++;
+
+ /* fetch next buffer in frame if non-eop */
+ if (igc_is_non_eop(rx_ring, rx_desc))
+ continue;
+
+ /* verify the packet layout is correct */
+ if (igc_cleanup_headers(rx_ring, rx_desc, skb)) {
+ skb = NULL;
+ continue;
+ }
+
+ /* probably a little skewed due to removing CRC */
+ total_bytes += skb->len;
+
+ /* populate checksum, timestamp, VLAN, and protocol */
+ igc_process_skb_fields(rx_ring, rx_desc, skb);
+
+ napi_gro_receive(&q_vector->napi, skb);
+
+ /* reset skb pointer */
+ skb = NULL;
+
+ /* update budget accounting */
+ total_packets++;
+ }
+
+ /* place incomplete frames back on ring for completion */
+ rx_ring->skb = skb;
+
+ u64_stats_update_begin(&rx_ring->rx_syncp);
+ rx_ring->rx_stats.packets += total_packets;
+ rx_ring->rx_stats.bytes += total_bytes;
+ u64_stats_update_end(&rx_ring->rx_syncp);
+ q_vector->rx.total_packets += total_packets;
+ q_vector->rx.total_bytes += total_bytes;
+
+ if (cleaned_count)
+ igc_alloc_rx_buffers(rx_ring, cleaned_count);
+
+ return total_packets;
+}
+
+static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)
+{
+ return ring_uses_build_skb(rx_ring) ? IGC_SKB_PAD : 0;
+}
+
+static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
+ struct igc_rx_buffer *bi)
+{
+ struct page *page = bi->page;
+ dma_addr_t dma;
+
+ /* since we are recycling buffers we should seldom need to alloc */
+ if (likely(page))
+ return true;
+
+ /* alloc new page for storage */
+ page = dev_alloc_pages(igc_rx_pg_order(rx_ring));
+ if (unlikely(!page)) {
+ rx_ring->rx_stats.alloc_failed++;
+ return false;
+ }
+
+ /* map page for use */
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+ igc_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ IGC_RX_DMA_ATTR);
+
+ /* if mapping failed free memory back to system since
+ * there isn't much point in holding memory we can't use
+ */
+ if (dma_mapping_error(rx_ring->dev, dma)) {
+ __free_page(page);
+
+ rx_ring->rx_stats.alloc_failed++;
+ return false;
+ }
+
+ bi->dma = dma;
+ bi->page = page;
+ bi->page_offset = igc_rx_offset(rx_ring);
+ bi->pagecnt_bias = 1;
+
+ return true;
+}
+
+/**
+ * igc_clean_tx_irq - Reclaim resources after transmit completes
+ * @q_vector: pointer to q_vector containing needed info
+ * @napi_budget: Used to determine if we are in netpoll
+ *
+ * returns true if ring is completely cleaned
+ */
+static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
+{
+ struct igc_adapter *adapter = q_vector->adapter;
+ unsigned int total_bytes = 0, total_packets = 0;
+ unsigned int budget = q_vector->tx.work_limit;
+ struct igc_ring *tx_ring = q_vector->tx.ring;
+ unsigned int i = tx_ring->next_to_clean;
+ struct igc_tx_buffer *tx_buffer;
+ union igc_adv_tx_desc *tx_desc;
+
+ if (test_bit(__IGC_DOWN, &adapter->state))
+ return true;
+
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ tx_desc = IGC_TX_DESC(tx_ring, i);
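+ /* bias the index by -count so the ring wrap checks below reduce to
+ * a simple !i test
+ */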
+ i -= tx_ring->count;
+
+ do {
+ union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
+
+ /* if next_to_watch is not set then there is no work pending */
+ if (!eop_desc)
+ break;
+
+ /* prevent any other reads prior to eop_desc */
+ smp_rmb();
+
+ /* if DD is not set pending work has not been completed */
+ if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD)))
+ break;
+
+ /* clear next_to_watch to prevent false hangs */
+ tx_buffer->next_to_watch = NULL;
+
+ /* update the statistics for this packet */
+ total_bytes += tx_buffer->bytecount;
+ total_packets += tx_buffer->gso_segs;
+
+ /* free the skb */
+ napi_consume_skb(tx_buffer->skb, napi_budget);
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+
+ /* clear tx_buffer data */
+ dma_unmap_len_set(tx_buffer, len, 0);
+
+ /* clear last DMA location and unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = IGC_TX_DESC(tx_ring, 0);
+ }
+
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buffer, len)) {
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+ }
+ }
+
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = IGC_TX_DESC(tx_ring, 0);
+ }
+
+ /* issue prefetch for next Tx descriptor */
+ prefetch(tx_desc);
+
+ /* update budget accounting */
+ budget--;
+ } while (likely(budget));
+
+ netdev_tx_completed_queue(txring_txq(tx_ring),
+ total_packets, total_bytes);
+
+ i += tx_ring->count;
+ tx_ring->next_to_clean = i;
+ u64_stats_update_begin(&tx_ring->tx_syncp);
+ tx_ring->tx_stats.bytes += total_bytes;
+ tx_ring->tx_stats.packets += total_packets;
+ u64_stats_update_end(&tx_ring->tx_syncp);
+ q_vector->tx.total_bytes += total_bytes;
+ q_vector->tx.total_packets += total_packets;
+
+ if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
+ struct igc_hw *hw = &adapter->hw;
+
+ /* Detect a transmit hang in hardware; this serializes the
+ * check with the clearing of time_stamp and movement of i
+ */
+ clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
+ if (tx_buffer->next_to_watch &&
+ time_after(jiffies, tx_buffer->time_stamp +
+ (adapter->tx_timeout_factor * HZ)) &&
+ !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) {
+ /* detected Tx unit hang */
+ dev_err(tx_ring->dev,
+ "Detected Tx Unit Hang\n"
+ " Tx Queue <%d>\n"
+ " TDH <%x>\n"
+ " TDT <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "buffer_info[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " next_to_watch <%p>\n"
+ " jiffies <%lx>\n"
+ " desc.status <%x>\n",
+ tx_ring->queue_index,
+ rd32(IGC_TDH(tx_ring->reg_idx)),
+ readl(tx_ring->tail),
+ tx_ring->next_to_use,
+ tx_ring->next_to_clean,
+ tx_buffer->time_stamp,
+ tx_buffer->next_to_watch,
+ jiffies,
+ tx_buffer->next_to_watch->wb.status);
+ netif_stop_subqueue(tx_ring->netdev,
+ tx_ring->queue_index);
+
+ /* we are about to reset, no point in enabling stuff */
+ return true;
+ }
+ }
+
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+ if (unlikely(total_packets &&
+ netif_carrier_ok(tx_ring->netdev) &&
+ igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+ if (__netif_subqueue_stopped(tx_ring->netdev,
+ tx_ring->queue_index) &&
+ !(test_bit(__IGC_DOWN, &adapter->state))) {
+ netif_wake_subqueue(tx_ring->netdev,
+ tx_ring->queue_index);
+
+ u64_stats_update_begin(&tx_ring->tx_syncp);
+ tx_ring->tx_stats.restart_queue++;
+ u64_stats_update_end(&tx_ring->tx_syncp);
+ }
+ }
+
+ return !!budget;
+}
+
+/**
+ * igc_ioctl - I/O control method
+ * @netdev: network interface device structure
+ * @ifr: pointer to the interface request structure
+ * @cmd: command
+ */
+static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * igc_up - Open the interface and prepare it to handle traffic
+ * @adapter: board private structure
+ */
+static void igc_up(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int i = 0;
+
+ /* hardware has been reset, we need to reload some things */
+ igc_configure(adapter);
+
+ clear_bit(__IGC_DOWN, &adapter->state);
+
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ napi_enable(&adapter->q_vector[i]->napi);
+
+ if (adapter->msix_entries)
+ igc_configure_msix(adapter);
+ else
+ igc_assign_vector(adapter->q_vector[0], 0);
+
+ /* Clear any pending interrupts. */
+ rd32(IGC_ICR);
+ igc_irq_enable(adapter);
+
+ netif_tx_start_all_queues(adapter->netdev);
+
+ /* start the watchdog. */
+ hw->mac.get_link_status = 1;
+ schedule_work(&adapter->watchdog_task);
+}
+
+/**
+ * igc_update_stats - Update the board statistics counters
+ * @adapter: board private structure
+ */
+static void igc_update_stats(struct igc_adapter *adapter)
+{
+}
+
+static void igc_nfc_filter_exit(struct igc_adapter *adapter)
+{
+}
+
+/**
+ * igc_down - Close the interface
+ * @adapter: board private structure
+ */
+static void igc_down(struct igc_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct igc_hw *hw = &adapter->hw;
+ u32 tctl, rctl;
+ int i = 0;
+
+ set_bit(__IGC_DOWN, &adapter->state);
+
+ /* disable receives in the hardware */
+ rctl = rd32(IGC_RCTL);
+ wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
+ /* flush and sleep below */
+
+ igc_nfc_filter_exit(adapter);
+
+ /* set trans_start so we don't get spurious watchdogs during reset */
+ netif_trans_update(netdev);
+
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+
+ /* disable transmits in the hardware */
+ tctl = rd32(IGC_TCTL);
+ tctl &= ~IGC_TCTL_EN;
+ wr32(IGC_TCTL, tctl);
+ /* flush both disables and wait for them to finish */
+ wrfl();
+ usleep_range(10000, 20000);
+
+ igc_irq_disable(adapter);
+
+ adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
+
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ if (adapter->q_vector[i]) {
+ napi_synchronize(&adapter->q_vector[i]->napi);
+ napi_disable(&adapter->q_vector[i]->napi);
+ }
+ }
+
+ del_timer_sync(&adapter->watchdog_timer);
+ del_timer_sync(&adapter->phy_info_timer);
+
+ /* record the stats before reset */
+ spin_lock(&adapter->stats64_lock);
+ igc_update_stats(adapter);
+ spin_unlock(&adapter->stats64_lock);
+
+ adapter->link_speed = 0;
+ adapter->link_duplex = 0;
+
+ if (!pci_channel_offline(adapter->pdev))
+ igc_reset(adapter);
+
+ /* clear VLAN promisc flag so VFTA will be updated if necessary */
+ adapter->flags &= ~IGC_FLAG_VLAN_PROMISC;
+
+ igc_clean_all_tx_rings(adapter);
+ igc_clean_all_rx_rings(adapter);
+}
+
+static void igc_reinit_locked(struct igc_adapter *adapter)
+{
+ WARN_ON(in_interrupt());
+ while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+ igc_down(adapter);
+ igc_up(adapter);
+ clear_bit(__IGC_RESETTING, &adapter->state);
+}
+
+static void igc_reset_task(struct work_struct *work)
+{
+ struct igc_adapter *adapter;
+
+ adapter = container_of(work, struct igc_adapter, reset_task);
+
+ netdev_err(adapter->netdev, "Reset adapter\n");
+ igc_reinit_locked(adapter);
+}
+
+/**
+ * igc_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int igc_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
+
+ /* adjust max frame to be at least the size of a standard frame */
+ if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
+ max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
+
+ while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+
+ /* igc_down has a dependency on max_frame_size */
+ adapter->max_frame_size = max_frame;
+
+ if (netif_running(netdev))
+ igc_down(adapter);
+
+ dev_info(&pdev->dev, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
+ netdev->mtu = new_mtu;
+
+ if (netif_running(netdev))
+ igc_up(adapter);
+ else
+ igc_reset(adapter);
+
+ clear_bit(__IGC_RESETTING, &adapter->state);
+
+ return 0;
+}
+
+/**
+ * igc_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are updated here and also from the timer callback.
+ */
+static struct net_device_stats *igc_get_stats(struct net_device *netdev)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ if (!test_bit(__IGC_RESETTING, &adapter->state))
+ igc_update_stats(adapter);
+
+ /* only return the current stats */
+ return &netdev->stats;
+}
+
+/**
+ * igc_configure - configure the hardware for RX and TX
+ * @adapter: private board structure
+ */
+static void igc_configure(struct igc_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int i = 0;
+
+ igc_get_hw_control(adapter);
+ igc_set_rx_mode(netdev);
+
+ igc_setup_tctl(adapter);
+ igc_setup_mrqc(adapter);
+ igc_setup_rctl(adapter);
+
+ igc_configure_tx(adapter);
+ igc_configure_rx(adapter);
+
+ igc_rx_fifo_flush_base(&adapter->hw);
+
+ /* call igc_desc_unused which always leaves
+ * at least 1 descriptor unused to make sure
+ * next_to_use != next_to_clean
+ */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct igc_ring *ring = adapter->rx_ring[i];
+
+ igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
+ }
+}
+
+/**
+ * igc_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table
+ * @adapter: Pointer to adapter structure
+ * @index: Index of the RAR entry which need to be synced with MAC table
+ */
+static void igc_rar_set_index(struct igc_adapter *adapter, u32 index)
+{
+ u8 *addr = adapter->mac_table[index].addr;
+ struct igc_hw *hw = &adapter->hw;
+ u32 rar_low, rar_high;
+
+ /* HW expects these to be in network order when they are plugged
+ * into the registers which are little endian. In order to guarantee
+ * that ordering we need to do an leXX_to_cpup here in order to be
+ * ready for the byteswap that occurs with writel
+ */
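+ /* RAL takes the first four bytes of the MAC address, RAH the last
+ * two plus the valid (AV) and pool selection bits set below.
+ */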
+ rar_low = le32_to_cpup((__le32 *)(addr));
+ rar_high = le16_to_cpup((__le16 *)(addr + 4));
+
+ /* Indicate to hardware the Address is Valid. */
+ if (adapter->mac_table[index].state & IGC_MAC_STATE_IN_USE) {
+ if (is_valid_ether_addr(addr))
+ rar_high |= IGC_RAH_AV;
+
+ rar_high |= IGC_RAH_POOL_1 <<
+ adapter->mac_table[index].queue;
+ }
+
+ wr32(IGC_RAL(index), rar_low);
+ wrfl();
+ wr32(IGC_RAH(index), rar_high);
+ wrfl();
+}
+
+/* Set default MAC address for the PF in the first RAR entry */
+static void igc_set_default_mac_filter(struct igc_adapter *adapter)
+{
+ struct igc_mac_addr *mac_table = &adapter->mac_table[0];
+
+ ether_addr_copy(mac_table->addr, adapter->hw.mac.addr);
+ mac_table->state = IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE;
+
+ igc_rar_set_index(adapter, 0);
+}
+
+/**
+ * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_rx_mode entry point is called whenever the unicast or multicast
+ * address lists or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper unicast, multicast,
+ * promiscuous mode, and all-multi behavior.
+ */
+static void igc_set_rx_mode(struct net_device *netdev)
+{
+}
+
+/**
+ * igc_msix_other - msix other interrupt handler
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ */
+static irqreturn_t igc_msix_other(int irq, void *data)
+{
+ struct igc_adapter *adapter = data;
+ struct igc_hw *hw = &adapter->hw;
+ u32 icr = rd32(IGC_ICR);
+
+ /* reading ICR causes bit 31 of EICR to be cleared */
+ if (icr & IGC_ICR_DRSTA)
+ schedule_work(&adapter->reset_task);
+
+ if (icr & IGC_ICR_DOUTSYNC) {
+ /* HW is reporting DMA is out of sync */
+ adapter->stats.doosync++;
+ }
+
+ if (icr & IGC_ICR_LSC) {
+ hw->mac.get_link_status = 1;
+ /* guard against interrupt when we're going down */
+ if (!test_bit(__IGC_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ }
+
+ wr32(IGC_EIMS, adapter->eims_other);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * igc_write_ivar - configure ivar for given MSI-X vector
+ * @hw: pointer to the HW structure
+ * @msix_vector: vector number we are allocating to a given ring
+ * @index: row index of IVAR register to write within IVAR table
+ * @offset: column offset in IVAR, should be a multiple of 8
+ *
+ * The IVAR table consists of 2 columns,
+ * each containing a cause allocation for an Rx and Tx ring, and a
+ * variable number of rows depending on the number of queues supported.
+ */
+static void igc_write_ivar(struct igc_hw *hw, int msix_vector,
+ int index, int offset)
+{
+ u32 ivar = array_rd32(IGC_IVAR0, index);
+
+ /* clear any bits that are currently set */
+ ivar &= ~((u32)0xFF << offset);
+
+ /* write vector and valid bit */
+ ivar |= (msix_vector | IGC_IVAR_VALID) << offset;
+
+ array_wr32(IGC_IVAR0, index, ivar);
+}
+
+static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector)
+{
+ struct igc_adapter *adapter = q_vector->adapter;
+ struct igc_hw *hw = &adapter->hw;
+ int rx_queue = IGC_N0_QUEUE;
+ int tx_queue = IGC_N0_QUEUE;
+
+ if (q_vector->rx.ring)
+ rx_queue = q_vector->rx.ring->reg_idx;
+ if (q_vector->tx.ring)
+ tx_queue = q_vector->tx.ring->reg_idx;
+
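+ /* each IVAR register holds the entries for a pair of queues: the
+ * queue index >> 1 selects the register, the low bit of the index
+ * selects the upper or lower half, and the Tx entry sits 8 bits
+ * above the corresponding Rx entry
+ */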
+ switch (hw->mac.type) {
+ case igc_i225:
+ if (rx_queue > IGC_N0_QUEUE)
+ igc_write_ivar(hw, msix_vector,
+ rx_queue >> 1,
+ (rx_queue & 0x1) << 4);
+ if (tx_queue > IGC_N0_QUEUE)
+ igc_write_ivar(hw, msix_vector,
+ tx_queue >> 1,
+ ((tx_queue & 0x1) << 4) + 8);
+ q_vector->eims_value = BIT(msix_vector);
+ break;
+ default:
+ WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n");
+ break;
+ }
+
+ /* add q_vector eims value to global eims_enable_mask */
+ adapter->eims_enable_mask |= q_vector->eims_value;
+
+ /* configure q_vector to set itr on first interrupt */
+ q_vector->set_itr = 1;
+}
+
+/**
+ * igc_configure_msix - Configure MSI-X hardware
+ * @adapter: Pointer to adapter structure
+ *
+ * igc_configure_msix sets up the hardware to properly
+ * generate MSI-X interrupts.
+ */
+static void igc_configure_msix(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int i, vector = 0;
+ u32 tmp;
+
+ adapter->eims_enable_mask = 0;
+
+ /* set vector for other causes, i.e. link changes */
+ switch (hw->mac.type) {
+ case igc_i225:
+ /* Turn on MSI-X capability first, or our settings
+ * won't stick. And it will take days to debug.
+ */
+ wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE |
+ IGC_GPIE_PBA | IGC_GPIE_EIAME |
+ IGC_GPIE_NSICR);
+
+ /* enable msix_other interrupt */
+ adapter->eims_other = BIT(vector);
+ tmp = (vector++ | IGC_IVAR_VALID) << 8;
+
+ wr32(IGC_IVAR_MISC, tmp);
+ break;
+ default:
+ /* do nothing, since nothing else supports MSI-X */
+ break;
+ } /* switch (hw->mac.type) */
+
+ adapter->eims_enable_mask |= adapter->eims_other;
+
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ igc_assign_vector(adapter->q_vector[i], vector++);
+
+ wrfl();
+}
+
+static irqreturn_t igc_msix_ring(int irq, void *data)
+{
+ struct igc_q_vector *q_vector = data;
+
+ /* Write the ITR value calculated from the previous interrupt. */
+ igc_write_itr(q_vector);
+
+ napi_schedule(&q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * igc_request_msix - Initialize MSI-X interrupts
+ * @adapter: Pointer to adapter structure
+ *
+ * igc_request_msix allocates MSI-X vectors and requests interrupts from the
+ * kernel.
+ */
+static int igc_request_msix(struct igc_adapter *adapter)
+{
+ int i = 0, err = 0, vector = 0, free_vector = 0;
+ struct net_device *netdev = adapter->netdev;
+
+ err = request_irq(adapter->msix_entries[vector].vector,
+ &igc_msix_other, 0, netdev->name, adapter);
+ if (err)
+ goto err_out;
+
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ struct igc_q_vector *q_vector = adapter->q_vector[i];
+
+ vector++;
+
+ q_vector->itr_register = adapter->io_addr + IGC_EITR(vector);
+
+ if (q_vector->rx.ring && q_vector->tx.ring)
+ sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
+ q_vector->rx.ring->queue_index);
+ else if (q_vector->tx.ring)
+ sprintf(q_vector->name, "%s-tx-%u", netdev->name,
+ q_vector->tx.ring->queue_index);
+ else if (q_vector->rx.ring)
+ sprintf(q_vector->name, "%s-rx-%u", netdev->name,
+ q_vector->rx.ring->queue_index);
+ else
+ sprintf(q_vector->name, "%s-unused", netdev->name);
+
+ err = request_irq(adapter->msix_entries[vector].vector,
+ igc_msix_ring, 0, q_vector->name,
+ q_vector);
+ if (err)
+ goto err_free;
+ }
+
+ igc_configure_msix(adapter);
+ return 0;
+
+err_free:
+ /* free already assigned IRQs */
+ free_irq(adapter->msix_entries[free_vector++].vector, adapter);
+
+ vector--;
+ for (i = 0; i < vector; i++) {
+ free_irq(adapter->msix_entries[free_vector++].vector,
+ adapter->q_vector[i]);
+ }
+err_out:
+ return err;
+}
+
+/**
+ * igc_reset_q_vector - Reset config for interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_idx: Index of vector to be reset
+ *
+ * If NAPI is enabled it will delete any references to the
+ * NAPI struct. This is preparation for igc_free_q_vector.
+ */
+static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx)
+{
+ struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
+
+ /* if we're coming from igc_set_interrupt_capability, the vectors are
+ * not yet allocated
+ */
+ if (!q_vector)
+ return;
+
+ if (q_vector->tx.ring)
+ adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
+
+ if (q_vector->rx.ring)
+ adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
+
+ netif_napi_del(&q_vector->napi);
+}
+
+static void igc_reset_interrupt_capability(struct igc_adapter *adapter)
+{
+ int v_idx = adapter->num_q_vectors;
+
+ if (adapter->msix_entries) {
+ pci_disable_msix(adapter->pdev);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ } else if (adapter->flags & IGC_FLAG_HAS_MSI) {
+ pci_disable_msi(adapter->pdev);
+ }
+
+ while (v_idx--)
+ igc_reset_q_vector(adapter, v_idx);
+}
+
+/**
+ * igc_clear_interrupt_scheme - reset the device to a state of no interrupts
+ * @adapter: Pointer to adapter structure
+ *
+ * This function resets the device so that it has 0 rx queues, tx queues, and
+ * MSI-X interrupts allocated.
+ */
+static void igc_clear_interrupt_scheme(struct igc_adapter *adapter)
+{
+ igc_free_q_vectors(adapter);
+ igc_reset_interrupt_capability(adapter);
+}
+
+/**
+ * igc_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ */
+static void igc_free_q_vectors(struct igc_adapter *adapter)
+{
+ int v_idx = adapter->num_q_vectors;
+
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+ adapter->num_q_vectors = 0;
+
+ while (v_idx--) {
+ igc_reset_q_vector(adapter, v_idx);
+ igc_free_q_vector(adapter, v_idx);
+ }
+}
+
+/**
+ * igc_free_q_vector - Free memory allocated for specific interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_idx: Index of vector to be freed
+ *
+ * This function frees the memory allocated to the q_vector.
+ */
+static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx)
+{
+ struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
+
+ adapter->q_vector[v_idx] = NULL;
+
+ /* igc_get_stats64() might access the rings on this vector,
+ * we must wait a grace period before freeing it.
+ */
+ if (q_vector)
+ kfree_rcu(q_vector, rcu);
+}
+
+/* Need to wait a few seconds after link up to get diagnostic information from
+ * the phy
+ */
+static void igc_update_phy_info(struct timer_list *t)
+{
+ struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer);
+
+ igc_get_phy_info(&adapter->hw);
+}
+
+/**
+ * igc_has_link - check shared code for link and determine up/down
+ * @adapter: pointer to driver private info
+ */
+static bool igc_has_link(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ bool link_active = false;
+
+ /* get_link_status is set on LSC (link status) interrupt or
+ * rx sequence error interrupt. get_link_status will stay
+ * false until the igc_check_for_link establishes link
+ * for copper adapters ONLY
+ */
+ switch (hw->phy.media_type) {
+ case igc_media_type_copper:
+ if (!hw->mac.get_link_status)
+ return true;
+ hw->mac.ops.check_for_link(hw);
+ link_active = !hw->mac.get_link_status;
+ break;
+ default:
+ case igc_media_type_unknown:
+ break;
+ }
+
+ if (hw->mac.type == igc_i225 &&
+ hw->phy.id == I225_I_PHY_ID) {
+ if (!netif_carrier_ok(adapter->netdev)) {
+ adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
+ } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) {
+ adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE;
+ adapter->link_check_timeout = jiffies;
+ }
+ }
+
+ return link_active;
+}
+
+/**
+ * igc_watchdog - Timer Call-back
+ * @t: pointer to the watchdog timer_list embedded in the adapter
+ */
+static void igc_watchdog(struct timer_list *t)
+{
+ struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer);
+ /* Do the rest outside of interrupt context */
+ schedule_work(&adapter->watchdog_task);
+}
+
+static void igc_watchdog_task(struct work_struct *work)
+{
+ struct igc_adapter *adapter = container_of(work,
+ struct igc_adapter,
+ watchdog_task);
+ struct net_device *netdev = adapter->netdev;
+ struct igc_hw *hw = &adapter->hw;
+ struct igc_phy_info *phy = &hw->phy;
+ u16 phy_data, retry_count = 20;
+ u32 connsw;
+ u32 link;
+ int i;
+
+ link = igc_has_link(adapter);
+
+ if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) {
+ if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
+ adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
+ else
+ link = false;
+ }
+
+ /* Force link down if we have fiber to swap to */
+ if (adapter->flags & IGC_FLAG_MAS_ENABLE) {
+ if (hw->phy.media_type == igc_media_type_copper) {
+ connsw = rd32(IGC_CONNSW);
+ if (!(connsw & IGC_CONNSW_AUTOSENSE_EN))
+ link = 0;
+ }
+ }
+ if (link) {
+ if (!netif_carrier_ok(netdev)) {
+ u32 ctrl;
+
+ hw->mac.ops.get_speed_and_duplex(hw,
+ &adapter->link_speed,
+ &adapter->link_duplex);
+
+ ctrl = rd32(IGC_CTRL);
+ /* Link status message must follow this format */
+ netdev_info(netdev,
+ "igc: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
+ netdev->name,
+ adapter->link_speed,
+ adapter->link_duplex == FULL_DUPLEX ?
+ "Full" : "Half",
+ (ctrl & IGC_CTRL_TFCE) &&
+ (ctrl & IGC_CTRL_RFCE) ? "RX/TX" :
+ (ctrl & IGC_CTRL_RFCE) ? "RX" :
+ (ctrl & IGC_CTRL_TFCE) ? "TX" : "None");
+
+ /* check if SmartSpeed worked */
+ igc_check_downshift(hw);
+ if (phy->speed_downgraded)
+ netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
+
+ /* adjust timeout factor according to speed/duplex */
+ adapter->tx_timeout_factor = 1;
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ adapter->tx_timeout_factor = 14;
+ break;
+ case SPEED_100:
+ /* maybe add some timeout factor ? */
+ break;
+ }
+
+ if (adapter->link_speed != SPEED_1000)
+ goto no_wait;
+
+ /* wait for Remote receiver status OK */
+retry_read_status:
+ if (!igc_read_phy_reg(hw, PHY_1000T_STATUS,
+ &phy_data)) {
+ if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
+ retry_count) {
+ msleep(100);
+ retry_count--;
+ goto retry_read_status;
+ } else if (!retry_count) {
+ dev_err(&adapter->pdev->dev, "exceed max 2 second\n");
+ }
+ } else {
+ dev_err(&adapter->pdev->dev, "read 1000Base-T Status Reg\n");
+ }
+no_wait:
+ netif_carrier_on(netdev);
+
+ /* link state has changed, schedule phy info update */
+ if (!test_bit(__IGC_DOWN, &adapter->state))
+ mod_timer(&adapter->phy_info_timer,
+ round_jiffies(jiffies + 2 * HZ));
+ }
+ } else {
+ if (netif_carrier_ok(netdev)) {
+ adapter->link_speed = 0;
+ adapter->link_duplex = 0;
+
+ /* Link status message must follow this format */
+ netdev_info(netdev, "igc: %s NIC Link is Down\n",
+ netdev->name);
+ netif_carrier_off(netdev);
+
+ /* link state has changed, schedule phy info update */
+ if (!test_bit(__IGC_DOWN, &adapter->state))
+ mod_timer(&adapter->phy_info_timer,
+ round_jiffies(jiffies + 2 * HZ));
+
+ /* link is down, time to check for alternate media */
+ if (adapter->flags & IGC_FLAG_MAS_ENABLE) {
+ if (adapter->flags & IGC_FLAG_MEDIA_RESET) {
+ schedule_work(&adapter->reset_task);
+ /* return immediately */
+ return;
+ }
+ }
+
+ /* also check for alternate media here */
+ } else if (!netif_carrier_ok(netdev) &&
+ (adapter->flags & IGC_FLAG_MAS_ENABLE)) {
+ if (adapter->flags & IGC_FLAG_MEDIA_RESET) {
+ schedule_work(&adapter->reset_task);
+ /* return immediately */
+ return;
+ }
+ }
+ }
+
+ spin_lock(&adapter->stats64_lock);
+ igc_update_stats(adapter);
+ spin_unlock(&adapter->stats64_lock);
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct igc_ring *tx_ring = adapter->tx_ring[i];
+
+ if (!netif_carrier_ok(netdev)) {
+ /* We've lost link, so the controller stops DMA,
+ * but we've got queued Tx work that's never going
+ * to get done, so reset controller to flush Tx.
+ * (Do the reset outside of interrupt context).
+ */
+ if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) {
+ adapter->tx_timeout_count++;
+ schedule_work(&adapter->reset_task);
+ /* return immediately since reset is imminent */
+ return;
+ }
+ }
+
+ /* Force detection of hung controller every watchdog period */
+ set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
+ }
+
+ /* Cause software interrupt to ensure Rx ring is cleaned */
+ if (adapter->flags & IGC_FLAG_HAS_MSIX) {
+ u32 eics = 0;
+
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ eics |= adapter->q_vector[i]->eims_value;
+ wr32(IGC_EICS, eics);
+ } else {
+ wr32(IGC_ICS, IGC_ICS_RXDMT0);
+ }
+
+ /* Reset the timer */
+ if (!test_bit(__IGC_DOWN, &adapter->state)) {
+ if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + HZ));
+ else
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + 2 * HZ));
+ }
+}
+
+/**
+ * igc_update_ring_itr - update the dynamic ITR value based on packet size
+ * @q_vector: pointer to q_vector
+ *
+ * Stores a new ITR value based strictly on packet size. This
+ * algorithm is less sophisticated than that used in igc_update_itr,
+ * due to the difficulty of synchronizing statistics across multiple
+ * receive rings. The divisors and thresholds used by this function
+ * were determined based on theoretical maximum wire speed and testing
+ * data, in order to minimize response time while increasing bulk
+ * throughput.
+ * NOTE: This function is called only when operating in a multiqueue
+ * receive environment.
+ */
+static void igc_update_ring_itr(struct igc_q_vector *q_vector)
+{
+ struct igc_adapter *adapter = q_vector->adapter;
+ int new_val = q_vector->itr_val;
+ int avg_wire_size = 0;
+ unsigned int packets;
+
+ /* For non-gigabit speeds, just fix the interrupt rate at 4000
+ * ints/sec - ITR timer value of 120 ticks.
+ */
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ case SPEED_100:
+ new_val = IGC_4K_ITR;
+ goto set_itr_val;
+ default:
+ break;
+ }
+
+ packets = q_vector->rx.total_packets;
+ if (packets)
+ avg_wire_size = q_vector->rx.total_bytes / packets;
+
+ packets = q_vector->tx.total_packets;
+ if (packets)
+ avg_wire_size = max_t(u32, avg_wire_size,
+ q_vector->tx.total_bytes / packets);
+
+ /* if avg_wire_size isn't set no work was done */
+ if (!avg_wire_size)
+ goto clear_counts;
+
+ /* Add 24 bytes to size to account for CRC, preamble, and gap */
+ avg_wire_size += 24;
+
+ /* Don't starve jumbo frames */
+ avg_wire_size = min(avg_wire_size, 3000);
+
+ /* Give a little boost to mid-size frames */
+ if (avg_wire_size > 300 && avg_wire_size < 1200)
+ new_val = avg_wire_size / 3;
+ else
+ new_val = avg_wire_size / 2;
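+ /* Example: a steady stream of 1500-byte frames gives an avg_wire_size
+ * of ~1524 after the overhead adjustment, so new_val lands around 762,
+ * i.e. a long interrupt interval suited to bulk traffic.
+ */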
+
+ /* conservative mode (itr 3) eliminates the lowest_latency setting */
+ if (new_val < IGC_20K_ITR &&
+ ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
+ (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
+ new_val = IGC_20K_ITR;
+
+set_itr_val:
+ if (new_val != q_vector->itr_val) {
+ q_vector->itr_val = new_val;
+ q_vector->set_itr = 1;
+ }
+clear_counts:
+ q_vector->rx.total_bytes = 0;
+ q_vector->rx.total_packets = 0;
+ q_vector->tx.total_bytes = 0;
+ q_vector->tx.total_packets = 0;
+}
+
+/**
+ * igc_update_itr - update the dynamic ITR value based on statistics
+ * @q_vector: pointer to q_vector
+ * @ring_container: ring info to update the itr for
+ *
+ * Stores a new ITR value based on packets and byte
+ * counts during the last interrupt. The advantage of per interrupt
+ * computation is faster updates and more accurate ITR for the current
+ * traffic pattern. Constants in this function were computed
+ * based on theoretical maximum wire speed and thresholds were set based
+ * on testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
+ * NOTE: These calculations are only valid when operating in a single-
+ * queue environment.
+ */
+static void igc_update_itr(struct igc_q_vector *q_vector,
+ struct igc_ring_container *ring_container)
+{
+ unsigned int packets = ring_container->total_packets;
+ unsigned int bytes = ring_container->total_bytes;
+ u8 itrval = ring_container->itr;
+
+ /* no packets, exit with status unchanged */
+ if (packets == 0)
+ return;
+
+ switch (itrval) {
+ case lowest_latency:
+ /* handle TSO and jumbo frames */
+ if (bytes / packets > 8000)
+ itrval = bulk_latency;
+ else if ((packets < 5) && (bytes > 512))
+ itrval = low_latency;
+ break;
+ case low_latency: /* 50 usec aka 20000 ints/s */
+ if (bytes > 10000) {
+ /* this if handles the TSO accounting */
+ if (bytes / packets > 8000)
+ itrval = bulk_latency;
+ else if ((packets < 10) || ((bytes / packets) > 1200))
+ itrval = bulk_latency;
+ else if ((packets > 35))
+ itrval = lowest_latency;
+ } else if (bytes / packets > 2000) {
+ itrval = bulk_latency;
+ } else if (packets <= 2 && bytes < 512) {
+ itrval = lowest_latency;
+ }
+ break;
+ case bulk_latency: /* 250 usec aka 4000 ints/s */
+ if (bytes > 25000) {
+ if (packets > 35)
+ itrval = low_latency;
+ } else if (bytes < 1500) {
+ itrval = low_latency;
+ }
+ break;
+ }
+
+ /* clear work counters since we have the values we need */
+ ring_container->total_bytes = 0;
+ ring_container->total_packets = 0;
+
+ /* write updated itr to ring container */
+ ring_container->itr = itrval;
+}
+
+/**
+ * igc_intr_msi - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to our private adapter structure
+ */
+static irqreturn_t igc_intr_msi(int irq, void *data)
+{
+ struct igc_adapter *adapter = data;
+ struct igc_q_vector *q_vector = adapter->q_vector[0];
+ struct igc_hw *hw = &adapter->hw;
+ /* read ICR disables interrupts using IAM */
+ u32 icr = rd32(IGC_ICR);
+
+ igc_write_itr(q_vector);
+
+ if (icr & IGC_ICR_DRSTA)
+ schedule_work(&adapter->reset_task);
+
+ if (icr & IGC_ICR_DOUTSYNC) {
+ /* HW is reporting DMA is out of sync */
+ adapter->stats.doosync++;
+ }
+
+ if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
+ hw->mac.get_link_status = 1;
+ if (!test_bit(__IGC_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ }
+
+ napi_schedule(&q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * igc_intr - Legacy Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to our private adapter structure
+ */
+static irqreturn_t igc_intr(int irq, void *data)
+{
+ struct igc_adapter *adapter = data;
+ struct igc_q_vector *q_vector = adapter->q_vector[0];
+ struct igc_hw *hw = &adapter->hw;
+ /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
+ * need for the IMC write
+ */
+ u32 icr = rd32(IGC_ICR);
+
+ /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
+ * not set, then the adapter didn't send an interrupt
+ */
+ if (!(icr & IGC_ICR_INT_ASSERTED))
+ return IRQ_NONE;
+
+ igc_write_itr(q_vector);
+
+ if (icr & IGC_ICR_DRSTA)
+ schedule_work(&adapter->reset_task);
+
+ if (icr & IGC_ICR_DOUTSYNC) {
+ /* HW is reporting DMA is out of sync */
+ adapter->stats.doosync++;
+ }
+
+ if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
+ hw->mac.get_link_status = 1;
+ /* guard against interrupt when we're going down */
+ if (!test_bit(__IGC_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ }
+
+ napi_schedule(&q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+
+static void igc_set_itr(struct igc_q_vector *q_vector)
+{
+ struct igc_adapter *adapter = q_vector->adapter;
+ u32 new_itr = q_vector->itr_val;
+ u8 current_itr = 0;
+
+ /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ case SPEED_100:
+ current_itr = 0;
+ new_itr = IGC_4K_ITR;
+ goto set_itr_now;
+ default:
+ break;
+ }
+
+ igc_update_itr(q_vector, &q_vector->tx);
+ igc_update_itr(q_vector, &q_vector->rx);
+
+ current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
+
+ /* conservative mode (itr 3) eliminates the lowest_latency setting */
+ if (current_itr == lowest_latency &&
+ ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
+ (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
+ current_itr = low_latency;
+
+ switch (current_itr) {
+ /* counts and packets in update_itr are dependent on these numbers */
+ case lowest_latency:
+ new_itr = IGC_70K_ITR; /* 70,000 ints/sec */
+ break;
+ case low_latency:
+ new_itr = IGC_20K_ITR; /* 20,000 ints/sec */
+ break;
+ case bulk_latency:
+ new_itr = IGC_4K_ITR; /* 4,000 ints/sec */
+ break;
+ default:
+ break;
+ }
+
+set_itr_now:
+ if (new_itr != q_vector->itr_val) {
+ /* this attempts to bias the interrupt rate towards Bulk
+ * by adding intermediate steps when interrupt rate is
+ * increasing
+ */
+ new_itr = new_itr > q_vector->itr_val ?
+ max((new_itr * q_vector->itr_val) /
+ (new_itr + (q_vector->itr_val >> 2)),
+ new_itr) : new_itr;
+ /* Don't write the value here; it resets the adapter's
+ * internal timer, and causes us to delay far longer than
+ * we should between interrupts. Instead, we write the ITR
+ * value at the beginning of the next interrupt so the timing
+ * ends up being correct.
+ */
+ q_vector->itr_val = new_itr;
+ q_vector->set_itr = 1;
+ }
+}
+
+static void igc_ring_irq_enable(struct igc_q_vector *q_vector)
+{
+ struct igc_adapter *adapter = q_vector->adapter;
+ struct igc_hw *hw = &adapter->hw;
+
+ if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
+ (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
+ if (adapter->num_q_vectors == 1)
+ igc_set_itr(q_vector);
+ else
+ igc_update_ring_itr(q_vector);
+ }
+
+ if (!test_bit(__IGC_DOWN, &adapter->state)) {
+ if (adapter->msix_entries)
+ wr32(IGC_EIMS, q_vector->eims_value);
+ else
+ igc_irq_enable(adapter);
+ }
+}
+
+/**
+ * igc_poll - NAPI Rx polling callback
+ * @napi: napi polling structure
+ * @budget: count of how many packets we should handle
+ */
+static int igc_poll(struct napi_struct *napi, int budget)
+{
+ struct igc_q_vector *q_vector = container_of(napi,
+ struct igc_q_vector,
+ napi);
+ bool clean_complete = true;
+ int work_done = 0;
+
+ if (q_vector->tx.ring)
+ clean_complete = igc_clean_tx_irq(q_vector, budget);
+
+ if (q_vector->rx.ring) {
+ int cleaned = igc_clean_rx_irq(q_vector, budget);
+
+ work_done += cleaned;
+ if (cleaned >= budget)
+ clean_complete = false;
+ }
+
+ /* If all work not completed, return budget and keep polling */
+ if (!clean_complete)
+ return budget;
+
+ /* If not enough Rx work done, exit the polling mode */
+ napi_complete_done(napi, work_done);
+ igc_ring_irq_enable(q_vector);
+
+ return 0;
+}
+
+/**
+ * igc_set_interrupt_capability - set MSI or MSI-X if supported
+ * @adapter: Pointer to adapter structure
+ * @msix: boolean indicating whether MSI-X should be attempted
+ *
+ * Attempt to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ */
+static void igc_set_interrupt_capability(struct igc_adapter *adapter,
+ bool msix)
+{
+ int numvecs, i;
+ int err;
+
+ if (!msix)
+ goto msi_only;
+ adapter->flags |= IGC_FLAG_HAS_MSIX;
+
+ /* Number of supported queues. */
+ adapter->num_rx_queues = adapter->rss_queues;
+
+ adapter->num_tx_queues = adapter->rss_queues;
+
+ /* start with one vector for every Rx queue */
+ numvecs = adapter->num_rx_queues;
+
+ /* if Tx handler is separate add 1 for every Tx queue */
+ if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS))
+ numvecs += adapter->num_tx_queues;
+
+ /* store the number of vectors reserved for queues */
+ adapter->num_q_vectors = numvecs;
+
+ /* add 1 vector for link status interrupts */
+ numvecs++;
+
+ adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
+ GFP_KERNEL);
+
+ if (!adapter->msix_entries)
+ return;
+
+ /* populate entry values */
+ for (i = 0; i < numvecs; i++)
+ adapter->msix_entries[i].entry = i;
+
+ err = pci_enable_msix_range(adapter->pdev,
+ adapter->msix_entries,
+ numvecs,
+ numvecs);
+ if (err > 0)
+ return;
+
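+ /* MSI-X vector allocation failed; release the entries and fall back
+ * to MSI with a single queue pair.
+ */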
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+
+ igc_reset_interrupt_capability(adapter);
+
+msi_only:
+ adapter->flags &= ~IGC_FLAG_HAS_MSIX;
+
+ adapter->rss_queues = 1;
+ adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
+ adapter->num_rx_queues = 1;
+ adapter->num_tx_queues = 1;
+ adapter->num_q_vectors = 1;
+ if (!pci_enable_msi(adapter->pdev))
+ adapter->flags |= IGC_FLAG_HAS_MSI;
+}
+
+static void igc_add_ring(struct igc_ring *ring,
+ struct igc_ring_container *head)
+{
+ head->ring = ring;
+ head->count++;
+}
+
+/**
+ * igc_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_count: q_vectors allocated on adapter, used for ring interleaving
+ * @v_idx: index of vector in adapter struct
+ * @txr_count: total number of Tx rings to allocate
+ * @txr_idx: index of first Tx ring to allocate
+ * @rxr_count: total number of Rx rings to allocate
+ * @rxr_idx: index of first Rx ring to allocate
+ *
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
+ */
+static int igc_alloc_q_vector(struct igc_adapter *adapter,
+ unsigned int v_count, unsigned int v_idx,
+ unsigned int txr_count, unsigned int txr_idx,
+ unsigned int rxr_count, unsigned int rxr_idx)
+{
+ struct igc_q_vector *q_vector;
+ struct igc_ring *ring;
+ int ring_count, size;
+
+ /* igc only supports 1 Tx and/or 1 Rx queue per vector */
+ if (txr_count > 1 || rxr_count > 1)
+ return -ENOMEM;
+
+ ring_count = txr_count + rxr_count;
+ size = sizeof(struct igc_q_vector) +
+ (sizeof(struct igc_ring) * ring_count);
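+ /* The q_vector and its ring(s) share a single allocation: ring memory
+ * is laid out directly after the q_vector structure and reached via
+ * q_vector->ring below.
+ */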
+
+ /* allocate q_vector and rings */
+ q_vector = adapter->q_vector[v_idx];
+ if (!q_vector)
+ q_vector = kzalloc(size, GFP_KERNEL);
+ else
+ memset(q_vector, 0, size);
+ if (!q_vector)
+ return -ENOMEM;
+
+ /* initialize NAPI */
+ netif_napi_add(adapter->netdev, &q_vector->napi,
+ igc_poll, 64);
+
+ /* tie q_vector and adapter together */
+ adapter->q_vector[v_idx] = q_vector;
+ q_vector->adapter = adapter;
+
+ /* initialize work limits */
+ q_vector->tx.work_limit = adapter->tx_work_limit;
+
+ /* initialize ITR configuration */
+ q_vector->itr_register = adapter->io_addr + IGC_EITR(0);
+ q_vector->itr_val = IGC_START_ITR;
+
+ /* initialize pointer to rings */
+ ring = q_vector->ring;
+
+ /* initialize ITR */
+ if (rxr_count) {
+ /* rx or rx/tx vector */
+ if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
+ q_vector->itr_val = adapter->rx_itr_setting;
+ } else {
+ /* tx only vector */
+ if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
+ q_vector->itr_val = adapter->tx_itr_setting;
+ }
+
+ if (txr_count) {
+ /* assign generic ring traits */
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
+
+ /* update q_vector Tx values */
+ igc_add_ring(ring, &q_vector->tx);
+
+ /* apply Tx specific ring traits */
+ ring->count = adapter->tx_ring_count;
+ ring->queue_index = txr_idx;
+
+ /* assign ring to adapter */
+ adapter->tx_ring[txr_idx] = ring;
+
+ /* push pointer to next ring */
+ ring++;
+ }
+
+ if (rxr_count) {
+ /* assign generic ring traits */
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
+
+ /* update q_vector Rx values */
+ igc_add_ring(ring, &q_vector->rx);
+
+ /* apply Rx specific ring traits */
+ ring->count = adapter->rx_ring_count;
+ ring->queue_index = rxr_idx;
+
+ /* assign ring to adapter */
+ adapter->rx_ring[rxr_idx] = ring;
+ }
+
+ return 0;
+}
+
+/**
+ * igc_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ */
+static int igc_alloc_q_vectors(struct igc_adapter *adapter)
+{
+ int rxr_remaining = adapter->num_rx_queues;
+ int txr_remaining = adapter->num_tx_queues;
+ int rxr_idx = 0, txr_idx = 0, v_idx = 0;
+ int q_vectors = adapter->num_q_vectors;
+ int err;
+
+ if (q_vectors >= (rxr_remaining + txr_remaining)) {
+ for (; rxr_remaining; v_idx++) {
+ err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
+ 0, 0, 1, rxr_idx);
+
+ if (err)
+ goto err_out;
+
+ /* update counts and index */
+ rxr_remaining--;
+ rxr_idx++;
+ }
+ }
+
+ for (; v_idx < q_vectors; v_idx++) {
+ int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
+ int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
+
+ err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
+ tqpv, txr_idx, rqpv, rxr_idx);
+
+ if (err)
+ goto err_out;
+
+ /* update counts and index */
+ rxr_remaining -= rqpv;
+ txr_remaining -= tqpv;
+ rxr_idx++;
+ txr_idx++;
+ }
+
+ return 0;
+
+err_out:
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+ adapter->num_q_vectors = 0;
+
+ while (v_idx--)
+ igc_free_q_vector(adapter, v_idx);
+
+ return -ENOMEM;
+}
+
+/**
+ * igc_cache_ring_register - Descriptor ring to register mapping
+ * @adapter: board private structure to initialize
+ *
+ * Once we know the feature-set enabled for the device, we'll cache
+ * the register offset the descriptor ring is assigned to.
+ */
+static void igc_cache_ring_register(struct igc_adapter *adapter)
+{
+ int i = 0, j = 0;
+
+ switch (adapter->hw.mac.type) {
+ case igc_i225:
+ /* Fall through */
+ default:
+ for (; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i]->reg_idx = i;
+ for (; j < adapter->num_tx_queues; j++)
+ adapter->tx_ring[j]->reg_idx = j;
+ break;
+ }
+}
+
+/**
+ * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
+ * @adapter: Pointer to adapter structure
+ * @msix: boolean indicating whether MSI-X capability should be used
+ *
+ * This function initializes the interrupts and allocates all of the queues.
+ */
+static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int err = 0;
+
+ igc_set_interrupt_capability(adapter, msix);
+
+ err = igc_alloc_q_vectors(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
+ goto err_alloc_q_vectors;
+ }
+
+ igc_cache_ring_register(adapter);
+
+ return 0;
+
+err_alloc_q_vectors:
+ igc_reset_interrupt_capability(adapter);
+ return err;
+}
+
+static void igc_free_irq(struct igc_adapter *adapter)
+{
+ if (adapter->msix_entries) {
+ int vector = 0, i;
+
+ free_irq(adapter->msix_entries[vector++].vector, adapter);
+
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ free_irq(adapter->msix_entries[vector++].vector,
+ adapter->q_vector[i]);
+ } else {
+ free_irq(adapter->pdev->irq, adapter);
+ }
+}
+
+/**
+ * igc_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ */
+static void igc_irq_disable(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+
+ if (adapter->msix_entries) {
+ u32 regval = rd32(IGC_EIAM);
+
+ wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask);
+ wr32(IGC_EIMC, adapter->eims_enable_mask);
+ regval = rd32(IGC_EIAC);
+ wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask);
+ }
+
+ wr32(IGC_IAM, 0);
+ wr32(IGC_IMC, ~0);
+ wrfl();
+
+ if (adapter->msix_entries) {
+ int vector = 0, i;
+
+ synchronize_irq(adapter->msix_entries[vector++].vector);
+
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ synchronize_irq(adapter->msix_entries[vector++].vector);
+ } else {
+ synchronize_irq(adapter->pdev->irq);
+ }
+}
+
+/**
+ * igc_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ */
+static void igc_irq_enable(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+
+ if (adapter->msix_entries) {
+ u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA;
+ u32 regval = rd32(IGC_EIAC);
+
+ wr32(IGC_EIAC, regval | adapter->eims_enable_mask);
+ regval = rd32(IGC_EIAM);
+ wr32(IGC_EIAM, regval | adapter->eims_enable_mask);
+ wr32(IGC_EIMS, adapter->eims_enable_mask);
+ wr32(IGC_IMS, ims);
+ } else {
+ wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
+ wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
+ }
+}
+
+/**
+ * igc_request_irq - initialize interrupts
+ * @adapter: Pointer to adapter structure
+ *
+ * Attempts to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ */
+static int igc_request_irq(struct igc_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ int err = 0;
+
+ if (adapter->flags & IGC_FLAG_HAS_MSIX) {
+ err = igc_request_msix(adapter);
+ if (!err)
+ goto request_done;
+ /* fall back to MSI */
+ igc_free_all_tx_resources(adapter);
+ igc_free_all_rx_resources(adapter);
+
+ igc_clear_interrupt_scheme(adapter);
+ err = igc_init_interrupt_scheme(adapter, false);
+ if (err)
+ goto request_done;
+ igc_setup_all_tx_resources(adapter);
+ igc_setup_all_rx_resources(adapter);
+ igc_configure(adapter);
+ }
+
+ igc_assign_vector(adapter->q_vector[0], 0);
+
+ if (adapter->flags & IGC_FLAG_HAS_MSI) {
+ err = request_irq(pdev->irq, &igc_intr_msi, 0,
+ netdev->name, adapter);
+ if (!err)
+ goto request_done;
+
+ /* fall back to legacy interrupts */
+ igc_reset_interrupt_capability(adapter);
+ adapter->flags &= ~IGC_FLAG_HAS_MSI;
+ }
+
+ err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED,
+ netdev->name, adapter);
+
+ if (err)
+ dev_err(&pdev->dev, "Error %d getting interrupt\n",
+ err);
+
+request_done:
+ return err;
+}
+
+static void igc_write_itr(struct igc_q_vector *q_vector)
+{
+ u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK;
+
+ if (!q_vector->set_itr)
+ return;
+
+ if (!itr_val)
+ itr_val = IGC_ITR_VAL_MASK;
+
+ itr_val |= IGC_EITR_CNT_IGNR;
+
+ writel(itr_val, q_vector->itr_register);
+ q_vector->set_itr = 0;
+}
+
+/**
+ * __igc_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ * @resuming: boolean indicating whether the device is resuming from suspend
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ */
+static int __igc_open(struct net_device *netdev, bool resuming)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+ int err = 0;
+ int i = 0;
+
+ /* disallow open during test */
+
+ if (test_bit(__IGC_TESTING, &adapter->state)) {
+ WARN_ON(resuming);
+ return -EBUSY;
+ }
+
+ netif_carrier_off(netdev);
+
+ /* allocate transmit descriptors */
+ err = igc_setup_all_tx_resources(adapter);
+ if (err)
+ goto err_setup_tx;
+
+ /* allocate receive descriptors */
+ err = igc_setup_all_rx_resources(adapter);
+ if (err)
+ goto err_setup_rx;
+
+ igc_power_up_link(adapter);
+
+ igc_configure(adapter);
+
+ err = igc_request_irq(adapter);
+ if (err)
+ goto err_req_irq;
+
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
+ if (err)
+ goto err_set_queues;
+
+ err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
+ if (err)
+ goto err_set_queues;
+
+ clear_bit(__IGC_DOWN, &adapter->state);
+
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ napi_enable(&adapter->q_vector[i]->napi);
+
+ /* Clear any pending interrupts. */
+ rd32(IGC_ICR);
+ igc_irq_enable(adapter);
+
+ netif_tx_start_all_queues(netdev);
+
+ /* start the watchdog. */
+ hw->mac.get_link_status = 1;
+ schedule_work(&adapter->watchdog_task);
+
+ return IGC_SUCCESS;
+
+err_set_queues:
+ igc_free_irq(adapter);
+err_req_irq:
+ igc_release_hw_control(adapter);
+ igc_power_down_link(adapter);
+ igc_free_all_rx_resources(adapter);
+err_setup_rx:
+ igc_free_all_tx_resources(adapter);
+err_setup_tx:
+ igc_reset(adapter);
+
+ return err;
+}
+
+static int igc_open(struct net_device *netdev)
+{
+ return __igc_open(netdev, false);
+}
+
+/**
+ * __igc_close - Disables a network interface
+ * @netdev: network interface device structure
+ * @suspending: boolean indicating whether the device is being suspended
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the driver's control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ */
+static int __igc_close(struct net_device *netdev, bool suspending)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ WARN_ON(test_bit(__IGC_RESETTING, &adapter->state));
+
+ igc_down(adapter);
+
+ igc_release_hw_control(adapter);
+
+ igc_free_irq(adapter);
+
+ igc_free_all_tx_resources(adapter);
+ igc_free_all_rx_resources(adapter);
+
+ return 0;
+}
+
+static int igc_close(struct net_device *netdev)
+{
+ if (netif_device_present(netdev) || netdev->dismantle)
+ return __igc_close(netdev, false);
+ return 0;
+}
+
+static const struct net_device_ops igc_netdev_ops = {
+ .ndo_open = igc_open,
+ .ndo_stop = igc_close,
+ .ndo_start_xmit = igc_xmit_frame,
+ .ndo_set_mac_address = igc_set_mac,
+ .ndo_change_mtu = igc_change_mtu,
+ .ndo_get_stats = igc_get_stats,
+ .ndo_do_ioctl = igc_ioctl,
+};
+
+/* PCIe configuration access */
+void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
+{
+ struct igc_adapter *adapter = hw->back;
+
+ pci_read_config_word(adapter->pdev, reg, value);
+}
+
+void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
+{
+ struct igc_adapter *adapter = hw->back;
+
+ pci_write_config_word(adapter->pdev, reg, *value);
+}
+
+s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
+{
+ struct igc_adapter *adapter = hw->back;
+ u16 cap_offset;
+
+ cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
+ if (!cap_offset)
+ return -IGC_ERR_CONFIG;
+
+ pci_read_config_word(adapter->pdev, cap_offset + reg, value);
+
+ return IGC_SUCCESS;
+}
+
+s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
+{
+ struct igc_adapter *adapter = hw->back;
+ u16 cap_offset;
+
+ cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
+ if (!cap_offset)
+ return -IGC_ERR_CONFIG;
+
+ pci_write_config_word(adapter->pdev, cap_offset + reg, *value);
+
+ return IGC_SUCCESS;
+}
+
+u32 igc_rd32(struct igc_hw *hw, u32 reg)
+{
+ struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw);
+ u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
+ u32 value = 0;
+
+ if (IGC_REMOVED(hw_addr))
+ return ~value;
+
+ value = readl(&hw_addr[reg]);
+
+ /* reads should not return all F's */
+ if (!(~value) && (!reg || !(~readl(hw_addr)))) {
+ struct net_device *netdev = igc->netdev;
+
+ hw->hw_addr = NULL;
+ netif_device_detach(netdev);
+ netdev_err(netdev, "PCIe link lost, device now detached\n");
+ }
+
+ return value;
+}
+
+/**
+ * igc_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in igc_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * igc_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring the adapter private structure,
+ * and a hardware reset occur.
+ */
+static int igc_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct igc_adapter *adapter;
+ struct net_device *netdev;
+ struct igc_hw *hw;
+ const struct igc_info *ei = igc_info_tbl[ent->driver_data];
+ int err, pci_using_dac;
+
+ err = pci_enable_device_mem(pdev);
+ if (err)
+ return err;
+
+ pci_using_dac = 0;
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (!err) {
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(64));
+ if (!err)
+ pci_using_dac = 1;
+ } else {
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
+ if (err) {
+ IGC_ERR("Wrong DMA configuration, aborting\n");
+ goto err_dma;
+ }
+ }
+ }
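+ /* At this point pci_using_dac is 1 only if both the streaming and
+ * coherent DMA masks were set to 64 bits.
+ */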
+
+ err = pci_request_selected_regions(pdev,
+ pci_select_bars(pdev,
+ IORESOURCE_MEM),
+ igc_driver_name);
+ if (err)
+ goto err_pci_reg;
+
+ pci_enable_pcie_error_reporting(pdev);
+
+ pci_set_master(pdev);
+
+ err = -ENOMEM;
+ netdev = alloc_etherdev_mq(sizeof(struct igc_adapter),
+ IGC_MAX_TX_QUEUES);
+
+ if (!netdev)
+ goto err_alloc_etherdev;
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ pci_set_drvdata(pdev, netdev);
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ hw = &adapter->hw;
+ hw->back = adapter;
+ adapter->port_num = hw->bus.func;
+ adapter->msg_enable = GENMASK(debug - 1, 0);
+
+ err = pci_save_state(pdev);
+ if (err)
+ goto err_ioremap;
+
+ err = -EIO;
+ adapter->io_addr = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!adapter->io_addr)
+ goto err_ioremap;
+
+ /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */
+ hw->hw_addr = adapter->io_addr;
+
+ netdev->netdev_ops = &igc_netdev_ops;
+
+ netdev->watchdog_timeo = 5 * HZ;
+
+ netdev->mem_start = pci_resource_start(pdev, 0);
+ netdev->mem_end = pci_resource_end(pdev, 0);
+
+ /* PCI config space info */
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ hw->revision_id = pdev->revision;
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_device_id = pdev->subsystem_device;
+
+ /* Copy the default MAC and PHY function pointers */
+ memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
+ memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
+
+ /* Initialize skew-specific constants */
+ err = ei->get_invariants(hw);
+ if (err)
+ goto err_sw_init;
+
+ /* setup the private structure */
+ err = igc_sw_init(adapter);
+ if (err)
+ goto err_sw_init;
+
+ /* MTU range: 68 - 9216 */
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
+
+ /* before reading the NVM, reset the controller to put the device in a
+ * known good starting state
+ */
+ hw->mac.ops.reset_hw(hw);
+
+ if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
+ /* copy the MAC address out of the NVM */
+ if (hw->mac.ops.read_mac_addr(hw))
+ dev_err(&pdev->dev, "NVM Read Error\n");
+ }
+
+ memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
+
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+ dev_err(&pdev->dev, "Invalid MAC Address\n");
+ err = -EIO;
+ goto err_eeprom;
+ }
+
+ /* configure RXPBSIZE and TXPBSIZE */
+ wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT);
+ wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
+
+ timer_setup(&adapter->watchdog_timer, igc_watchdog, 0);
+ timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0);
+
+ INIT_WORK(&adapter->reset_task, igc_reset_task);
+ INIT_WORK(&adapter->watchdog_task, igc_watchdog_task);
+
+ /* Initialize link properties that are user-changeable */
+ adapter->fc_autoneg = true;
+ hw->mac.autoneg = true;
+ hw->phy.autoneg_advertised = 0xaf;
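+ /* 0xaf advertises 10/100 half and full duplex plus 1000 and 2500
+ * full duplex (half duplex is not advertised at gigabit and above).
+ */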
+
+ hw->fc.requested_mode = igc_fc_default;
+ hw->fc.current_mode = igc_fc_default;
+
+ /* reset the hardware with the new settings */
+ igc_reset(adapter);
+
+ /* let the f/w know that the h/w is now under the control of the
+ * driver.
+ */
+ igc_get_hw_control(adapter);
+
+ strncpy(netdev->name, "eth%d", IFNAMSIZ);
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+
+ /* carrier off reporting is important to ethtool even BEFORE open */
+ netif_carrier_off(netdev);
+
+ /* Check if Media Autosense is enabled */
+ adapter->ei = *ei;
+
+ /* print pcie link status and MAC address */
+ pcie_print_link_status(pdev);
+ netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);
+
+ return 0;
+
+err_register:
+ igc_release_hw_control(adapter);
+err_eeprom:
+ if (!igc_check_reset_block(hw))
+ igc_reset_phy(hw);
+err_sw_init:
+ igc_clear_interrupt_scheme(adapter);
+ iounmap(adapter->io_addr);
+err_ioremap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+err_pci_reg:
+err_dma:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * igc_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * igc_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ */
+static void igc_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ set_bit(__IGC_DOWN, &adapter->state);
+
+ del_timer_sync(&adapter->watchdog_timer);
+ del_timer_sync(&adapter->phy_info_timer);
+
+ cancel_work_sync(&adapter->reset_task);
+ cancel_work_sync(&adapter->watchdog_task);
+
+ /* Release control of h/w to f/w. If f/w is AMT enabled, this
+ * would have already happened in close and is redundant.
+ */
+ igc_release_hw_control(adapter);
+ unregister_netdev(netdev);
+
+ igc_clear_interrupt_scheme(adapter);
+ pci_iounmap(pdev, adapter->io_addr);
+ pci_release_mem_regions(pdev);
+
+ kfree(adapter->mac_table);
+ kfree(adapter->shadow_vfta);
+ free_netdev(netdev);
+
+ pci_disable_pcie_error_reporting(pdev);
+
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver igc_driver = {
+ .name = igc_driver_name,
+ .id_table = igc_pci_tbl,
+ .probe = igc_probe,
+ .remove = igc_remove,
+};
+
+static void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
+ const u32 max_rss_queues)
+{
+ /* Determine if we need to pair queues. */
+ /* If rss_queues > half of max_rss_queues, pair the queues in
+ * order to conserve interrupts due to limited supply.
+ */
+ if (adapter->rss_queues > (max_rss_queues / 2))
+ adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
+ else
+ adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS;
+}
+
+static unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter)
+{
+ unsigned int max_rss_queues;
+
+ /* Determine the maximum number of RSS queues supported. */
+ max_rss_queues = IGC_MAX_RX_QUEUES;
+
+ return max_rss_queues;
+}
+
+static void igc_init_queue_configuration(struct igc_adapter *adapter)
+{
+ u32 max_rss_queues;
+
+ max_rss_queues = igc_get_max_rss_queues(adapter);
+ adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
+
+ igc_set_flag_queue_pairs(adapter, max_rss_queues);
+}
+
+/**
+ * igc_sw_init - Initialize general software structures (struct igc_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * igc_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ */
+static int igc_sw_init(struct igc_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct igc_hw *hw = &adapter->hw;
+
+ int size = sizeof(struct igc_mac_addr) * hw->mac.rar_entry_count;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
+
+ /* set default ring sizes */
+ adapter->tx_ring_count = IGC_DEFAULT_TXD;
+ adapter->rx_ring_count = IGC_DEFAULT_RXD;
+
+ /* set default ITR values */
+ adapter->rx_itr_setting = IGC_DEFAULT_ITR;
+ adapter->tx_itr_setting = IGC_DEFAULT_ITR;
+
+ /* set default work limits */
+ adapter->tx_work_limit = IGC_DEFAULT_TX_WORK;
+
+ /* adjust max frame to be at least the size of a standard frame */
+ adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
+ VLAN_HLEN;
+ adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+
+ spin_lock_init(&adapter->nfc_lock);
+ spin_lock_init(&adapter->stats64_lock);
+ /* Assume MSI-X interrupts, will be checked during IRQ allocation */
+ adapter->flags |= IGC_FLAG_HAS_MSIX;
+
+ adapter->mac_table = kzalloc(size, GFP_ATOMIC);
+ if (!adapter->mac_table)
+ return -ENOMEM;
+
+ igc_init_queue_configuration(adapter);
+
+ /* This call may decrease the number of queues */
+ if (igc_init_interrupt_scheme(adapter, true)) {
+ dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ return -ENOMEM;
+ }
+
+ /* Explicitly disable IRQ since the NIC can be in any state. */
+ igc_irq_disable(adapter);
+
+ set_bit(__IGC_DOWN, &adapter->state);
+
+ return 0;
+}
+
+/**
+ * igc_get_hw_dev - return device
+ * @hw: pointer to hardware structure
+ *
+ * used by hardware layer to print debugging information
+ */
+struct net_device *igc_get_hw_dev(struct igc_hw *hw)
+{
+ struct igc_adapter *adapter = hw->back;
+
+ return adapter->netdev;
+}
+
+/**
+ * igc_init_module - Driver Registration Routine
+ *
+ * igc_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ */
+static int __init igc_init_module(void)
+{
+ int ret;
+
+ pr_info("%s - version %s\n",
+ igc_driver_string, igc_driver_version);
+
+ pr_info("%s\n", igc_copyright);
+
+ ret = pci_register_driver(&igc_driver);
+ return ret;
+}
+
+module_init(igc_init_module);
+
+/**
+ * igc_exit_module - Driver Exit Cleanup Routine
+ *
+ * igc_exit_module is called just before the driver is removed
+ * from memory.
+ */
+static void __exit igc_exit_module(void)
+{
+ pci_unregister_driver(&igc_driver);
+}
+
+module_exit(igc_exit_module);
+/* igc_main.c */
diff --git a/drivers/net/ethernet/intel/igc/igc_nvm.c b/drivers/net/ethernet/intel/igc/igc_nvm.c
new file mode 100644
index 000000000000..58f81aba0144
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_nvm.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 Intel Corporation */
+
+#include "igc_mac.h"
+#include "igc_nvm.h"
+
+/**
+ * igc_poll_eerd_eewr_done - Poll for EEPROM read/write completion
+ * @hw: pointer to the HW structure
+ * @ee_reg: EEPROM flag for polling
+ *
+ * Polls the EEPROM status bit for either read or write completion based
+ * upon the value of 'ee_reg'.
+ */
+static s32 igc_poll_eerd_eewr_done(struct igc_hw *hw, int ee_reg)
+{
+ s32 ret_val = -IGC_ERR_NVM;
+ u32 attempts = 100000;
+ u32 i, reg = 0;
+
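+ /* Poll up to 'attempts' times with a 5 usec delay between reads,
+ * i.e. roughly half a second in total.
+ */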
+ for (i = 0; i < attempts; i++) {
+ if (ee_reg == IGC_NVM_POLL_READ)
+ reg = rd32(IGC_EERD);
+ else
+ reg = rd32(IGC_EEWR);
+
+ if (reg & IGC_NVM_RW_REG_DONE) {
+ ret_val = 0;
+ break;
+ }
+
+ udelay(5);
+ }
+
+ return ret_val;
+}
+
+/**
+ * igc_acquire_nvm - Generic request for access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ * Return successful if access grant bit set, else clear the request for
+ * EEPROM access and return -IGC_ERR_NVM (-1).
+ */
+s32 igc_acquire_nvm(struct igc_hw *hw)
+{
+ s32 timeout = IGC_NVM_GRANT_ATTEMPTS;
+ u32 eecd = rd32(IGC_EECD);
+ s32 ret_val = 0;
+
+ wr32(IGC_EECD, eecd | IGC_EECD_REQ);
+ eecd = rd32(IGC_EECD);
+
+ while (timeout) {
+ if (eecd & IGC_EECD_GNT)
+ break;
+ udelay(5);
+ eecd = rd32(IGC_EECD);
+ timeout--;
+ }
+
+ if (!timeout) {
+ eecd &= ~IGC_EECD_REQ;
+ wr32(IGC_EECD, eecd);
+ hw_dbg("Could not acquire NVM grant\n");
+ ret_val = -IGC_ERR_NVM;
+ }
+
+ return ret_val;
+}
+
+/**
+ * igc_release_nvm - Release exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Stop any current commands to the EEPROM and clear the EEPROM request bit.
+ */
+void igc_release_nvm(struct igc_hw *hw)
+{
+ u32 eecd;
+
+ eecd = rd32(IGC_EECD);
+ eecd &= ~IGC_EECD_REQ;
+ wr32(IGC_EECD, eecd);
+}
+
+/**
+ * igc_read_nvm_eerd - Reads EEPROM using EERD register
+ * @hw: pointer to the HW structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the EERD register.
+ */
+s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ struct igc_nvm_info *nvm = &hw->nvm;
+ u32 i, eerd = 0;
+ s32 ret_val = 0;
+
+ /* A check for invalid values: offset too large, too many words,
+ * and not enough words.
+ */
+ if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) ||
+ words == 0) {
+ hw_dbg("nvm parameter(s) out of bounds\n");
+ ret_val = -IGC_ERR_NVM;
+ goto out;
+ }
+
+ for (i = 0; i < words; i++) {
+ eerd = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) +
+ IGC_NVM_RW_REG_START;
+
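+ /* Write the word address plus the START bit, then poll until the
+ * DONE bit is set before picking up the returned data word.
+ */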
+ wr32(IGC_EERD, eerd);
+ ret_val = igc_poll_eerd_eewr_done(hw, IGC_NVM_POLL_READ);
+ if (ret_val)
+ break;
+
+ data[i] = (rd32(IGC_EERD) >> IGC_NVM_RW_REG_DATA);
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igc_read_mac_addr - Read device MAC address
+ * @hw: pointer to the HW structure
+ */
+s32 igc_read_mac_addr(struct igc_hw *hw)
+{
+ u32 rar_high;
+ u32 rar_low;
+ u16 i;
+
+ rar_high = rd32(IGC_RAH(0));
+ rar_low = rd32(IGC_RAL(0));
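+ /* RAL(0) holds the low four bytes of the permanent MAC address and
+ * RAH(0) the remaining two, stored least-significant byte first.
+ */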
+
+ for (i = 0; i < IGC_RAL_MAC_ADDR_LEN; i++)
+ hw->mac.perm_addr[i] = (u8)(rar_low >> (i * 8));
+
+ for (i = 0; i < IGC_RAH_MAC_ADDR_LEN; i++)
+ hw->mac.perm_addr[i + 4] = (u8)(rar_high >> (i * 8));
+
+ for (i = 0; i < ETH_ALEN; i++)
+ hw->mac.addr[i] = hw->mac.perm_addr[i];
+
+ return 0;
+}
+
+/**
+ * igc_validate_nvm_checksum - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ * and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ */
+s32 igc_validate_nvm_checksum(struct igc_hw *hw)
+{
+ u16 checksum = 0;
+ u16 i, nvm_data;
+ s32 ret_val = 0;
+
+ for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
+ ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+ if (ret_val) {
+ hw_dbg("NVM Read Error\n");
+ goto out;
+ }
+ checksum += nvm_data;
+ }
+
+ if (checksum != (u16)NVM_SUM) {
+ hw_dbg("NVM Checksum Invalid\n");
+ ret_val = -IGC_ERR_NVM;
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igc_update_nvm_checksum - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ * up to the checksum. Then calculates the EEPROM checksum and writes the
+ * value to the EEPROM.
+ */
+s32 igc_update_nvm_checksum(struct igc_hw *hw)
+{
+ u16 checksum = 0;
+ u16 i, nvm_data;
+ s32 ret_val;
+
+ for (i = 0; i < NVM_CHECKSUM_REG; i++) {
+ ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+ if (ret_val) {
+ hw_dbg("NVM Read Error while updating checksum.\n");
+ goto out;
+ }
+ checksum += nvm_data;
+ }
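+ /* The checksum word is chosen so that all words, including itself,
+ * sum to NVM_SUM.
+ */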
+ checksum = (u16)NVM_SUM - checksum;
+ ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
+ if (ret_val)
+ hw_dbg("NVM Write Error while updating checksum.\n");
+
+out:
+ return ret_val;
+}
diff --git a/drivers/net/ethernet/intel/igc/igc_nvm.h b/drivers/net/ethernet/intel/igc/igc_nvm.h
new file mode 100644
index 000000000000..f9fc2e9cfb03
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_nvm.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018 Intel Corporation */
+
+#ifndef _IGC_NVM_H_
+#define _IGC_NVM_H_
+
+s32 igc_acquire_nvm(struct igc_hw *hw);
+void igc_release_nvm(struct igc_hw *hw);
+s32 igc_read_mac_addr(struct igc_hw *hw);
+s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data);
+s32 igc_validate_nvm_checksum(struct igc_hw *hw);
+s32 igc_update_nvm_checksum(struct igc_hw *hw);
+
+#endif
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
new file mode 100644
index 000000000000..38e43e6fc1c7
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_phy.c
@@ -0,0 +1,791 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 Intel Corporation */
+
+#include "igc_phy.h"
+
+/* forward declaration */
+static s32 igc_phy_setup_autoneg(struct igc_hw *hw);
+static s32 igc_wait_autoneg(struct igc_hw *hw);
+
+/**
+ * igc_check_reset_block - Check if PHY reset is blocked
+ * @hw: pointer to the HW structure
+ *
+ * Read the PHY management control register and check whether a PHY reset
+ * is blocked. If a reset is not blocked return 0, otherwise
+ * return IGC_ERR_BLK_PHY_RESET (12).
+ */
+s32 igc_check_reset_block(struct igc_hw *hw)
+{
+ u32 manc;
+
+ manc = rd32(IGC_MANC);
+
+ return (manc & IGC_MANC_BLK_PHY_RST_ON_IDE) ?
+ IGC_ERR_BLK_PHY_RESET : 0;
+}
+
+/**
+ * igc_get_phy_id - Retrieve the PHY ID and revision
+ * @hw: pointer to the HW structure
+ *
+ * Reads the PHY registers and stores the PHY ID and possibly the PHY
+ * revision in the hardware structure.
+ */
+s32 igc_get_phy_id(struct igc_hw *hw)
+{
+ struct igc_phy_info *phy = &hw->phy;
+ s32 ret_val = 0;
+ u16 phy_id;
+
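+ /* The PHY ID is split across two registers: PHY_ID1 supplies the
+ * upper 16 bits and PHY_ID2 the lower bits, with the bits outside
+ * PHY_REVISION_MASK giving the revision.
+ */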
+ ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
+ if (ret_val)
+ goto out;
+
+ phy->id = (u32)(phy_id << 16);
+ usleep_range(200, 500);
+ ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id);
+ if (ret_val)
+ goto out;
+
+ phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
+ phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
+
+out:
+ return ret_val;
+}
+
+/**
+ * igc_phy_has_link - Polls PHY for link
+ * @hw: pointer to the HW structure
+ * @iterations: number of times to poll for link
+ * @usec_interval: delay between polling attempts
+ * @success: pointer to whether polling was successful or not
+ *
+ * Polls the PHY status register for link, 'iterations' number of times.
+ */
+s32 igc_phy_has_link(struct igc_hw *hw, u32 iterations,
+ u32 usec_interval, bool *success)
+{
+ u16 i, phy_status;
+ s32 ret_val = 0;
+
+ for (i = 0; i < iterations; i++) {
+ /* Some PHYs require the PHY_STATUS register to be read
+ * twice due to the link bit being sticky. No harm doing
+ * it across the board.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+ if (ret_val && usec_interval > 0) {
+ /* If the first read fails, another entity may have
+ * ownership of the resources, wait and try again to
+ * see if they have relinquished the resources yet.
+ */
+ if (usec_interval >= 1000)
+ mdelay(usec_interval / 1000);
+ else
+ udelay(usec_interval);
+ }
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+ if (ret_val)
+ break;
+ if (phy_status & MII_SR_LINK_STATUS)
+ break;
+ if (usec_interval >= 1000)
+ mdelay(usec_interval / 1000);
+ else
+ udelay(usec_interval);
+ }
+
+ *success = (i < iterations) ? true : false;
+
+ return ret_val;
+}
+
+/**
+ * igc_power_up_phy_copper - Restore copper link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during a
+ * driver unload, restore the link to previous settings.
+ */
+void igc_power_up_phy_copper(struct igc_hw *hw)
+{
+ u16 mii_reg = 0;
+
+ /* The PHY will retain its settings across a power down/up cycle */
+ hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
+ mii_reg &= ~MII_CR_POWER_DOWN;
+ hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
+}
+
+/**
+ * igc_power_down_phy_copper - Power down copper PHY
+ * @hw: pointer to the HW structure
+ *
+ * Power down PHY to save power when interface is down and wake on lan
+ * is not enabled.
+ */
+void igc_power_down_phy_copper(struct igc_hw *hw)
+{
+ u16 mii_reg = 0;
+
+ /* The PHY will retain its settings across a power down/up cycle */
+ hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
+ mii_reg |= MII_CR_POWER_DOWN;
+
+ /* Temporary workaround - should be removed when PHY will implement
+ * IEEE registers as properly
+ */
+ /* hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);*/
+ usleep_range(1000, 2000);
+}
+
+/**
+ * igc_check_downshift - Checks whether a downshift in speed occurred
+ * @hw: pointer to the HW structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * A downshift is detected by querying the PHY link health.
+ */
+s32 igc_check_downshift(struct igc_hw *hw)
+{
+ struct igc_phy_info *phy = &hw->phy;
+ u16 phy_data, offset, mask;
+ s32 ret_val;
+
+ switch (phy->type) {
+ case igc_phy_i225:
+ default:
+ /* speed downshift not supported */
+ phy->speed_downgraded = false;
+ ret_val = 0;
+ goto out;
+ }
+
+ ret_val = phy->ops.read_reg(hw, offset, &phy_data);
+
+ if (!ret_val)
+ phy->speed_downgraded = (phy_data & mask) ? true : false;
+
+out:
+ return ret_val;
+}
+
+/**
+ * igc_phy_hw_reset - PHY hardware reset
+ * @hw: pointer to the HW structure
+ *
+ * Verify the reset block is not blocking us from resetting. Acquire
+ * semaphore (if necessary) and read/set/write the device control reset
+ * bit in the PHY. Wait the appropriate delay time for the device to
+ * reset and release the semaphore (if necessary).
+ */
+s32 igc_phy_hw_reset(struct igc_hw *hw)
+{
+ struct igc_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u32 ctrl;
+
+ ret_val = igc_check_reset_block(hw);
+ if (ret_val) {
+ ret_val = 0;
+ goto out;
+ }
+
+ ret_val = phy->ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ ctrl = rd32(IGC_CTRL);
+ wr32(IGC_CTRL, ctrl | IGC_CTRL_PHY_RST);
+ wrfl();
+
+ udelay(phy->reset_delay_us);
+
+ wr32(IGC_CTRL, ctrl);
+ wrfl();
+
+ usleep_range(1500, 2000);
+
+ phy->ops.release(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * igc_copper_link_autoneg - Setup/Enable autoneg for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Performs initial bounds checking on the autoneg advertisement parameter,
+ * then configures it to advertise the full capability. Sets up the PHY for
+ * autoneg and restarts the negotiation process with the link partner. If
+ * autoneg_wait_to_complete is set, waits for autoneg to complete before
+ * exiting.
+ */
+static s32 igc_copper_link_autoneg(struct igc_hw *hw)
+{
+ struct igc_phy_info *phy = &hw->phy;
+ u16 phy_ctrl;
+ s32 ret_val;
+
+ /* Perform some bounds checking on the autoneg advertisement
+ * parameter.
+ */
+ phy->autoneg_advertised &= phy->autoneg_mask;
+
+ /* If autoneg_advertised is zero, we assume it was not defaulted
+ * by the calling code so we set to advertise full capability.
+ */
+ if (phy->autoneg_advertised == 0)
+ phy->autoneg_advertised = phy->autoneg_mask;
+
+ hw_dbg("Reconfiguring auto-neg advertisement params\n");
+ ret_val = igc_phy_setup_autoneg(hw);
+ if (ret_val) {
+ hw_dbg("Error Setting up Auto-Negotiation\n");
+ goto out;
+ }
+ hw_dbg("Restarting Auto-Neg\n");
+
+ /* Restart auto-negotiation by setting the Auto Neg Enable bit and
+ * the Auto Neg Restart bit in the PHY control register.
+ */
+ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
+ if (ret_val)
+ goto out;
+
+ phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
+ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
+ if (ret_val)
+ goto out;
+
+ /* Does the user want to wait for Auto-Neg to complete here, or
+ * check at a later time (for example, callback routine).
+ */
+ if (phy->autoneg_wait_to_complete) {
+ ret_val = igc_wait_autoneg(hw);
+ if (ret_val) {
+ hw_dbg("Error while waiting for autoneg to complete\n");
+ goto out;
+ }
+ }
+
+ hw->mac.get_link_status = true;
+
+out:
+ return ret_val;
+}
+
+/**
+ * igc_wait_autoneg - Wait for auto-neg completion
+ * @hw: pointer to the HW structure
+ *
+ * Waits for auto-negotiation to complete or for the auto-negotiation time
+ * limit to expire, whichever happens first.
+ */
+static s32 igc_wait_autoneg(struct igc_hw *hw)
+{
+ u16 i, phy_status;
+ s32 ret_val = 0;
+
+ /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
+ for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
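+ /* Like the link bit, the completion status can be latched, so read
+ * PHY_STATUS twice and only trust the second value.
+ */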
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+ if (ret_val)
+ break;
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+ if (ret_val)
+ break;
+ if (phy_status & MII_SR_AUTONEG_COMPLETE)
+ break;
+ msleep(100);
+ }
+
+ /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
+ * has completed.
+ */
+ return ret_val;
+}
+
+/**
+ * igc_phy_setup_autoneg - Configure PHY for auto-negotiation
+ * @hw: pointer to the HW structure
+ *
+ * Reads the MII auto-neg advertisement register and/or the 1000T control
+ * register and if the PHY is already setup for auto-negotiation, then
+ * return successful. Otherwise, setup advertisement and flow control to
+ * the appropriate values for the wanted auto-negotiation.
+ */
+static s32 igc_phy_setup_autoneg(struct igc_hw *hw)
+{
+ struct igc_phy_info *phy = &hw->phy;
+ u16 aneg_multigbt_an_ctrl = 0;
+ u16 mii_1000t_ctrl_reg = 0;
+ u16 mii_autoneg_adv_reg;
+ s32 ret_val;
+
+ phy->autoneg_advertised &= phy->autoneg_mask;
+
+ /* Read the MII Auto-Neg Advertisement Register (Address 4). */
+ ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
+ /* Read the MII 1000Base-T Control Register (Address 9). */
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL,
+ &mii_1000t_ctrl_reg);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if ((phy->autoneg_mask & ADVERTISE_2500_FULL) &&
+ hw->phy.id == I225_I_PHY_ID) {
+ /* Read the MULTI GBT AN Control Register - reg 7.32 */
+ ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
+ MMD_DEVADDR_SHIFT) |
+ ANEG_MULTIGBT_AN_CTRL,
+ &aneg_multigbt_an_ctrl);
+
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Need to parse both autoneg_advertised and fc and set up
+ * the appropriate PHY registers. First we will parse for
+ * autoneg_advertised software override. Since we can advertise
+ * a plethora of combinations, we need to check each bit
+ * individually.
+ */
+
+ /* First we clear all the 10/100 mb speed bits in the Auto-Neg
+ * Advertisement Register (Address 4) and the 1000 mb speed bits in
+ * the 1000Base-T Control Register (Address 9).
+ */
+ mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS |
+ NWAY_AR_100TX_HD_CAPS |
+ NWAY_AR_10T_FD_CAPS |
+ NWAY_AR_10T_HD_CAPS);
+ mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);
+
+ hw_dbg("autoneg_advertised %x\n", phy->autoneg_advertised);
+
+ /* Do we want to advertise 10 Mb Half Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
+ hw_dbg("Advertise 10mb Half duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
+ }
+
+ /* Do we want to advertise 10 Mb Full Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
+ hw_dbg("Advertise 10mb Full duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
+ }
+
+ /* Do we want to advertise 100 Mb Half Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
+ hw_dbg("Advertise 100mb Half duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
+ }
+
+ /* Do we want to advertise 100 Mb Full Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
+ hw_dbg("Advertise 100mb Full duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
+ }
+
+ /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
+ if (phy->autoneg_advertised & ADVERTISE_1000_HALF)
+ hw_dbg("Advertise 1000mb Half duplex request denied!\n");
+
+ /* Do we want to advertise 1000 Mb Full Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
+ hw_dbg("Advertise 1000mb Full duplex\n");
+ mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
+ }
+
+ /* We do not allow the Phy to advertise 2500 Mb Half Duplex */
+ if (phy->autoneg_advertised & ADVERTISE_2500_HALF)
+ hw_dbg("Advertise 2500mb Half duplex request denied!\n");
+
+ /* Do we want to advertise 2500 Mb Full Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_2500_FULL) {
+ hw_dbg("Advertise 2500mb Full duplex\n");
+ aneg_multigbt_an_ctrl |= CR_2500T_FD_CAPS;
+ } else {
+ aneg_multigbt_an_ctrl &= ~CR_2500T_FD_CAPS;
+ }
+
+ /* Check for a software override of the flow control settings, and
+ * setup the PHY advertisement registers accordingly. If
+ * auto-negotiation is enabled, then software will have to set the
+ * "PAUSE" bits to the correct value in the Auto-Negotiation
+ * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-
+ * negotiation.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames
+ * but we do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ * other: No software override. The flow control configuration
+ * in the EEPROM is used.
+ */
+ switch (hw->fc.current_mode) {
+ case igc_fc_none:
+ /* Flow control (Rx & Tx) is completely disabled by a
+ * software over-ride.
+ */
+ mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ case igc_fc_rx_pause:
+ /* Rx Flow control is enabled, and Tx Flow control is
+ * disabled, by a software over-ride.
+ *
+ * Since there really isn't a way to advertise that we are
+ * capable of Rx Pause ONLY, we will advertise that we
+ * support both symmetric and asymmetric Rx PAUSE. Later
+ * (in igc_config_fc_after_link_up) we will disable the
+ * hw's ability to send PAUSE frames.
+ */
+ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ case igc_fc_tx_pause:
+ /* Tx Flow control is enabled, and Rx Flow control is
+ * disabled, by a software over-ride.
+ */
+ mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
+ mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
+ break;
+ case igc_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by a software
+ * over-ride.
+ */
+ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ default:
+ hw_dbg("Flow control param set incorrectly\n");
+ return -IGC_ERR_CONFIG;
+ }
+
+ ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
+ if (ret_val)
+ return ret_val;
+
+ hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
+
+ if (phy->autoneg_mask & ADVERTISE_1000_FULL)
+ ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL,
+ mii_1000t_ctrl_reg);
+
+ if ((phy->autoneg_mask & ADVERTISE_2500_FULL) &&
+ hw->phy.id == I225_I_PHY_ID)
+ ret_val = phy->ops.write_reg(hw,
+ (STANDARD_AN_REG_MASK <<
+ MMD_DEVADDR_SHIFT) |
+ ANEG_MULTIGBT_AN_CTRL,
+ aneg_multigbt_an_ctrl);
+
+ return ret_val;
+}
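
[Editor's note, illustrative only] The 2.5G advertisement register touched above is IEEE 802.3 clause-45 register 7.32, addressed through a combined device/offset word. A minimal sketch of that encoding follows; the shift and mask values are assumptions for illustration, not definitions taken from this patch.

	/* Illustrative sketch only (assumes <linux/types.h>): build the combined
	 * MMD device/offset word used by the read_reg/write_reg calls above.
	 */
	#define EX_MMD_DEVADDR_SHIFT		16
	#define EX_STANDARD_AN_REG_MASK		0x0007	/* MMD 7: auto-negotiation */
	#define EX_ANEG_MULTIGBT_AN_CTRL	0x0020	/* register 32 */

	static inline u32 example_multigbt_an_ctrl_addr(void)
	{
		return (EX_STANDARD_AN_REG_MASK << EX_MMD_DEVADDR_SHIFT) |
		       EX_ANEG_MULTIGBT_AN_CTRL;
	}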
+
+/**
+ * igc_setup_copper_link - Configure copper link settings
+ * @hw: pointer to the HW structure
+ *
+ * Calls the appropriate function to configure the link for auto-neg or forced
+ * speed and duplex. Then we check for link; once link is established, the
+ * collision distance and flow control configuration calls are made. If link is
+ * not established, we return -IGC_ERR_PHY (-2).
+ */
+s32 igc_setup_copper_link(struct igc_hw *hw)
+{
+ s32 ret_val = 0;
+ bool link;
+
+ if (hw->mac.autoneg) {
+ /* Setup autoneg and flow control advertisement and perform
+ * autonegotiation.
+ */
+ ret_val = igc_copper_link_autoneg(hw);
+ if (ret_val)
+ goto out;
+ } else {
+ /* PHY will be set to 10H, 10F, 100H or 100F
+ * depending on user settings.
+ */
+ hw_dbg("Forcing Speed and Duplex\n");
+ ret_val = hw->phy.ops.force_speed_duplex(hw);
+ if (ret_val) {
+ hw_dbg("Error Forcing Speed and Duplex\n");
+ goto out;
+ }
+ }
+
+ /* Check link status. Wait up to 100 microseconds for link to become
+ * valid.
+ */
+ ret_val = igc_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link);
+ if (ret_val)
+ goto out;
+
+ if (link) {
+ hw_dbg("Valid link established!!!\n");
+ igc_config_collision_dist(hw);
+ ret_val = igc_config_fc_after_link_up(hw);
+ } else {
+ hw_dbg("Unable to establish link!!!\n");
+ }
+
+out:
+ return ret_val;
+}

+
+/**
+ * igc_read_phy_reg_mdic - Read MDI control register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the MDI control register in the PHY at offset and stores the
+ * information read to data.
+ */
+static s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data)
+{
+ struct igc_phy_info *phy = &hw->phy;
+ u32 i, mdic = 0;
+ s32 ret_val = 0;
+
+ if (offset > MAX_PHY_REG_ADDRESS) {
+ hw_dbg("PHY Address %d is out of range\n", offset);
+ ret_val = -IGC_ERR_PARAM;
+ goto out;
+ }
+
+ /* Set up Op-code, Phy Address, and register offset in the MDI
+ * Control register. The MAC will take care of interfacing with the
+ * PHY to retrieve the desired data.
+ */
+ mdic = ((offset << IGC_MDIC_REG_SHIFT) |
+ (phy->addr << IGC_MDIC_PHY_SHIFT) |
+ (IGC_MDIC_OP_READ));
+
+ wr32(IGC_MDIC, mdic);
+
+ /* Poll the ready bit to see if the MDI read completed.
+ * The timeout was increased because testing showed failures with
+ * the lower timeout.
+ */
+ for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
+ usleep_range(500, 1000);
+ mdic = rd32(IGC_MDIC);
+ if (mdic & IGC_MDIC_READY)
+ break;
+ }
+ if (!(mdic & IGC_MDIC_READY)) {
+ hw_dbg("MDI Read did not complete\n");
+ ret_val = -IGC_ERR_PHY;
+ goto out;
+ }
+ if (mdic & IGC_MDIC_ERROR) {
+ hw_dbg("MDI Error\n");
+ ret_val = -IGC_ERR_PHY;
+ goto out;
+ }
+ *data = (u16)mdic;
+
+out:
+ return ret_val;
+}
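
[Editor's note, illustrative only] The MDIC access above packs offset, PHY address and opcode into one command word and then polls for READY. A compact sketch of that packing; the bit positions are assumptions consistent with other Intel GbE parts, not definitions from this hunk.

	/* Illustrative sketch only (assumes <linux/types.h>).  Assumed layout:
	 * data[15:0], register offset at bit 16, PHY address at bit 21,
	 * opcode at bit 26 (2 = read, 1 = write).
	 */
	#define EX_MDIC_REG_SHIFT	16
	#define EX_MDIC_PHY_SHIFT	21
	#define EX_MDIC_OP_READ		(2u << 26)

	static inline u32 example_mdic_read_cmd(u32 offset, u32 phy_addr)
	{
		return (offset << EX_MDIC_REG_SHIFT) |
		       (phy_addr << EX_MDIC_PHY_SHIFT) |
		       EX_MDIC_OP_READ;
	}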
+
+/**
+ * igc_write_phy_reg_mdic - Write MDI control register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write to register at offset
+ *
+ * Writes data to MDI control register in the PHY at offset.
+ */
+static s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data)
+{
+ struct igc_phy_info *phy = &hw->phy;
+ u32 i, mdic = 0;
+ s32 ret_val = 0;
+
+ if (offset > MAX_PHY_REG_ADDRESS) {
+ hw_dbg("PHY Address %d is out of range\n", offset);
+ ret_val = -IGC_ERR_PARAM;
+ goto out;
+ }
+
+ /* Set up Op-code, Phy Address, and register offset in the MDI
+ * Control register. The MAC will take care of interfacing with the
+ * PHY to write the desired data.
+ */
+ mdic = (((u32)data) |
+ (offset << IGC_MDIC_REG_SHIFT) |
+ (phy->addr << IGC_MDIC_PHY_SHIFT) |
+ (IGC_MDIC_OP_WRITE));
+
+ wr32(IGC_MDIC, mdic);
+
+ /* Poll the ready bit to see if the MDI write completed.
+ * The timeout was increased because testing showed failures with
+ * the lower timeout.
+ */
+ for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
+ usleep_range(500, 1000);
+ mdic = rd32(IGC_MDIC);
+ if (mdic & IGC_MDIC_READY)
+ break;
+ }
+ if (!(mdic & IGC_MDIC_READY)) {
+ hw_dbg("MDI Write did not complete\n");
+ ret_val = -IGC_ERR_PHY;
+ goto out;
+ }
+ if (mdic & IGC_MDIC_ERROR) {
+ hw_dbg("MDI Error\n");
+ ret_val = -IGC_ERR_PHY;
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * __igc_access_xmdio_reg - Read/write XMDIO register
+ * @hw: pointer to the HW structure
+ * @address: XMDIO address to program
+ * @dev_addr: device address to program
+ * @data: pointer to value to read/write from/to the XMDIO address
+ * @read: boolean flag to indicate read or write
+ */
+static s32 __igc_access_xmdio_reg(struct igc_hw *hw, u16 address,
+ u8 dev_addr, u16 *data, bool read)
+{
+ s32 ret_val;
+
+ ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, dev_addr);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, address);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, IGC_MMDAC_FUNC_DATA |
+ dev_addr);
+ if (ret_val)
+ return ret_val;
+
+ if (read)
+ ret_val = hw->phy.ops.read_reg(hw, IGC_MMDAAD, data);
+ else
+ ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, *data);
+ if (ret_val)
+ return ret_val;
+
+ /* Restore the MMD access control register to its default (0) */
+ ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, 0);
+ if (ret_val)
+ return ret_val;
+
+ return ret_val;
+}
+
+/**
+ * igc_read_xmdio_reg - Read XMDIO register
+ * @hw: pointer to the HW structure
+ * @addr: XMDIO address to program
+ * @dev_addr: device address to program
+ * @data: value to be read from the EMI address
+ */
+static s32 igc_read_xmdio_reg(struct igc_hw *hw, u16 addr,
+ u8 dev_addr, u16 *data)
+{
+ return __igc_access_xmdio_reg(hw, addr, dev_addr, data, true);
+}
+
+/**
+ * igc_write_xmdio_reg - Write XMDIO register
+ * @hw: pointer to the HW structure
+ * @addr: XMDIO address to program
+ * @dev_addr: device address to program
+ * @data: value to be written to the XMDIO address
+ */
+static s32 igc_write_xmdio_reg(struct igc_hw *hw, u16 addr,
+ u8 dev_addr, u16 data)
+{
+ return __igc_access_xmdio_reg(hw, addr, dev_addr, &data, false);
+}
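
[Editor's note, illustrative only] Together, the two wrappers above give clause-45 style access over the clause-22 MMD access registers (13/14): set the device address, latch the register address, switch to data mode, then read or write, and finally restore register 13. A hypothetical in-file caller, with example device/register numbers (7.60, the EEE advertisement register), might look like:

	/* Illustrative sketch only: example MMD device/register numbers. */
	static s32 example_read_eee_adv(struct igc_hw *hw, u16 *val)
	{
		return igc_read_xmdio_reg(hw, 60 /* register */, 7 /* MMD */, val);
	}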
+
+/**
+ * igc_write_phy_reg_gpy - Write GPY PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore, if necessary, then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ */
+s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data)
+{
+ u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT;
+ s32 ret_val;
+
+ offset = offset & GPY_REG_MASK;
+
+ if (!dev_addr) {
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ ret_val = igc_write_phy_reg_mdic(hw, offset, data);
+ if (ret_val)
+ return ret_val;
+ hw->phy.ops.release(hw);
+ } else {
+ ret_val = igc_write_xmdio_reg(hw, (u16)offset, dev_addr,
+ data);
+ }
+
+ return ret_val;
+}
+
+/**
+ * igc_read_phy_reg_gpy - Read GPY PHY register
+ * @hw: pointer to the HW structure
+ * @offset: lower half is register offset to read from,
+ * upper half is MMD to use.
+ * @data: data to read at register offset
+ *
+ * Acquires semaphore, if necessary, then reads the data in the PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ */
+s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data)
+{
+ u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT;
+ s32 ret_val;
+
+ offset = offset & GPY_REG_MASK;
+
+ if (!dev_addr) {
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ ret_val = igc_read_phy_reg_mdic(hw, offset, data);
+ if (ret_val)
+ return ret_val;
+ hw->phy.ops.release(hw);
+ } else {
+ ret_val = igc_read_xmdio_reg(hw, (u16)offset, dev_addr,
+ data);
+ }
+
+ return ret_val;
+}
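
[Editor's note, illustrative only] The GPY accessors above split the offset into a clause-22 register (lower bits) and an optional MMD device address (upper bits, via GPY_MMD_MASK/GPY_MMD_SHIFT). A hypothetical helper that builds such an offset; the shift value is an assumption for illustration, not a definition from this patch. A zero dev_addr selects the plain MDIC path.

	/* Illustrative sketch only; EX_GPY_MMD_SHIFT is assumed. */
	#define EX_GPY_MMD_SHIFT	16

	static inline u32 example_gpy_offset(u8 dev_addr, u16 reg)
	{
		return ((u32)dev_addr << EX_GPY_MMD_SHIFT) | reg;
	}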
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.h b/drivers/net/ethernet/intel/igc/igc_phy.h
new file mode 100644
index 000000000000..25cba33de7e2
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_phy.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018 Intel Corporation */
+
+#ifndef _IGC_PHY_H_
+#define _IGC_PHY_H_
+
+#include "igc_mac.h"
+
+s32 igc_check_reset_block(struct igc_hw *hw);
+s32 igc_phy_hw_reset(struct igc_hw *hw);
+s32 igc_get_phy_id(struct igc_hw *hw);
+s32 igc_phy_has_link(struct igc_hw *hw, u32 iterations,
+ u32 usec_interval, bool *success);
+s32 igc_check_downshift(struct igc_hw *hw);
+s32 igc_setup_copper_link(struct igc_hw *hw);
+void igc_power_up_phy_copper(struct igc_hw *hw);
+void igc_power_down_phy_copper(struct igc_hw *hw);
+s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data);
+s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data);
+
+#endif
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
new file mode 100644
index 000000000000..a1bd3216c906
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018 Intel Corporation */
+
+#ifndef _IGC_REGS_H_
+#define _IGC_REGS_H_
+
+/* General Register Descriptions */
+#define IGC_CTRL 0x00000 /* Device Control - RW */
+#define IGC_STATUS 0x00008 /* Device Status - RO */
+#define IGC_EECD 0x00010 /* EEPROM/Flash Control - RW */
+#define IGC_CTRL_EXT 0x00018 /* Extended Device Control - RW */
+#define IGC_MDIC 0x00020 /* MDI Control - RW */
+#define IGC_MDICNFG 0x00E04 /* MDC/MDIO Configuration - RW */
+#define IGC_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
+
+/* Internal Packet Buffer Size Registers */
+#define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
+#define IGC_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */
+
+/* NVM Register Descriptions */
+#define IGC_EERD 0x12014 /* EEprom mode read - RW */
+#define IGC_EEWR 0x12018 /* EEprom mode write - RW */
+
+/* Flow Control Register Descriptions */
+#define IGC_FCAL 0x00028 /* FC Address Low - RW */
+#define IGC_FCAH 0x0002C /* FC Address High - RW */
+#define IGC_FCT 0x00030 /* FC Type - RW */
+#define IGC_FCTTV 0x00170 /* FC Transmit Timer - RW */
+#define IGC_FCRTL 0x02160 /* FC Receive Threshold Low - RW */
+#define IGC_FCRTH 0x02168 /* FC Receive Threshold High - RW */
+#define IGC_FCRTV 0x02460 /* FC Refresh Timer Value - RW */
+#define IGC_FCSTS 0x02464 /* FC Status - RO */
+
+/* PCIe Register Description */
+#define IGC_GCR 0x05B00 /* PCIe control- RW */
+
+/* Semaphore registers */
+#define IGC_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */
+#define IGC_SWSM 0x05B50 /* SW Semaphore */
+#define IGC_FWSM 0x05B54 /* FW Semaphore */
+
+/* Function Active and Power State to MNG */
+#define IGC_FACTPS 0x05B30
+
+/* Interrupt Register Description */
+#define IGC_EICS 0x01520 /* Ext. Interrupt Cause Set - WO */
+#define IGC_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */
+#define IGC_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */
+#define IGC_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */
+#define IGC_EIAM 0x01530 /* Ext. Interrupt Auto Mask - RW */
+#define IGC_ICR 0x01500 /* Intr Cause Read - RC/W1C */
+#define IGC_ICS 0x01504 /* Intr Cause Set - WO */
+#define IGC_IMS 0x01508 /* Intr Mask Set/Read - RW */
+#define IGC_IMC 0x0150C /* Intr Mask Clear - WO */
+#define IGC_IAM 0x01510 /* Intr Ack Auto Mask- RW */
+/* Intr Throttle - RW */
+#define IGC_EITR(_n) (0x01680 + (0x4 * (_n)))
+/* Interrupt Vector Allocation - RW */
+#define IGC_IVAR0 0x01700
+#define IGC_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
+#define IGC_GPIE 0x01514 /* General Purpose Intr Enable - RW */
+
+/* Interrupt Cause */
+#define IGC_ICRXPTC 0x04104 /* Rx Packet Timer Expire Count */
+#define IGC_ICRXATC 0x04108 /* Rx Absolute Timer Expire Count */
+#define IGC_ICTXPTC 0x0410C /* Tx Packet Timer Expire Count */
+#define IGC_ICTXATC 0x04110 /* Tx Absolute Timer Expire Count */
+#define IGC_ICTXQEC 0x04118 /* Tx Queue Empty Count */
+#define IGC_ICTXQMTC 0x0411C /* Tx Queue Min Threshold Count */
+#define IGC_ICRXDMTC 0x04120 /* Rx Descriptor Min Threshold Count */
+#define IGC_ICRXOC 0x04124 /* Receiver Overrun Count */
+
+#define IGC_CBTMPC 0x0402C /* Circuit Breaker TX Packet Count */
+#define IGC_HTDPMC 0x0403C /* Host Transmit Discarded Packets */
+#define IGC_CBRMPC 0x040FC /* Circuit Breaker RX Packet Count */
+#define IGC_RPTHC 0x04104 /* Rx Packets To Host */
+#define IGC_HGPTC 0x04118 /* Host Good Packets TX Count */
+#define IGC_HTCBDPC 0x04124 /* Host TX Circ.Breaker Drop Count */
+
+/* MSI-X Table Register Descriptions */
+#define IGC_PBACL 0x05B68 /* MSIx PBA Clear - R/W 1 to clear */
+
+/* Receive Register Descriptions */
+#define IGC_RCTL 0x00100 /* Rx Control - RW */
+#define IGC_SRRCTL(_n) (0x0C00C + ((_n) * 0x40))
+#define IGC_PSRTYPE(_i) (0x05480 + ((_i) * 4))
+#define IGC_RDBAL(_n) (0x0C000 + ((_n) * 0x40))
+#define IGC_RDBAH(_n) (0x0C004 + ((_n) * 0x40))
+#define IGC_RDLEN(_n) (0x0C008 + ((_n) * 0x40))
+#define IGC_RDH(_n) (0x0C010 + ((_n) * 0x40))
+#define IGC_RDT(_n) (0x0C018 + ((_n) * 0x40))
+#define IGC_RXDCTL(_n) (0x0C028 + ((_n) * 0x40))
+#define IGC_RQDPC(_n) (0x0C030 + ((_n) * 0x40))
+#define IGC_RXCSUM 0x05000 /* Rx Checksum Control - RW */
+#define IGC_RLPML 0x05004 /* Rx Long Packet Max Length */
+#define IGC_RFCTL 0x05008 /* Receive Filter Control*/
+#define IGC_MTA 0x05200 /* Multicast Table Array - RW Array */
+#define IGC_UTA 0x0A000 /* Unicast Table Array - RW */
+#define IGC_RAL(_n) (0x05400 + ((_n) * 0x08))
+#define IGC_RAH(_n) (0x05404 + ((_n) * 0x08))
+
+/* Transmit Register Descriptions */
+#define IGC_TCTL 0x00400 /* Tx Control - RW */
+#define IGC_TIPG 0x00410 /* Tx Inter-packet gap - RW */
+#define IGC_TDBAL(_n) (0x0E000 + ((_n) * 0x40))
+#define IGC_TDBAH(_n) (0x0E004 + ((_n) * 0x40))
+#define IGC_TDLEN(_n) (0x0E008 + ((_n) * 0x40))
+#define IGC_TDH(_n) (0x0E010 + ((_n) * 0x40))
+#define IGC_TDT(_n) (0x0E018 + ((_n) * 0x40))
+#define IGC_TXDCTL(_n) (0x0E028 + ((_n) * 0x40))
+
+/* MMD Register Descriptions */
+#define IGC_MMDAC 13 /* MMD Access Control */
+#define IGC_MMDAAD 14 /* MMD Access Address/Data */
+
+/* Good transmitted packets counter registers */
+#define IGC_PQGPTC(_n) (0x010014 + (0x100 * (_n)))
+
+/* Statistics Register Descriptions */
+#define IGC_CRCERRS 0x04000 /* CRC Error Count - R/clr */
+#define IGC_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
+#define IGC_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
+#define IGC_RXERRC 0x0400C /* Receive Error Count - R/clr */
+#define IGC_MPC 0x04010 /* Missed Packet Count - R/clr */
+#define IGC_SCC 0x04014 /* Single Collision Count - R/clr */
+#define IGC_ECOL 0x04018 /* Excessive Collision Count - R/clr */
+#define IGC_MCC 0x0401C /* Multiple Collision Count - R/clr */
+#define IGC_LATECOL 0x04020 /* Late Collision Count - R/clr */
+#define IGC_COLC 0x04028 /* Collision Count - R/clr */
+#define IGC_DC 0x04030 /* Defer Count - R/clr */
+#define IGC_TNCRS 0x04034 /* Tx-No CRS - R/clr */
+#define IGC_SEC 0x04038 /* Sequence Error Count - R/clr */
+#define IGC_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
+#define IGC_RLEC 0x04040 /* Receive Length Error Count - R/clr */
+#define IGC_XONRXC 0x04048 /* XON Rx Count - R/clr */
+#define IGC_XONTXC 0x0404C /* XON Tx Count - R/clr */
+#define IGC_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */
+#define IGC_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */
+#define IGC_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */
+#define IGC_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */
+#define IGC_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */
+#define IGC_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */
+#define IGC_PRC511 0x04068 /* Packets Rx (256-511 bytes) - R/clr */
+#define IGC_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */
+#define IGC_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */
+#define IGC_GPRC 0x04074 /* Good Packets Rx Count - R/clr */
+#define IGC_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */
+#define IGC_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */
+#define IGC_GPTC 0x04080 /* Good Packets Tx Count - R/clr */
+#define IGC_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */
+#define IGC_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */
+#define IGC_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */
+#define IGC_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */
+#define IGC_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */
+#define IGC_RUC 0x040A4 /* Rx Undersize Count - R/clr */
+#define IGC_RFC 0x040A8 /* Rx Fragment Count - R/clr */
+#define IGC_ROC 0x040AC /* Rx Oversize Count - R/clr */
+#define IGC_RJC 0x040B0 /* Rx Jabber Count - R/clr */
+#define IGC_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */
+#define IGC_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */
+#define IGC_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */
+#define IGC_TORL 0x040C0 /* Total Octets Rx Low - R/clr */
+#define IGC_TORH 0x040C4 /* Total Octets Rx High - R/clr */
+#define IGC_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */
+#define IGC_TOTH 0x040CC /* Total Octets Tx High - R/clr */
+#define IGC_TPR 0x040D0 /* Total Packets Rx - R/clr */
+#define IGC_TPT 0x040D4 /* Total Packets Tx - R/clr */
+#define IGC_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */
+#define IGC_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */
+#define IGC_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */
+#define IGC_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */
+#define IGC_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */
+#define IGC_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */
+#define IGC_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */
+#define IGC_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */
+#define IGC_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */
+#define IGC_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */
+#define IGC_IAC 0x04100 /* Interrupt Assertion Count */
+#define IGC_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */
+#define IGC_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */
+#define IGC_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */
+#define IGC_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */
+#define IGC_RPTHC 0x04104 /* Rx Packets To Host */
+#define IGC_HGPTC 0x04118 /* Host Good Packets Tx Count */
+#define IGC_RXDMTC 0x04120 /* Rx Descriptor Minimum Threshold Count */
+#define IGC_HGORCL 0x04128 /* Host Good Octets Received Count Low */
+#define IGC_HGORCH 0x0412C /* Host Good Octets Received Count High */
+#define IGC_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */
+#define IGC_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */
+#define IGC_LENERRS 0x04138 /* Length Errors Count */
+#define IGC_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */
+#define IGC_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */
+
+/* Management registers */
+#define IGC_MANC 0x05820 /* Management Control - RW */
+
+/* Shadow Ram Write Register - RW */
+#define IGC_SRWR 0x12018
+
+/* forward declaration */
+struct igc_hw;
+u32 igc_rd32(struct igc_hw *hw, u32 reg);
+
+/* write operations, indexed using DWORDS */
+#define wr32(reg, val) \
+do { \
+ u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \
+ if (!IGC_REMOVED(hw_addr)) \
+ writel((val), &hw_addr[(reg)]); \
+} while (0)
+
+#define rd32(reg) (igc_rd32(hw, reg))
+
+#define wrfl() ((void)rd32(IGC_STATUS))
+
+#define array_wr32(reg, offset, value) \
+ wr32((reg) + ((offset) << 2), (value))
+
+#define array_rd32(reg, offset) (igc_rd32(hw, (reg) + ((offset) << 2)))
+
+#endif
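
[Editor's note, illustrative only] A usage note for the accessors above: wr32()/rd32() rely on a local 'struct igc_hw *hw' being in scope. A hypothetical snippet that programs receive-address slot 0; a real caller would also set the address-valid bit in RAH.

	/* Illustrative sketch only. */
	static void example_set_rar0(struct igc_hw *hw, const u8 *mac)
	{
		u32 ral = mac[0] | (mac[1] << 8) | (mac[2] << 16) | ((u32)mac[3] << 24);
		u32 rah = mac[4] | (mac[5] << 8);	/* real code also sets the AV bit */

		wr32(IGC_RAL(0), ral);
		wrfl();
		wr32(IGC_RAH(0), rah);
		wrfl();
	}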
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 5414685189ce..4fb0d9e3f2da 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -8,7 +8,8 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
- ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o
+ ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o \
+ ixgbe_xsk.o
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o
@@ -16,4 +17,4 @@ ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o
ixgbe-$(CONFIG_DEBUG_FS) += ixgbe_debugfs.o
ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o
-ixgbe-$(CONFIG_XFRM_OFFLOAD) += ixgbe_ipsec.o
+ixgbe-$(CONFIG_IXGBE_IPSEC) += ixgbe_ipsec.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 5c6fd42e90ed..143bdd5ee2a0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -30,7 +30,6 @@
#include "ixgbe_ipsec.h"
#include <net/xdp.h>
-#include <net/busy_poll.h>
/* common prefix used by pr_<> macros */
#undef pr_fmt
@@ -228,13 +227,17 @@ struct ixgbe_tx_buffer {
struct ixgbe_rx_buffer {
struct sk_buff *skb;
dma_addr_t dma;
- struct page *page;
-#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
- __u32 page_offset;
-#else
- __u16 page_offset;
-#endif
- __u16 pagecnt_bias;
+ union {
+ struct {
+ struct page *page;
+ __u32 page_offset;
+ __u16 pagecnt_bias;
+ };
+ struct {
+ void *addr;
+ u64 handle;
+ };
+ };
};
struct ixgbe_queue_stats {
@@ -271,6 +274,7 @@ enum ixgbe_ring_state_t {
__IXGBE_TX_DETECT_HANG,
__IXGBE_HANG_CHECK_ARMED,
__IXGBE_TX_XDP_RING,
+ __IXGBE_TX_DISABLED,
};
#define ring_uses_build_skb(ring) \
@@ -347,6 +351,10 @@ struct ixgbe_ring {
struct ixgbe_rx_queue_stats rx_stats;
};
struct xdp_rxq_info xdp_rxq;
+ struct xdp_umem *xsk_umem;
+ struct zero_copy_allocator zca; /* ZC allocator anchor */
+ u16 ring_idx; /* {rx,tx,xdp}_ring back reference idx */
+ u16 rx_buf_len;
} ____cacheline_internodealigned_in_smp;
enum ixgbe_ring_f_enum {
@@ -761,9 +769,14 @@ struct ixgbe_adapter {
#define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */
u32 *rss_key;
-#ifdef CONFIG_XFRM_OFFLOAD
+#ifdef CONFIG_IXGBE_IPSEC
struct ixgbe_ipsec *ipsec;
-#endif /* CONFIG_XFRM_OFFLOAD */
+#endif /* CONFIG_IXGBE_IPSEC */
+
+ /* AF_XDP zero-copy */
+ struct xdp_umem **xsk_umems;
+ u16 num_xsk_umems_used;
+ u16 num_xsk_umems;
};
static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
@@ -995,7 +1008,7 @@ void ixgbe_store_key(struct ixgbe_adapter *adapter);
void ixgbe_store_reta(struct ixgbe_adapter *adapter);
s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
-#ifdef CONFIG_XFRM_OFFLOAD
+#ifdef CONFIG_IXGBE_IPSEC
void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter);
void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter);
void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter);
@@ -1023,5 +1036,5 @@ static inline int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter,
u32 *mbuf, u32 vf) { return -EACCES; }
static inline int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter,
u32 *mbuf, u32 vf) { return -EACCES; }
-#endif /* CONFIG_XFRM_OFFLOAD */
+#endif /* CONFIG_IXGBE_IPSEC */
#endif /* _IXGBE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 970f71d5da04..0bd1294ba517 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -3485,17 +3485,6 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
}
/**
- * ixgbe_fw_recovery_mode - Check if in FW NVM recovery mode
- * @hw: pointer to hardware structure
- */
-bool ixgbe_fw_recovery_mode(struct ixgbe_hw *hw)
-{
- if (hw->mac.ops.fw_recovery_mode)
- return hw->mac.ops.fw_recovery_mode(hw);
- return false;
-}
-
-/**
* ixgbe_get_device_caps_generic - Get additional device capabilities
* @hw: pointer to hardware structure
* @device_caps: the EEPROM word with the extra device capabilities
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index d361f570ca37..62e6499e4146 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -1055,7 +1055,7 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
int txr_remaining = adapter->num_tx_queues;
int xdp_remaining = adapter->num_xdp_queues;
int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
- int err;
+ int err, i;
/* only one q_vector if MSI-X is disabled. */
if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
@@ -1097,6 +1097,21 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
xdp_idx += xqpv;
}
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ if (adapter->rx_ring[i])
+ adapter->rx_ring[i]->ring_idx = i;
+ }
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ if (adapter->tx_ring[i])
+ adapter->tx_ring[i]->ring_idx = i;
+ }
+
+ for (i = 0; i < adapter->num_xdp_queues; i++) {
+ if (adapter->xdp_ring[i])
+ adapter->xdp_ring[i]->ring_idx = i;
+ }
+
return 0;
err_out:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 140e87a10ff5..113b38e0defb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -34,12 +34,14 @@
#include <net/tc_act/tc_mirred.h>
#include <net/vxlan.h>
#include <net/mpls.h>
+#include <net/xdp_sock.h>
#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_sriov.h"
#include "ixgbe_model.h"
+#include "ixgbe_txrx_common.h"
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
@@ -893,8 +895,8 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
}
}
-static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
- u64 qmask)
+void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
+ u64 qmask)
{
u32 mask;
@@ -1673,9 +1675,9 @@ static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
* order to populate the hash, checksum, VLAN, timestamp, protocol, and
* other fields within the skb.
**/
-static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
- union ixgbe_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
{
struct net_device *dev = rx_ring->netdev;
u32 flags = rx_ring->q_vector->adapter->flags;
@@ -1708,8 +1710,8 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
skb->protocol = eth_type_trans(skb, dev);
}
-static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
- struct sk_buff *skb)
+void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
+ struct sk_buff *skb)
{
napi_gro_receive(&q_vector->napi, skb);
}
@@ -1868,9 +1870,9 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
*
* Returns true if an error was encountered and skb was freed.
**/
-static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
- union ixgbe_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
{
struct net_device *netdev = rx_ring->netdev;
@@ -2186,14 +2188,6 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
return skb;
}
-#define IXGBE_XDP_PASS 0
-#define IXGBE_XDP_CONSUMED BIT(0)
-#define IXGBE_XDP_TX BIT(1)
-#define IXGBE_XDP_REDIR BIT(2)
-
-static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
- struct xdp_frame *xdpf);
-
static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
struct ixgbe_ring *rx_ring,
struct xdp_buff *xdp)
@@ -3167,7 +3161,11 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
#endif
ixgbe_for_each_ring(ring, q_vector->tx) {
- if (!ixgbe_clean_tx_irq(q_vector, ring, budget))
+ bool wd = ring->xsk_umem ?
+ ixgbe_clean_xdp_tx_irq(q_vector, ring, budget) :
+ ixgbe_clean_tx_irq(q_vector, ring, budget);
+
+ if (!wd)
clean_complete = false;
}
@@ -3183,7 +3181,10 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
per_ring_budget = budget;
ixgbe_for_each_ring(ring, q_vector->rx) {
- int cleaned = ixgbe_clean_rx_irq(q_vector, ring,
+ int cleaned = ring->xsk_umem ?
+ ixgbe_clean_rx_irq_zc(q_vector, ring,
+ per_ring_budget) :
+ ixgbe_clean_rx_irq(q_vector, ring,
per_ring_budget);
work_done += cleaned;
@@ -3196,11 +3197,13 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
return budget;
/* all work done, exit the polling mode */
- napi_complete_done(napi, work_done);
- if (adapter->rx_itr_setting & 1)
- ixgbe_set_itr(q_vector);
- if (!test_bit(__IXGBE_DOWN, &adapter->state))
- ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));
+ if (likely(napi_complete_done(napi, work_done))) {
+ if (adapter->rx_itr_setting & 1)
+ ixgbe_set_itr(q_vector);
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ ixgbe_irq_enable_queues(adapter,
+ BIT_ULL(q_vector->v_idx));
+ }
return min(work_done, budget - 1);
}
@@ -3473,6 +3476,10 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
u32 txdctl = IXGBE_TXDCTL_ENABLE;
u8 reg_idx = ring->reg_idx;
+ ring->xsk_umem = NULL;
+ if (ring_is_xdp(ring))
+ ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
+
/* disable queue to avoid issues while updating state */
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
IXGBE_WRITE_FLUSH(hw);
@@ -3577,12 +3584,18 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
else
mtqc |= IXGBE_MTQC_64VF;
} else {
- if (tcs > 4)
+ if (tcs > 4) {
mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
- else if (tcs > 1)
+ } else if (tcs > 1) {
mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
- else
- mtqc = IXGBE_MTQC_64Q_1PB;
+ } else {
+ u8 max_txq = adapter->num_tx_queues +
+ adapter->num_xdp_queues;
+ if (max_txq > 63)
+ mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
+ else
+ mtqc = IXGBE_MTQC_64Q_1PB;
+ }
}
IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
@@ -3705,10 +3718,27 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
/* configure the packet buffer length */
- if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state))
+ if (rx_ring->xsk_umem) {
+ u32 xsk_buf_len = rx_ring->xsk_umem->chunk_size_nohr -
+ XDP_PACKET_HEADROOM;
+
+ /* If the MAC supports setting RXDCTL.RLPML, the
+ * SRRCTL[n].BSIZEPKT is set to PAGE_SIZE and
+ * RXDCTL.RLPML is set to the actual UMEM buffer
+ * size. If not, then we are stuck with a 1k buffer
+ * size resolution. In this case frames larger than
+ * the UMEM buffer size viewed in a 1k resolution will
+ * be dropped.
+ */
+ if (hw->mac.type != ixgbe_mac_82599EB)
+ srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ else
+ srrctl |= xsk_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ } else if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) {
srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
- else
+ } else {
srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ }
/* configure descriptor type */
srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
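
[Editor's note, illustrative only] In the zero-copy branch above the packet buffer length is derived directly from the UMEM chunk geometry. A small worked sketch with example numbers: a 2048-byte chunk, no extra UMEM headroom (so chunk_size_nohr is 2048) and an XDP_PACKET_HEADROOM of 256 leaves 1792 usable bytes.

	/* Illustrative sketch only: e.g. 2048 - 256 = 1792 usable bytes. */
	static inline u32 example_xsk_buf_len(u32 chunk_size_nohr)
	{
		return chunk_size_nohr - 256 /* XDP_PACKET_HEADROOM */;
	}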
@@ -4031,6 +4061,19 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
u32 rxdctl;
u8 reg_idx = ring->reg_idx;
+ xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+ ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
+ if (ring->xsk_umem) {
+ ring->zca.free = ixgbe_zca_free;
+ WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_ZERO_COPY,
+ &ring->zca));
+
+ } else {
+ WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED, NULL));
+ }
+
/* disable queue to avoid use of these values while updating state */
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
rxdctl &= ~IXGBE_RXDCTL_ENABLE;
@@ -4080,6 +4123,17 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
#endif
}
+ if (ring->xsk_umem && hw->mac.type != ixgbe_mac_82599EB) {
+ u32 xsk_buf_len = ring->xsk_umem->chunk_size_nohr -
+ XDP_PACKET_HEADROOM;
+
+ rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
+ IXGBE_RXDCTL_RLPML_EN);
+ rxdctl |= xsk_buf_len | IXGBE_RXDCTL_RLPML_EN;
+
+ ring->rx_buf_len = xsk_buf_len;
+ }
+
/* initialize rx_buffer_info */
memset(ring->rx_buffer_info, 0,
sizeof(struct ixgbe_rx_buffer) * ring->count);
@@ -4093,7 +4147,10 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
ixgbe_rx_desc_queue_enable(adapter, ring);
- ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
+ if (ring->xsk_umem)
+ ixgbe_alloc_rx_buffers_zc(ring, ixgbe_desc_unused(ring));
+ else
+ ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
}
static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
@@ -5173,6 +5230,7 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct hlist_node *node2;
struct ixgbe_fdir_filter *filter;
+ u64 action;
spin_lock(&adapter->fdir_perfect_lock);
@@ -5181,12 +5239,17 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
hlist_for_each_entry_safe(filter, node2,
&adapter->fdir_filter_list, fdir_node) {
+ action = filter->action;
+ if (action != IXGBE_FDIR_DROP_QUEUE && action != 0)
+ action =
+ (action >> ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) - 1;
+
ixgbe_fdir_write_perfect_filter_82599(hw,
&filter->filter,
filter->sw_idx,
- (filter->action == IXGBE_FDIR_DROP_QUEUE) ?
+ (action == IXGBE_FDIR_DROP_QUEUE) ?
IXGBE_FDIR_DROP_QUEUE :
- adapter->rx_ring[filter->action]->reg_idx);
+ adapter->rx_ring[action]->reg_idx);
}
spin_unlock(&adapter->fdir_perfect_lock);
@@ -5201,6 +5264,11 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
u16 i = rx_ring->next_to_clean;
struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
+ if (rx_ring->xsk_umem) {
+ ixgbe_xsk_clean_rx_ring(rx_ring);
+ goto skip_free;
+ }
+
/* Free all the Rx ring sk_buffs */
while (i != rx_ring->next_to_alloc) {
if (rx_buffer->skb) {
@@ -5239,6 +5307,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
}
}
+skip_free:
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
@@ -5883,6 +5952,11 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
u16 i = tx_ring->next_to_clean;
struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
+ if (tx_ring->xsk_umem) {
+ ixgbe_xsk_clean_tx_ring(tx_ring);
+ goto out;
+ }
+
while (i != tx_ring->next_to_use) {
union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
@@ -5934,6 +6008,7 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
if (!ring_is_xdp(tx_ring))
netdev_tx_reset_queue(txring_txq(tx_ring));
+out:
/* reset next_to_use and next_to_clean */
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
@@ -6434,7 +6509,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
struct device *dev = rx_ring->dev;
int orig_node = dev_to_node(dev);
int ring_node = -1;
- int size, err;
+ int size;
size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
@@ -6471,13 +6546,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
rx_ring->queue_index) < 0)
goto err;
- err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq,
- MEM_TYPE_PAGE_SHARED, NULL);
- if (err) {
- xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
- goto err;
- }
-
rx_ring->xdp_prog = adapter->xdp_prog;
return 0;
@@ -8102,9 +8170,6 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
return __ixgbe_maybe_stop_tx(tx_ring, size);
}
-#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
- IXGBE_TXD_CMD_RS)
-
static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
struct ixgbe_tx_buffer *first,
const u8 hdr_len)
@@ -8457,8 +8522,8 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
}
#endif
-static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
- struct xdp_frame *xdpf)
+int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
+ struct xdp_frame *xdpf)
{
struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
struct ixgbe_tx_buffer *tx_buffer;
@@ -8629,7 +8694,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
#endif /* IXGBE_FCOE */
-#ifdef CONFIG_XFRM_OFFLOAD
+#ifdef CONFIG_IXGBE_IPSEC
if (skb->sp && !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
goto out_drop;
#endif
@@ -8680,6 +8745,8 @@ static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
return NETDEV_TX_OK;
tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping];
+ if (unlikely(test_bit(__IXGBE_TX_DISABLED, &tx_ring->state)))
+ return NETDEV_TX_BUSY;
return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
}
@@ -10123,7 +10190,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
* the TSO, so it's the exception.
*/
if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) {
-#ifdef CONFIG_XFRM_OFFLOAD
+#ifdef CONFIG_IXGBE_IPSEC
if (!skb->sp)
#endif
features &= ~NETIF_F_TSO;
@@ -10191,12 +10258,19 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
xdp->prog_id = adapter->xdp_prog ?
adapter->xdp_prog->aux->id : 0;
return 0;
+ case XDP_QUERY_XSK_UMEM:
+ return ixgbe_xsk_umem_query(adapter, &xdp->xsk.umem,
+ xdp->xsk.queue_id);
+ case XDP_SETUP_XSK_UMEM:
+ return ixgbe_xsk_umem_setup(adapter, xdp->xsk.umem,
+ xdp->xsk.queue_id);
+
default:
return -EINVAL;
}
}
-static void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring)
+void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring)
{
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch.
@@ -10226,6 +10300,9 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n,
if (unlikely(!ring))
return -ENXIO;
+ if (unlikely(test_bit(__IXGBE_TX_DISABLED, &ring->state)))
+ return -ENXIO;
+
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
int err;
@@ -10287,8 +10364,162 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_features_check = ixgbe_features_check,
.ndo_bpf = ixgbe_xdp,
.ndo_xdp_xmit = ixgbe_xdp_xmit,
+ .ndo_xsk_async_xmit = ixgbe_xsk_async_xmit,
};
+static void ixgbe_disable_txr_hw(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *tx_ring)
+{
+ unsigned long wait_delay, delay_interval;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u8 reg_idx = tx_ring->reg_idx;
+ int wait_loop;
+ u32 txdctl;
+
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
+
+ /* delay mechanism from ixgbe_disable_tx */
+ delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
+
+ wait_loop = IXGBE_MAX_RX_DESC_POLL;
+ wait_delay = delay_interval;
+
+ while (wait_loop--) {
+ usleep_range(wait_delay, wait_delay + 10);
+ wait_delay += delay_interval * 2;
+ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
+
+ if (!(txdctl & IXGBE_TXDCTL_ENABLE))
+ return;
+ }
+
+ e_err(drv, "TXDCTL.ENABLE not cleared within the polling period\n");
+}
+
+static void ixgbe_disable_txr(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *tx_ring)
+{
+ set_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
+ ixgbe_disable_txr_hw(adapter, tx_ring);
+}
+
+static void ixgbe_disable_rxr_hw(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *rx_ring)
+{
+ unsigned long wait_delay, delay_interval;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u8 reg_idx = rx_ring->reg_idx;
+ int wait_loop;
+ u32 rxdctl;
+
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+ rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+ rxdctl |= IXGBE_RXDCTL_SWFLSH;
+
+ /* write value back with RXDCTL.ENABLE bit cleared */
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
+
+ /* RXDCTL.EN may not change on 82598 if link is down, so skip it */
+ if (hw->mac.type == ixgbe_mac_82598EB &&
+ !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
+ return;
+
+ /* delay mechanism from ixgbe_disable_rx */
+ delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
+
+ wait_loop = IXGBE_MAX_RX_DESC_POLL;
+ wait_delay = delay_interval;
+
+ while (wait_loop--) {
+ usleep_range(wait_delay, wait_delay + 10);
+ wait_delay += delay_interval * 2;
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+
+ if (!(rxdctl & IXGBE_RXDCTL_ENABLE))
+ return;
+ }
+
+ e_err(drv, "RXDCTL.ENABLE not cleared within the polling period\n");
+}
+
+static void ixgbe_reset_txr_stats(struct ixgbe_ring *tx_ring)
+{
+ memset(&tx_ring->stats, 0, sizeof(tx_ring->stats));
+ memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats));
+}
+
+static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring)
+{
+ memset(&rx_ring->stats, 0, sizeof(rx_ring->stats));
+ memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats));
+}
+
+/**
+ * ixgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings
+ * @adapter: adapter structure
+ * @ring: ring index
+ *
+ * This function disables a certain Rx/Tx/XDP Tx ring. The function
+ * assumes that the netdev is running.
+ **/
+void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
+{
+ struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring;
+
+ rx_ring = adapter->rx_ring[ring];
+ tx_ring = adapter->tx_ring[ring];
+ xdp_ring = adapter->xdp_ring[ring];
+
+ ixgbe_disable_txr(adapter, tx_ring);
+ if (xdp_ring)
+ ixgbe_disable_txr(adapter, xdp_ring);
+ ixgbe_disable_rxr_hw(adapter, rx_ring);
+
+ if (xdp_ring)
+ synchronize_sched();
+
+ /* Rx/Tx/XDP Tx share the same napi context. */
+ napi_disable(&rx_ring->q_vector->napi);
+
+ ixgbe_clean_tx_ring(tx_ring);
+ if (xdp_ring)
+ ixgbe_clean_tx_ring(xdp_ring);
+ ixgbe_clean_rx_ring(rx_ring);
+
+ ixgbe_reset_txr_stats(tx_ring);
+ if (xdp_ring)
+ ixgbe_reset_txr_stats(xdp_ring);
+ ixgbe_reset_rxr_stats(rx_ring);
+}
+
+/**
+ * ixgbe_txrx_ring_enable - Enable Rx/Tx/XDP Tx rings
+ * @adapter: adapter structure
+ * @ring: ring index
+ *
+ * This function enables a certain Rx/Tx/XDP Tx ring. The function
+ * assumes that the netdev is running.
+ **/
+void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
+{
+ struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring;
+
+ rx_ring = adapter->rx_ring[ring];
+ tx_ring = adapter->tx_ring[ring];
+ xdp_ring = adapter->xdp_ring[ring];
+
+ /* Rx/Tx/XDP Tx share the same napi context. */
+ napi_enable(&rx_ring->q_vector->napi);
+
+ ixgbe_configure_tx_ring(adapter, tx_ring);
+ if (xdp_ring)
+ ixgbe_configure_tx_ring(adapter, xdp_ring);
+ ixgbe_configure_rx_ring(adapter, rx_ring);
+
+ clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
+ clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
+}
+
/**
* ixgbe_enumerate_functions - Get the number of ports this device has
* @adapter: adapter structure
@@ -10652,7 +10883,7 @@ skip_sriov:
if (hw->mac.type >= ixgbe_mac_82599EB)
netdev->features |= NETIF_F_SCTP_CRC;
-#ifdef CONFIG_XFRM_OFFLOAD
+#ifdef CONFIG_IXGBE_IPSEC
#define IXGBE_ESP_FEATURES (NETIF_F_HW_ESP | \
NETIF_F_HW_ESP_TX_CSUM | \
NETIF_F_GSO_ESP)
@@ -11091,8 +11322,6 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
/* Free device reference count */
pci_dev_put(vfdev);
}
-
- pci_cleanup_aer_uncorrect_error_status(pdev);
}
/*
@@ -11142,7 +11371,6 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
pci_ers_result_t result;
- int err;
if (pci_enable_device_mem(pdev)) {
e_err(probe, "Cannot re-enable PCI device after reset.\n");
@@ -11162,13 +11390,6 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
result = PCI_ERS_RESULT_RECOVERED;
}
- err = pci_cleanup_aer_uncorrect_error_status(pdev);
- if (err) {
- e_dev_err("pci_cleanup_aer_uncorrect_error_status "
- "failed 0x%0x\n", err);
- /* non-fatal, continue */
- }
-
return result;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index af25a8fffeb8..5dacfc870259 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -722,8 +722,10 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
adapter->default_up, vf);
- if (vfinfo->spoofchk_enabled)
+ if (vfinfo->spoofchk_enabled) {
hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
+ hw->mac.ops.set_mac_anti_spoofing(hw, true, vf);
+ }
}
/* reset multicast table array for vf */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
new file mode 100644
index 000000000000..53d4089f5644
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2018 Intel Corporation. */
+
+#ifndef _IXGBE_TXRX_COMMON_H_
+#define _IXGBE_TXRX_COMMON_H_
+
+#define IXGBE_XDP_PASS 0
+#define IXGBE_XDP_CONSUMED BIT(0)
+#define IXGBE_XDP_TX BIT(1)
+#define IXGBE_XDP_REDIR BIT(2)
+
+#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
+ IXGBE_TXD_CMD_RS)
+
+int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
+ struct xdp_frame *xdpf);
+bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb);
+void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb);
+void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
+ struct sk_buff *skb);
+void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring);
+void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, u64 qmask);
+
+void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring);
+void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring);
+
+struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring);
+int ixgbe_xsk_umem_query(struct ixgbe_adapter *adapter, struct xdp_umem **umem,
+ u16 qid);
+int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,
+ u16 qid);
+
+void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle);
+
+void ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count);
+int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
+ struct ixgbe_ring *rx_ring,
+ const int budget);
+void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring);
+bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
+ struct ixgbe_ring *tx_ring, int napi_budget);
+int ixgbe_xsk_async_xmit(struct net_device *dev, u32 queue_id);
+void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring);
+
+#endif /* #define _IXGBE_TXRX_COMMON_H_ */
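
[Editor's note, illustrative only] The IXGBE_XDP_* values above are OR-able per-packet verdict flags; a receive loop typically ORs them into one status word and performs the deferred work once per poll. A hedged sketch of that pattern, assuming xdp_do_flush_map() as the redirect-flush helper of this kernel generation:

	/* Illustrative sketch only: flush deferred XDP work after a poll. */
	static inline void example_finish_xdp(struct ixgbe_adapter *adapter,
					      unsigned int xdp_xmit)
	{
		if (xdp_xmit & IXGBE_XDP_REDIR)
			xdp_do_flush_map();

		if (xdp_xmit & IXGBE_XDP_TX) {
			struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];

			ixgbe_xdp_ring_update_tail(ring);
		}
	}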
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
new file mode 100644
index 000000000000..65c3e2c979d4
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -0,0 +1,801 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2018 Intel Corporation. */
+
+#include <linux/bpf_trace.h>
+#include <net/xdp_sock.h>
+#include <net/xdp.h>
+
+#include "ixgbe.h"
+#include "ixgbe_txrx_common.h"
+
+struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring)
+{
+ bool xdp_on = READ_ONCE(adapter->xdp_prog);
+ int qid = ring->ring_idx;
+
+ if (!xdp_on || !adapter->xsk_umems ||
+ qid >= adapter->num_xsk_umems || !adapter->xsk_umems[qid])
+ return NULL;
+
+ return adapter->xsk_umems[qid];
+}
+
+static int ixgbe_alloc_xsk_umems(struct ixgbe_adapter *adapter)
+{
+ if (adapter->xsk_umems)
+ return 0;
+
+ adapter->num_xsk_umems_used = 0;
+ adapter->num_xsk_umems = adapter->num_rx_queues;
+ adapter->xsk_umems = kcalloc(adapter->num_xsk_umems,
+ sizeof(*adapter->xsk_umems),
+ GFP_KERNEL);
+ if (!adapter->xsk_umems) {
+ adapter->num_xsk_umems = 0;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int ixgbe_add_xsk_umem(struct ixgbe_adapter *adapter,
+ struct xdp_umem *umem,
+ u16 qid)
+{
+ int err;
+
+ err = ixgbe_alloc_xsk_umems(adapter);
+ if (err)
+ return err;
+
+ adapter->xsk_umems[qid] = umem;
+ adapter->num_xsk_umems_used++;
+
+ return 0;
+}
+
+static void ixgbe_remove_xsk_umem(struct ixgbe_adapter *adapter, u16 qid)
+{
+ adapter->xsk_umems[qid] = NULL;
+ adapter->num_xsk_umems_used--;
+
+ if (adapter->num_xsk_umems == 0) {
+ kfree(adapter->xsk_umems);
+ adapter->xsk_umems = NULL;
+ adapter->num_xsk_umems = 0;
+ }
+}
+
+static int ixgbe_xsk_umem_dma_map(struct ixgbe_adapter *adapter,
+ struct xdp_umem *umem)
+{
+ struct device *dev = &adapter->pdev->dev;
+ unsigned int i, j;
+ dma_addr_t dma;
+
+ for (i = 0; i < umem->npgs; i++) {
+ dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);
+ if (dma_mapping_error(dev, dma))
+ goto out_unmap;
+
+ umem->pages[i].dma = dma;
+ }
+
+ return 0;
+
+out_unmap:
+ for (j = 0; j < i; j++) {
+ dma_unmap_page_attrs(dev, umem->pages[j].dma, PAGE_SIZE,
+ DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);
+ umem->pages[j].dma = 0;
+ }
+
+ return -1;
+}
+
+static void ixgbe_xsk_umem_dma_unmap(struct ixgbe_adapter *adapter,
+ struct xdp_umem *umem)
+{
+ struct device *dev = &adapter->pdev->dev;
+ unsigned int i;
+
+ for (i = 0; i < umem->npgs; i++) {
+ dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
+ DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);
+
+ umem->pages[i].dma = 0;
+ }
+}
+
+static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
+ struct xdp_umem *umem,
+ u16 qid)
+{
+ struct xdp_umem_fq_reuse *reuseq;
+ bool if_running;
+ int err;
+
+ if (qid >= adapter->num_rx_queues)
+ return -EINVAL;
+
+ if (adapter->xsk_umems) {
+ if (qid >= adapter->num_xsk_umems)
+ return -EINVAL;
+ if (adapter->xsk_umems[qid])
+ return -EBUSY;
+ }
+
+ reuseq = xsk_reuseq_prepare(adapter->rx_ring[0]->count);
+ if (!reuseq)
+ return -ENOMEM;
+
+ xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
+
+ err = ixgbe_xsk_umem_dma_map(adapter, umem);
+ if (err)
+ return err;
+
+ if_running = netif_running(adapter->netdev) &&
+ READ_ONCE(adapter->xdp_prog);
+
+ if (if_running)
+ ixgbe_txrx_ring_disable(adapter, qid);
+
+ err = ixgbe_add_xsk_umem(adapter, umem, qid);
+
+ if (if_running)
+ ixgbe_txrx_ring_enable(adapter, qid);
+
+ return err;
+}
+
+static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
+{
+ bool if_running;
+
+ if (!adapter->xsk_umems || qid >= adapter->num_xsk_umems ||
+ !adapter->xsk_umems[qid])
+ return -EINVAL;
+
+ if_running = netif_running(adapter->netdev) &&
+ READ_ONCE(adapter->xdp_prog);
+
+ if (if_running)
+ ixgbe_txrx_ring_disable(adapter, qid);
+
+ ixgbe_xsk_umem_dma_unmap(adapter, adapter->xsk_umems[qid]);
+ ixgbe_remove_xsk_umem(adapter, qid);
+
+ if (if_running)
+ ixgbe_txrx_ring_enable(adapter, qid);
+
+ return 0;
+}
+
+int ixgbe_xsk_umem_query(struct ixgbe_adapter *adapter, struct xdp_umem **umem,
+ u16 qid)
+{
+ if (qid >= adapter->num_rx_queues)
+ return -EINVAL;
+
+ if (adapter->xsk_umems) {
+ if (qid >= adapter->num_xsk_umems)
+ return -EINVAL;
+ *umem = adapter->xsk_umems[qid];
+ return 0;
+ }
+
+ *umem = NULL;
+ return 0;
+}
+
+int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,
+ u16 qid)
+{
+ return umem ? ixgbe_xsk_umem_enable(adapter, umem, qid) :
+ ixgbe_xsk_umem_disable(adapter, qid);
+}
+
+static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *rx_ring,
+ struct xdp_buff *xdp)
+{
+ int err, result = IXGBE_XDP_PASS;
+ struct bpf_prog *xdp_prog;
+ struct xdp_frame *xdpf;
+ u32 act;
+
+ rcu_read_lock();
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+ xdp->handle += xdp->data - xdp->data_hard_start;
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ xdpf = convert_to_xdp_frame(xdp);
+ if (unlikely(!xdpf)) {
+ result = IXGBE_XDP_CONSUMED;
+ break;
+ }
+ result = ixgbe_xmit_xdp_ring(adapter, xdpf);
+ break;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+ result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ /* fallthrough */
+ case XDP_ABORTED:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ /* fallthrough -- handle aborts by dropping packet */
+ case XDP_DROP:
+ result = IXGBE_XDP_CONSUMED;
+ break;
+ }
+ rcu_read_unlock();
+ return result;
+}
+
+static struct
+ixgbe_rx_buffer *ixgbe_get_rx_buffer_zc(struct ixgbe_ring *rx_ring,
+ unsigned int size)
+{
+ struct ixgbe_rx_buffer *bi;
+
+ bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ bi->dma, 0,
+ size,
+ DMA_BIDIRECTIONAL);
+
+ return bi;
+}
+
+static void ixgbe_reuse_rx_buffer_zc(struct ixgbe_ring *rx_ring,
+ struct ixgbe_rx_buffer *obi)
+{
+ unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask;
+ u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
+ u16 nta = rx_ring->next_to_alloc;
+ struct ixgbe_rx_buffer *nbi;
+
+ nbi = &rx_ring->rx_buffer_info[rx_ring->next_to_alloc];
+ /* update, and store next to alloc */
+ nta++;
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+ /* transfer page from old buffer to new buffer */
+ nbi->dma = obi->dma & mask;
+ nbi->dma += hr;
+
+ nbi->addr = (void *)((unsigned long)obi->addr & mask);
+ nbi->addr += hr;
+
+ nbi->handle = obi->handle & mask;
+ nbi->handle += rx_ring->xsk_umem->headroom;
+
+ obi->addr = NULL;
+ obi->skb = NULL;
+}
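
[Editor's note, illustrative only] The recycling above re-derives the chunk base by masking with the UMEM chunk_mask and then re-applies the fixed headroom. A tiny worked sketch of that arithmetic with an example 2 KiB chunk size:

	/* Illustrative sketch only: mask back to the chunk base, re-add headroom. */
	static inline u64 example_recycle_addr(u64 old_addr, u64 headroom)
	{
		u64 chunk_mask = ~((u64)2048 - 1);	/* example chunk size */

		return (old_addr & chunk_mask) + headroom;
	}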
+
+void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle)
+{
+ struct ixgbe_rx_buffer *bi;
+ struct ixgbe_ring *rx_ring;
+ u64 hr, mask;
+ u16 nta;
+
+ rx_ring = container_of(alloc, struct ixgbe_ring, zca);
+ hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
+ mask = rx_ring->xsk_umem->chunk_mask;
+
+ nta = rx_ring->next_to_alloc;
+ bi = rx_ring->rx_buffer_info;
+
+ nta++;
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+ handle &= mask;
+
+ bi->dma = xdp_umem_get_dma(rx_ring->xsk_umem, handle);
+ bi->dma += hr;
+
+ bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle);
+ bi->addr += hr;
+
+ bi->handle = (u64)handle + rx_ring->xsk_umem->headroom;
+}
+
+static bool ixgbe_alloc_buffer_zc(struct ixgbe_ring *rx_ring,
+ struct ixgbe_rx_buffer *bi)
+{
+ struct xdp_umem *umem = rx_ring->xsk_umem;
+ void *addr = bi->addr;
+ u64 handle, hr;
+
+ if (addr)
+ return true;
+
+ if (!xsk_umem_peek_addr(umem, &handle)) {
+ rx_ring->rx_stats.alloc_rx_page_failed++;
+ return false;
+ }
+
+ hr = umem->headroom + XDP_PACKET_HEADROOM;
+
+ bi->dma = xdp_umem_get_dma(umem, handle);
+ bi->dma += hr;
+
+ bi->addr = xdp_umem_get_data(umem, handle);
+ bi->addr += hr;
+
+ bi->handle = handle + umem->headroom;
+
+ xsk_umem_discard_addr(umem);
+ return true;
+}
+
+static bool ixgbe_alloc_buffer_slow_zc(struct ixgbe_ring *rx_ring,
+ struct ixgbe_rx_buffer *bi)
+{
+ struct xdp_umem *umem = rx_ring->xsk_umem;
+ u64 handle, hr;
+
+ if (!xsk_umem_peek_addr_rq(umem, &handle)) {
+ rx_ring->rx_stats.alloc_rx_page_failed++;
+ return false;
+ }
+
+ handle &= rx_ring->xsk_umem->chunk_mask;
+
+ hr = umem->headroom + XDP_PACKET_HEADROOM;
+
+ bi->dma = xdp_umem_get_dma(umem, handle);
+ bi->dma += hr;
+
+ bi->addr = xdp_umem_get_data(umem, handle);
+ bi->addr += hr;
+
+ bi->handle = handle + umem->headroom;
+
+ xsk_umem_discard_addr_rq(umem);
+ return true;
+}
+
+static __always_inline bool
+__ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count,
+ bool alloc(struct ixgbe_ring *rx_ring,
+ struct ixgbe_rx_buffer *bi))
+{
+ union ixgbe_adv_rx_desc *rx_desc;
+ struct ixgbe_rx_buffer *bi;
+ u16 i = rx_ring->next_to_use;
+ bool ok = true;
+
+ /* nothing to do */
+ if (!cleaned_count)
+ return true;
+
+ rx_desc = IXGBE_RX_DESC(rx_ring, i);
+ bi = &rx_ring->rx_buffer_info[i];
+ i -= rx_ring->count;
+
+ do {
+ if (!alloc(rx_ring, bi)) {
+ ok = false;
+ break;
+ }
+
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+ bi->page_offset,
+ rx_ring->rx_buf_len,
+ DMA_BIDIRECTIONAL);
+
+ /* Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info.
+ */
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+
+ rx_desc++;
+ bi++;
+ i++;
+ if (unlikely(!i)) {
+ rx_desc = IXGBE_RX_DESC(rx_ring, 0);
+ bi = rx_ring->rx_buffer_info;
+ i -= rx_ring->count;
+ }
+
+ /* clear the length for the next_to_use descriptor */
+ rx_desc->wb.upper.length = 0;
+
+ cleaned_count--;
+ } while (cleaned_count);
+
+ i += rx_ring->count;
+
+ if (rx_ring->next_to_use != i) {
+ rx_ring->next_to_use = i;
+
+ /* update next to alloc since we have filled the ring */
+ rx_ring->next_to_alloc = i;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(i, rx_ring->tail);
+ }
+
+ return ok;
+}
+
+void ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
+{
+ __ixgbe_alloc_rx_buffers_zc(rx_ring, count,
+ ixgbe_alloc_buffer_slow_zc);
+}
+
+static bool ixgbe_alloc_rx_buffers_fast_zc(struct ixgbe_ring *rx_ring,
+ u16 count)
+{
+ return __ixgbe_alloc_rx_buffers_zc(rx_ring, count,
+ ixgbe_alloc_buffer_zc);
+}
+
+static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
+ struct ixgbe_rx_buffer *bi,
+ struct xdp_buff *xdp)
+{
+ unsigned int metasize = xdp->data - xdp->data_meta;
+ unsigned int datasize = xdp->data_end - xdp->data;
+ struct sk_buff *skb;
+
+ /* allocate a skb to store the frags */
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
+ xdp->data_end - xdp->data_hard_start,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb))
+ return NULL;
+
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
+ memcpy(__skb_put(skb, datasize), xdp->data, datasize);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
+
+ ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
+ return skb;
+}
+
+static void ixgbe_inc_ntc(struct ixgbe_ring *rx_ring)
+{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+ prefetch(IXGBE_RX_DESC(rx_ring, ntc));
+}
+
+int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
+ struct ixgbe_ring *rx_ring,
+ const int budget)
+{
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ struct ixgbe_adapter *adapter = q_vector->adapter;
+ u16 cleaned_count = ixgbe_desc_unused(rx_ring);
+ unsigned int xdp_res, xdp_xmit = 0;
+ bool failure = false;
+ struct sk_buff *skb;
+ struct xdp_buff xdp;
+
+ xdp.rxq = &rx_ring->xdp_rxq;
+
+ while (likely(total_rx_packets < budget)) {
+ union ixgbe_adv_rx_desc *rx_desc;
+ struct ixgbe_rx_buffer *bi;
+ unsigned int size;
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
+ failure = failure ||
+ !ixgbe_alloc_rx_buffers_fast_zc(rx_ring,
+ cleaned_count);
+ cleaned_count = 0;
+ }
+
+ rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
+ size = le16_to_cpu(rx_desc->wb.upper.length);
+ if (!size)
+ break;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * descriptor has been written back
+ */
+ dma_rmb();
+
+ bi = ixgbe_get_rx_buffer_zc(rx_ring, size);
+
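+ /* Multi-buffer frames are not supported in zero-copy mode: recycle
+ * non-EOP fragments and flag the next buffer so it is dropped too.
+ */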
+ if (unlikely(!ixgbe_test_staterr(rx_desc,
+ IXGBE_RXD_STAT_EOP))) {
+ struct ixgbe_rx_buffer *next_bi;
+
+ ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
+ ixgbe_inc_ntc(rx_ring);
+ next_bi =
+ &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+ next_bi->skb = ERR_PTR(-EINVAL);
+ continue;
+ }
+
+ if (unlikely(bi->skb)) {
+ ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
+ ixgbe_inc_ntc(rx_ring);
+ continue;
+ }
+
+ xdp.data = bi->addr;
+ xdp.data_meta = xdp.data;
+ xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
+ xdp.data_end = xdp.data + size;
+ xdp.handle = bi->handle;
+
+ xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, &xdp);
+
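+ /* XDP_TX/XDP_REDIRECT hand the chunk to the XDP path, so clear the
+ * slot; consumed (dropped) frames recycle the chunk instead.
+ */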
+ if (xdp_res) {
+ if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
+ xdp_xmit |= xdp_res;
+ bi->addr = NULL;
+ bi->skb = NULL;
+ } else {
+ ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
+ }
+ total_rx_packets++;
+ total_rx_bytes += size;
+
+ cleaned_count++;
+ ixgbe_inc_ntc(rx_ring);
+ continue;
+ }
+
+ /* XDP_PASS path */
+ skb = ixgbe_construct_skb_zc(rx_ring, bi, &xdp);
+ if (!skb) {
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
+ break;
+ }
+
+ cleaned_count++;
+ ixgbe_inc_ntc(rx_ring);
+
+ if (eth_skb_pad(skb))
+ continue;
+
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+ ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
+ ixgbe_rx_skb(q_vector, skb);
+ }
+
+ if (xdp_xmit & IXGBE_XDP_REDIR)
+ xdp_do_flush_map();
+
+ if (xdp_xmit & IXGBE_XDP_TX) {
+ struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch.
+ */
+ wmb();
+ writel(ring->next_to_use, ring->tail);
+ }
+
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->stats.packets += total_rx_packets;
+ rx_ring->stats.bytes += total_rx_bytes;
+ u64_stats_update_end(&rx_ring->syncp);
+ q_vector->rx.total_packets += total_rx_packets;
+ q_vector->rx.total_bytes += total_rx_bytes;
+
+ return failure ? budget : (int)total_rx_packets;
+}
+
+void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring)
+{
+ u16 i = rx_ring->next_to_clean;
+ struct ixgbe_rx_buffer *bi = &rx_ring->rx_buffer_info[i];
+
+ while (i != rx_ring->next_to_alloc) {
+ xsk_umem_fq_reuse(rx_ring->xsk_umem, bi->handle);
+ i++;
+ bi++;
+ if (i == rx_ring->count) {
+ i = 0;
+ bi = rx_ring->rx_buffer_info;
+ }
+ }
+}
+
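+/* Pull frames from the XSK umem Tx queue and post them on the XDP Tx ring,
+ * bounded by @budget. Returns true only when it stopped because the umem
+ * Tx queue drained.
+ */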
+static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
+{
+ union ixgbe_adv_tx_desc *tx_desc = NULL;
+ struct ixgbe_tx_buffer *tx_bi;
+ bool work_done = true;
+ u32 len, cmd_type;
+ dma_addr_t dma;
+
+ while (budget-- > 0) {
+ if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
+ work_done = false;
+ break;
+ }
+
+ if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &dma, &len))
+ break;
+
+ dma_sync_single_for_device(xdp_ring->dev, dma, len,
+ DMA_BIDIRECTIONAL);
+
+ tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
+ tx_bi->bytecount = len;
+ tx_bi->xdpf = NULL;
+
+ tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+ /* put descriptor type bits */
+ cmd_type = IXGBE_ADVTXD_DTYP_DATA |
+ IXGBE_ADVTXD_DCMD_DEXT |
+ IXGBE_ADVTXD_DCMD_IFCS;
+ cmd_type |= len | IXGBE_TXD_CMD;
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ tx_desc->read.olinfo_status =
+ cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
+ xdp_ring->next_to_use++;
+ if (xdp_ring->next_to_use == xdp_ring->count)
+ xdp_ring->next_to_use = 0;
+ }
+
+ if (tx_desc) {
+ ixgbe_xdp_ring_update_tail(xdp_ring);
+ xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
+ }
+
+ return !!budget && work_done;
+}
+
+static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
+ struct ixgbe_tx_buffer *tx_bi)
+{
+ xdp_return_frame(tx_bi->xdpf);
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_bi, dma),
+ dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_bi, len, 0);
+}
+
+bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
+ struct ixgbe_ring *tx_ring, int napi_budget)
+{
+ unsigned int total_packets = 0, total_bytes = 0;
+ u32 i = tx_ring->next_to_clean, xsk_frames = 0;
+ unsigned int budget = q_vector->tx.work_limit;
+ struct xdp_umem *umem = tx_ring->xsk_umem;
+ union ixgbe_adv_tx_desc *tx_desc;
+ struct ixgbe_tx_buffer *tx_bi;
+ bool xmit_done;
+
+ tx_bi = &tx_ring->tx_buffer_info[i];
+ tx_desc = IXGBE_TX_DESC(tx_ring, i);
+ i -= tx_ring->count;
+
+ do {
+ if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
+ break;
+
+ total_bytes += tx_bi->bytecount;
+ total_packets += tx_bi->gso_segs;
+
+ if (tx_bi->xdpf)
+ ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
+ else
+ xsk_frames++;
+
+ tx_bi->xdpf = NULL;
+
+ tx_bi++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_bi = tx_ring->tx_buffer_info;
+ tx_desc = IXGBE_TX_DESC(tx_ring, 0);
+ }
+
+ /* issue prefetch for next Tx descriptor */
+ prefetch(tx_desc);
+
+ /* update budget accounting */
+ budget--;
+ } while (likely(budget));
+
+ i += tx_ring->count;
+ tx_ring->next_to_clean = i;
+
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->stats.bytes += total_bytes;
+ tx_ring->stats.packets += total_packets;
+ u64_stats_update_end(&tx_ring->syncp);
+ q_vector->tx.total_bytes += total_bytes;
+ q_vector->tx.total_packets += total_packets;
+
+ if (xsk_frames)
+ xsk_umem_complete_tx(umem, xsk_frames);
+
+ xmit_done = ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
+ return budget > 0 && xmit_done;
+}
+
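+/* ndo_xsk_async_xmit handler: rearm the queue's interrupt (unless its NAPI
+ * is already scheduled) so the poll routine drains the XSK Tx queue.
+ */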
+int ixgbe_xsk_async_xmit(struct net_device *dev, u32 qid)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_ring *ring;
+
+ if (test_bit(__IXGBE_DOWN, &adapter->state))
+ return -ENETDOWN;
+
+ if (!READ_ONCE(adapter->xdp_prog))
+ return -ENXIO;
+
+ if (qid >= adapter->num_xdp_queues)
+ return -ENXIO;
+
+ if (!adapter->xsk_umems || !adapter->xsk_umems[qid])
+ return -ENXIO;
+
+ ring = adapter->xdp_ring[qid];
+ if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
+ u64 eics = BIT_ULL(ring->q_vector->v_idx);
+
+ ixgbe_irq_rearm_queues(adapter, eics);
+ }
+
+ return 0;
+}
+
+void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring)
+{
+ u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
+ struct xdp_umem *umem = tx_ring->xsk_umem;
+ struct ixgbe_tx_buffer *tx_bi;
+ u32 xsk_frames = 0;
+
+ while (ntc != ntu) {
+ tx_bi = &tx_ring->tx_buffer_info[ntc];
+
+ if (tx_bi->xdpf)
+ ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
+ else
+ xsk_frames++;
+
+ tx_bi->xdpf = NULL;
+
+ ntc++;
+ if (ntc == tx_ring->count)
+ ntc = 0;
+ }
+
+ if (xsk_frames)
+ xsk_umem_complete_tx(umem, xsk_frames);
+}
diff --git a/drivers/net/ethernet/intel/ixgbevf/Makefile b/drivers/net/ethernet/intel/ixgbevf/Makefile
index 297d0f0858b5..186a4bb24fde 100644
--- a/drivers/net/ethernet/intel/ixgbevf/Makefile
+++ b/drivers/net/ethernet/intel/ixgbevf/Makefile
@@ -10,5 +10,5 @@ ixgbevf-objs := vf.o \
mbx.o \
ethtool.o \
ixgbevf_main.o
-ixgbevf-$(CONFIG_XFRM_OFFLOAD) += ipsec.o
+ixgbevf-$(CONFIG_IXGBEVF_IPSEC) += ipsec.o
diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
index 997cea675a37..e8a3231be0bf 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
@@ -21,7 +21,6 @@ static int ixgbevf_ipsec_set_pf_sa(struct ixgbevf_adapter *adapter,
u32 msgbuf[IXGBE_VFMAILBOX_SIZE] = { 0 };
struct ixgbe_hw *hw = &adapter->hw;
struct sa_mbx_msg *sam;
- u16 msglen;
int ret;
/* send the important bits to the PF */
@@ -38,16 +37,14 @@ static int ixgbevf_ipsec_set_pf_sa(struct ixgbevf_adapter *adapter,
memcpy(sam->key, xs->aead->alg_key, sizeof(sam->key));
msgbuf[0] = IXGBE_VF_IPSEC_ADD;
- msglen = sizeof(*sam) + sizeof(msgbuf[0]);
spin_lock_bh(&adapter->mbx_lock);
- ret = hw->mbx.ops.write_posted(hw, msgbuf, msglen);
+ ret = hw->mbx.ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
if (ret)
goto out;
- msglen = sizeof(msgbuf[0]) * 2;
- ret = hw->mbx.ops.read_posted(hw, msgbuf, msglen);
+ ret = hw->mbx.ops.read_posted(hw, msgbuf, 2);
if (ret)
goto out;
@@ -80,11 +77,11 @@ static int ixgbevf_ipsec_del_pf_sa(struct ixgbevf_adapter *adapter, int pfsa)
spin_lock_bh(&adapter->mbx_lock);
- err = hw->mbx.ops.write_posted(hw, msgbuf, sizeof(msgbuf));
+ err = hw->mbx.ops.write_posted(hw, msgbuf, 2);
if (err)
goto out;
- err = hw->mbx.ops.read_posted(hw, msgbuf, sizeof(msgbuf));
+ err = hw->mbx.ops.read_posted(hw, msgbuf, 2);
if (err)
goto out;
@@ -470,7 +467,7 @@ int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring,
}
sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
- if (unlikely(sa_idx > IXGBE_IPSEC_MAX_SA_COUNT)) {
+ if (unlikely(sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) {
netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
__func__, sa_idx, xs->xso.offload_handle);
return 0;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index e399e1c0c54a..ecab686574b6 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -459,7 +459,7 @@ int ethtool_ioctl(struct ifreq *ifr);
extern void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector);
-#ifdef CONFIG_XFRM_OFFLOAD
+#ifdef CONFIG_IXGBEVF_IPSEC
void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter);
void ixgbevf_stop_ipsec_offload(struct ixgbevf_adapter *adapter);
void ixgbevf_ipsec_restore(struct ixgbevf_adapter *adapter);
@@ -482,7 +482,7 @@ static inline int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring,
struct ixgbevf_tx_buffer *first,
struct ixgbevf_ipsec_tx_data *itd)
{ return 0; }
-#endif /* CONFIG_XFRM_OFFLOAD */
+#endif /* CONFIG_IXGBEVF_IPSEC */
void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 98707ee11d72..5e47ede7e832 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -4150,7 +4150,7 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
first->tx_flags = tx_flags;
first->protocol = vlan_get_protocol(skb);
-#ifdef CONFIG_XFRM_OFFLOAD
+#ifdef CONFIG_IXGBEVF_IPSEC
if (skb->sp && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
goto out_drop;
#endif
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index f33fd22b351c..3238aa7f5dac 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -167,4 +167,7 @@ config SKY2_DEBUG
If unsure, say N.
+
+source "drivers/net/ethernet/marvell/octeontx2/Kconfig"
+
endif # NET_VENDOR_MARVELL
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
index 55d4d10aa7d3..89dea7284d5b 100644
--- a/drivers/net/ethernet/marvell/Makefile
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_MVPP2) += mvpp2/
obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
obj-$(CONFIG_SKGE) += skge.o
obj-$(CONFIG_SKY2) += sky2.o
+obj-y += octeontx2/
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index 176c6b56fdcc..398328f10743 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -796,6 +796,7 @@ struct mvpp2_queue_vector {
int nrxqs;
u32 pending_cause_rx;
struct mvpp2_port *port;
+ struct cpumask *mask;
};
struct mvpp2_port {
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 2373cd41a625..7a37a37e3fb3 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -1755,7 +1755,7 @@ static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
}
/* Set Tx descriptors fields relevant for CSUM calculation */
-static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
+static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
int ip_hdr_len, int l4_proto)
{
u32 command;
@@ -2645,14 +2645,15 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
if (skb->ip_summed == CHECKSUM_PARTIAL) {
int ip_hdr_len = 0;
u8 l4_proto;
+ __be16 l3_proto = vlan_get_protocol(skb);
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (l3_proto == htons(ETH_P_IP)) {
struct iphdr *ip4h = ip_hdr(skb);
/* Calculate IPv4 checksum and L4 checksum */
ip_hdr_len = ip4h->ihl;
l4_proto = ip4h->protocol;
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ } else if (l3_proto == htons(ETH_P_IPV6)) {
struct ipv6hdr *ip6h = ipv6_hdr(skb);
/* Read l4_protocol from one of IPv6 extra headers */
@@ -2664,7 +2665,7 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
}
return mvpp2_txq_desc_csum(skb_network_offset(skb),
- skb->protocol, ip_hdr_len, l4_proto);
+ l3_proto, ip_hdr_len, l4_proto);
}
return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
@@ -3297,24 +3298,30 @@ static int mvpp2_irqs_init(struct mvpp2_port *port)
for (i = 0; i < port->nqvecs; i++) {
struct mvpp2_queue_vector *qv = port->qvecs + i;
- if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
+ if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
+ qv->mask = kzalloc(cpumask_size(), GFP_KERNEL);
+ if (!qv->mask) {
+ err = -ENOMEM;
+ goto err;
+ }
+
irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);
+ }
err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
if (err)
goto err;
if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
- unsigned long mask = 0;
unsigned int cpu;
for_each_present_cpu(cpu) {
if (mvpp2_cpu_to_thread(port->priv, cpu) ==
qv->sw_thread_id)
- mask |= BIT(cpu);
+ cpumask_set_cpu(cpu, qv->mask);
}
- irq_set_affinity_hint(qv->irq, to_cpumask(&mask));
+ irq_set_affinity_hint(qv->irq, qv->mask);
}
}
@@ -3324,6 +3331,8 @@ err:
struct mvpp2_queue_vector *qv = port->qvecs + i;
irq_set_affinity_hint(qv->irq, NULL);
+ kfree(qv->mask);
+ qv->mask = NULL;
free_irq(qv->irq, qv);
}
@@ -3338,6 +3347,8 @@ static void mvpp2_irqs_deinit(struct mvpp2_port *port)
struct mvpp2_queue_vector *qv = port->qvecs + i;
irq_set_affinity_hint(qv->irq, NULL);
+ kfree(qv->mask);
+ qv->mask = NULL;
irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
free_irq(qv->irq, qv);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig
new file mode 100644
index 000000000000..35827bdf1878
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig
@@ -0,0 +1,17 @@
+#
+# Marvell OcteonTX2 drivers configuration
+#
+
+config OCTEONTX2_MBOX
+ tristate
+
+config OCTEONTX2_AF
+ tristate "Marvell OcteonTX2 RVU Admin Function driver"
+ select OCTEONTX2_MBOX
+ depends on (64BIT && COMPILE_TEST) || ARM64
+ depends on PCI
+ help
+ This driver supports Marvell's OcteonTX2 Resource Virtualization
+ Unit's admin function manager, which manages all RVU HW resources
+ and provides an interface for other PFs/VFs to configure the HW.
+ It should be enabled for other RVU device drivers to work.
diff --git a/drivers/net/ethernet/marvell/octeontx2/Makefile b/drivers/net/ethernet/marvell/octeontx2/Makefile
new file mode 100644
index 000000000000..e579dcd54c97
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Marvell OcteonTX2 device drivers.
+#
+
+obj-$(CONFIG_OCTEONTX2_AF) += af/
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
new file mode 100644
index 000000000000..06329acf9c2c
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Marvell's OcteonTX2 RVU Admin Function driver
+#
+
+obj-$(CONFIG_OCTEONTX2_MBOX) += octeontx2_mbox.o
+obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o
+
+octeontx2_mbox-y := mbox.o
+octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
+ rvu_reg.o rvu_npc.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
new file mode 100644
index 000000000000..12db256c8c9f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -0,0 +1,721 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 CGX driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+
+#include "cgx.h"
+
+#define DRV_NAME "octeontx2-cgx"
+#define DRV_STRING "Marvell OcteonTX2 CGX/MAC Driver"
+
+/**
+ * struct lmac
+ * @wq_cmd_cmplt: waitq to keep the process blocked until cmd completion
+ * @cmd_lock: Lock to serialize the command interface
+ * @resp: command response
+ * @link_info: link related information
+ * @event_cb: callback for linkchange events
+ * @cmd_pend: flag set before new command is started
+ * flag cleared after command response is received
+ * @cgx: parent cgx port
+ * @lmac_id: lmac port id
+ * @name: lmac port name
+ */
+struct lmac {
+ wait_queue_head_t wq_cmd_cmplt;
+ struct mutex cmd_lock;
+ u64 resp;
+ struct cgx_link_user_info link_info;
+ struct cgx_event_cb event_cb;
+ bool cmd_pend;
+ struct cgx *cgx;
+ u8 lmac_id;
+ char *name;
+};
+
+struct cgx {
+ void __iomem *reg_base;
+ struct pci_dev *pdev;
+ u8 cgx_id;
+ u8 lmac_count;
+ struct lmac *lmac_idmap[MAX_LMAC_PER_CGX];
+ struct list_head cgx_list;
+};
+
+static LIST_HEAD(cgx_list);
+
+/* Convert firmware speed encoding to user format (Mbps) */
+static u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX];
+
+/* Convert firmware lmac type encoding to string */
+static char *cgx_lmactype_string[LMAC_MODE_MAX];
+
+/* Supported devices */
+static const struct pci_device_id cgx_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
+ { 0, } /* end of table */
+};
+
+MODULE_DEVICE_TABLE(pci, cgx_id_table);
+
+static void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
+{
+ writeq(val, cgx->reg_base + (lmac << 18) + offset);
+}
+
+static u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
+{
+ return readq(cgx->reg_base + (lmac << 18) + offset);
+}
+
+static inline struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
+{
+ if (!cgx || lmac_id >= MAX_LMAC_PER_CGX)
+ return NULL;
+
+ return cgx->lmac_idmap[lmac_id];
+}
+
+int cgx_get_cgx_cnt(void)
+{
+ struct cgx *cgx_dev;
+ int count = 0;
+
+ list_for_each_entry(cgx_dev, &cgx_list, cgx_list)
+ count++;
+
+ return count;
+}
+EXPORT_SYMBOL(cgx_get_cgx_cnt);
+
+int cgx_get_lmac_cnt(void *cgxd)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!cgx)
+ return -ENODEV;
+
+ return cgx->lmac_count;
+}
+EXPORT_SYMBOL(cgx_get_lmac_cnt);
+
+void *cgx_get_pdata(int cgx_id)
+{
+ struct cgx *cgx_dev;
+
+ list_for_each_entry(cgx_dev, &cgx_list, cgx_list) {
+ if (cgx_dev->cgx_id == cgx_id)
+ return cgx_dev;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(cgx_get_pdata);
+
+/* Ensure the required lock for the event queue (where asynchronous events are
+ * posted) is acquired before calling this API. Otherwise an asynchronous
+ * event (carrying the latest link status) can reach the destination before
+ * this function returns and make the link status appear wrong.
+ */
+int cgx_get_link_info(void *cgxd, int lmac_id,
+ struct cgx_link_user_info *linfo)
+{
+ struct lmac *lmac = lmac_pdata(lmac_id, cgxd);
+
+ if (!lmac)
+ return -ENODEV;
+
+ *linfo = lmac->link_info;
+ return 0;
+}
+EXPORT_SYMBOL(cgx_get_link_info);
+
+static u64 mac2u64(u8 *mac_addr)
+{
+ u64 mac = 0;
+ int index;
+
+ for (index = ETH_ALEN - 1; index >= 0; index--)
+ mac |= ((u64)*mac_addr++) << (8 * index);
+ return mac;
+}
+
+int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ u64 cfg;
+
+ /* pack the 6 MAC address bytes into the low 48 bits of cfg */
+ cfg = mac2u64(mac_addr);
+
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (lmac_id * 0x8)),
+ cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49));
+
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg |= CGX_DMAC_CTL0_CAM_ENABLE;
+ cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+
+ return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_addr_set);
+
+u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ u64 cfg;
+
+ cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8);
+ return cfg & CGX_RX_DMAC_ADR_MASK;
+}
+EXPORT_SYMBOL(cgx_lmac_addr_get);
+
+int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!cgx || lmac_id >= cgx->lmac_count)
+ return -ENODEV;
+
+ cgx_write(cgx, lmac_id, CGXX_CMRX_RX_ID_MAP, (pkind & 0x3F));
+ return 0;
+}
+EXPORT_SYMBOL(cgx_set_pkind);
+
+static inline u8 cgx_get_lmac_type(struct cgx *cgx, int lmac_id)
+{
+ u64 cfg;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
+ return (cfg >> CGX_LMAC_TYPE_SHIFT) & CGX_LMAC_TYPE_MASK;
+}
+
+/* Configure CGX LMAC in internal loopback mode */
+int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ u8 lmac_type;
+ u64 cfg;
+
+ if (!cgx || lmac_id >= cgx->lmac_count)
+ return -ENODEV;
+
+ lmac_type = cgx_get_lmac_type(cgx, lmac_id);
+ if (lmac_type == LMAC_MODE_SGMII || lmac_type == LMAC_MODE_QSGMII) {
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL);
+ if (enable)
+ cfg |= CGXX_GMP_PCS_MRX_CTL_LBK;
+ else
+ cfg &= ~CGXX_GMP_PCS_MRX_CTL_LBK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL, cfg);
+ } else {
+ cfg = cgx_read(cgx, lmac_id, CGXX_SPUX_CONTROL1);
+ if (enable)
+ cfg |= CGXX_SPUX_CONTROL1_LBK;
+ else
+ cfg &= ~CGXX_SPUX_CONTROL1_LBK;
+ cgx_write(cgx, lmac_id, CGXX_SPUX_CONTROL1, cfg);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_internal_loopback);
+
+void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgx_get_pdata(cgx_id);
+ u64 cfg = 0;
+
+ if (!cgx)
+ return;
+
+ if (enable) {
+ /* Enable promiscuous mode on LMAC */
+ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg &= ~(CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE);
+ cfg |= CGX_DMAC_BCAST_MODE;
+ cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+
+ cfg = cgx_read(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
+ cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
+ cgx_write(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
+ } else {
+ /* Disable promiscuous mode */
+ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE;
+ cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+ cfg = cgx_read(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
+ cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
+ cgx_write(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
+ }
+}
+EXPORT_SYMBOL(cgx_lmac_promisc_config);
+
+int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!cgx || lmac_id >= cgx->lmac_count)
+ return -ENODEV;
+ *rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
+ return 0;
+}
+EXPORT_SYMBOL(cgx_get_rx_stats);
+
+int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!cgx || lmac_id >= cgx->lmac_count)
+ return -ENODEV;
+ *tx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (idx * 8));
+ return 0;
+}
+EXPORT_SYMBOL(cgx_get_tx_stats);
+
+int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!cgx || lmac_id >= cgx->lmac_count)
+ return -ENODEV;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
+ if (enable)
+ cfg |= CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN;
+ else
+ cfg &= ~(CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN);
+ cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
+ return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_rx_tx_enable);
+
+/* CGX Firmware interface low level support */
+static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
+{
+ struct cgx *cgx = lmac->cgx;
+ struct device *dev;
+ int err = 0;
+ u64 cmd;
+
+ /* Ensure no other command is in progress */
+ err = mutex_lock_interruptible(&lmac->cmd_lock);
+ if (err)
+ return err;
+
+ /* Ensure command register is free */
+ cmd = cgx_read(cgx, lmac->lmac_id, CGX_COMMAND_REG);
+ if (FIELD_GET(CMDREG_OWN, cmd) != CGX_CMD_OWN_NS) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ /* Update ownership in command request */
+ req = FIELD_SET(CMDREG_OWN, CGX_CMD_OWN_FIRMWARE, req);
+
+ /* Mark this lmac as pending, before we start */
+ lmac->cmd_pend = true;
+
+ /* Start command in hardware */
+ cgx_write(cgx, lmac->lmac_id, CGX_COMMAND_REG, req);
+
+ /* Ensure command is completed without errors */
+ if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend,
+ msecs_to_jiffies(CGX_CMD_TIMEOUT))) {
+ dev = &cgx->pdev->dev;
+ dev_err(dev, "cgx port %d:%d cmd timeout\n",
+ cgx->cgx_id, lmac->lmac_id);
+ err = -EIO;
+ goto unlock;
+ }
+
+ /* we have a valid command response */
+ smp_rmb(); /* Ensure the latest updates are visible */
+ *resp = lmac->resp;
+
+unlock:
+ mutex_unlock(&lmac->cmd_lock);
+
+ return err;
+}
+
+static inline int cgx_fwi_cmd_generic(u64 req, u64 *resp,
+ struct cgx *cgx, int lmac_id)
+{
+ struct lmac *lmac;
+ int err;
+
+ lmac = lmac_pdata(lmac_id, cgx);
+ if (!lmac)
+ return -ENODEV;
+
+ err = cgx_fwi_cmd_send(req, resp, lmac);
+
+ /* Check for valid response */
+ if (!err) {
+ if (FIELD_GET(EVTREG_STAT, *resp) == CGX_STAT_FAIL)
+ return -EIO;
+ else
+ return 0;
+ }
+
+ return err;
+}
+
+static inline void cgx_link_usertable_init(void)
+{
+ cgx_speed_mbps[CGX_LINK_NONE] = 0;
+ cgx_speed_mbps[CGX_LINK_10M] = 10;
+ cgx_speed_mbps[CGX_LINK_100M] = 100;
+ cgx_speed_mbps[CGX_LINK_1G] = 1000;
+ cgx_speed_mbps[CGX_LINK_2HG] = 2500;
+ cgx_speed_mbps[CGX_LINK_5G] = 5000;
+ cgx_speed_mbps[CGX_LINK_10G] = 10000;
+ cgx_speed_mbps[CGX_LINK_20G] = 20000;
+ cgx_speed_mbps[CGX_LINK_25G] = 25000;
+ cgx_speed_mbps[CGX_LINK_40G] = 40000;
+ cgx_speed_mbps[CGX_LINK_50G] = 50000;
+ cgx_speed_mbps[CGX_LINK_100G] = 100000;
+
+ cgx_lmactype_string[LMAC_MODE_SGMII] = "SGMII";
+ cgx_lmactype_string[LMAC_MODE_XAUI] = "XAUI";
+ cgx_lmactype_string[LMAC_MODE_RXAUI] = "RXAUI";
+ cgx_lmactype_string[LMAC_MODE_10G_R] = "10G_R";
+ cgx_lmactype_string[LMAC_MODE_40G_R] = "40G_R";
+ cgx_lmactype_string[LMAC_MODE_QSGMII] = "QSGMII";
+ cgx_lmactype_string[LMAC_MODE_25G_R] = "25G_R";
+ cgx_lmactype_string[LMAC_MODE_50G_R] = "50G_R";
+ cgx_lmactype_string[LMAC_MODE_100G_R] = "100G_R";
+ cgx_lmactype_string[LMAC_MODE_USXGMII] = "USXGMII";
+}
+
+static inline void link_status_user_format(u64 lstat,
+ struct cgx_link_user_info *linfo,
+ struct cgx *cgx, u8 lmac_id)
+{
+ char *lmac_string;
+
+ linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
+ linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
+ linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
+ linfo->lmac_type_id = cgx_get_lmac_type(cgx, lmac_id);
+ lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
+ strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
+}
+
+/* Hardware event handlers */
+static inline void cgx_link_change_handler(u64 lstat,
+ struct lmac *lmac)
+{
+ struct cgx_link_user_info *linfo;
+ struct cgx *cgx = lmac->cgx;
+ struct cgx_link_event event;
+ struct device *dev;
+ int err_type;
+
+ dev = &cgx->pdev->dev;
+
+ link_status_user_format(lstat, &event.link_uinfo, cgx, lmac->lmac_id);
+ err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat);
+
+ event.cgx_id = cgx->cgx_id;
+ event.lmac_id = lmac->lmac_id;
+
+ /* update the local copy of link status */
+ lmac->link_info = event.link_uinfo;
+ linfo = &lmac->link_info;
+
+ if (!lmac->event_cb.notify_link_chg) {
+ dev_dbg(dev, "cgx port %d:%d Link change handler null",
+ cgx->cgx_id, lmac->lmac_id);
+ if (err_type != CGX_ERR_NONE) {
+ dev_err(dev, "cgx port %d:%d Link error %d\n",
+ cgx->cgx_id, lmac->lmac_id, err_type);
+ }
+ dev_info(dev, "cgx port %d:%d Link is %s %d Mbps\n",
+ cgx->cgx_id, lmac->lmac_id,
+ linfo->link_up ? "UP" : "DOWN", linfo->speed);
+ return;
+ }
+
+ if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data))
+ dev_err(dev, "event notification failure\n");
+}
+
+static inline bool cgx_cmdresp_is_linkevent(u64 event)
+{
+ u8 id;
+
+ id = FIELD_GET(EVTREG_ID, event);
+ if (id == CGX_CMD_LINK_BRING_UP ||
+ id == CGX_CMD_LINK_BRING_DOWN)
+ return true;
+ else
+ return false;
+}
+
+static inline bool cgx_event_is_linkevent(u64 event)
+{
+ if (FIELD_GET(EVTREG_ID, event) == CGX_EVT_LINK_CHANGE)
+ return true;
+ else
+ return false;
+}
+
+static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
+{
+ struct lmac *lmac = data;
+ struct cgx *cgx;
+ u64 event;
+
+ cgx = lmac->cgx;
+
+ event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG);
+
+ if (!FIELD_GET(EVTREG_ACK, event))
+ return IRQ_NONE;
+
+ switch (FIELD_GET(EVTREG_EVT_TYPE, event)) {
+ case CGX_EVT_CMD_RESP:
+ /* Copy the response. Since only one command is active at a
+ * time, there is no way a response can get overwritten
+ */
+ lmac->resp = event;
+ /* Ensure response is updated before thread context starts */
+ smp_wmb();
+
+ /* There won't be separate events for link changes initiated from
+ * software; hence report the command responses as events
+ */
+ if (cgx_cmdresp_is_linkevent(event))
+ cgx_link_change_handler(event, lmac);
+
+ /* Release thread waiting for completion */
+ lmac->cmd_pend = false;
+ wake_up_interruptible(&lmac->wq_cmd_cmplt);
+ break;
+ case CGX_EVT_ASYNC:
+ if (cgx_event_is_linkevent(event))
+ cgx_link_change_handler(event, lmac);
+ break;
+ }
+
+ /* Any new event or command response will be posted by firmware
+ * only after the current status is acked.
+ * Ack the interrupt register as well.
+ */
+ cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0);
+ cgx_write(lmac->cgx, lmac->lmac_id, CGXX_CMRX_INT, FW_CGX_INT);
+
+ return IRQ_HANDLED;
+}
+
+/* APIs for PHY management using CGX firmware interface */
+
+/* callback registration for hardware events like link change */
+int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ struct lmac *lmac;
+
+ lmac = lmac_pdata(lmac_id, cgx);
+ if (!lmac)
+ return -ENODEV;
+
+ lmac->event_cb = *cb;
+
+ return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_evh_register);
+
+static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
+{
+ u64 req = 0;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
+ return cgx_fwi_cmd_generic(req, resp, cgx, 0);
+}
+
+static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
+{
+ struct device *dev = &cgx->pdev->dev;
+ int major_ver, minor_ver;
+ u64 resp;
+ int err;
+
+ if (!cgx->lmac_count)
+ return 0;
+
+ err = cgx_fwi_read_version(&resp, cgx);
+ if (err)
+ return err;
+
+ major_ver = FIELD_GET(RESP_MAJOR_VER, resp);
+ minor_ver = FIELD_GET(RESP_MINOR_VER, resp);
+ dev_dbg(dev, "Firmware command interface version = %d.%d\n",
+ major_ver, minor_ver);
+ if (major_ver != CGX_FIRMWARE_MAJOR_VER ||
+ minor_ver != CGX_FIRMWARE_MINOR_VER)
+ return -EIO;
+ else
+ return 0;
+}
+
+static int cgx_lmac_init(struct cgx *cgx)
+{
+ struct lmac *lmac;
+ int i, err;
+
+ cgx->lmac_count = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7;
+ if (cgx->lmac_count > MAX_LMAC_PER_CGX)
+ cgx->lmac_count = MAX_LMAC_PER_CGX;
+
+ for (i = 0; i < cgx->lmac_count; i++) {
+ lmac = kcalloc(1, sizeof(struct lmac), GFP_KERNEL);
+ if (!lmac)
+ return -ENOMEM;
+ lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
+ if (!lmac->name)
+ return -ENOMEM;
+ sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
+ lmac->lmac_id = i;
+ lmac->cgx = cgx;
+ init_waitqueue_head(&lmac->wq_cmd_cmplt);
+ mutex_init(&lmac->cmd_lock);
+ err = request_irq(pci_irq_vector(cgx->pdev,
+ CGX_LMAC_FWI + i * 9),
+ cgx_fwi_event_handler, 0, lmac->name, lmac);
+ if (err)
+ return err;
+
+ /* Enable interrupt */
+ cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S,
+ FW_CGX_INT);
+
+ /* Add reference */
+ cgx->lmac_idmap[i] = lmac;
+ }
+
+ return cgx_lmac_verify_fwi_version(cgx);
+}
+
+static int cgx_lmac_exit(struct cgx *cgx)
+{
+ struct lmac *lmac;
+ int i;
+
+ /* Free all lmac related resources */
+ for (i = 0; i < cgx->lmac_count; i++) {
+ lmac = cgx->lmac_idmap[i];
+ if (!lmac)
+ continue;
+ free_irq(pci_irq_vector(cgx->pdev, CGX_LMAC_FWI + i * 9), lmac);
+ kfree(lmac->name);
+ kfree(lmac);
+ }
+
+ return 0;
+}
+
+static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct cgx *cgx;
+ int err, nvec;
+
+ cgx = devm_kzalloc(dev, sizeof(*cgx), GFP_KERNEL);
+ if (!cgx)
+ return -ENOMEM;
+ cgx->pdev = pdev;
+
+ pci_set_drvdata(pdev, cgx);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ pci_set_drvdata(pdev, NULL);
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto err_disable_device;
+ }
+
+ /* MAP configuration registers */
+ cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!cgx->reg_base) {
+ dev_err(dev, "CGX: Cannot map CSR memory space, aborting\n");
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ nvec = CGX_NVEC;
+ err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
+ if (err < 0 || err != nvec) {
+ dev_err(dev, "Request for %d msix vectors failed, err %d\n",
+ nvec, err);
+ goto err_release_regions;
+ }
+
+ list_add(&cgx->cgx_list, &cgx_list);
+ cgx->cgx_id = cgx_get_cgx_cnt() - 1;
+
+ cgx_link_usertable_init();
+
+ err = cgx_lmac_init(cgx);
+ if (err)
+ goto err_release_lmac;
+
+ return 0;
+
+err_release_lmac:
+ cgx_lmac_exit(cgx);
+ list_del(&cgx->cgx_list);
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void cgx_remove(struct pci_dev *pdev)
+{
+ struct cgx *cgx = pci_get_drvdata(pdev);
+
+ cgx_lmac_exit(cgx);
+ list_del(&cgx->cgx_list);
+ pci_free_irq_vectors(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+struct pci_driver cgx_driver = {
+ .name = DRV_NAME,
+ .id_table = cgx_id_table,
+ .probe = cgx_probe,
+ .remove = cgx_remove,
+};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
new file mode 100644
index 000000000000..0a66d2717442
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 CGX driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef CGX_H
+#define CGX_H
+
+#include "mbox.h"
+#include "cgx_fw_if.h"
+
+ /* PCI device IDs */
+#define PCI_DEVID_OCTEONTX2_CGX 0xA059
+
+/* PCI BAR nos */
+#define PCI_CFG_REG_BAR_NUM 0
+
+#define MAX_CGX 3
+#define MAX_LMAC_PER_CGX 4
+#define CGX_OFFSET(x) ((x) * MAX_LMAC_PER_CGX)
+
+/* Registers */
+#define CGXX_CMRX_CFG 0x00
+#define CMR_EN BIT_ULL(55)
+#define DATA_PKT_TX_EN BIT_ULL(53)
+#define DATA_PKT_RX_EN BIT_ULL(54)
+#define CGX_LMAC_TYPE_SHIFT 40
+#define CGX_LMAC_TYPE_MASK 0xF
+#define CGXX_CMRX_INT 0x040
+#define FW_CGX_INT BIT_ULL(1)
+#define CGXX_CMRX_INT_ENA_W1S 0x058
+#define CGXX_CMRX_RX_ID_MAP 0x060
+#define CGXX_CMRX_RX_STAT0 0x070
+#define CGXX_CMRX_RX_LMACS 0x128
+#define CGXX_CMRX_RX_DMAC_CTL0 0x1F8
+#define CGX_DMAC_CTL0_CAM_ENABLE BIT_ULL(3)
+#define CGX_DMAC_CAM_ACCEPT BIT_ULL(3)
+#define CGX_DMAC_MCAST_MODE BIT_ULL(1)
+#define CGX_DMAC_BCAST_MODE BIT_ULL(0)
+#define CGXX_CMRX_RX_DMAC_CAM0 0x200
+#define CGX_DMAC_CAM_ADDR_ENABLE BIT_ULL(48)
+#define CGXX_CMRX_RX_DMAC_CAM1 0x400
+#define CGX_RX_DMAC_ADR_MASK GENMASK_ULL(47, 0)
+#define CGXX_CMRX_TX_STAT0 0x700
+#define CGXX_SCRATCH0_REG 0x1050
+#define CGXX_SCRATCH1_REG 0x1058
+#define CGX_CONST 0x2000
+#define CGXX_SPUX_CONTROL1 0x10000
+#define CGXX_SPUX_CONTROL1_LBK BIT_ULL(14)
+#define CGXX_GMP_PCS_MRX_CTL 0x30000
+#define CGXX_GMP_PCS_MRX_CTL_LBK BIT_ULL(14)
+
+#define CGX_COMMAND_REG CGXX_SCRATCH1_REG
+#define CGX_EVENT_REG CGXX_SCRATCH0_REG
+#define CGX_CMD_TIMEOUT 2200 /* msecs */
+
+#define CGX_NVEC 37
+#define CGX_LMAC_FWI 0
+
+enum LMAC_TYPE {
+ LMAC_MODE_SGMII = 0,
+ LMAC_MODE_XAUI = 1,
+ LMAC_MODE_RXAUI = 2,
+ LMAC_MODE_10G_R = 3,
+ LMAC_MODE_40G_R = 4,
+ LMAC_MODE_QSGMII = 6,
+ LMAC_MODE_25G_R = 7,
+ LMAC_MODE_50G_R = 8,
+ LMAC_MODE_100G_R = 9,
+ LMAC_MODE_USXGMII = 10,
+ LMAC_MODE_MAX,
+};
+
+struct cgx_link_event {
+ struct cgx_link_user_info link_uinfo;
+ u8 cgx_id;
+ u8 lmac_id;
+};
+
+/**
+ * struct cgx_event_cb
+ * @notify_link_chg: callback for link change notification
+ * @data: data passed to callback function
+ */
+struct cgx_event_cb {
+ int (*notify_link_chg)(struct cgx_link_event *event, void *data);
+ void *data;
+};
+
+extern struct pci_driver cgx_driver;
+
+int cgx_get_cgx_cnt(void);
+int cgx_get_lmac_cnt(void *cgxd);
+void *cgx_get_pdata(int cgx_id);
+int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind);
+int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id);
+int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat);
+int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat);
+int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable);
+int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
+u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id);
+void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable);
+int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable);
+int cgx_get_link_info(void *cgxd, int lmac_id,
+ struct cgx_link_user_info *linfo);
+#endif /* CGX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
new file mode 100644
index 000000000000..fa17af3f4ba7
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 CGX driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __CGX_FW_INTF_H__
+#define __CGX_FW_INTF_H__
+
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+
+#define CGX_FIRMWARE_MAJOR_VER 1
+#define CGX_FIRMWARE_MINOR_VER 0
+
+#define CGX_EVENT_ACK 1UL
+
+/* CGX error types. set for cmd response status as CGX_STAT_FAIL */
+enum cgx_error_type {
+ CGX_ERR_NONE,
+ CGX_ERR_LMAC_NOT_ENABLED,
+ CGX_ERR_LMAC_MODE_INVALID,
+ CGX_ERR_REQUEST_ID_INVALID,
+ CGX_ERR_PREV_ACK_NOT_CLEAR,
+ CGX_ERR_PHY_LINK_DOWN,
+ CGX_ERR_PCS_RESET_FAIL,
+ CGX_ERR_AN_CPT_FAIL,
+ CGX_ERR_TX_NOT_IDLE,
+ CGX_ERR_RX_NOT_IDLE,
+ CGX_ERR_SPUX_BR_BLKLOCK_FAIL,
+ CGX_ERR_SPUX_RX_ALIGN_FAIL,
+ CGX_ERR_SPUX_TX_FAULT,
+ CGX_ERR_SPUX_RX_FAULT,
+ CGX_ERR_SPUX_RESET_FAIL,
+ CGX_ERR_SPUX_AN_RESET_FAIL,
+ CGX_ERR_SPUX_USX_AN_RESET_FAIL,
+ CGX_ERR_SMUX_RX_LINK_NOT_OK,
+ CGX_ERR_PCS_RECV_LINK_FAIL,
+ CGX_ERR_TRAINING_FAIL,
+ CGX_ERR_RX_EQU_FAIL,
+ CGX_ERR_SPUX_BER_FAIL,
+ CGX_ERR_SPUX_RSFEC_ALGN_FAIL, /* = 22 */
+};
+
+/* LINK speed types */
+enum cgx_link_speed {
+ CGX_LINK_NONE,
+ CGX_LINK_10M,
+ CGX_LINK_100M,
+ CGX_LINK_1G,
+ CGX_LINK_2HG,
+ CGX_LINK_5G,
+ CGX_LINK_10G,
+ CGX_LINK_20G,
+ CGX_LINK_25G,
+ CGX_LINK_40G,
+ CGX_LINK_50G,
+ CGX_LINK_100G,
+ CGX_LINK_SPEED_MAX,
+};
+
+/* REQUEST ID types. Input to firmware */
+enum cgx_cmd_id {
+ CGX_CMD_NONE,
+ CGX_CMD_GET_FW_VER,
+ CGX_CMD_GET_MAC_ADDR,
+ CGX_CMD_SET_MTU,
+ CGX_CMD_GET_LINK_STS, /* optional to user */
+ CGX_CMD_LINK_BRING_UP,
+ CGX_CMD_LINK_BRING_DOWN,
+ CGX_CMD_INTERNAL_LBK,
+ CGX_CMD_EXTERNAL_LBK,
+ CGX_CMD_HIGIG,
+ CGX_CMD_LINK_STATE_CHANGE,
+ CGX_CMD_MODE_CHANGE, /* hot plug support */
+ CGX_CMD_INTF_SHUTDOWN,
+ CGX_CMD_IRQ_ENABLE,
+ CGX_CMD_IRQ_DISABLE,
+};
+
+/* async event ids */
+enum cgx_evt_id {
+ CGX_EVT_NONE,
+ CGX_EVT_LINK_CHANGE,
+};
+
+/* event types - cause of interrupt */
+enum cgx_evt_type {
+ CGX_EVT_ASYNC,
+ CGX_EVT_CMD_RESP
+};
+
+enum cgx_stat {
+ CGX_STAT_SUCCESS,
+ CGX_STAT_FAIL
+};
+
+enum cgx_cmd_own {
+ CGX_CMD_OWN_NS,
+ CGX_CMD_OWN_FIRMWARE,
+};
+
+/* m - bit mask
+ * y - value to be written in the bitrange
+ * x - input value whose bitrange to be modified
+ */
+#define FIELD_SET(m, y, x) \
+ (((x) & ~(m)) | \
+ FIELD_PREP((m), (y)))
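+/* e.g. req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req) updates only the
+ * CMDREG_ID bitrange of req
+ */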
+
+/* scratchx(0) CSR used for ATF->non-secure SW communication.
+ * This acts as the status register.
+ * It provides details on command ack/status, command response and error details.
+ */
+#define EVTREG_ACK BIT_ULL(0)
+#define EVTREG_EVT_TYPE BIT_ULL(1)
+#define EVTREG_STAT BIT_ULL(2)
+#define EVTREG_ID GENMASK_ULL(8, 3)
+
+/* Response to command IDs with command status as CGX_STAT_FAIL
+ *
+ * Not applicable for commands :
+ * CGX_CMD_LINK_BRING_UP/DOWN/CGX_EVT_LINK_CHANGE
+ */
+#define EVTREG_ERRTYPE GENMASK_ULL(18, 9)
+
+/* Response to cmd ID as CGX_CMD_GET_FW_VER with cmd status as
+ * CGX_STAT_SUCCESS
+ */
+#define RESP_MAJOR_VER GENMASK_ULL(12, 9)
+#define RESP_MINOR_VER GENMASK_ULL(16, 13)
+
+/* Response to cmd ID as CGX_CMD_GET_MAC_ADDR with cmd status as
+ * CGX_STAT_SUCCESS
+ */
+#define RESP_MAC_ADDR GENMASK_ULL(56, 9)
+
+/* Response to cmd ID - CGX_CMD_LINK_BRING_UP/DOWN, event ID CGX_EVT_LINK_CHANGE
+ * status can be either CGX_STAT_FAIL or CGX_STAT_SUCCESS
+ *
+ * In case of CGX_STAT_FAIL, it indicates CGX configuration failed
+ * when processing link up/down/change command.
+ * Both err_type and current link status will be updated
+ *
+ * In case of CGX_STAT_SUCCESS, err_type will be CGX_ERR_NONE and current
+ * link status will be updated
+ */
+struct cgx_lnk_sts {
+ uint64_t reserved1:9;
+ uint64_t link_up:1;
+ uint64_t full_duplex:1;
+ uint64_t speed:4; /* cgx_link_speed */
+ uint64_t err_type:10;
+ uint64_t reserved2:39;
+};
+
+#define RESP_LINKSTAT_UP GENMASK_ULL(9, 9)
+#define RESP_LINKSTAT_FDUPLEX GENMASK_ULL(10, 10)
+#define RESP_LINKSTAT_SPEED GENMASK_ULL(14, 11)
+#define RESP_LINKSTAT_ERRTYPE GENMASK_ULL(24, 15)
+
+/* scratchx(1) CSR used for non-secure SW->ATF communication
+ * This CSR acts as a command register
+ */
+#define CMDREG_OWN BIT_ULL(0)
+#define CMDREG_ID GENMASK_ULL(7, 2)
+
+/* Any command using enable/disable as an argument needs
+ * to set this bitfield.
+ * Ex: Loopback, HiGig...
+ */
+#define CMDREG_ENABLE BIT_ULL(8)
+
+/* command argument to be passed for cmd ID - CGX_CMD_SET_MTU */
+#define CMDMTU_SIZE GENMASK_ULL(23, 8)
+
+/* command argument to be passed for cmd ID - CGX_CMD_LINK_CHANGE */
+#define CMDLINKCHANGE_LINKUP BIT_ULL(8)
+#define CMDLINKCHANGE_FULLDPLX BIT_ULL(9)
+#define CMDLINKCHANGE_SPEED GENMASK_ULL(13, 10)
+
+#endif /* __CGX_FW_INTF_H__ */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
new file mode 100644
index 000000000000..d39ada404c8f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef COMMON_H
+#define COMMON_H
+
+#include "rvu_struct.h"
+
+#define OTX2_ALIGN 128 /* Align to cacheline */
+
+#define Q_SIZE_16 0ULL /* 16 entries */
+#define Q_SIZE_64 1ULL /* 64 entries */
+#define Q_SIZE_256 2ULL
+#define Q_SIZE_1K 3ULL
+#define Q_SIZE_4K 4ULL
+#define Q_SIZE_16K 5ULL
+#define Q_SIZE_64K 6ULL
+#define Q_SIZE_256K 7ULL
+#define Q_SIZE_1M 8ULL /* Million entries */
+#define Q_SIZE_MIN Q_SIZE_16
+#define Q_SIZE_MAX Q_SIZE_1M
+
+#define Q_COUNT(x) (16ULL << (2 * (x)))
+#define Q_SIZE(x, n) ((ilog2(x) - (n)) / 2)
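+/* e.g. Q_COUNT(Q_SIZE_1K) == 1024 and Q_SIZE(1024, 4) == Q_SIZE_1K */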
+
+/* Admin queue info */
+
+/* Since we intend to add only one instruction at a time,
+ * keep queue size to its minimum.
+ */
+#define AQ_SIZE Q_SIZE_16
+/* HW head & tail pointer mask */
+#define AQ_PTR_MASK 0xFFFFF
+
+struct qmem {
+ void *base;
+ dma_addr_t iova;
+ int alloc_sz;
+ u8 entry_sz;
+ u8 align;
+ u32 qsize;
+};
+
+static inline int qmem_alloc(struct device *dev, struct qmem **q,
+ int qsize, int entry_sz)
+{
+ struct qmem *qmem;
+ int aligned_addr;
+
+ if (!qsize)
+ return -EINVAL;
+
+ *q = devm_kzalloc(dev, sizeof(*qmem), GFP_KERNEL);
+ if (!*q)
+ return -ENOMEM;
+ qmem = *q;
+
+ qmem->entry_sz = entry_sz;
+ qmem->alloc_sz = (qsize * entry_sz) + OTX2_ALIGN;
+ qmem->base = dma_zalloc_coherent(dev, qmem->alloc_sz,
+ &qmem->iova, GFP_KERNEL);
+ if (!qmem->base)
+ return -ENOMEM;
+
+ qmem->qsize = qsize;
+
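+ /* OTX2_ALIGN extra bytes were reserved above; bump base/iova to the
+ * next cacheline boundary.
+ */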
+ aligned_addr = ALIGN((u64)qmem->iova, OTX2_ALIGN);
+ qmem->align = (aligned_addr - qmem->iova);
+ qmem->base += qmem->align;
+ qmem->iova += qmem->align;
+ return 0;
+}
+
+static inline void qmem_free(struct device *dev, struct qmem *qmem)
+{
+ if (!qmem)
+ return;
+
+ if (qmem->base)
+ dma_free_coherent(dev, qmem->alloc_sz,
+ qmem->base - qmem->align,
+ qmem->iova - qmem->align);
+ devm_kfree(dev, qmem);
+}
+
+struct admin_queue {
+ struct qmem *inst;
+ struct qmem *res;
+ spinlock_t lock; /* Serialize inst enqueue from PFs */
+};
+
+/* NPA aura count */
+enum npa_aura_sz {
+ NPA_AURA_SZ_0,
+ NPA_AURA_SZ_128,
+ NPA_AURA_SZ_256,
+ NPA_AURA_SZ_512,
+ NPA_AURA_SZ_1K,
+ NPA_AURA_SZ_2K,
+ NPA_AURA_SZ_4K,
+ NPA_AURA_SZ_8K,
+ NPA_AURA_SZ_16K,
+ NPA_AURA_SZ_32K,
+ NPA_AURA_SZ_64K,
+ NPA_AURA_SZ_128K,
+ NPA_AURA_SZ_256K,
+ NPA_AURA_SZ_512K,
+ NPA_AURA_SZ_1M,
+ NPA_AURA_SZ_MAX,
+};
+
+#define NPA_AURA_COUNT(x) (1ULL << ((x) + 6))
+
+/* NPA AQ result structure for init/read/write of aura HW contexts */
+struct npa_aq_aura_res {
+ struct npa_aq_res_s res;
+ struct npa_aura_s aura_ctx;
+ struct npa_aura_s ctx_mask;
+};
+
+/* NPA AQ result structure for init/read/write of pool HW contexts */
+struct npa_aq_pool_res {
+ struct npa_aq_res_s res;
+ struct npa_pool_s pool_ctx;
+ struct npa_pool_s ctx_mask;
+};
+
+/* NIX Transmit schedulers */
+enum nix_scheduler {
+ NIX_TXSCH_LVL_SMQ = 0x0,
+ NIX_TXSCH_LVL_MDQ = 0x0,
+ NIX_TXSCH_LVL_TL4 = 0x1,
+ NIX_TXSCH_LVL_TL3 = 0x2,
+ NIX_TXSCH_LVL_TL2 = 0x3,
+ NIX_TXSCH_LVL_TL1 = 0x4,
+ NIX_TXSCH_LVL_CNT = 0x5,
+};
+
+/* NIX RX action operation */
+#define NIX_RX_ACTIONOP_DROP (0x0ull)
+#define NIX_RX_ACTIONOP_UCAST (0x1ull)
+#define NIX_RX_ACTIONOP_UCAST_IPSEC (0x2ull)
+#define NIX_RX_ACTIONOP_MCAST (0x3ull)
+#define NIX_RX_ACTIONOP_RSS (0x4ull)
+
+/* NIX TX action operation */
+#define NIX_TX_ACTIONOP_DROP (0x0ull)
+#define NIX_TX_ACTIONOP_UCAST_DEFAULT (0x1ull)
+#define NIX_TX_ACTIONOP_UCAST_CHAN (0x2ull)
+#define NIX_TX_ACTIONOP_MCAST (0x3ull)
+#define NIX_TX_ACTIONOP_DROP_VIOL (0x5ull)
+
+#define NPC_MCAM_KEY_X1 0
+#define NPC_MCAM_KEY_X2 1
+#define NPC_MCAM_KEY_X4 2
+
+#define NIX_INTF_RX 0
+#define NIX_INTF_TX 1
+
+#define NIX_INTF_TYPE_CGX 0
+#define NIX_INTF_TYPE_LBK 1
+
+#define MAX_LMAC_PKIND 12
+#define NIX_LINK_CGX_LMAC(a, b) (0 + 4 * (a) + (b))
+#define NIX_CHAN_CGX_LMAC_CHX(a, b, c) (0x800 + 0x100 * (a) + 0x10 * (b) + (c))
+
+/* NIX LSO format indices.
+ * As of now TSO is the only user, so indices are statically assigned.
+ */
+#define NIX_LSO_FORMAT_IDX_TSOV4 0
+#define NIX_LSO_FORMAT_IDX_TSOV6 1
+
+/* RSS info */
+#define MAX_RSS_GROUPS 8
+/* Group 0 has to be used in default pkt forwarding MCAM entries
+ * reserved for NIXLFs. Groups 1-7 can be used for RSS for ntuple
+ * filters.
+ */
+#define DEFAULT_RSS_CONTEXT_GROUP 0
+#define MAX_RSS_INDIR_TBL_SIZE 256 /* 1 << Max adder bits */
+
+/* NIX flow tag, key type flags */
+#define FLOW_KEY_TYPE_PORT BIT(0)
+#define FLOW_KEY_TYPE_IPV4 BIT(1)
+#define FLOW_KEY_TYPE_IPV6 BIT(2)
+#define FLOW_KEY_TYPE_TCP BIT(3)
+#define FLOW_KEY_TYPE_UDP BIT(4)
+#define FLOW_KEY_TYPE_SCTP BIT(5)
+
+/* NIX flow tag algorithm indices, max is 31 */
+enum {
+ FLOW_KEY_ALG_PORT,
+ FLOW_KEY_ALG_IP,
+ FLOW_KEY_ALG_TCP,
+ FLOW_KEY_ALG_UDP,
+ FLOW_KEY_ALG_SCTP,
+ FLOW_KEY_ALG_TCP_UDP,
+ FLOW_KEY_ALG_TCP_SCTP,
+ FLOW_KEY_ALG_UDP_SCTP,
+ FLOW_KEY_ALG_TCP_UDP_SCTP,
+ FLOW_KEY_ALG_MAX,
+};
+
+#endif /* COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
new file mode 100644
index 000000000000..85ba24a05774
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+
+#include "rvu_reg.h"
+#include "mbox.h"
+
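+/* Messages in each region start after the aligned mailbox header */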
+static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+
+void otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct mbox_hdr *tx_hdr, *rx_hdr;
+
+ tx_hdr = mdev->mbase + mbox->tx_start;
+ rx_hdr = mdev->mbase + mbox->rx_start;
+
+ spin_lock(&mdev->mbox_lock);
+ mdev->msg_size = 0;
+ mdev->rsp_size = 0;
+ tx_hdr->num_msgs = 0;
+ rx_hdr->num_msgs = 0;
+ spin_unlock(&mdev->mbox_lock);
+}
+EXPORT_SYMBOL(otx2_mbox_reset);
+
+void otx2_mbox_destroy(struct otx2_mbox *mbox)
+{
+ mbox->reg_base = NULL;
+ mbox->hwbase = NULL;
+
+ kfree(mbox->dev);
+ mbox->dev = NULL;
+}
+EXPORT_SYMBOL(otx2_mbox_destroy);
+
+int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
+ void *reg_base, int direction, int ndevs)
+{
+ struct otx2_mbox_dev *mdev;
+ int devid;
+
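+ /* One side's Tx region is the other side's Rx region, hence the
+ * swapped start/size constants for the responder directions.
+ */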
+ switch (direction) {
+ case MBOX_DIR_AFPF:
+ case MBOX_DIR_PFVF:
+ mbox->tx_start = MBOX_DOWN_TX_START;
+ mbox->rx_start = MBOX_DOWN_RX_START;
+ mbox->tx_size = MBOX_DOWN_TX_SIZE;
+ mbox->rx_size = MBOX_DOWN_RX_SIZE;
+ break;
+ case MBOX_DIR_PFAF:
+ case MBOX_DIR_VFPF:
+ mbox->tx_start = MBOX_DOWN_RX_START;
+ mbox->rx_start = MBOX_DOWN_TX_START;
+ mbox->tx_size = MBOX_DOWN_RX_SIZE;
+ mbox->rx_size = MBOX_DOWN_TX_SIZE;
+ break;
+ case MBOX_DIR_AFPF_UP:
+ case MBOX_DIR_PFVF_UP:
+ mbox->tx_start = MBOX_UP_TX_START;
+ mbox->rx_start = MBOX_UP_RX_START;
+ mbox->tx_size = MBOX_UP_TX_SIZE;
+ mbox->rx_size = MBOX_UP_RX_SIZE;
+ break;
+ case MBOX_DIR_PFAF_UP:
+ case MBOX_DIR_VFPF_UP:
+ mbox->tx_start = MBOX_UP_RX_START;
+ mbox->rx_start = MBOX_UP_TX_START;
+ mbox->tx_size = MBOX_UP_RX_SIZE;
+ mbox->rx_size = MBOX_UP_TX_SIZE;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ switch (direction) {
+ case MBOX_DIR_AFPF:
+ case MBOX_DIR_AFPF_UP:
+ mbox->trigger = RVU_AF_AFPF_MBOX0;
+ mbox->tr_shift = 4;
+ break;
+ case MBOX_DIR_PFAF:
+ case MBOX_DIR_PFAF_UP:
+ mbox->trigger = RVU_PF_PFAF_MBOX1;
+ mbox->tr_shift = 0;
+ break;
+ case MBOX_DIR_PFVF:
+ case MBOX_DIR_PFVF_UP:
+ mbox->trigger = RVU_PF_VFX_PFVF_MBOX0;
+ mbox->tr_shift = 12;
+ break;
+ case MBOX_DIR_VFPF:
+ case MBOX_DIR_VFPF_UP:
+ mbox->trigger = RVU_VF_VFPF_MBOX1;
+ mbox->tr_shift = 0;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ mbox->reg_base = reg_base;
+ mbox->hwbase = hwbase;
+ mbox->pdev = pdev;
+
+ mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL);
+ if (!mbox->dev) {
+ otx2_mbox_destroy(mbox);
+ return -ENOMEM;
+ }
+
+ mbox->ndevs = ndevs;
+ for (devid = 0; devid < ndevs; devid++) {
+ mdev = &mbox->dev[devid];
+ mdev->mbase = mbox->hwbase + (devid * MBOX_SIZE);
+ spin_lock_init(&mdev->mbox_lock);
+ /* Init header to reset value */
+ otx2_mbox_reset(mbox, devid);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(otx2_mbox_init);
+
+int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ int timeout = 0, sleep = 1;
+
+ while (mdev->num_msgs != mdev->msgs_acked) {
+ msleep(sleep);
+ timeout += sleep;
+ if (timeout >= MBOX_RSP_TIMEOUT)
+ return -EIO;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(otx2_mbox_wait_for_rsp);
+
+int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ unsigned long timeout = jiffies + 1 * HZ;
+
+ while (!time_after(jiffies, timeout)) {
+ if (mdev->num_msgs == mdev->msgs_acked)
+ return 0;
+ cpu_relax();
+ }
+ return -EIO;
+}
+EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp);
+
+void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct mbox_hdr *tx_hdr, *rx_hdr;
+
+ tx_hdr = mdev->mbase + mbox->tx_start;
+ rx_hdr = mdev->mbase + mbox->rx_start;
+
+ spin_lock(&mdev->mbox_lock);
+ /* Reset header for next messages */
+ mdev->msg_size = 0;
+ mdev->rsp_size = 0;
+ mdev->msgs_acked = 0;
+
+ /* Sync mbox data into memory */
+ smp_wmb();
+
+ /* num_msgs != 0 signals to the peer that the buffer has a number of
+ * messages. So this should be written after writing all the messages
+ * to the shared memory.
+ */
+ tx_hdr->num_msgs = mdev->num_msgs;
+ rx_hdr->num_msgs = 0;
+ spin_unlock(&mdev->mbox_lock);
+
+ /* The interrupt should be fired after num_msgs is written
+ * to the shared memory
+ */
+ writeq(1, (void __iomem *)mbox->reg_base +
+ (mbox->trigger | (devid << mbox->tr_shift)));
+}
+EXPORT_SYMBOL(otx2_mbox_msg_send);
+
+struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
+ int size, int size_rsp)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct mbox_msghdr *msghdr = NULL;
+
+ spin_lock(&mdev->mbox_lock);
+ size = ALIGN(size, MBOX_MSG_ALIGN);
+ size_rsp = ALIGN(size_rsp, MBOX_MSG_ALIGN);
+ /* Check if there is space in mailbox */
+ if ((mdev->msg_size + size) > mbox->tx_size - msgs_offset)
+ goto exit;
+ if ((mdev->rsp_size + size_rsp) > mbox->rx_size - msgs_offset)
+ goto exit;
+
+ if (mdev->msg_size == 0)
+ mdev->num_msgs = 0;
+ mdev->num_msgs++;
+
+ msghdr = mdev->mbase + mbox->tx_start + msgs_offset + mdev->msg_size;
+
+ /* Clear the whole msg region */
+ memset(msghdr, 0, sizeof(*msghdr) + size);
+ /* Init message header with reset values */
+ msghdr->ver = OTX2_MBOX_VERSION;
+ mdev->msg_size += size;
+ mdev->rsp_size += size_rsp;
+ msghdr->next_msgoff = mdev->msg_size + msgs_offset;
+exit:
+ spin_unlock(&mdev->mbox_lock);
+
+ return msghdr;
+}
+EXPORT_SYMBOL(otx2_mbox_alloc_msg_rsp);
+
+struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
+ struct mbox_msghdr *msg)
+{
+ unsigned long imsg = mbox->tx_start + msgs_offset;
+ unsigned long irsp = mbox->rx_start + msgs_offset;
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ u16 msgs;
+
+ if (mdev->num_msgs != mdev->msgs_acked)
+ return ERR_PTR(-ENODEV);
+
+ for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
+ struct mbox_msghdr *pmsg = mdev->mbase + imsg;
+ struct mbox_msghdr *prsp = mdev->mbase + irsp;
+
+ if (msg == pmsg) {
+ if (pmsg->id != prsp->id)
+ return ERR_PTR(-ENODEV);
+ return prsp;
+ }
+
+ imsg = pmsg->next_msgoff;
+ irsp = prsp->next_msgoff;
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL(otx2_mbox_get_rsp);
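+
+/* Illustrative sketch (not part of the driver): a typical request/response
+ * round trip from the sender's side using the helpers above.  devid 0 is
+ * assumed to address the AF peer; MBOX_MSG_READY and the msg_req/
+ * ready_msg_rsp formats come from mbox.h.
+ */
+static int __maybe_unused example_send_ready_msg(struct otx2_mbox *mbox)
+{
+	struct mbox_msghdr *req, *rsp;
+	int err;
+
+	req = otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(struct msg_req),
+				      sizeof(struct ready_msg_rsp));
+	if (!req)
+		return -ENOMEM;
+
+	req->id = MBOX_MSG_READY;
+	req->sig = OTX2_MBOX_REQ_SIG;
+	req->pcifunc = 0;	/* the sender's own PF_FUNC in a real driver */
+
+	otx2_mbox_msg_send(mbox, 0);
+	err = otx2_mbox_wait_for_rsp(mbox, 0);
+	if (err)
+		return err;
+
+	rsp = otx2_mbox_get_rsp(mbox, 0, req);
+	if (IS_ERR(rsp))
+		return PTR_ERR(rsp);
+
+	return rsp->rc;
+}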
+
+int
+otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, u16 pcifunc, u16 id)
+{
+ struct msg_rsp *rsp;
+
+ rsp = (struct msg_rsp *)
+ otx2_mbox_alloc_msg(mbox, devid, sizeof(*rsp));
+ if (!rsp)
+ return -ENOMEM;
+ rsp->hdr.id = id;
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
+ rsp->hdr.rc = MBOX_MSG_INVALID;
+ rsp->hdr.pcifunc = pcifunc;
+ return 0;
+}
+EXPORT_SYMBOL(otx2_reply_invalid_msg);
+
+bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ bool ret;
+
+ spin_lock(&mdev->mbox_lock);
+ ret = mdev->num_msgs != 0;
+ spin_unlock(&mdev->mbox_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(otx2_mbox_nonempty);
+
+const char *otx2_mbox_id2name(u16 id)
+{
+ switch (id) {
+#define M(_name, _id, _1, _2) case _id: return # _name;
+ MBOX_MESSAGES
+#undef M
+ default:
+ return "INVALID ID";
+ }
+}
+EXPORT_SYMBOL(otx2_mbox_id2name);
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
new file mode 100644
index 000000000000..a15a59c9a239
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -0,0 +1,525 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef MBOX_H
+#define MBOX_H
+
+#include <linux/etherdevice.h>
+#include <linux/sizes.h>
+
+#include "rvu_struct.h"
+#include "common.h"
+
+#define MBOX_SIZE SZ_64K
+
+/* AF/PF mbox: PF initiated, PF/VF mbox: VF initiated */
+#define MBOX_DOWN_RX_START 0
+#define MBOX_DOWN_RX_SIZE (46 * SZ_1K)
+#define MBOX_DOWN_TX_START (MBOX_DOWN_RX_START + MBOX_DOWN_RX_SIZE)
+#define MBOX_DOWN_TX_SIZE (16 * SZ_1K)
+/* AF/PF mbox: AF initiated, PF/VF mbox: PF initiated */
+#define MBOX_UP_RX_START (MBOX_DOWN_TX_START + MBOX_DOWN_TX_SIZE)
+#define MBOX_UP_RX_SIZE SZ_1K
+#define MBOX_UP_TX_START (MBOX_UP_RX_START + MBOX_UP_RX_SIZE)
+#define MBOX_UP_TX_SIZE SZ_1K
+
+#if MBOX_UP_TX_SIZE + MBOX_UP_TX_START != MBOX_SIZE
+# error "incorrect mailbox area sizes"
+#endif
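+
+/* Resulting layout of each 64KB mailbox region, as implied by the macros
+ * above (offsets from the start of the region).  Which area a given side
+ * treats as Tx or Rx depends on the MBOX_DIR_* value passed to
+ * otx2_mbox_init():
+ *
+ *	 0K - 46K : MBOX_DOWN_RX area (46KB)
+ *	46K - 62K : MBOX_DOWN_TX area (16KB)
+ *	62K - 63K : MBOX_UP_RX area    (1KB)
+ *	63K - 64K : MBOX_UP_TX area    (1KB)
+ */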
+
+#define INTR_MASK(pfvfs) ((pfvfs < 64) ? (BIT_ULL(pfvfs) - 1) : (~0ull))
+
+#define MBOX_RSP_TIMEOUT 1000 /* in ms, Time to wait for mbox response */
+
+#define MBOX_MSG_ALIGN 16 /* Align mbox msg start to 16bytes */
+
+/* Mailbox directions */
+#define MBOX_DIR_AFPF 0 /* AF replies to PF */
+#define MBOX_DIR_PFAF 1 /* PF sends messages to AF */
+#define MBOX_DIR_PFVF 2 /* PF replies to VF */
+#define MBOX_DIR_VFPF 3 /* VF sends messages to PF */
+#define MBOX_DIR_AFPF_UP 4 /* AF sends messages to PF */
+#define MBOX_DIR_PFAF_UP 5 /* PF replies to AF */
+#define MBOX_DIR_PFVF_UP 6 /* PF sends messages to VF */
+#define MBOX_DIR_VFPF_UP 7 /* VF replies to PF */
+
+struct otx2_mbox_dev {
+ void *mbase; /* This dev's mbox region */
+ spinlock_t mbox_lock;
+ u16 msg_size; /* Total msg size to be sent */
+	u16 rsp_size; /* Total rsp size expected for the queued msgs */
+ u16 num_msgs; /* No of msgs sent or waiting for response */
+ u16 msgs_acked; /* No of msgs for which response is received */
+};
+
+struct otx2_mbox {
+ struct pci_dev *pdev;
+ void *hwbase; /* Mbox region advertised by HW */
+	void *reg_base; /* CSR base for this dev */
+ u64 trigger; /* Trigger mbox notification */
+ u16 tr_shift; /* Mbox trigger shift */
+ u64 rx_start; /* Offset of Rx region in mbox memory */
+ u64 tx_start; /* Offset of Tx region in mbox memory */
+ u16 rx_size; /* Size of Rx region */
+ u16 tx_size; /* Size of Tx region */
+ u16 ndevs; /* The number of peers */
+ struct otx2_mbox_dev *dev;
+};
+
+/* Header which precedes all mbox messages */
+struct mbox_hdr {
+ u16 num_msgs; /* No of msgs embedded */
+};
+
+/* Header which precedes every msg and is also part of it */
+struct mbox_msghdr {
+ u16 pcifunc; /* Who's sending this msg */
+ u16 id; /* Mbox message ID */
+#define OTX2_MBOX_REQ_SIG (0xdead)
+#define OTX2_MBOX_RSP_SIG (0xbeef)
+ u16 sig; /* Signature, for validating corrupted msgs */
+#define OTX2_MBOX_VERSION (0x0001)
+ u16 ver; /* Version of msg's structure for this ID */
+ u16 next_msgoff; /* Offset of next msg within mailbox region */
+ int rc; /* Msg process'ed response code */
+};
+
+void otx2_mbox_reset(struct otx2_mbox *mbox, int devid);
+void otx2_mbox_destroy(struct otx2_mbox *mbox);
+int otx2_mbox_init(struct otx2_mbox *mbox, void __force *hwbase,
+ struct pci_dev *pdev, void __force *reg_base,
+ int direction, int ndevs);
+void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid);
+int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid);
+int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid);
+struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
+ int size, int size_rsp);
+struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
+ struct mbox_msghdr *msg);
+int otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid,
+ u16 pcifunc, u16 id);
+bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid);
+const char *otx2_mbox_id2name(u16 id);
+static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox,
+ int devid, int size)
+{
+ return otx2_mbox_alloc_msg_rsp(mbox, devid, size, 0);
+}
+
+/* Mailbox message types */
+#define MBOX_MSG_MASK 0xFFFF
+#define MBOX_MSG_INVALID 0xFFFE
+#define MBOX_MSG_MAX 0xFFFF
+
+#define MBOX_MESSAGES \
+/* Generic mbox IDs (range 0x000 - 0x1FF) */ \
+M(READY, 0x001, msg_req, ready_msg_rsp) \
+M(ATTACH_RESOURCES, 0x002, rsrc_attach, msg_rsp) \
+M(DETACH_RESOURCES, 0x003, rsrc_detach, msg_rsp) \
+M(MSIX_OFFSET, 0x004, msg_req, msix_offset_rsp) \
+/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
+M(CGX_START_RXTX, 0x200, msg_req, msg_rsp) \
+M(CGX_STOP_RXTX, 0x201, msg_req, msg_rsp) \
+M(CGX_STATS, 0x202, msg_req, cgx_stats_rsp) \
+M(CGX_MAC_ADDR_SET, 0x203, cgx_mac_addr_set_or_get, \
+ cgx_mac_addr_set_or_get) \
+M(CGX_MAC_ADDR_GET, 0x204, cgx_mac_addr_set_or_get, \
+ cgx_mac_addr_set_or_get) \
+M(CGX_PROMISC_ENABLE, 0x205, msg_req, msg_rsp) \
+M(CGX_PROMISC_DISABLE, 0x206, msg_req, msg_rsp) \
+M(CGX_START_LINKEVENTS, 0x207, msg_req, msg_rsp) \
+M(CGX_STOP_LINKEVENTS, 0x208, msg_req, msg_rsp) \
+M(CGX_GET_LINKINFO, 0x209, msg_req, cgx_link_info_msg) \
+M(CGX_INTLBK_ENABLE, 0x20A, msg_req, msg_rsp) \
+M(CGX_INTLBK_DISABLE, 0x20B, msg_req, msg_rsp) \
+/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
+M(NPA_LF_ALLOC, 0x400, npa_lf_alloc_req, npa_lf_alloc_rsp) \
+M(NPA_LF_FREE, 0x401, msg_req, msg_rsp) \
+M(NPA_AQ_ENQ, 0x402, npa_aq_enq_req, npa_aq_enq_rsp) \
+M(NPA_HWCTX_DISABLE, 0x403, hwctx_disable_req, msg_rsp) \
+/* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */ \
+/* TIM mbox IDs (range 0x800 - 0x9FF) */ \
+/* CPT mbox IDs (range 0xA00 - 0xBFF) */ \
+/* NPC mbox IDs (range 0x6000 - 0x7FFF) */ \
+/* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \
+M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc_req, nix_lf_alloc_rsp) \
+M(NIX_LF_FREE, 0x8001, msg_req, msg_rsp) \
+M(NIX_AQ_ENQ, 0x8002, nix_aq_enq_req, nix_aq_enq_rsp) \
+M(NIX_HWCTX_DISABLE, 0x8003, hwctx_disable_req, msg_rsp) \
+M(NIX_TXSCH_ALLOC, 0x8004, nix_txsch_alloc_req, nix_txsch_alloc_rsp) \
+M(NIX_TXSCH_FREE, 0x8005, nix_txsch_free_req, msg_rsp) \
+M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_config, msg_rsp) \
+M(NIX_STATS_RST, 0x8007, msg_req, msg_rsp) \
+M(NIX_VTAG_CFG, 0x8008, nix_vtag_config, msg_rsp) \
+M(NIX_RSS_FLOWKEY_CFG, 0x8009, nix_rss_flowkey_cfg, msg_rsp) \
+M(NIX_SET_MAC_ADDR, 0x800a, nix_set_mac_addr, msg_rsp) \
+M(NIX_SET_RX_MODE, 0x800b, nix_rx_mode, msg_rsp)
+
+/* Messages initiated by AF (range 0xC00 - 0xDFF) */
+#define MBOX_UP_CGX_MESSAGES \
+M(CGX_LINK_EVENT, 0xC00, cgx_link_info_msg, msg_rsp)
+
+enum {
+#define M(_name, _id, _1, _2) MBOX_MSG_ ## _name = _id,
+MBOX_MESSAGES
+MBOX_UP_CGX_MESSAGES
+#undef M
+};
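+
+/* For reference: each M(...) entry above expands here to an ID constant,
+ * e.g. M(READY, 0x001, msg_req, ready_msg_rsp) becomes
+ * MBOX_MSG_READY = 0x001.  The MBOX_MESSAGES list is reused with a
+ * different M() definition in otx2_mbox_id2name() to map an ID back to
+ * its name.
+ */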
+
+/* Mailbox message formats */
+
+#define RVU_DEFAULT_PF_FUNC 0xFFFF
+
+/* Generic request msg used for those mbox messages which
+ * don't send any data in the request.
+ */
+struct msg_req {
+ struct mbox_msghdr hdr;
+};
+
+/* Generic response msg used as an ack or response for those mbox
+ * messages which don't have a specific rsp msg format.
+ */
+struct msg_rsp {
+ struct mbox_msghdr hdr;
+};
+
+struct ready_msg_rsp {
+ struct mbox_msghdr hdr;
+ u16 sclk_feq; /* SCLK frequency */
+};
+
+/* Structure for requesting resource provisioning.
+ * The 'modify' flag is to be used when either requesting more of,
+ * or detaching part of, a certain resource type.
+ * The rest of the fields specify how many resources of which type
+ * are to be attached.
+ */
+struct rsrc_attach {
+ struct mbox_msghdr hdr;
+ u8 modify:1;
+ u8 npalf:1;
+ u8 nixlf:1;
+ u16 sso;
+ u16 ssow;
+ u16 timlfs;
+ u16 cptlfs;
+};
+
+/* Structure for relinquishing resources.
+ * The 'partial' flag is to be used when relinquishing resources of
+ * only certain types. If not set, all resources of all
+ * types provisioned to the RVU function will be detached.
+ */
+struct rsrc_detach {
+ struct mbox_msghdr hdr;
+ u8 partial:1;
+ u8 npalf:1;
+ u8 nixlf:1;
+ u8 sso:1;
+ u8 ssow:1;
+ u8 timlfs:1;
+ u8 cptlfs:1;
+};
+
+#define MSIX_VECTOR_INVALID 0xFFFF
+#define MAX_RVU_BLKLF_CNT 256
+
+struct msix_offset_rsp {
+ struct mbox_msghdr hdr;
+ u16 npa_msixoff;
+ u16 nix_msixoff;
+ u8 sso;
+ u8 ssow;
+ u8 timlfs;
+ u8 cptlfs;
+ u16 sso_msixoff[MAX_RVU_BLKLF_CNT];
+ u16 ssow_msixoff[MAX_RVU_BLKLF_CNT];
+ u16 timlf_msixoff[MAX_RVU_BLKLF_CNT];
+ u16 cptlf_msixoff[MAX_RVU_BLKLF_CNT];
+};
+
+/* CGX mbox message formats */
+
+struct cgx_stats_rsp {
+ struct mbox_msghdr hdr;
+#define CGX_RX_STATS_COUNT 13
+#define CGX_TX_STATS_COUNT 18
+ u64 rx_stats[CGX_RX_STATS_COUNT];
+ u64 tx_stats[CGX_TX_STATS_COUNT];
+};
+
+/* Structure for requesting the operation to
+ * set or get the MAC address of a CGX interface
+ */
+struct cgx_mac_addr_set_or_get {
+ struct mbox_msghdr hdr;
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct cgx_link_user_info {
+ uint64_t link_up:1;
+ uint64_t full_duplex:1;
+ uint64_t lmac_type_id:4;
+ uint64_t speed:20; /* speed in Mbps */
+#define LMACTYPE_STR_LEN 16
+ char lmac_type[LMACTYPE_STR_LEN];
+};
+
+struct cgx_link_info_msg {
+ struct mbox_msghdr hdr;
+ struct cgx_link_user_info link_info;
+};
+
+/* NPA mbox message formats */
+
+/* NPA mailbox error codes
+ * Range 301 - 400.
+ */
+enum npa_af_status {
+ NPA_AF_ERR_PARAM = -301,
+ NPA_AF_ERR_AQ_FULL = -302,
+ NPA_AF_ERR_AQ_ENQUEUE = -303,
+ NPA_AF_ERR_AF_LF_INVALID = -304,
+ NPA_AF_ERR_AF_LF_ALLOC = -305,
+ NPA_AF_ERR_LF_RESET = -306,
+};
+
+/* For NPA LF context alloc and init */
+struct npa_lf_alloc_req {
+ struct mbox_msghdr hdr;
+ int node;
+ int aura_sz; /* No of auras */
+ u32 nr_pools; /* No of pools */
+};
+
+struct npa_lf_alloc_rsp {
+ struct mbox_msghdr hdr;
+ u32 stack_pg_ptrs; /* No of ptrs per stack page */
+ u32 stack_pg_bytes; /* Size of stack page */
+ u16 qints; /* NPA_AF_CONST::QINTS */
+};
+
+/* NPA AQ enqueue msg */
+struct npa_aq_enq_req {
+ struct mbox_msghdr hdr;
+ u32 aura_id;
+ u8 ctype;
+ u8 op;
+ union {
+ /* Valid when op == WRITE/INIT and ctype == AURA.
+ * LF fills the pool_id in aura.pool_addr. AF will translate
+ * the pool_id to pool context pointer.
+ */
+ struct npa_aura_s aura;
+ /* Valid when op == WRITE/INIT and ctype == POOL */
+ struct npa_pool_s pool;
+ };
+ /* Mask data when op == WRITE (1=write, 0=don't write) */
+ union {
+ /* Valid when op == WRITE and ctype == AURA */
+ struct npa_aura_s aura_mask;
+ /* Valid when op == WRITE and ctype == POOL */
+ struct npa_pool_s pool_mask;
+ };
+};
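+
+/* Illustrative sketch (not part of the driver): initialising an aura via an
+ * NPA AQ INIT request.  NPA_AQ_CTYPE_AURA and NPA_AQ_INSTOP_INIT are assumed
+ * enumerator names (expected to come from rvu_struct.h).
+ */
+static inline void example_fill_npa_aura_init(struct npa_aq_enq_req *req,
+					       u32 aura_id, u64 pool_id)
+{
+	req->aura_id = aura_id;
+	req->ctype = NPA_AQ_CTYPE_AURA;		/* assumed enumerator name */
+	req->op = NPA_AQ_INSTOP_INIT;		/* assumed enumerator name */
+	/* The LF passes a pool_id here; the AF translates it into the pool
+	 * context pointer (see the union's comment above).
+	 */
+	req->aura.pool_addr = pool_id;
+}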
+
+struct npa_aq_enq_rsp {
+ struct mbox_msghdr hdr;
+ union {
+ /* Valid when op == READ and ctype == AURA */
+ struct npa_aura_s aura;
+ /* Valid when op == READ and ctype == POOL */
+ struct npa_pool_s pool;
+ };
+};
+
+/* Disable all contexts of type 'ctype' */
+struct hwctx_disable_req {
+ struct mbox_msghdr hdr;
+ u8 ctype;
+};
+
+/* NIX mailbox error codes
+ * Range 401 - 500.
+ */
+enum nix_af_status {
+ NIX_AF_ERR_PARAM = -401,
+ NIX_AF_ERR_AQ_FULL = -402,
+ NIX_AF_ERR_AQ_ENQUEUE = -403,
+ NIX_AF_ERR_AF_LF_INVALID = -404,
+ NIX_AF_ERR_AF_LF_ALLOC = -405,
+ NIX_AF_ERR_TLX_ALLOC_FAIL = -406,
+ NIX_AF_ERR_TLX_INVALID = -407,
+ NIX_AF_ERR_RSS_SIZE_INVALID = -408,
+ NIX_AF_ERR_RSS_GRPS_INVALID = -409,
+ NIX_AF_ERR_FRS_INVALID = -410,
+ NIX_AF_ERR_RX_LINK_INVALID = -411,
+ NIX_AF_INVAL_TXSCHQ_CFG = -412,
+ NIX_AF_SMQ_FLUSH_FAILED = -413,
+ NIX_AF_ERR_LF_RESET = -414,
+};
+
+/* For NIX LF context alloc and init */
+struct nix_lf_alloc_req {
+ struct mbox_msghdr hdr;
+ int node;
+ u32 rq_cnt; /* No of receive queues */
+ u32 sq_cnt; /* No of send queues */
+ u32 cq_cnt; /* No of completion queues */
+ u8 xqe_sz;
+ u16 rss_sz;
+ u8 rss_grps;
+ u16 npa_func;
+ u16 sso_func;
+ u64 rx_cfg; /* See NIX_AF_LF(0..127)_RX_CFG */
+};
+
+struct nix_lf_alloc_rsp {
+ struct mbox_msghdr hdr;
+ u16 sqb_size;
+ u16 rx_chan_base;
+ u16 tx_chan_base;
+ u8 rx_chan_cnt; /* total number of RX channels */
+ u8 tx_chan_cnt; /* total number of TX channels */
+ u8 lso_tsov4_idx;
+ u8 lso_tsov6_idx;
+ u8 mac_addr[ETH_ALEN];
+};
+
+/* NIX AQ enqueue msg */
+struct nix_aq_enq_req {
+ struct mbox_msghdr hdr;
+ u32 qidx;
+ u8 ctype;
+ u8 op;
+ union {
+ struct nix_rq_ctx_s rq;
+ struct nix_sq_ctx_s sq;
+ struct nix_cq_ctx_s cq;
+ struct nix_rsse_s rss;
+ struct nix_rx_mce_s mce;
+ };
+ union {
+ struct nix_rq_ctx_s rq_mask;
+ struct nix_sq_ctx_s sq_mask;
+ struct nix_cq_ctx_s cq_mask;
+ struct nix_rsse_s rss_mask;
+ struct nix_rx_mce_s mce_mask;
+ };
+};
+
+struct nix_aq_enq_rsp {
+ struct mbox_msghdr hdr;
+ union {
+ struct nix_rq_ctx_s rq;
+ struct nix_sq_ctx_s sq;
+ struct nix_cq_ctx_s cq;
+ struct nix_rsse_s rss;
+ struct nix_rx_mce_s mce;
+ };
+};
+
+/* Tx scheduler/shaper mailbox messages */
+
+#define MAX_TXSCHQ_PER_FUNC 128
+
+struct nix_txsch_alloc_req {
+ struct mbox_msghdr hdr;
+ /* Scheduler queue count request at each level */
+ u16 schq_contig[NIX_TXSCH_LVL_CNT]; /* No of contiguous queues */
+ u16 schq[NIX_TXSCH_LVL_CNT]; /* No of non-contiguous queues */
+};
+
+struct nix_txsch_alloc_rsp {
+ struct mbox_msghdr hdr;
+ /* Scheduler queue count allocated at each level */
+ u16 schq_contig[NIX_TXSCH_LVL_CNT];
+ u16 schq[NIX_TXSCH_LVL_CNT];
+ /* Scheduler queue list allocated at each level */
+ u16 schq_contig_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ u16 schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+};
+
+struct nix_txsch_free_req {
+ struct mbox_msghdr hdr;
+#define TXSCHQ_FREE_ALL BIT_ULL(0)
+ u16 flags;
+ /* Scheduler queue level to be freed */
+ u16 schq_lvl;
+	/* Scheduler queue to be freed */
+ u16 schq;
+};
+
+struct nix_txschq_config {
+ struct mbox_msghdr hdr;
+ u8 lvl; /* SMQ/MDQ/TL4/TL3/TL2/TL1 */
+#define TXSCHQ_IDX_SHIFT 16
+#define TXSCHQ_IDX_MASK (BIT_ULL(10) - 1)
+#define TXSCHQ_IDX(reg, shift) (((reg) >> (shift)) & TXSCHQ_IDX_MASK)
+ u8 num_regs;
+#define MAX_REGS_PER_MBOX_MSG 20
+ u64 reg[MAX_REGS_PER_MBOX_MSG];
+ u64 regval[MAX_REGS_PER_MBOX_MSG];
+};
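+
+/* For example, when a reg[] entry encodes a scheduler queue index in
+ * bits <25:16> of the register offset, it can be recovered with:
+ *
+ *	u16 schq = TXSCHQ_IDX(reg[i], TXSCHQ_IDX_SHIFT);
+ */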
+
+struct nix_vtag_config {
+ struct mbox_msghdr hdr;
+ u8 vtag_size;
+ /* cfg_type is '0' for tx vlan cfg
+ * cfg_type is '1' for rx vlan cfg
+ */
+ u8 cfg_type;
+ union {
+ /* valid when cfg_type is '0' */
+ struct {
+ /* tx vlan0 tag(C-VLAN) */
+ u64 vlan0;
+ /* tx vlan1 tag(S-VLAN) */
+ u64 vlan1;
+ /* insert tx vlan tag */
+ u8 insert_vlan :1;
+ /* insert tx double vlan tag */
+ u8 double_vlan :1;
+ } tx;
+
+ /* valid when cfg_type is '1' */
+ struct {
+ /* rx vtag type index */
+ u8 vtag_type;
+ /* rx vtag strip */
+ u8 strip_vtag :1;
+ /* rx vtag capture */
+ u8 capture_vtag :1;
+ } rx;
+ };
+};
+
+struct nix_rss_flowkey_cfg {
+ struct mbox_msghdr hdr;
+ int mcam_index; /* MCAM entry index to modify */
+ u32 flowkey_cfg; /* Flowkey types selected */
+ u8 group; /* RSS context or group */
+};
+
+struct nix_set_mac_addr {
+ struct mbox_msghdr hdr;
+ u8 mac_addr[ETH_ALEN]; /* MAC address to be set for this pcifunc */
+};
+
+struct nix_rx_mode {
+ struct mbox_msghdr hdr;
+#define NIX_RX_MODE_UCAST BIT(0)
+#define NIX_RX_MODE_PROMISC BIT(1)
+#define NIX_RX_MODE_ALLMULTI BIT(2)
+ u16 mode;
+};
+
+#endif /* MBOX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
new file mode 100644
index 000000000000..f98b0113def3
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -0,0 +1,262 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef NPC_H
+#define NPC_H
+
+enum NPC_LID_E {
+ NPC_LID_LA = 0,
+ NPC_LID_LB,
+ NPC_LID_LC,
+ NPC_LID_LD,
+ NPC_LID_LE,
+ NPC_LID_LF,
+ NPC_LID_LG,
+ NPC_LID_LH,
+};
+
+#define NPC_LT_NA 0
+
+enum npc_kpu_la_ltype {
+ NPC_LT_LA_8023 = 1,
+ NPC_LT_LA_ETHER,
+};
+
+enum npc_kpu_lb_ltype {
+ NPC_LT_LB_ETAG = 1,
+ NPC_LT_LB_CTAG,
+ NPC_LT_LB_STAG,
+ NPC_LT_LB_BTAG,
+ NPC_LT_LB_QINQ,
+ NPC_LT_LB_ITAG,
+};
+
+enum npc_kpu_lc_ltype {
+ NPC_LT_LC_IP = 1,
+ NPC_LT_LC_IP6,
+ NPC_LT_LC_ARP,
+ NPC_LT_LC_RARP,
+ NPC_LT_LC_MPLS,
+ NPC_LT_LC_NSH,
+ NPC_LT_LC_PTP,
+ NPC_LT_LC_FCOE,
+};
+
+/* Don't modify Ltypes up to SCTP, otherwise it will
+ * affect flow tag calculation and thus RSS.
+ */
+enum npc_kpu_ld_ltype {
+ NPC_LT_LD_TCP = 1,
+ NPC_LT_LD_UDP,
+ NPC_LT_LD_ICMP,
+ NPC_LT_LD_SCTP,
+ NPC_LT_LD_IGMP,
+ NPC_LT_LD_ICMP6,
+ NPC_LT_LD_ESP,
+ NPC_LT_LD_AH,
+ NPC_LT_LD_GRE,
+ NPC_LT_LD_GRE_MPLS,
+ NPC_LT_LD_GRE_NSH,
+ NPC_LT_LD_TU_MPLS,
+};
+
+enum npc_kpu_le_ltype {
+ NPC_LT_LE_TU_ETHER = 1,
+ NPC_LT_LE_TU_PPP,
+ NPC_LT_LE_TU_MPLS_IN_NSH,
+ NPC_LT_LE_TU_3RD_NSH,
+};
+
+enum npc_kpu_lf_ltype {
+ NPC_LT_LF_TU_IP = 1,
+ NPC_LT_LF_TU_IP6,
+ NPC_LT_LF_TU_ARP,
+ NPC_LT_LF_TU_MPLS_IP,
+ NPC_LT_LF_TU_MPLS_IP6,
+ NPC_LT_LF_TU_MPLS_ETHER,
+};
+
+enum npc_kpu_lg_ltype {
+ NPC_LT_LG_TU_TCP = 1,
+ NPC_LT_LG_TU_UDP,
+ NPC_LT_LG_TU_SCTP,
+ NPC_LT_LG_TU_ICMP,
+ NPC_LT_LG_TU_IGMP,
+ NPC_LT_LG_TU_ICMP6,
+ NPC_LT_LG_TU_ESP,
+ NPC_LT_LG_TU_AH,
+};
+
+enum npc_kpu_lh_ltype {
+ NPC_LT_LH_TCP_DATA = 1,
+ NPC_LT_LH_HTTP_DATA,
+ NPC_LT_LH_HTTPS_DATA,
+ NPC_LT_LH_PPTP_DATA,
+ NPC_LT_LH_UDP_DATA,
+};
+
+struct npc_kpu_profile_cam {
+ u8 state;
+ u8 state_mask;
+ u16 dp0;
+ u16 dp0_mask;
+ u16 dp1;
+ u16 dp1_mask;
+ u16 dp2;
+ u16 dp2_mask;
+};
+
+struct npc_kpu_profile_action {
+ u8 errlev;
+ u8 errcode;
+ u8 dp0_offset;
+ u8 dp1_offset;
+ u8 dp2_offset;
+ u8 bypass_count;
+ u8 parse_done;
+ u8 next_state;
+ u8 ptr_advance;
+ u8 cap_ena;
+ u8 lid;
+ u8 ltype;
+ u8 flags;
+ u8 offset;
+ u8 mask;
+ u8 right;
+ u8 shift;
+};
+
+struct npc_kpu_profile {
+ int cam_entries;
+ int action_entries;
+ struct npc_kpu_profile_cam *cam;
+ struct npc_kpu_profile_action *action;
+};
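+
+/* Note: a profile's cam[] and action[] arrays are expected to be
+ * index-aligned, i.e. cam[i] describes the match for which action[i]
+ * is applied when the profile is programmed into a KPU.
+ */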
+
+/* NPC KPU register formats */
+struct npc_kpu_cam {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd_63_56 : 8;
+ u64 state : 8;
+ u64 dp2_data : 16;
+ u64 dp1_data : 16;
+ u64 dp0_data : 16;
+#else
+ u64 dp0_data : 16;
+ u64 dp1_data : 16;
+ u64 dp2_data : 16;
+ u64 state : 8;
+ u64 rsvd_63_56 : 8;
+#endif
+};
+
+struct npc_kpu_action0 {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd_63_57 : 7;
+ u64 byp_count : 3;
+ u64 capture_ena : 1;
+ u64 parse_done : 1;
+ u64 next_state : 8;
+ u64 rsvd_43 : 1;
+ u64 capture_lid : 3;
+ u64 capture_ltype : 4;
+ u64 capture_flags : 8;
+ u64 ptr_advance : 8;
+ u64 var_len_offset : 8;
+ u64 var_len_mask : 8;
+ u64 var_len_right : 1;
+ u64 var_len_shift : 3;
+#else
+ u64 var_len_shift : 3;
+ u64 var_len_right : 1;
+ u64 var_len_mask : 8;
+ u64 var_len_offset : 8;
+ u64 ptr_advance : 8;
+ u64 capture_flags : 8;
+ u64 capture_ltype : 4;
+ u64 capture_lid : 3;
+ u64 rsvd_43 : 1;
+ u64 next_state : 8;
+ u64 parse_done : 1;
+ u64 capture_ena : 1;
+ u64 byp_count : 3;
+ u64 rsvd_63_57 : 7;
+#endif
+};
+
+struct npc_kpu_action1 {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd_63_36 : 28;
+ u64 errlev : 4;
+ u64 errcode : 8;
+ u64 dp2_offset : 8;
+ u64 dp1_offset : 8;
+ u64 dp0_offset : 8;
+#else
+ u64 dp0_offset : 8;
+ u64 dp1_offset : 8;
+ u64 dp2_offset : 8;
+ u64 errcode : 8;
+ u64 errlev : 4;
+ u64 rsvd_63_36 : 28;
+#endif
+};
+
+struct npc_kpu_pkind_cpi_def {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 ena : 1;
+ u64 rsvd_62_59 : 4;
+ u64 lid : 3;
+ u64 ltype_match : 4;
+ u64 ltype_mask : 4;
+ u64 flags_match : 8;
+ u64 flags_mask : 8;
+ u64 add_offset : 8;
+ u64 add_mask : 8;
+ u64 rsvd_15 : 1;
+ u64 add_shift : 3;
+ u64 rsvd_11_10 : 2;
+ u64 cpi_base : 10;
+#else
+ u64 cpi_base : 10;
+ u64 rsvd_11_10 : 2;
+ u64 add_shift : 3;
+ u64 rsvd_15 : 1;
+ u64 add_mask : 8;
+ u64 add_offset : 8;
+ u64 flags_mask : 8;
+ u64 flags_match : 8;
+ u64 ltype_mask : 4;
+ u64 ltype_match : 4;
+ u64 lid : 3;
+ u64 rsvd_62_59 : 4;
+ u64 ena : 1;
+#endif
+};
+
+struct nix_rx_action {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd_63_61 :3;
+ u64 flow_key_alg :5;
+ u64 match_id :16;
+ u64 index :20;
+ u64 pf_func :16;
+ u64 op :4;
+#else
+ u64 op :4;
+ u64 pf_func :16;
+ u64 index :20;
+ u64 match_id :16;
+ u64 flow_key_alg :5;
+ u64 rsvd_63_61 :3;
+#endif
+};
+
+#endif /* NPC_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
new file mode 100644
index 000000000000..b2ce957605bb
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
@@ -0,0 +1,5709 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef NPC_PROFILE_H
+#define NPC_PROFILE_H
+
+#define NPC_ETYPE_IP 0x0800
+#define NPC_ETYPE_IP6 0x86dd
+#define NPC_ETYPE_ARP 0x0806
+#define NPC_ETYPE_RARP 0x8035
+#define NPC_ETYPE_MPLSU 0x8847
+#define NPC_ETYPE_MPLSM 0x8848
+#define NPC_ETYPE_ETAG 0x893f
+#define NPC_ETYPE_CTAG 0x8100
+#define NPC_ETYPE_SBTAG 0x88a8
+#define NPC_ETYPE_ITAG 0x88e7
+#define NPC_ETYPE_PTP 0x88f7
+#define NPC_ETYPE_FCOE 0x8906
+#define NPC_ETYPE_QINQ 0x9100
+#define NPC_ETYPE_TRANS_ETH_BR 0x6558
+#define NPC_ETYPE_PPP 0x880b
+#define NPC_ETYPE_NSH 0x894f
+
+#define NPC_IPNH_HOP 0
+#define NPC_IPNH_ICMP 1
+#define NPC_IPNH_IGMP 2
+#define NPC_IPNH_IP 4
+#define NPC_IPNH_TCP 6
+#define NPC_IPNH_UDP 17
+#define NPC_IPNH_IP6 41
+#define NPC_IPNH_ROUT 43
+#define NPC_IPNH_FRAG 44
+#define NPC_IPNH_GRE 47
+#define NPC_IPNH_ESP 50
+#define NPC_IPNH_AH 51
+#define NPC_IPNH_ICMP6 58
+#define NPC_IPNH_NONH 59
+#define NPC_IPNH_DEST 60
+#define NPC_IPNH_SCTP 132
+#define NPC_IPNH_MPLS 137
+
+#define NPC_UDP_PORT_GTPC 2123
+#define NPC_UDP_PORT_GTPU 2152
+#define NPC_UDP_PORT_VXLAN 4789
+#define NPC_UDP_PORT_VXLANGPE 4790
+#define NPC_UDP_PORT_GENEVE 6081
+
+#define NPC_VXLANGPE_NP_IP 0x1
+#define NPC_VXLANGPE_NP_IP6 0x2
+#define NPC_VXLANGPE_NP_ETH 0x3
+#define NPC_VXLANGPE_NP_NSH 0x4
+#define NPC_VXLANGPE_NP_MPLS 0x5
+#define NPC_VXLANGPE_NP_GBP 0x6
+#define NPC_VXLANGPE_NP_VBNG 0x7
+
+#define NPC_NSH_NP_IP 0x1
+#define NPC_NSH_NP_IP6 0x2
+#define NPC_NSH_NP_ETH 0x3
+#define NPC_NSH_NP_NSH 0x4
+#define NPC_NSH_NP_MPLS 0x5
+
+#define NPC_TCP_PORT_HTTP 80
+#define NPC_TCP_PORT_HTTPS 443
+#define NPC_TCP_PORT_PPTP 1723
+
+#define NPC_MPLS_S 0x0100
+
+#define NPC_IP_VER_4 0x4000
+#define NPC_IP_VER_6 0x6000
+#define NPC_IP_VER_MASK 0xf000
+#define NPC_IP_HDR_LEN_5 0x0500
+#define NPC_IP_HDR_LEN_MASK 0x0f00
+
+#define NPC_GRE_F_CSUM (0x1 << 15)
+#define NPC_GRE_F_ROUTE (0x1 << 14)
+#define NPC_GRE_F_KEY (0x1 << 13)
+#define NPC_GRE_F_SEQ (0x1 << 12)
+#define NPC_GRE_F_ACK (0x1 << 7)
+#define NPC_GRE_FLAG_MASK (NPC_GRE_F_CSUM | NPC_GRE_F_ROUTE | \
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ | NPC_GRE_F_ACK)
+#define NPC_GRE_VER_MASK 0x0003
+#define NPC_GRE_VER_1 0x0001
+
+#define NPC_VXLAN_I 0x0800
+
+#define NPC_VXLANGPE_VER (0x3 << 12)
+#define NPC_VXLANGPE_I (0x1 << 11)
+#define NPC_VXLANGPE_P (0x1 << 10)
+#define NPC_VXLANGPE_B (0x1 << 9)
+#define NPC_VXLANGPE_NP_MASK 0x00ff
+
+#define NPC_NSH_NP_MASK 0x00ff
+
+#define NPC_GENEVE_F_OAM (0x1 << 7)
+#define NPC_GENEVE_F_CRI_OPT (0x1 << 6)
+
+#define NPC_GTP_PT_GTP (0x1 << 12)
+#define NPC_GTP_PT_MASK (0x1 << 12)
+#define NPC_GTP_VER1 (0x1 << 13)
+#define NPC_GTP_VER_MASK (0x7 << 13)
+#define NPC_GTP_MT_G_PDU 0xff
+#define NPC_GTP_MT_MASK 0xff
+
+#define NPC_TCP_DATA_OFFSET_5 0x5000
+#define NPC_TCP_DATA_OFFSET_MASK 0xf000
+
+enum npc_kpu_parser_state {
+ NPC_S_NA = 0,
+ NPC_S_KPU1_ETHER,
+ NPC_S_KPU1_PKI,
+ NPC_S_KPU2_CTAG,
+ NPC_S_KPU2_SBTAG,
+ NPC_S_KPU2_QINQ,
+ NPC_S_KPU2_ETAG,
+ NPC_S_KPU2_ITAG,
+ NPC_S_KPU3_CTAG,
+ NPC_S_KPU3_STAG,
+ NPC_S_KPU3_QINQ,
+ NPC_S_KPU3_ITAG,
+ NPC_S_KPU4_MPLS,
+ NPC_S_KPU4_NSH,
+ NPC_S_KPU5_IP,
+ NPC_S_KPU5_IP6,
+ NPC_S_KPU5_ARP,
+ NPC_S_KPU5_RARP,
+ NPC_S_KPU5_PTP,
+ NPC_S_KPU5_FCOE,
+ NPC_S_KPU5_MPLS,
+ NPC_S_KPU5_MPLS_PL,
+ NPC_S_KPU5_NSH,
+ NPC_S_KPU6_IP6_EXT,
+ NPC_S_KPU7_IP6_EXT,
+ NPC_S_KPU8_TCP,
+ NPC_S_KPU8_UDP,
+ NPC_S_KPU8_SCTP,
+ NPC_S_KPU8_ICMP,
+ NPC_S_KPU8_IGMP,
+ NPC_S_KPU8_ICMP6,
+ NPC_S_KPU8_GRE,
+ NPC_S_KPU8_ESP,
+ NPC_S_KPU8_AH,
+ NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN,
+ NPC_S_KPU9_TU_MPLS,
+ NPC_S_KPU9_TU_NSH,
+ NPC_S_KPU10_TU_MPLS_PL,
+ NPC_S_KPU10_TU_MPLS,
+ NPC_S_KPU10_TU_NSH,
+ NPC_S_KPU11_TU_ETHER,
+ NPC_S_KPU11_TU_PPP,
+ NPC_S_KPU11_TU_MPLS_IN_NSH,
+ NPC_S_KPU11_TU_3RD_NSH,
+ NPC_S_KPU12_TU_IP,
+ NPC_S_KPU12_TU_IP6,
+ NPC_S_KPU12_TU_ARP,
+ NPC_S_KPU13_TU_IP6_EXT,
+ NPC_S_KPU14_TU_IP6_EXT,
+ NPC_S_KPU15_TU_TCP,
+ NPC_S_KPU15_TU_UDP,
+ NPC_S_KPU15_TU_SCTP,
+ NPC_S_KPU15_TU_ICMP,
+ NPC_S_KPU15_TU_IGMP,
+ NPC_S_KPU15_TU_ICMP6,
+ NPC_S_KPU15_TU_ESP,
+ NPC_S_KPU15_TU_AH,
+ NPC_S_KPU16_HTTP_DATA,
+ NPC_S_KPU16_HTTPS_DATA,
+ NPC_S_KPU16_PPTP_DATA,
+ NPC_S_KPU16_TCP_DATA,
+ NPC_S_KPU16_UDP_DATA,
+ NPC_S_LAST /* has to be the last item */
+};
+
+enum npc_kpu_parser_flag {
+ NPC_F_NA = 0,
+ NPC_F_PKI,
+ NPC_F_PKI_VLAN,
+ NPC_F_PKI_ETAG,
+ NPC_F_PKI_ITAG,
+ NPC_F_PKI_MPLS,
+ NPC_F_PKI_NSH,
+ NPC_F_ETYPE_UNK,
+ NPC_F_ETHER_VLAN,
+ NPC_F_ETHER_ETAG,
+ NPC_F_ETHER_ITAG,
+ NPC_F_ETHER_MPLS,
+ NPC_F_ETHER_NSH,
+ NPC_F_STAG_CTAG,
+ NPC_F_STAG_CTAG_UNK,
+ NPC_F_STAG_STAG_CTAG,
+ NPC_F_STAG_STAG_STAG,
+ NPC_F_QINQ_CTAG,
+ NPC_F_QINQ_CTAG_UNK,
+ NPC_F_QINQ_QINQ_CTAG,
+ NPC_F_QINQ_QINQ_QINQ,
+ NPC_F_BTAG_ITAG,
+ NPC_F_BTAG_ITAG_STAG,
+ NPC_F_BTAG_ITAG_CTAG,
+ NPC_F_BTAG_ITAG_UNK,
+ NPC_F_ETAG_CTAG,
+ NPC_F_ETAG_BTAG_ITAG,
+ NPC_F_ETAG_STAG,
+ NPC_F_ETAG_QINQ,
+ NPC_F_ETAG_ITAG,
+ NPC_F_ETAG_ITAG_STAG,
+ NPC_F_ETAG_ITAG_CTAG,
+ NPC_F_ETAG_ITAG_UNK,
+ NPC_F_ITAG_STAG_CTAG,
+ NPC_F_ITAG_STAG,
+ NPC_F_ITAG_CTAG,
+ NPC_F_MPLS_4_LABELS,
+ NPC_F_MPLS_3_LABELS,
+ NPC_F_MPLS_2_LABELS,
+ NPC_F_IP_HAS_OPTIONS,
+ NPC_F_IP_IP_IN_IP,
+ NPC_F_IP_6TO4,
+ NPC_F_IP_MPLS_IN_IP,
+ NPC_F_IP_UNK_PROTO,
+ NPC_F_IP_IP_IN_IP_HAS_OPTIONS,
+ NPC_F_IP_6TO4_HAS_OPTIONS,
+ NPC_F_IP_MPLS_IN_IP_HAS_OPTIONS,
+ NPC_F_IP_UNK_PROTO_HAS_OPTIONS,
+ NPC_F_IP6_HAS_EXT,
+ NPC_F_IP6_TUN_IP6,
+ NPC_F_IP6_MPLS_IN_IP,
+ NPC_F_TCP_HAS_OPTIONS,
+ NPC_F_TCP_HTTP,
+ NPC_F_TCP_HTTPS,
+ NPC_F_TCP_PPTP,
+ NPC_F_TCP_UNK_PORT,
+ NPC_F_TCP_HTTP_HAS_OPTIONS,
+ NPC_F_TCP_HTTPS_HAS_OPTIONS,
+ NPC_F_TCP_PPTP_HAS_OPTIONS,
+ NPC_F_TCP_UNK_PORT_HAS_OPTIONS,
+ NPC_F_UDP_VXLAN,
+ NPC_F_UDP_VXLAN_NOVNI,
+ NPC_F_UDP_VXLAN_NOVNI_NSH,
+ NPC_F_UDP_VXLANGPE,
+ NPC_F_UDP_VXLANGPE_NSH,
+ NPC_F_UDP_VXLANGPE_MPLS,
+ NPC_F_UDP_VXLANGPE_NOVNI,
+ NPC_F_UDP_VXLANGPE_NOVNI_NSH,
+ NPC_F_UDP_VXLANGPE_NOVNI_MPLS,
+ NPC_F_UDP_VXLANGPE_UNK,
+ NPC_F_UDP_VXLANGPE_NONP,
+ NPC_F_UDP_GTP_GTPC,
+ NPC_F_UDP_GTP_GTPU_G_PDU,
+ NPC_F_UDP_GTP_GTPU_UNK,
+ NPC_F_UDP_UNK_PORT,
+ NPC_F_UDP_GENEVE,
+ NPC_F_UDP_GENEVE_OAM,
+ NPC_F_UDP_GENEVE_CRI_OPT,
+ NPC_F_UDP_GENEVE_OAM_CRI_OPT,
+ NPC_F_GRE_NVGRE,
+ NPC_F_GRE_HAS_SRE,
+ NPC_F_GRE_HAS_CSUM,
+ NPC_F_GRE_HAS_KEY,
+ NPC_F_GRE_HAS_SEQ,
+ NPC_F_GRE_HAS_CSUM_KEY,
+ NPC_F_GRE_HAS_CSUM_SEQ,
+ NPC_F_GRE_HAS_KEY_SEQ,
+ NPC_F_GRE_HAS_CSUM_KEY_SEQ,
+ NPC_F_GRE_HAS_ROUTE,
+ NPC_F_GRE_UNK_PROTO,
+ NPC_F_GRE_VER1,
+ NPC_F_GRE_VER1_HAS_SEQ,
+ NPC_F_GRE_VER1_HAS_ACK,
+ NPC_F_GRE_VER1_HAS_SEQ_ACK,
+ NPC_F_GRE_VER1_UNK_PROTO,
+ NPC_F_TU_ETHER_UNK,
+ NPC_F_TU_ETHER_CTAG,
+ NPC_F_TU_ETHER_CTAG_UNK,
+ NPC_F_TU_ETHER_STAG_CTAG,
+ NPC_F_TU_ETHER_STAG_CTAG_UNK,
+ NPC_F_TU_ETHER_STAG,
+ NPC_F_TU_ETHER_STAG_UNK,
+ NPC_F_TU_ETHER_QINQ_CTAG,
+ NPC_F_TU_ETHER_QINQ_CTAG_UNK,
+ NPC_F_TU_ETHER_QINQ,
+ NPC_F_TU_ETHER_QINQ_UNK,
+ NPC_F_LAST /* has to be the last item */
+};
+
+enum npc_kpu_err_code {
+ NPC_EC_NOERR = 0, /* has to be zero */
+ NPC_EC_UNK,
+ NPC_EC_L2_K1,
+ NPC_EC_L2_K2,
+ NPC_EC_L2_K3,
+ NPC_EC_L2_K3_ETYPE_UNK,
+ NPC_EC_L2_MPLS_2MANY,
+ NPC_EC_L2_K4,
+ NPC_EC_IP_VER,
+ NPC_EC_IP6_VER,
+ NPC_EC_VXLAN,
+ NPC_EC_NVGRE,
+ NPC_EC_GRE,
+ NPC_EC_GRE_VER1,
+ NPC_EC_L4,
+ NPC_EC_LAST /* has to be the last item */
+};
+
+enum NPC_ERRLEV_E {
+ NPC_ERRLEV_RE = 0,
+ NPC_ERRLEV_LA = 1,
+ NPC_ERRLEV_LB = 2,
+ NPC_ERRLEV_LC = 3,
+ NPC_ERRLEV_LD = 4,
+ NPC_ERRLEV_LE = 5,
+ NPC_ERRLEV_LF = 6,
+ NPC_ERRLEV_LG = 7,
+ NPC_ERRLEV_LH = 8,
+ NPC_ERRLEV_R9 = 9,
+ NPC_ERRLEV_R10 = 10,
+ NPC_ERRLEV_R11 = 11,
+ NPC_ERRLEV_R12 = 12,
+ NPC_ERRLEV_R13 = 13,
+ NPC_ERRLEV_R14 = 14,
+ NPC_ERRLEV_NIX = 15,
+ NPC_ERRLEV_ENUM_LAST = 16,
+};
+
+static struct npc_kpu_profile_action ikpu_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 1, 0xff,
+ 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_ARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_RARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_PTP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_FCOE, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_ETAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_NSH, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, 0x0000, 0xfc00,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, 0x0400, 0xfe00,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_ARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_RARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_PTP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_FCOE, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_ETAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ 0x0010, 0x0010, 0x0000, 0xffff,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ 0x0010, 0x0010, 0x0000, 0xffff,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_NSH, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_NA, 0X00, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
+ {
+ NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_ARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_RARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_PTP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_FCOE, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_NSH, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_RARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_PTP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_FCOE, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_MPLSU, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_MPLSM, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_NSH, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_SBTAG, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_IP, 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_IP6, 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_ARP, 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_RARP, 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_PTP, 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_FCOE, 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_MPLSU, 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_MPLSM, 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_NSH, 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_SBTAG, 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_CTAG, 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_RARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_PTP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_FCOE, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_NSH, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_RARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_PTP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_FCOE, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_MPLSU, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_MPLSM, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_NSH, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ NPC_ETYPE_QINQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_ARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_RARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_PTP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_FCOE, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_NSH, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_RARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_PTP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_FCOE, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_NSH, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_ITAG, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_IP, 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_IP6, 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_ARP, 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_SBTAG, 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_CTAG, 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_ARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_RARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP, 0xffff,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP6, 0xffff,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_ARP, 0xffff,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_NA, 0X00, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu3_cam_entries[] = {
+ {
+ NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_ARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_RARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_PTP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_FCOE, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_NSH, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_RARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_PTP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_FCOE, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_MPLSU, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_MPLSM, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_NSH, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_ARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_RARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_NSH, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_RARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_PTP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_FCOE, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_MPLSU, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_MPLSM, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_NSH, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_ARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_RARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_PTP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_FCOE, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_NSH, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_ARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_RARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP, 0xffff,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP6, 0xffff,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_ARP, 0xffff,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu4_cam_entries[] = {
+ {
+ NPC_S_KPU4_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU4_MPLS, 0xff, 0x0000, NPC_MPLS_S,
+ NPC_MPLS_S, NPC_MPLS_S, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU4_MPLS, 0xff, 0x0000, NPC_MPLS_S,
+ 0x0000, NPC_MPLS_S, NPC_MPLS_S, NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU4_MPLS, 0xff, 0x0000, NPC_MPLS_S,
+ 0x0000, NPC_MPLS_S, 0x0000, NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_IP, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_IP6, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_ETH, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_NSH, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_MPLS, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_TCP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_UDP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_SCTP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_ICMP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_IGMP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_ESP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_AH, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_GRE, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_IP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_IP6, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_MPLS, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, 0x0000, 0x0000,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_TCP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_UDP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_SCTP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_ICMP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_IGMP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_ESP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_AH, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_GRE, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_IP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_IP6, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_MPLS, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, 0x0000, 0x0000,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_ARP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_RARP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_PTP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_FCOE, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff, NPC_IPNH_TCP << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff, NPC_IPNH_UDP << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff, NPC_IPNH_SCTP << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff, NPC_IPNH_ICMP << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff, NPC_IPNH_ICMP6 << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff, NPC_IPNH_ESP << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff, NPC_IPNH_AH << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff, NPC_IPNH_GRE << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff, NPC_IPNH_IP6 << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff, NPC_IPNH_MPLS << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff, 0x0000, 0x0000,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+ 0x0000, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff, 0x0000, NPC_MPLS_S,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS_PL, 0xff, NPC_IP_VER_4, NPC_IP_VER_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS_PL, 0xff, NPC_IP_VER_6, NPC_IP_VER_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS_PL, 0xff, 0x0000, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS_PL, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_IP, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_IP6, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_ETH, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_NSH, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_MPLS, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
+ {
+ NPC_S_KPU6_IP6_EXT, 0xff, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
+ {
+ NPC_S_KPU7_IP6_EXT, 0xff, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
+ {
+ NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_HTTP, 0xffff,
+ NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_HTTPS, 0xffff,
+ NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_PPTP, 0xffff,
+ NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff, 0x0000, 0x0000,
+ NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_HTTP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_HTTPS, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_PPTP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLAN, 0xffff,
+ NPC_VXLAN_I, NPC_VXLAN_I, 0x0000, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLAN, 0xffff,
+ 0x0000, 0xffff, 0x0000, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLAN, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_IP, NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_IP6, NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_ETH, NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_NSH, NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_MPLS, NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+ NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_IP, NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+ NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_IP6, NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+ NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_ETH, NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+ NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_NSH, NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+ NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_MPLS, NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+ NPC_VXLANGPE_P, NPC_VXLANGPE_P, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+ 0x0000, NPC_VXLANGPE_P, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+ 0x0000, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_TRANS_ETH_BR, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+ NPC_GENEVE_F_OAM, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_TRANS_ETH_BR, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+ NPC_GENEVE_F_CRI_OPT, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_TRANS_ETH_BR, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_TRANS_ETH_BR, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+ 0x0000, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+ NPC_GENEVE_F_OAM, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+ NPC_GENEVE_F_CRI_OPT, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, NPC_ETYPE_IP, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+ 0x0000, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP6, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+ NPC_GENEVE_F_OAM, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP6, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+ NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, NPC_ETYPE_IP6, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, NPC_ETYPE_IP6, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GTPC, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GTPU, 0xffff,
+ NPC_GTP_PT_GTP | NPC_GTP_VER1 | NPC_GTP_MT_G_PDU,
+ NPC_GTP_PT_MASK | NPC_GTP_VER_MASK | NPC_GTP_MT_MASK,
+ 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GTPU, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_SCTP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_ICMP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_IGMP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_ICMP6, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_ESP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_AH, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_TRANS_ETH_BR, 0xffff,
+ NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_TRANS_ETH_BR, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ 0x0000, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ 0x0000, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
+ 0x0000, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
+ NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
+ NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
+ NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
+ NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
+ NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
+ NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
+ NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
+ NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
+ NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff,
+ NPC_GRE_F_ROUTE, 0x4fff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff,
+ 0x0000, 0x4fff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff,
+ 0x0000, 0x0003, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_VER_1, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ | NPC_GRE_VER_1,
+ 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_ACK | NPC_GRE_VER_1,
+ 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ | NPC_GRE_F_ACK | NPC_GRE_VER_1,
+ 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff,
+ 0x2001, 0xef7f, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff,
+ 0x0001, 0x0003, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
+ {
+ NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 0xff, 0x0000, NPC_MPLS_S,
+ NPC_MPLS_S, NPC_MPLS_S, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 0xff, 0x0000, NPC_MPLS_S,
+ 0x0000, NPC_MPLS_S, NPC_MPLS_S, NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 0xff, 0x0000, NPC_MPLS_S,
+ 0x0000, NPC_MPLS_S, 0x0000, NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS, 0xff, 0x0000, NPC_MPLS_S,
+ NPC_MPLS_S, NPC_MPLS_S, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS, 0xff, 0x0000, NPC_MPLS_S,
+ 0x0000, NPC_MPLS_S, NPC_MPLS_S, NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS, 0xff, 0x0000, NPC_MPLS_S,
+ 0x0000, NPC_MPLS_S, 0x0000, NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_IP, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_IP6, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_ETH, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_NSH, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_MPLS, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu10_cam_entries[] = {
+ {
+ NPC_S_KPU10_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+ 0x0000, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS, 0xff, 0x0000, NPC_MPLS_S,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_PL, 0xff, NPC_IP_VER_4, NPC_IP_VER_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_PL, 0xff, NPC_IP_VER_6, NPC_IP_VER_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_PL, 0xff, 0x0000, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_PL, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_IP, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_IP6, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_ETH, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_NSH, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_MPLS, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu11_cam_entries[] = {
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_ARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP, 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP6, 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_ARP, 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP, 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP6, 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_ARP, 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_PPP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_MPLS_IN_NSH, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_3RD_NSH, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu12_cam_entries[] = {
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_TCP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_UDP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_SCTP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_ICMP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_IGMP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_ESP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_AH, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, 0x0000, 0x0000,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_TCP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_UDP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_SCTP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_ICMP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_IGMP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_ESP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_AH, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, 0x0000, 0x0000,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_ARP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_TCP << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_UDP << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_SCTP << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_ICMP << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_ICMP6 << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_ESP << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_AH << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff, 0x0000, 0x0000,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu13_cam_entries[] = {
+ {
+ NPC_S_KPU13_TU_IP6_EXT, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu14_cam_entries[] = {
+ {
+ NPC_S_KPU14_TU_IP6_EXT, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu15_cam_entries[] = {
+ {
+ NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_HTTP, 0xffff,
+ NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_HTTPS, 0xffff,
+ NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_PPTP, 0xffff,
+ NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff, 0x0000, 0x0000,
+ NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_HTTP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_HTTPS, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_PPTP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_UDP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_SCTP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_ICMP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_IGMP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_ICMP6, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_ESP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_AH, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu16_cam_entries[] = {
+ {
+ NPC_S_KPU16_TCP_DATA, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU16_HTTP_DATA, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU16_HTTPS_DATA, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU16_PPTP_DATA, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU16_UDP_DATA, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_action kpu1_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU5_IP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU5_IP6, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 3, 0, NPC_S_KPU5_ARP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 3, 0, NPC_S_KPU5_RARP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 3, 0, NPC_S_KPU5_PTP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 3, 0, NPC_S_KPU5_FCOE, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+ 0, 0, NPC_S_KPU2_CTAG, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_VLAN, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 20,
+ 0, 0, NPC_S_KPU2_SBTAG, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_VLAN, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+ 0, 0, NPC_S_KPU2_QINQ, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_VLAN, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 10, 24,
+ 0, 0, NPC_S_KPU2_ETAG, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_ETAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 16, 20, 24,
+ 0, 0, NPC_S_KPU2_ITAG, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 2, 0, NPC_S_KPU4_MPLS, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_MPLS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 2, 0, NPC_S_KPU4_MPLS, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_MPLS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 2, 0, NPC_S_KPU4_NSH, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_NSH, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_8023, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_8023, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETYPE_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU5_IP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU5_IP6, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 3, 0, NPC_S_KPU5_ARP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 3, 0, NPC_S_KPU5_RARP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 3, 0, NPC_S_KPU5_PTP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 3, 0, NPC_S_KPU5_FCOE, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+ 0, 0, NPC_S_KPU2_CTAG, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_VLAN, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 20,
+ 0, 0, NPC_S_KPU2_SBTAG, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_VLAN, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+ 0, 0, NPC_S_KPU2_QINQ, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_VLAN, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 10, 24,
+ 0, 0, NPC_S_KPU2_ETAG, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_ETAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 16, 20, 24,
+ 0, 0, NPC_S_KPU2_ITAG, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 2, 0, NPC_S_KPU4_MPLS, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_MPLS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 2, 0, NPC_S_KPU4_MPLS, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_MPLS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 2, 0, NPC_S_KPU4_NSH, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_NSH, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETYPE_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LA, NPC_EC_L2_K1, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu2_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_RARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_PTP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_FCOE, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 1, 0, NPC_S_KPU4_NSH, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG, NPC_F_ETYPE_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_RARP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_PTP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_FCOE, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 1, 0, NPC_S_KPU4_NSH, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+ 0, 0, NPC_S_KPU3_CTAG, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+ 0, 0, NPC_S_KPU3_STAG, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_STAG_STAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_RARP, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_PTP, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_FCOE, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 1, 0, NPC_S_KPU4_NSH, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU3_STAG, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG_STAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU3_CTAG, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_RARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_PTP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_FCOE, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 1, 0, NPC_S_KPU4_NSH, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_ETYPE_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_RARP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_PTP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_FCOE, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 1, 0, NPC_S_KPU4_NSH, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+ 0, 0, NPC_S_KPU3_CTAG, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+ 0, 0, NPC_S_KPU3_QINQ, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_QINQ_QINQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_RARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_PTP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_FCOE, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 1, 0, NPC_S_KPU4_NSH, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_ETYPE_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_RARP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_PTP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_FCOE, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, 1, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, 2, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_NSH, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, 2, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU3_CTAG, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 16, 20, 24,
+ 0, 0, NPC_S_KPU3_ITAG, 12, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_BTAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+ 0, 0, NPC_S_KPU3_STAG, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_STAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+ 0, 0, NPC_S_KPU3_QINQ, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_QINQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 26, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 26, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 26, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU3_STAG, 26, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG_STAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU3_CTAG, 26, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETYPE_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_RARP, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 26, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 26, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 26, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu3_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 1, 0, NPC_S_KPU5_IP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 1, 0, NPC_S_KPU5_IP6, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_ARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_RARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_PTP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_FCOE, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU4_NSH, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 1, 0, NPC_S_KPU5_IP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 1, 0, NPC_S_KPU5_IP6, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_ARP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_RARP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_PTP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_FCOE, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU4_MPLS, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU4_MPLS, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU4_NSH, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 1, 0, NPC_S_KPU5_IP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 1, 0, NPC_S_KPU5_IP6, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_ARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_RARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU4_NSH, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 1, 0, NPC_S_KPU5_IP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 1, 0, NPC_S_KPU5_IP6, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_ARP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_RARP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_PTP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_FCOE, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU4_MPLS, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU4_MPLS, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU4_NSH, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 1, 0, NPC_S_KPU5_IP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 1, 0, NPC_S_KPU5_IP6, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_ARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_RARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_PTP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_FCOE, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU4_NSH, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 18, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 18, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 18, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_RARP, 18, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 1, 0, NPC_S_KPU5_IP, 26, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 1, 0, NPC_S_KPU5_IP6, 26, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_ARP, 26, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 1, 0, NPC_S_KPU5_IP, 22, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 1, 0, NPC_S_KPU5_IP6, 22, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_ARP, 22, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 1, 0, NPC_S_KPU5_IP, 22, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 1, 0, NPC_S_KPU5_IP6, 22, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_ARP, 22, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu4_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU5_MPLS_PL, 4, 1,
+ NPC_LID_LC, NPC_LT_LC_MPLS, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU5_MPLS_PL, 8, 1,
+ NPC_LID_LC, NPC_LT_LC_MPLS, NPC_F_MPLS_2_LABELS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU5_MPLS_PL, 12, 1,
+ NPC_LID_LC, NPC_LT_LC_MPLS, NPC_F_MPLS_3_LABELS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 4, 0,
+ 0, 0, NPC_S_KPU5_MPLS, 12, 1,
+ NPC_LID_LC, NPC_LT_LC_MPLS, NPC_F_MPLS_4_LABELS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 7, 0, NPC_S_KPU12_TU_IP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 7, 0, NPC_S_KPU12_TU_IP6, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 6, 0, NPC_S_KPU11_TU_ETHER, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU5_NSH, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 4, 0, NPC_S_KPU9_TU_MPLS, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K4, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu5_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
+ 2, 0, NPC_S_KPU8_TCP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 8, 10,
+ 2, 0, NPC_S_KPU8_UDP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU8_SCTP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU8_ICMP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU8_IGMP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU8_ESP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU8_AH, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 2, 0, NPC_S_KPU8_GRE, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 6, 0, NPC_S_KPU12_TU_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_IP_IN_IP, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 6, 0, NPC_S_KPU12_TU_IP6, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_6TO4, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 3, 0, NPC_S_KPU9_TU_MPLS, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_MPLS_IN_IP, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_UNK_PROTO, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
+ 2, 0, NPC_S_KPU8_TCP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 8, 10,
+ 2, 0, NPC_S_KPU8_UDP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU8_SCTP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU8_ICMP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU8_IGMP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU8_ESP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU8_AH, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 2, 0, NPC_S_KPU8_GRE, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 6, 0, NPC_S_KPU12_TU_IP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_IP_IN_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 6, 0, NPC_S_KPU12_TU_IP6, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_6TO4_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 3, 0, NPC_S_KPU9_TU_MPLS, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_MPLS_IN_IP_HAS_OPTIONS,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_UNK_PROTO_HAS_OPTIONS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP_VER, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_ARP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_RARP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_PTP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_FCOE, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
+ 2, 0, NPC_S_KPU8_TCP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 8, 10,
+ 2, 0, NPC_S_KPU8_UDP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU8_SCTP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU8_ICMP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU8_ICMP6, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU8_ESP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU8_AH, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU8_GRE, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 6, 0, NPC_S_KPU12_TU_IP6, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6, NPC_F_IP6_TUN_IP6, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 3, 0, NPC_S_KPU9_TU_MPLS, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6, NPC_F_IP6_MPLS_IN_IP, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU6_IP6_EXT, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6, NPC_F_IP6_HAS_EXT, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP6_VER, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 6, 0, NPC_S_KPU12_TU_IP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 6, 0, NPC_S_KPU12_TU_IP6, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 5, 0, NPC_S_KPU11_TU_ETHER, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 5, 0, NPC_S_KPU11_TU_ETHER, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_MPLS_2MANY, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 6, 0, NPC_S_KPU12_TU_IP, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 6, 0, NPC_S_KPU12_TU_IP6, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 5, 0, NPC_S_KPU11_TU_ETHER, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 5, 0, NPC_S_KPU11_TU_ETHER, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 6, 0, NPC_S_KPU12_TU_IP, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 6, 0, NPC_S_KPU12_TU_IP6, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 5, 0, NPC_S_KPU11_TU_ETHER, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 5, 0, NPC_S_KPU11_TU_3RD_NSH, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 3, 0, NPC_S_KPU9_TU_MPLS, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu6_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu7_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu8_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 7, 0, NPC_S_KPU16_HTTP_DATA, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_HTTP, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 7, 0, NPC_S_KPU16_HTTPS_DATA, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_HTTPS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 7, 0, NPC_S_KPU16_PPTP_DATA, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_PPTP, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 7, 0, NPC_S_KPU16_TCP_DATA, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_UNK_PORT, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 7, 0, NPC_S_KPU16_HTTP_DATA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_HTTP_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 7, 0, NPC_S_KPU16_HTTPS_DATA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_HTTPS_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 7, 0, NPC_S_KPU16_PPTP_DATA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_PPTP_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 7, 0, NPC_S_KPU16_TCP_DATA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_UNK_PORT_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLAN, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLAN_NOVNI, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LD, NPC_EC_VXLAN, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU9_TU_NSH, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NSH, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_MPLS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU9_TU_NSH, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI_NSH, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI_MPLS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NONP, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE, 8, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM, 8, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_CRI_OPT, 8, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM_CRI_OPT,
+ 8, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE, 8, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM,
+ 8, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_CRI_OPT,
+ 8, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM_CRI_OPT,
+ 8, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE, 8, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM, 8, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_CRI_OPT,
+ 8, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM_CRI_OPT,
+ 8, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GTP_GTPC, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GTP_GTPU_G_PDU, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GTP_GTPU_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 7, 0, NPC_S_KPU16_UDP_DATA, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_UNK_PORT, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_SCTP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_ICMP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_IGMP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_ICMP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_ESP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_AH, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 2, 0, NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_NVGRE, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LD, NPC_EC_NVGRE, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_KEY, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_KEY, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_KEY_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_KEY_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_KEY, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_KEY, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_KEY_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_KEY_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU9_TU_NSH, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_NSH, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU9_TU_NSH, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_CSUM, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU9_TU_NSH, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_KEY, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU9_TU_NSH, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU9_TU_NSH, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_CSUM_KEY, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU9_TU_NSH, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU9_TU_NSH, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_KEY_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU9_TU_NSH, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_CSUM_KEY_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_KEY, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_KEY, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_KEY_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_KEY_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_KEY, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_KEY, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_KEY_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_KEY_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_ROUTE, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_UNK_PROTO, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LD, NPC_EC_GRE, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU11_TU_PPP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU11_TU_PPP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1_HAS_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU11_TU_PPP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1_HAS_ACK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU11_TU_PPP, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1_HAS_SEQ_ACK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1_UNK_PROTO, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LD, NPC_EC_GRE_VER1, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LD, NPC_EC_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu9_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU10_TU_MPLS_PL, 4, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU10_TU_MPLS_PL, 8, 0,
+ NPC_LID_LD, NPC_LT_NA, NPC_F_MPLS_2_LABELS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU10_TU_MPLS_PL, 12, 0,
+ NPC_LID_LD, NPC_LT_NA, NPC_F_MPLS_3_LABELS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 4, 0,
+ 0, 0, NPC_S_KPU10_TU_MPLS, 12, 0,
+ NPC_LID_LD, NPC_LT_NA, NPC_F_MPLS_4_LABELS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU10_TU_MPLS_PL, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU10_TU_MPLS_PL, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS, NPC_F_MPLS_2_LABELS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU10_TU_MPLS_PL, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS, NPC_F_MPLS_3_LABELS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 4, 0,
+ 0, 0, NPC_S_KPU10_TU_MPLS, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS, NPC_F_MPLS_4_LABELS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU12_TU_IP, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU12_TU_IP6, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 1, 0, NPC_S_KPU11_TU_ETHER, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU10_TU_NSH, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU11_TU_MPLS_IN_NSH, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_LE, NPC_EC_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu10_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 1, 0, NPC_S_KPU12_TU_IP, 4, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 1, 0, NPC_S_KPU12_TU_IP6, 4, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 0, 0, NPC_S_KPU11_TU_ETHER, 8, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 0, 0, NPC_S_KPU11_TU_ETHER, 4, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_MPLS_2MANY, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 1, 0, NPC_S_KPU12_TU_IP, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 1, 0, NPC_S_KPU12_TU_IP6, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 0, 0, NPC_S_KPU11_TU_ETHER, 4, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 0, 0, NPC_S_KPU11_TU_ETHER, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 1, 0, NPC_S_KPU12_TU_IP, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 1, 0, NPC_S_KPU12_TU_IP6, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 0, 0, NPC_S_KPU11_TU_ETHER, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU11_TU_3RD_NSH, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU11_TU_MPLS_IN_NSH, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_LE, NPC_EC_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu11_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_IP, 14, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_IP6, 14, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_ARP, 14, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_IP, 18, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_IP6, 18, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_ARP, 18, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_CTAG_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_IP, 22, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_IP6, 22, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_ARP, 22, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER,
+ NPC_F_TU_ETHER_STAG_CTAG_UNK, 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_IP, 18, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_IP6, 18, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_ARP, 18, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_IP, 22, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_IP6, 22, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_ARP, 22, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER,
+ NPC_F_TU_ETHER_QINQ_CTAG_UNK, 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_IP, 18, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_IP6, 18, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_ARP, 18, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_PPP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_NSH, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_3RD_NSH, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LE, NPC_EC_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LE, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu12_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
+ 2, 0, NPC_S_KPU15_TU_TCP, 20, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_UDP, 20, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_SCTP, 20, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_ICMP, 20, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_IGMP, 20, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_ESP, 20, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_AH, 20, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_UNK_PROTO, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
+ 2, 0, NPC_S_KPU15_TU_TCP, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_UDP, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_SCTP, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_ICMP, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_IGMP, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_ESP, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_AH, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP,
+ NPC_F_IP_UNK_PROTO_HAS_OPTIONS, 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LF, NPC_EC_IP_VER, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ARP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
+ 2, 0, NPC_S_KPU15_TU_TCP, 40, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_UDP, 40, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_SCTP, 40, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_ICMP, 40, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_ICMP6, 40, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_ESP, 40, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_AH, 40, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU13_TU_IP6_EXT, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP6, NPC_F_IP6_HAS_EXT, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LF, NPC_EC_IP6_VER, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LF, NPC_EC_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LF, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu13_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu14_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu15_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU16_HTTP_DATA, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_HTTP, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU16_HTTPS_DATA, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_HTTPS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU16_PPTP_DATA, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_PPTP, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU16_TCP_DATA, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_UNK_PORT, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU16_HTTP_DATA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_HTTP_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU16_HTTPS_DATA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_HTTPS_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU16_PPTP_DATA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_PPTP_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU16_TCP_DATA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_UNK_PORT_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU16_UDP_DATA, 8, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_UDP, NPC_F_UDP_UNK_PORT, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_SCTP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_ICMP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IGMP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_ICMP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_ESP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_AH, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LG, NPC_EC_L4, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LG, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu16_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TCP_DATA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_HTTP_DATA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_HTTPS_DATA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_PPTP_DATA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_UDP_DATA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
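+/* Per-KPU parser profile: the number of CAM entries, the number of action
+ * entries, and pointers to the corresponding tables for KPU1 through KPU16.
+ */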
+static struct npc_kpu_profile npc_kpu_profiles[] = {
+ {
+ ARRAY_SIZE(kpu1_cam_entries),
+ ARRAY_SIZE(kpu1_action_entries),
+ &kpu1_cam_entries[0],
+ &kpu1_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu2_cam_entries),
+ ARRAY_SIZE(kpu2_action_entries),
+ &kpu2_cam_entries[0],
+ &kpu2_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu3_cam_entries),
+ ARRAY_SIZE(kpu3_action_entries),
+ &kpu3_cam_entries[0],
+ &kpu3_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu4_cam_entries),
+ ARRAY_SIZE(kpu4_action_entries),
+ &kpu4_cam_entries[0],
+ &kpu4_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu5_cam_entries),
+ ARRAY_SIZE(kpu5_action_entries),
+ &kpu5_cam_entries[0],
+ &kpu5_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu6_cam_entries),
+ ARRAY_SIZE(kpu6_action_entries),
+ &kpu6_cam_entries[0],
+ &kpu6_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu7_cam_entries),
+ ARRAY_SIZE(kpu7_action_entries),
+ &kpu7_cam_entries[0],
+ &kpu7_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu8_cam_entries),
+ ARRAY_SIZE(kpu8_action_entries),
+ &kpu8_cam_entries[0],
+ &kpu8_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu9_cam_entries),
+ ARRAY_SIZE(kpu9_action_entries),
+ &kpu9_cam_entries[0],
+ &kpu9_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu10_cam_entries),
+ ARRAY_SIZE(kpu10_action_entries),
+ &kpu10_cam_entries[0],
+ &kpu10_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu11_cam_entries),
+ ARRAY_SIZE(kpu11_action_entries),
+ &kpu11_cam_entries[0],
+ &kpu11_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu12_cam_entries),
+ ARRAY_SIZE(kpu12_action_entries),
+ &kpu12_cam_entries[0],
+ &kpu12_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu13_cam_entries),
+ ARRAY_SIZE(kpu13_action_entries),
+ &kpu13_cam_entries[0],
+ &kpu13_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu14_cam_entries),
+ ARRAY_SIZE(kpu14_action_entries),
+ &kpu14_cam_entries[0],
+ &kpu14_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu15_cam_entries),
+ ARRAY_SIZE(kpu15_action_entries),
+ &kpu15_cam_entries[0],
+ &kpu15_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu16_cam_entries),
+ ARRAY_SIZE(kpu16_action_entries),
+ &kpu16_cam_entries[0],
+ &kpu16_action_entries[0],
+ },
+};
+
+#endif /* NPC_PROFILE_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
new file mode 100644
index 000000000000..dc28fa2b9481
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -0,0 +1,1772 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/pci.h>
+#include <linux/sysfs.h>
+
+#include "cgx.h"
+#include "rvu.h"
+#include "rvu_reg.h"
+
+#define DRV_NAME "octeontx2-af"
+#define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver"
+#define DRV_VERSION "1.0"
+
+static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
+
+static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct rvu_block *block, int lf);
+static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct rvu_block *block, int lf);
+
+/* Supported devices */
+static const struct pci_device_id rvu_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF) },
+ { 0, } /* end of table */
+};
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION(DRV_STRING);
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, rvu_id_table);
+
+/* Poll an RVU block's register 'offset' until the bits selected by 'mask'
+ * read as zero (if 'zero' is true) or nonzero (if 'zero' is false).
+ */
+int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
+{
+ unsigned long timeout = jiffies + usecs_to_jiffies(100);
+ void __iomem *reg;
+ u64 reg_val;
+
+ reg = rvu->afreg_base + ((block << 28) | offset);
+ while (time_before(jiffies, timeout)) {
+ reg_val = readq(reg);
+ if (zero && !(reg_val & mask))
+ return 0;
+ if (!zero && (reg_val & mask))
+ return 0;
+ usleep_range(1, 5);
+ timeout--;
+ }
+ return -EBUSY;
+}
+
+int rvu_alloc_rsrc(struct rsrc_bmap *rsrc)
+{
+ int id;
+
+ if (!rsrc->bmap)
+ return -EINVAL;
+
+ id = find_first_zero_bit(rsrc->bmap, rsrc->max);
+ if (id >= rsrc->max)
+ return -ENOSPC;
+
+ __set_bit(id, rsrc->bmap);
+
+ return id;
+}
+
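+/* Allocate 'nrsrc' contiguous resources from the bitmap; returns the
+ * starting index on success or a negative errno if no such run is free.
+ */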
+int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc)
+{
+ int start;
+
+ if (!rsrc->bmap)
+ return -EINVAL;
+
+ start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
+ if (start >= rsrc->max)
+ return -ENOSPC;
+
+ bitmap_set(rsrc->bmap, start, nrsrc);
+ return start;
+}
+
+static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
+{
+ if (!rsrc->bmap)
+ return;
+ if (start >= rsrc->max)
+ return;
+
+ bitmap_clear(rsrc->bmap, start, nrsrc);
+}
+
+bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc)
+{
+ int start;
+
+ if (!rsrc->bmap)
+ return false;
+
+ start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
+ if (start >= rsrc->max)
+ return false;
+
+ return true;
+}
+
+void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id)
+{
+ if (!rsrc->bmap)
+ return;
+
+ __clear_bit(id, rsrc->bmap);
+}
+
+int rvu_rsrc_free_count(struct rsrc_bmap *rsrc)
+{
+ int used;
+
+ if (!rsrc->bmap)
+ return 0;
+
+ used = bitmap_weight(rsrc->bmap, rsrc->max);
+ return (rsrc->max - used);
+}
+
+int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
+{
+ rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max),
+ sizeof(long), GFP_KERNEL);
+ if (!rsrc->bmap)
+ return -ENOMEM;
+ return 0;
+}
+
+/* Get block LF's HW index from a PF_FUNC's block slot number */
+int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
+{
+ u16 match = 0;
+ int lf;
+
+ spin_lock(&rvu->rsrc_lock);
+ for (lf = 0; lf < block->lf.max; lf++) {
+ if (block->fn_map[lf] == pcifunc) {
+ if (slot == match) {
+ spin_unlock(&rvu->rsrc_lock);
+ return lf;
+ }
+ match++;
+ }
+ }
+ spin_unlock(&rvu->rsrc_lock);
+ return -ENODEV;
+}
+
+/* Convert BLOCK_TYPE_E to a BLOCK_ADDR_E.
+ * Some silicon variants of OcteonTX2 support
+ * multiple blocks of the same type.
+ *
+ * @pcifunc has to be zero when no LF is yet attached.
+ */
+int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
+{
+ int devnum, blkaddr = -ENODEV;
+ u64 cfg, reg;
+ bool is_pf;
+
+ switch (blktype) {
+ case BLKTYPE_NPC:
+ blkaddr = BLKADDR_NPC;
+ goto exit;
+ case BLKTYPE_NPA:
+ blkaddr = BLKADDR_NPA;
+ goto exit;
+ case BLKTYPE_NIX:
+ /* For now assume NIX0 */
+ if (!pcifunc) {
+ blkaddr = BLKADDR_NIX0;
+ goto exit;
+ }
+ break;
+ case BLKTYPE_SSO:
+ blkaddr = BLKADDR_SSO;
+ goto exit;
+ case BLKTYPE_SSOW:
+ blkaddr = BLKADDR_SSOW;
+ goto exit;
+ case BLKTYPE_TIM:
+ blkaddr = BLKADDR_TIM;
+ goto exit;
+ case BLKTYPE_CPT:
+ /* For now assume CPT0 */
+ if (!pcifunc) {
+ blkaddr = BLKADDR_CPT0;
+ goto exit;
+ }
+ break;
+ }
+
+ /* Check if this is a RVU PF or VF */
+ if (pcifunc & RVU_PFVF_FUNC_MASK) {
+ is_pf = false;
+ devnum = rvu_get_hwvf(rvu, pcifunc);
+ } else {
+ is_pf = true;
+ devnum = rvu_get_pf(pcifunc);
+ }
+
+ /* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' */
+ if (blktype == BLKTYPE_NIX) {
+ reg = is_pf ? RVU_PRIV_PFX_NIX0_CFG : RVU_PRIV_HWVFX_NIX0_CFG;
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
+ if (cfg)
+ blkaddr = BLKADDR_NIX0;
+ }
+
+ /* Check if the 'pcifunc' has a CPT LF from 'BLKADDR_CPT0' */
+ if (blktype == BLKTYPE_CPT) {
+ reg = is_pf ? RVU_PRIV_PFX_CPT0_CFG : RVU_PRIV_HWVFX_CPT0_CFG;
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
+ if (cfg)
+ blkaddr = BLKADDR_CPT0;
+ }
+
+exit:
+ if (is_block_implemented(rvu->hw, blkaddr))
+ return blkaddr;
+ return -ENODEV;
+}
+
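+/* Record an LF attach/detach in the block's LF-to-pcifunc map and update
+ * the owning PF's/VF's per-block LF count register.
+ */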
+static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct rvu_block *block, u16 pcifunc,
+ u16 lf, bool attach)
+{
+ int devnum, num_lfs = 0;
+ bool is_pf;
+ u64 reg;
+
+ if (lf >= block->lf.max) {
+ dev_err(&rvu->pdev->dev,
+ "%s: FATAL: LF %d is >= %s's max lfs i.e %d\n",
+ __func__, lf, block->name, block->lf.max);
+ return;
+ }
+
+ /* Check if this is for a RVU PF or VF */
+ if (pcifunc & RVU_PFVF_FUNC_MASK) {
+ is_pf = false;
+ devnum = rvu_get_hwvf(rvu, pcifunc);
+ } else {
+ is_pf = true;
+ devnum = rvu_get_pf(pcifunc);
+ }
+
+ block->fn_map[lf] = attach ? pcifunc : 0;
+
+ switch (block->type) {
+ case BLKTYPE_NPA:
+ pfvf->npalf = attach ? true : false;
+ num_lfs = pfvf->npalf;
+ break;
+ case BLKTYPE_NIX:
+ pfvf->nixlf = attach ? true : false;
+ num_lfs = pfvf->nixlf;
+ break;
+ case BLKTYPE_SSO:
+ attach ? pfvf->sso++ : pfvf->sso--;
+ num_lfs = pfvf->sso;
+ break;
+ case BLKTYPE_SSOW:
+ attach ? pfvf->ssow++ : pfvf->ssow--;
+ num_lfs = pfvf->ssow;
+ break;
+ case BLKTYPE_TIM:
+ attach ? pfvf->timlfs++ : pfvf->timlfs--;
+ num_lfs = pfvf->timlfs;
+ break;
+ case BLKTYPE_CPT:
+ attach ? pfvf->cptlfs++ : pfvf->cptlfs--;
+ num_lfs = pfvf->cptlfs;
+ break;
+ }
+
+ reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg;
+ rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs);
+}
+
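+/* A pcifunc encodes the PF number in its upper bits and the function number
+ * in its lower bits: zero for the PF itself, (VF index + 1) for its VFs.
+ */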
+inline int rvu_get_pf(u16 pcifunc)
+{
+ return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
+}
+
+void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
+{
+ u64 cfg;
+
+ /* Get numVFs attached to this PF and first HWVF */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+ *numvfs = (cfg >> 12) & 0xFF;
+ *hwvf = cfg & 0xFFF;
+}
+
+static int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
+{
+ int pf, func;
+ u64 cfg;
+
+ pf = rvu_get_pf(pcifunc);
+ func = pcifunc & RVU_PFVF_FUNC_MASK;
+
+ /* Get first HWVF attached to this PF */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+
+ return ((cfg & 0xFFF) + func - 1);
+}
+
+struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
+{
+ /* Check if it is a PF or VF */
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)];
+ else
+ return &rvu->pf[rvu_get_pf(pcifunc)];
+}
+
+bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr)
+{
+ struct rvu_block *block;
+
+ if (blkaddr < BLKADDR_RVUM || blkaddr >= BLK_COUNT)
+ return false;
+
+ block = &hw->block[blkaddr];
+ return block->implemented;
+}
+
+static void rvu_check_block_implemented(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkid;
+ u64 cfg;
+
+ /* For each block check if 'implemented' bit is set */
+ for (blkid = 0; blkid < BLK_COUNT; blkid++) {
+ block = &hw->block[blkid];
+ cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid));
+ if (cfg & BIT_ULL(11))
+ block->implemented = true;
+ }
+}
+
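+/* Trigger a reset of the given LF and wait for hardware to clear the
+ * reset bit.
+ */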
+int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
+{
+ int err;
+
+ if (!block->implemented)
+ return 0;
+
+ rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12));
+ err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg, BIT_ULL(12),
+ true);
+ return err;
+}
+
+static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
+{
+ struct rvu_block *block = &rvu->hw->block[blkaddr];
+
+ if (!block->implemented)
+ return;
+
+ rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));
+ rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
+}
+
+static void rvu_reset_all_blocks(struct rvu *rvu)
+{
+ /* Do a HW reset of all RVU blocks */
+ rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC0, NDC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC1, NDC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC2, NDC_AF_BLK_RST);
+}
+
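+/* Scan a block's LF config registers and record any LFs that firmware has
+ * already attached to a PF/VF, including their MSIX vector offsets.
+ */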
+static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block)
+{
+ struct rvu_pfvf *pfvf;
+ u64 cfg;
+ int lf;
+
+ for (lf = 0; lf < block->lf.max; lf++) {
+ cfg = rvu_read64(rvu, block->addr,
+ block->lfcfg_reg | (lf << block->lfshift));
+ if (!(cfg & BIT_ULL(63)))
+ continue;
+
+ /* Set this resource as being used */
+ __set_bit(lf, block->lf.bmap);
+
+ /* Find which PF/VF this LF is attached to */
+ pfvf = rvu_get_pfvf(rvu, (cfg >> 8) & 0xFFFF);
+ rvu_update_rsrc_map(rvu, pfvf, block,
+ (cfg >> 8) & 0xFFFF, lf, true);
+
+ /* Set start MSIX vector for this LF within this PF/VF */
+ rvu_set_msix_offset(rvu, pfvf, block, lf);
+ }
+}
+
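+/* Warn if a PF or VF has been provisioned with too few MSIX vectors. */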
+static void rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf)
+{
+ int min_vecs;
+
+ if (!vf)
+ goto check_pf;
+
+ if (!nvecs) {
+ dev_warn(rvu->dev,
+ "PF%d:VF%d is configured with zero msix vectors, %d\n",
+ pf, vf - 1, nvecs);
+ }
+ return;
+
+check_pf:
+ if (pf == 0)
+ min_vecs = RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT;
+ else
+ min_vecs = RVU_PF_INT_VEC_CNT;
+
+ if (!(nvecs < min_vecs))
+ return;
+ dev_warn(rvu->dev,
+ "PF%d is configured with too few vectors, %d, min is %d\n",
+ pf, nvecs, min_vecs);
+}
+
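+/* Read each PF's and VF's MSIX vector budget, allocate bitmaps to track
+ * vector usage, program the interrupt vector offsets and map the MSIX
+ * table base address through the IOMMU.
+ */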
+static int rvu_setup_msix_resources(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int pf, vf, numvfs, hwvf, err;
+ int nvecs, offset, max_msix;
+ struct rvu_pfvf *pfvf;
+ u64 cfg, phy_addr;
+ dma_addr_t iova;
+
+ for (pf = 0; pf < hw->total_pfs; pf++) {
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+ /* If PF is not enabled, nothing to do */
+ if (!((cfg >> 20) & 0x01))
+ continue;
+
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
+
+ pfvf = &rvu->pf[pf];
+ /* Get num of MSIX vectors attached to this PF */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf));
+ pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1;
+ rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0);
+
+ /* Alloc msix bitmap for this PF */
+ err = rvu_alloc_bitmap(&pfvf->msix);
+ if (err)
+ return err;
+
+ /* Allocate memory for MSIX vector to RVU block LF mapping */
+ pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!pfvf->msix_lfmap)
+ return -ENOMEM;
+
+ /* For PF0 (AF) firmware will set msix vector offsets for
+ * AF, block AF and PF0_INT vectors, so jump to VFs.
+ */
+ if (!pf)
+ goto setup_vfmsix;
+
+ /* Set MSIX offset for PF's 'RVU_PF_INT_VEC' vectors.
+ * These are allocated on driver init and never freed,
+ * so no need to set 'msix_lfmap' for these.
+ */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf));
+ nvecs = (cfg >> 12) & 0xFF;
+ cfg &= ~0x7FFULL;
+ offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_PFX_INT_CFG(pf), cfg | offset);
+setup_vfmsix:
+ /* Alloc msix bitmap for VFs */
+ for (vf = 0; vf < numvfs; vf++) {
+ pfvf = &rvu->hwvf[hwvf + vf];
+ /* Get num of MSIX vectors attached to this VF */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_PFX_MSIX_CFG(pf));
+ pfvf->msix.max = (cfg & 0xFFF) + 1;
+ rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1);
+
+ /* Alloc msix bitmap for this VF */
+ err = rvu_alloc_bitmap(&pfvf->msix);
+ if (err)
+ return err;
+
+ pfvf->msix_lfmap =
+ devm_kcalloc(rvu->dev, pfvf->msix.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!pfvf->msix_lfmap)
+ return -ENOMEM;
+
+ /* Set MSIX offset for HWVF's 'RVU_VF_INT_VEC' vectors.
+ * These are allocated on driver init and never freed,
+ * so no need to set 'msix_lfmap' for these.
+ */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_HWVFX_INT_CFG(hwvf + vf));
+ nvecs = (cfg >> 12) & 0xFF;
+ cfg &= ~0x7FFULL;
+ offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_HWVFX_INT_CFG(hwvf + vf),
+ cfg | offset);
+ }
+ }
+
+ /* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence
+ * create an IOMMU mapping for the physical address configured by
+ * firmware and reconfigure RVU_AF_MSIXTR_BASE with the IOVA.
+ */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
+ max_msix = cfg & 0xFFFFF;
+ phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);
+ iova = dma_map_resource(rvu->dev, phy_addr,
+ max_msix * PCI_MSIX_ENTRY_SIZE,
+ DMA_BIDIRECTIONAL, 0);
+
+ if (dma_mapping_error(rvu->dev, iova))
+ return -ENOMEM;
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova);
+ rvu->msix_base_iova = iova;
+
+ return 0;
+}
+
+static void rvu_free_hw_resources(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ struct rvu_pfvf *pfvf;
+ int id, max_msix;
+ u64 cfg;
+
+ rvu_npa_freemem(rvu);
+ rvu_npc_freemem(rvu);
+ rvu_nix_freemem(rvu);
+
+ /* Free block LF bitmaps */
+ for (id = 0; id < BLK_COUNT; id++) {
+ block = &hw->block[id];
+ kfree(block->lf.bmap);
+ }
+
+ /* Free MSIX bitmaps */
+ for (id = 0; id < hw->total_pfs; id++) {
+ pfvf = &rvu->pf[id];
+ kfree(pfvf->msix.bmap);
+ }
+
+ for (id = 0; id < hw->total_vfs; id++) {
+ pfvf = &rvu->hwvf[id];
+ kfree(pfvf->msix.bmap);
+ }
+
+ /* Unmap MSIX vector base IOVA mapping */
+ if (!rvu->msix_base_iova)
+ return;
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
+ max_msix = cfg & 0xFFFFF;
+ dma_unmap_resource(rvu->dev, rvu->msix_base_iova,
+ max_msix * PCI_MSIX_ENTRY_SIZE,
+ DMA_BIDIRECTIONAL, 0);
+}
+
+static int rvu_setup_hw_resources(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkid, err;
+ u64 cfg;
+
+ /* Get HW supported max RVU PF & VF count */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
+ hw->total_pfs = (cfg >> 32) & 0xFF;
+ hw->total_vfs = (cfg >> 20) & 0xFFF;
+ hw->max_vfs_per_pf = (cfg >> 40) & 0xFF;
+
+ /* Init NPA LF's bitmap */
+ block = &hw->block[BLKADDR_NPA];
+ if (!block->implemented)
+ goto nix;
+ cfg = rvu_read64(rvu, BLKADDR_NPA, NPA_AF_CONST);
+ block->lf.max = (cfg >> 16) & 0xFFF;
+ block->addr = BLKADDR_NPA;
+ block->type = BLKTYPE_NPA;
+ block->lfshift = 8;
+ block->lookup_reg = NPA_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_NPA_CFG;
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NPA_CFG;
+ block->lfcfg_reg = NPA_PRIV_LFX_CFG;
+ block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG;
+ block->lfreset_reg = NPA_AF_LF_RST;
+ sprintf(block->name, "NPA");
+ err = rvu_alloc_bitmap(&block->lf);
+ if (err)
+ return err;
+
+nix:
+ /* Init NIX LF's bitmap */
+ block = &hw->block[BLKADDR_NIX0];
+ if (!block->implemented)
+ goto sso;
+ cfg = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST2);
+ block->lf.max = cfg & 0xFFF;
+ block->addr = BLKADDR_NIX0;
+ block->type = BLKTYPE_NIX;
+ block->lfshift = 8;
+ block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_NIX0_CFG;
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIX0_CFG;
+ block->lfcfg_reg = NIX_PRIV_LFX_CFG;
+ block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
+ block->lfreset_reg = NIX_AF_LF_RST;
+ sprintf(block->name, "NIX");
+ err = rvu_alloc_bitmap(&block->lf);
+ if (err)
+ return err;
+
+sso:
+ /* Init SSO group's bitmap */
+ block = &hw->block[BLKADDR_SSO];
+ if (!block->implemented)
+ goto ssow;
+ cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST);
+ block->lf.max = cfg & 0xFFFF;
+ block->addr = BLKADDR_SSO;
+ block->type = BLKTYPE_SSO;
+ block->multislot = true;
+ block->lfshift = 3;
+ block->lookup_reg = SSO_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_SSO_CFG;
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSO_CFG;
+ block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG;
+ block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG;
+ block->lfreset_reg = SSO_AF_LF_HWGRP_RST;
+ sprintf(block->name, "SSO GROUP");
+ err = rvu_alloc_bitmap(&block->lf);
+ if (err)
+ return err;
+
+ssow:
+ /* Init SSO workslot's bitmap */
+ block = &hw->block[BLKADDR_SSOW];
+ if (!block->implemented)
+ goto tim;
+ block->lf.max = (cfg >> 56) & 0xFF;
+ block->addr = BLKADDR_SSOW;
+ block->type = BLKTYPE_SSOW;
+ block->multislot = true;
+ block->lfshift = 3;
+ block->lookup_reg = SSOW_AF_RVU_LF_HWS_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_SSOW_CFG;
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSOW_CFG;
+ block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG;
+ block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG;
+ block->lfreset_reg = SSOW_AF_LF_HWS_RST;
+ sprintf(block->name, "SSOWS");
+ err = rvu_alloc_bitmap(&block->lf);
+ if (err)
+ return err;
+
+tim:
+ /* Init TIM LF's bitmap */
+ block = &hw->block[BLKADDR_TIM];
+ if (!block->implemented)
+ goto cpt;
+ cfg = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CONST);
+ block->lf.max = cfg & 0xFFFF;
+ block->addr = BLKADDR_TIM;
+ block->type = BLKTYPE_TIM;
+ block->multislot = true;
+ block->lfshift = 3;
+ block->lookup_reg = TIM_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_TIM_CFG;
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_TIM_CFG;
+ block->lfcfg_reg = TIM_PRIV_LFX_CFG;
+ block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG;
+ block->lfreset_reg = TIM_AF_LF_RST;
+ sprintf(block->name, "TIM");
+ err = rvu_alloc_bitmap(&block->lf);
+ if (err)
+ return err;
+
+cpt:
+ /* Init CPT LF's bitmap */
+ block = &hw->block[BLKADDR_CPT0];
+ if (!block->implemented)
+ goto init;
+ cfg = rvu_read64(rvu, BLKADDR_CPT0, CPT_AF_CONSTANTS0);
+ block->lf.max = cfg & 0xFF;
+ block->addr = BLKADDR_CPT0;
+ block->type = BLKTYPE_CPT;
+ block->multislot = true;
+ block->lfshift = 3;
+ block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_CPT0_CFG;
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPT0_CFG;
+ block->lfcfg_reg = CPT_PRIV_LFX_CFG;
+ block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
+ block->lfreset_reg = CPT_AF_LF_RST;
+ sprintf(block->name, "CPT");
+ err = rvu_alloc_bitmap(&block->lf);
+ if (err)
+ return err;
+
+init:
+ /* Allocate memory for PFVF data */
+ rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs,
+ sizeof(struct rvu_pfvf), GFP_KERNEL);
+ if (!rvu->pf)
+ return -ENOMEM;
+
+ rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs,
+ sizeof(struct rvu_pfvf), GFP_KERNEL);
+ if (!rvu->hwvf)
+ return -ENOMEM;
+
+ spin_lock_init(&rvu->rsrc_lock);
+
+ err = rvu_setup_msix_resources(rvu);
+ if (err)
+ return err;
+
+ for (blkid = 0; blkid < BLK_COUNT; blkid++) {
+ block = &hw->block[blkid];
+ if (!block->lf.bmap)
+ continue;
+
+ /* Allocate memory for block LF/slot to pcifunc mapping info */
+ block->fn_map = devm_kcalloc(rvu->dev, block->lf.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!block->fn_map)
+ return -ENOMEM;
+
+ /* Scan the block to check if low level firmware has
+ * already provisioned any of its LFs to a PF/VF.
+ */
+ rvu_scan_block(rvu, block);
+ }
+
+ err = rvu_npc_init(rvu);
+ if (err)
+ return err;
+
+ err = rvu_npa_init(rvu);
+ if (err)
+ return err;
+
+ err = rvu_nix_init(rvu);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/* NPA and NIX admin queue APIs */
+void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq)
+{
+ if (!aq)
+ return;
+
+ qmem_free(rvu->dev, aq->inst);
+ qmem_free(rvu->dev, aq->res);
+ devm_kfree(rvu->dev, aq);
+}
+
+int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
+ int qsize, int inst_size, int res_size)
+{
+ struct admin_queue *aq;
+ int err;
+
+ *ad_queue = devm_kzalloc(rvu->dev, sizeof(*aq), GFP_KERNEL);
+ if (!*ad_queue)
+ return -ENOMEM;
+ aq = *ad_queue;
+
+ /* Alloc memory for instructions, i.e. the AQ */
+ err = qmem_alloc(rvu->dev, &aq->inst, qsize, inst_size);
+ if (err) {
+ devm_kfree(rvu->dev, aq);
+ return err;
+ }
+
+ /* Alloc memory for results */
+ err = qmem_alloc(rvu->dev, &aq->res, qsize, res_size);
+ if (err) {
+ rvu_aq_free(rvu, aq);
+ return err;
+ }
+
+ spin_lock_init(&aq->lock);
+ return 0;
+}
+
+static int rvu_mbox_handler_READY(struct rvu *rvu, struct msg_req *req,
+ struct ready_msg_rsp *rsp)
+{
+ return 0;
+}
+
+/* Get current count of a RVU block's LF/slots
+ * provisioned to a given RVU func.
+ */
+static u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blktype)
+{
+ switch (blktype) {
+ case BLKTYPE_NPA:
+ return pfvf->npalf ? 1 : 0;
+ case BLKTYPE_NIX:
+ return pfvf->nixlf ? 1 : 0;
+ case BLKTYPE_SSO:
+ return pfvf->sso;
+ case BLKTYPE_SSOW:
+ return pfvf->ssow;
+ case BLKTYPE_TIM:
+ return pfvf->timlfs;
+ case BLKTYPE_CPT:
+ return pfvf->cptlfs;
+ }
+ return 0;
+}
+
+static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
+ int pcifunc, int slot)
+{
+ u64 val;
+
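+ /* Write pcifunc and slot, then set the trigger bit (bit13); HW
+ * clears bit13 once the lookup completes. Bit12 then reports
+ * whether a valid LF was found and bits [11:0] hold its number.
+ */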
+ val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13);
+ rvu_write64(rvu, block->addr, block->lookup_reg, val);
+ /* Wait for the lookup to finish */
+ /* TODO: put some timeout here */
+ while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13))
+ ;
+
+ val = rvu_read64(rvu, block->addr, block->lookup_reg);
+
+ /* Check LF valid bit */
+ if (!(val & (1ULL << 12)))
+ return -1;
+
+ return (val & 0xFFF);
+}
+
+static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int slot, lf, num_lfs;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc);
+ if (blkaddr < 0)
+ return;
+
+ block = &hw->block[blkaddr];
+
+ num_lfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ if (!num_lfs)
+ return;
+
+ for (slot = 0; slot < num_lfs; slot++) {
+ lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot);
+ if (lf < 0) /* This should never happen */
+ continue;
+
+ /* Disable the LF */
+ rvu_write64(rvu, blkaddr, block->lfcfg_reg |
+ (lf << block->lfshift), 0x00ULL);
+
+ /* Update SW maintained mapping info as well */
+ rvu_update_rsrc_map(rvu, pfvf, block,
+ pcifunc, lf, false);
+
+ /* Free the resource */
+ rvu_free_rsrc(&block->lf, lf);
+
+ /* Clear MSIX vector offset for this LF */
+ rvu_clear_msix_offset(rvu, pfvf, block, lf);
+ }
+}
+
+static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
+ u16 pcifunc)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ bool detach_all = true;
+ struct rvu_block *block;
+ int blkid;
+
+ spin_lock(&rvu->rsrc_lock);
+
+ /* Check for partial resource detach */
+ if (detach && detach->partial)
+ detach_all = false;
+
+ /* Check for RVU block's LFs attached to this func,
+ * if so, detach them.
+ */
+ for (blkid = 0; blkid < BLK_COUNT; blkid++) {
+ block = &hw->block[blkid];
+ if (!block->lf.bmap)
+ continue;
+ if (!detach_all && detach) {
+ if (blkid == BLKADDR_NPA && !detach->npalf)
+ continue;
+ else if ((blkid == BLKADDR_NIX0) && !detach->nixlf)
+ continue;
+ else if ((blkid == BLKADDR_SSO) && !detach->sso)
+ continue;
+ else if ((blkid == BLKADDR_SSOW) && !detach->ssow)
+ continue;
+ else if ((blkid == BLKADDR_TIM) && !detach->timlfs)
+ continue;
+ else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs)
+ continue;
+ }
+ rvu_detach_block(rvu, pcifunc, block->type);
+ }
+
+ spin_unlock(&rvu->rsrc_lock);
+ return 0;
+}
+
+static int rvu_mbox_handler_DETACH_RESOURCES(struct rvu *rvu,
+ struct rsrc_detach *detach,
+ struct msg_rsp *rsp)
+{
+ return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
+}
+
+static void rvu_attach_block(struct rvu *rvu, int pcifunc,
+ int blktype, int num_lfs)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int slot, lf;
+ int blkaddr;
+ u64 cfg;
+
+ if (!num_lfs)
+ return;
+
+ blkaddr = rvu_get_blkaddr(rvu, blktype, 0);
+ if (blkaddr < 0)
+ return;
+
+ block = &hw->block[blkaddr];
+ if (!block->lf.bmap)
+ return;
+
+ for (slot = 0; slot < num_lfs; slot++) {
+ /* Allocate the resource */
+ lf = rvu_alloc_rsrc(&block->lf);
+ if (lf < 0)
+ return;
+
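+ /* LF config: bit63 marks the LF as attached, the owning pcifunc
+ * goes into bits [23:8] and the slot index into the low bits.
+ */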
+ cfg = (1ULL << 63) | (pcifunc << 8) | slot;
+ rvu_write64(rvu, blkaddr, block->lfcfg_reg |
+ (lf << block->lfshift), cfg);
+ rvu_update_rsrc_map(rvu, pfvf, block,
+ pcifunc, lf, true);
+
+ /* Set start MSIX vector for this LF within this PF/VF */
+ rvu_set_msix_offset(rvu, pfvf, block, lf);
+ }
+}
+
+static int rvu_check_rsrc_availability(struct rvu *rvu,
+ struct rsrc_attach *req, u16 pcifunc)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int free_lfs, mappedlfs;
+
+ /* Only one NPA LF can be attached */
+ if (req->npalf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NPA)) {
+ block = &hw->block[BLKADDR_NPA];
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ if (!free_lfs)
+ goto fail;
+ } else if (req->npalf) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid req, already has NPA\n",
+ pcifunc);
+ return -EINVAL;
+ }
+
+ /* Only one NIX LF can be attached */
+ if (req->nixlf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NIX)) {
+ block = &hw->block[BLKADDR_NIX0];
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ if (!free_lfs)
+ goto fail;
+ } else if (req->nixlf) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid req, already has NIX\n",
+ pcifunc);
+ return -EINVAL;
+ }
+
+ if (req->sso) {
+ block = &hw->block[BLKADDR_SSO];
+ /* Is request within limits? */
+ if (req->sso > block->lf.max) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid SSO req, %d > max %d\n",
+ pcifunc, req->sso, block->lf.max);
+ return -EINVAL;
+ }
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ /* Check if additional resources are available */
+ if (req->sso > mappedlfs &&
+ ((req->sso - mappedlfs) > free_lfs))
+ goto fail;
+ }
+
+ if (req->ssow) {
+ block = &hw->block[BLKADDR_SSOW];
+ if (req->ssow > block->lf.max) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid SSOW req, %d > max %d\n",
+ pcifunc, req->ssow, block->lf.max);
+ return -EINVAL;
+ }
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ if (req->ssow > mappedlfs &&
+ ((req->ssow - mappedlfs) > free_lfs))
+ goto fail;
+ }
+
+ if (req->timlfs) {
+ block = &hw->block[BLKADDR_TIM];
+ if (req->timlfs > block->lf.max) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid TIMLF req, %d > max %d\n",
+ pcifunc, req->timlfs, block->lf.max);
+ return -EINVAL;
+ }
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ if (req->timlfs > mappedlfs &&
+ ((req->timlfs - mappedlfs) > free_lfs))
+ goto fail;
+ }
+
+ if (req->cptlfs) {
+ block = &hw->block[BLKADDR_CPT0];
+ if (req->cptlfs > block->lf.max) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid CPTLF req, %d > max %d\n",
+ pcifunc, req->cptlfs, block->lf.max);
+ return -EINVAL;
+ }
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ if (req->cptlfs > mappedlfs &&
+ ((req->cptlfs - mappedlfs) > free_lfs))
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ dev_info(rvu->dev, "Request for %s failed\n", block->name);
+ return -ENOSPC;
+}
+
+static int rvu_mbox_handler_ATTACH_RESOURCES(struct rvu *rvu,
+ struct rsrc_attach *attach,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = attach->hdr.pcifunc;
+ int err;
+
+ /* If first request, detach all existing attached resources */
+ if (!attach->modify)
+ rvu_detach_rsrcs(rvu, NULL, pcifunc);
+
+ spin_lock(&rvu->rsrc_lock);
+
+ /* Check if the request can be accommodated */
+ err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
+ if (err)
+ goto exit;
+
+ /* Now attach the requested resources */
+ if (attach->npalf)
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1);
+
+ if (attach->nixlf)
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1);
+
+ if (attach->sso) {
+ /* An RVU func doesn't know which exact LFs or slots are
+ * attached to it, it always sees them as slots 0,1,2... So for
+ * a 'modify' request, simply detach all currently attached
+ * LFs/slots and attach them afresh.
+ */
+ if (attach->modify)
+ rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO, attach->sso);
+ }
+
+ if (attach->ssow) {
+ if (attach->modify)
+ rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW, attach->ssow);
+ }
+
+ if (attach->timlfs) {
+ if (attach->modify)
+ rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM, attach->timlfs);
+ }
+
+ if (attach->cptlfs) {
+ if (attach->modify)
+ rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT, attach->cptlfs);
+ }
+
+exit:
+ spin_unlock(&rvu->rsrc_lock);
+ return err;
+}
+
+static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ int blkaddr, int lf)
+{
+ u16 vec;
+
+ if (lf < 0)
+ return MSIX_VECTOR_INVALID;
+
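+ /* Scan this PF/VF's vector to block-LF map for the first vector
+ * that belongs to the given block LF.
+ */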
+ for (vec = 0; vec < pfvf->msix.max; vec++) {
+ if (pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf))
+ return vec;
+ }
+ return MSIX_VECTOR_INVALID;
+}
+
+static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct rvu_block *block, int lf)
+{
+ u16 nvecs, vec, offset;
+ u64 cfg;
+
+ cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
+ (lf << block->lfshift));
+ nvecs = (cfg >> 12) & 0xFF;
+
+ /* Check and alloc MSIX vectors, must be contiguous */
+ if (!rvu_rsrc_check_contig(&pfvf->msix, nvecs))
+ return;
+
+ offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
+
+ /* Config MSIX offset in LF */
+ rvu_write64(rvu, block->addr, block->msixcfg_reg |
+ (lf << block->lfshift), (cfg & ~0x7FFULL) | offset);
+
+ /* Update the bitmap as well */
+ for (vec = 0; vec < nvecs; vec++)
+ pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf);
+}
+
+static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct rvu_block *block, int lf)
+{
+ u16 nvecs, vec, offset;
+ u64 cfg;
+
+ cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
+ (lf << block->lfshift));
+ nvecs = (cfg >> 12) & 0xFF;
+
+ /* Clear MSIX offset in LF */
+ rvu_write64(rvu, block->addr, block->msixcfg_reg |
+ (lf << block->lfshift), cfg & ~0x7FFULL);
+
+ offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf);
+
+ /* Update the mapping */
+ for (vec = 0; vec < nvecs; vec++)
+ pfvf->msix_lfmap[offset + vec] = 0;
+
+ /* Free the same in MSIX bitmap */
+ rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
+}
+
+static int rvu_mbox_handler_MSIX_OFFSET(struct rvu *rvu, struct msg_req *req,
+ struct msix_offset_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ int lf, slot;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (!pfvf->msix.bmap)
+ return 0;
+
+ /* Fill in MSIX offsets for each block's LFs attached to this PF/VF */
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0);
+ rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf);
+
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NIX0], pcifunc, 0);
+ rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NIX0, lf);
+
+ rsp->sso = pfvf->sso;
+ for (slot = 0; slot < rsp->sso; slot++) {
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot);
+ rsp->sso_msixoff[slot] =
+ rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf);
+ }
+
+ rsp->ssow = pfvf->ssow;
+ for (slot = 0; slot < rsp->ssow; slot++) {
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot);
+ rsp->ssow_msixoff[slot] =
+ rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf);
+ }
+
+ rsp->timlfs = pfvf->timlfs;
+ for (slot = 0; slot < rsp->timlfs; slot++) {
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_TIM], pcifunc, slot);
+ rsp->timlf_msixoff[slot] =
+ rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf);
+ }
+
+ rsp->cptlfs = pfvf->cptlfs;
+ for (slot = 0; slot < rsp->cptlfs; slot++) {
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot);
+ rsp->cptlf_msixoff[slot] =
+ rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf);
+ }
+ return 0;
+}
+
+static int rvu_process_mbox_msg(struct rvu *rvu, int devid,
+ struct mbox_msghdr *req)
+{
+ /* Check if valid, if not reply with an invalid msg */
+ if (req->sig != OTX2_MBOX_REQ_SIG)
+ goto bad_message;
+
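+ /* MBOX_MESSAGES expands to one case per message ID: allocate a
+ * response of the matching type, invoke rvu_mbox_handler_<NAME>()
+ * and propagate its return code via the response header.
+ */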
+ switch (req->id) {
+#define M(_name, _id, _req_type, _rsp_type) \
+ case _id: { \
+ struct _rsp_type *rsp; \
+ int err; \
+ \
+ rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \
+ &rvu->mbox, devid, \
+ sizeof(struct _rsp_type)); \
+ if (rsp) { \
+ rsp->hdr.id = _id; \
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \
+ rsp->hdr.pcifunc = req->pcifunc; \
+ rsp->hdr.rc = 0; \
+ } \
+ \
+ err = rvu_mbox_handler_ ## _name(rvu, \
+ (struct _req_type *)req, \
+ rsp); \
+ if (rsp && err) \
+ rsp->hdr.rc = err; \
+ \
+ return rsp ? err : -ENOMEM; \
+ }
+MBOX_MESSAGES
+#undef M
+ break;
+bad_message:
+ default:
+ otx2_reply_invalid_msg(&rvu->mbox, devid, req->pcifunc,
+ req->id);
+ return -ENODEV;
+ }
+}
+
+static void rvu_mbox_handler(struct work_struct *work)
+{
+ struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+ struct rvu *rvu = mwork->rvu;
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *req_hdr;
+ struct mbox_msghdr *msg;
+ struct otx2_mbox *mbox;
+ int offset, id, err;
+ u16 pf;
+
+ mbox = &rvu->mbox;
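+ /* One work item is queued per PF, so this work item's index
+ * within mbox_wrk[] is the PF number.
+ */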
+ pf = mwork - rvu->mbox_wrk;
+ mdev = &mbox->dev[pf];
+
+ /* Process received mbox messages */
+ req_hdr = mdev->mbase + mbox->rx_start;
+ if (req_hdr->num_msgs == 0)
+ return;
+
+ offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+
+ for (id = 0; id < req_hdr->num_msgs; id++) {
+ msg = mdev->mbase + offset;
+
+ /* Set which PF sent this message based on mbox IRQ */
+ msg->pcifunc &= ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
+ msg->pcifunc |= (pf << RVU_PFVF_PF_SHIFT);
+ err = rvu_process_mbox_msg(rvu, pf, msg);
+ if (!err) {
+ offset = mbox->rx_start + msg->next_msgoff;
+ continue;
+ }
+
+ if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
+ dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
+ err, otx2_mbox_id2name(msg->id), msg->id, pf,
+ (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
+ else
+ dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
+ err, otx2_mbox_id2name(msg->id), msg->id, pf);
+ }
+
+ /* Send mbox responses to PF */
+ otx2_mbox_msg_send(mbox, pf);
+}
+
+static void rvu_mbox_up_handler(struct work_struct *work)
+{
+ struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+ struct rvu *rvu = mwork->rvu;
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg;
+ struct otx2_mbox *mbox;
+ int offset, id;
+ u16 pf;
+
+ mbox = &rvu->mbox_up;
+ pf = mwork - rvu->mbox_wrk_up;
+ mdev = &mbox->dev[pf];
+
+ rsp_hdr = mdev->mbase + mbox->rx_start;
+ if (rsp_hdr->num_msgs == 0) {
+ dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n");
+ return;
+ }
+
+ offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+
+ for (id = 0; id < rsp_hdr->num_msgs; id++) {
+ msg = mdev->mbase + offset;
+
+ if (msg->id >= MBOX_MSG_MAX) {
+ dev_err(rvu->dev,
+ "Mbox msg with unknown ID 0x%x\n", msg->id);
+ goto end;
+ }
+
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(rvu->dev,
+ "Mbox msg with wrong signature %x, ID 0x%x\n",
+ msg->sig, msg->id);
+ goto end;
+ }
+
+ switch (msg->id) {
+ case MBOX_MSG_CGX_LINK_EVENT:
+ break;
+ default:
+ if (msg->rc)
+ dev_err(rvu->dev,
+ "Mbox msg response has err %d, ID 0x%x\n",
+ msg->rc, msg->id);
+ break;
+ }
+end:
+ offset = mbox->rx_start + msg->next_msgoff;
+ mdev->msgs_acked++;
+ }
+
+ otx2_mbox_reset(mbox, 0);
+}
+
+static int rvu_mbox_init(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ void __iomem *hwbase = NULL;
+ struct rvu_work *mwork;
+ u64 bar4_addr;
+ int err, pf;
+
+ rvu->mbox_wq = alloc_workqueue("rvu_afpf_mailbox",
+ WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
+ hw->total_pfs);
+ if (!rvu->mbox_wq)
+ return -ENOMEM;
+
+ rvu->mbox_wrk = devm_kcalloc(rvu->dev, hw->total_pfs,
+ sizeof(struct rvu_work), GFP_KERNEL);
+ if (!rvu->mbox_wrk) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ rvu->mbox_wrk_up = devm_kcalloc(rvu->dev, hw->total_pfs,
+ sizeof(struct rvu_work), GFP_KERNEL);
+ if (!rvu->mbox_wrk_up) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ /* Map mbox region shared with PFs */
+ bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR);
+ /* Mailbox is a reserved memory (in RAM) region shared between
+ * RVU devices; don't map it as device memory, so that unaligned
+ * accesses remain possible.
+ */
+ hwbase = ioremap_wc(bar4_addr, MBOX_SIZE * hw->total_pfs);
+ if (!hwbase) {
+ dev_err(rvu->dev, "Unable to map mailbox region\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ err = otx2_mbox_init(&rvu->mbox, hwbase, rvu->pdev, rvu->afreg_base,
+ MBOX_DIR_AFPF, hw->total_pfs);
+ if (err)
+ goto exit;
+
+ err = otx2_mbox_init(&rvu->mbox_up, hwbase, rvu->pdev, rvu->afreg_base,
+ MBOX_DIR_AFPF_UP, hw->total_pfs);
+ if (err)
+ goto exit;
+
+ for (pf = 0; pf < hw->total_pfs; pf++) {
+ mwork = &rvu->mbox_wrk[pf];
+ mwork->rvu = rvu;
+ INIT_WORK(&mwork->work, rvu_mbox_handler);
+ }
+
+ for (pf = 0; pf < hw->total_pfs; pf++) {
+ mwork = &rvu->mbox_wrk_up[pf];
+ mwork->rvu = rvu;
+ INIT_WORK(&mwork->work, rvu_mbox_up_handler);
+ }
+
+ return 0;
+exit:
+ if (hwbase)
+ iounmap((void __iomem *)hwbase);
+ destroy_workqueue(rvu->mbox_wq);
+ return err;
+}
+
+static void rvu_mbox_destroy(struct rvu *rvu)
+{
+ if (rvu->mbox_wq) {
+ flush_workqueue(rvu->mbox_wq);
+ destroy_workqueue(rvu->mbox_wq);
+ rvu->mbox_wq = NULL;
+ }
+
+ if (rvu->mbox.hwbase)
+ iounmap((void __iomem *)rvu->mbox.hwbase);
+
+ otx2_mbox_destroy(&rvu->mbox);
+ otx2_mbox_destroy(&rvu->mbox_up);
+}
+
+static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+ struct otx2_mbox_dev *mdev;
+ struct otx2_mbox *mbox;
+ struct mbox_hdr *hdr;
+ u64 intr;
+ u8 pf;
+
+ intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
+ /* Clear interrupts */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr);
+
+ /* Sync with mbox memory region */
+ smp_wmb();
+
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ if (intr & (1ULL << pf)) {
+ mbox = &rvu->mbox;
+ mdev = &mbox->dev[pf];
+ hdr = mdev->mbase + mbox->rx_start;
+ if (hdr->num_msgs)
+ queue_work(rvu->mbox_wq,
+ &rvu->mbox_wrk[pf].work);
+ mbox = &rvu->mbox_up;
+ mdev = &mbox->dev[pf];
+ hdr = mdev->mbase + mbox->rx_start;
+ if (hdr->num_msgs)
+ queue_work(rvu->mbox_wq,
+ &rvu->mbox_wrk_up[pf].work);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_enable_mbox_intr(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ /* Clear spurious irqs, if any */
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs));
+
+ /* Enable mailbox interrupt for all PFs except PF0, i.e. the AF itself */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1S,
+ INTR_MASK(hw->total_pfs) & ~1ULL);
+}
+
+static void rvu_unregister_interrupts(struct rvu *rvu)
+{
+ int irq;
+
+ /* Disable the Mbox interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+ for (irq = 0; irq < rvu->num_vec; irq++) {
+ if (rvu->irq_allocated[irq])
+ free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
+ }
+
+ pci_free_irq_vectors(rvu->pdev);
+ rvu->num_vec = 0;
+}
+
+static int rvu_register_interrupts(struct rvu *rvu)
+{
+ int ret;
+
+ rvu->num_vec = pci_msix_vec_count(rvu->pdev);
+
+ rvu->irq_name = devm_kmalloc_array(rvu->dev, rvu->num_vec,
+ NAME_SIZE, GFP_KERNEL);
+ if (!rvu->irq_name)
+ return -ENOMEM;
+
+ rvu->irq_allocated = devm_kcalloc(rvu->dev, rvu->num_vec,
+ sizeof(bool), GFP_KERNEL);
+ if (!rvu->irq_allocated)
+ return -ENOMEM;
+
+ /* Enable MSI-X */
+ ret = pci_alloc_irq_vectors(rvu->pdev, rvu->num_vec,
+ rvu->num_vec, PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dev_err(rvu->dev,
+ "RVUAF: Request for %d msix vectors failed, ret %d\n",
+ rvu->num_vec, ret);
+ return ret;
+ }
+
+ /* Register mailbox interrupt handler */
+ sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
+ ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
+ rvu_mbox_intr_handler, 0,
+ &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for mbox irq\n");
+ goto fail;
+ }
+
+ rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;
+
+ /* Enable mailbox interrupts from all PFs */
+ rvu_enable_mbox_intr(rvu);
+
+ return 0;
+
+fail:
+ pci_free_irq_vectors(rvu->pdev);
+ return ret;
+}
+
+static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct rvu *rvu;
+ int err;
+
+ rvu = devm_kzalloc(dev, sizeof(*rvu), GFP_KERNEL);
+ if (!rvu)
+ return -ENOMEM;
+
+ rvu->hw = devm_kzalloc(dev, sizeof(struct rvu_hwinfo), GFP_KERNEL);
+ if (!rvu->hw) {
+ devm_kfree(dev, rvu);
+ return -ENOMEM;
+ }
+
+ pci_set_drvdata(pdev, rvu);
+ rvu->pdev = pdev;
+ rvu->dev = &pdev->dev;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ goto err_freemem;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto err_disable_device;
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to set DMA mask\n");
+ goto err_release_regions;
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to set consistent DMA mask\n");
+ goto err_release_regions;
+ }
+
+ /* Map Admin function CSRs */
+ rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
+ rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
+ if (!rvu->afreg_base || !rvu->pfreg_base) {
+ dev_err(dev, "Unable to map admin function CSRs, aborting\n");
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ /* Check which blocks the HW supports */
+ rvu_check_block_implemented(rvu);
+
+ rvu_reset_all_blocks(rvu);
+
+ err = rvu_setup_hw_resources(rvu);
+ if (err)
+ goto err_release_regions;
+
+ err = rvu_mbox_init(rvu);
+ if (err)
+ goto err_hwsetup;
+
+ err = rvu_cgx_probe(rvu);
+ if (err)
+ goto err_mbox;
+
+ err = rvu_register_interrupts(rvu);
+ if (err)
+ goto err_cgx;
+
+ return 0;
+err_cgx:
+ rvu_cgx_wq_destroy(rvu);
+err_mbox:
+ rvu_mbox_destroy(rvu);
+err_hwsetup:
+ rvu_reset_all_blocks(rvu);
+ rvu_free_hw_resources(rvu);
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+err_freemem:
+ pci_set_drvdata(pdev, NULL);
+ devm_kfree(&pdev->dev, rvu->hw);
+ devm_kfree(dev, rvu);
+ return err;
+}
+
+static void rvu_remove(struct pci_dev *pdev)
+{
+ struct rvu *rvu = pci_get_drvdata(pdev);
+
+ rvu_unregister_interrupts(rvu);
+ rvu_cgx_wq_destroy(rvu);
+ rvu_mbox_destroy(rvu);
+ rvu_reset_all_blocks(rvu);
+ rvu_free_hw_resources(rvu);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ devm_kfree(&pdev->dev, rvu->hw);
+ devm_kfree(&pdev->dev, rvu);
+}
+
+static struct pci_driver rvu_driver = {
+ .name = DRV_NAME,
+ .id_table = rvu_id_table,
+ .probe = rvu_probe,
+ .remove = rvu_remove,
+};
+
+static int __init rvu_init_module(void)
+{
+ int err;
+
+ pr_info("%s: %s\n", DRV_NAME, DRV_STRING);
+
+ err = pci_register_driver(&cgx_driver);
+ if (err < 0)
+ return err;
+
+ err = pci_register_driver(&rvu_driver);
+ if (err < 0)
+ pci_unregister_driver(&cgx_driver);
+
+ return err;
+}
+
+static void __exit rvu_cleanup_module(void)
+{
+ pci_unregister_driver(&rvu_driver);
+ pci_unregister_driver(&cgx_driver);
+}
+
+module_init(rvu_init_module);
+module_exit(rvu_cleanup_module);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
new file mode 100644
index 000000000000..2c0580cd2807
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -0,0 +1,368 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef RVU_H
+#define RVU_H
+
+#include "rvu_struct.h"
+#include "common.h"
+#include "mbox.h"
+
+/* PCI device IDs */
+#define PCI_DEVID_OCTEONTX2_RVU_AF 0xA065
+
+/* PCI BAR nos */
+#define PCI_AF_REG_BAR_NUM 0
+#define PCI_PF_REG_BAR_NUM 2
+#define PCI_MBOX_BAR_NUM 4
+
+#define NAME_SIZE 32
+
+/* PF_FUNC */
+#define RVU_PFVF_PF_SHIFT 10
+#define RVU_PFVF_PF_MASK 0x3F
+#define RVU_PFVF_FUNC_SHIFT 0
+#define RVU_PFVF_FUNC_MASK 0x3FF
+
+struct rvu_work {
+ struct work_struct work;
+ struct rvu *rvu;
+};
+
+struct rsrc_bmap {
+ unsigned long *bmap; /* Pointer to resource bitmap */
+ u16 max; /* Max resource id or count */
+};
+
+struct rvu_block {
+ struct rsrc_bmap lf;
+ struct admin_queue *aq; /* NIX/NPA AQ */
+ u16 *fn_map; /* LF to pcifunc mapping */
+ bool multislot;
+ bool implemented;
+ u8 addr; /* RVU_BLOCK_ADDR_E */
+ u8 type; /* RVU_BLOCK_TYPE_E */
+ u8 lfshift;
+ u64 lookup_reg;
+ u64 pf_lfcnt_reg;
+ u64 vf_lfcnt_reg;
+ u64 lfcfg_reg;
+ u64 msixcfg_reg;
+ u64 lfreset_reg;
+ unsigned char name[NAME_SIZE];
+};
+
+struct nix_mcast {
+ struct qmem *mce_ctx;
+ struct qmem *mcast_buf;
+ int replay_pkind;
+ int next_free_mce;
+ spinlock_t mce_lock; /* Serialize MCE updates */
+};
+
+struct nix_mce_list {
+ struct hlist_head head;
+ int count;
+ int max;
+};
+
+struct npc_mcam {
+ spinlock_t lock; /* MCAM entries and counters update lock */
+ u8 keysize; /* MCAM keysize 112/224/448 bits */
+ u8 banks; /* Number of MCAM banks */
+ u8 banks_per_entry;/* Number of keywords in key */
+ u16 banksize; /* Number of MCAM entries in each bank */
+ u16 total_entries; /* Total number of MCAM entries */
+ u16 entries; /* Total minus reserved for NIX LFs */
+ u16 nixlf_offset; /* Offset of nixlf rsvd uncast entries */
+ u16 pf_offset; /* Offset of PF's rsvd bcast, promisc entries */
+};
+
+/* Structure for per RVU func info, i.e. PF/VF */
+struct rvu_pfvf {
+ bool npalf; /* Only one NPALF per RVU_FUNC */
+ bool nixlf; /* Only one NIXLF per RVU_FUNC */
+ u16 sso;
+ u16 ssow;
+ u16 cptlfs;
+ u16 timlfs;
+ u8 cgx_lmac;
+
+ /* Block LF's MSIX vector info */
+ struct rsrc_bmap msix; /* Bitmap for MSIX vector alloc */
+#define MSIX_BLKLF(blkaddr, lf) (((blkaddr) << 8) | ((lf) & 0xFF))
+ u16 *msix_lfmap; /* Vector to block LF mapping */
+
+ /* NPA contexts */
+ struct qmem *aura_ctx;
+ struct qmem *pool_ctx;
+ struct qmem *npa_qints_ctx;
+ unsigned long *aura_bmap;
+ unsigned long *pool_bmap;
+
+ /* NIX contexts */
+ struct qmem *rq_ctx;
+ struct qmem *sq_ctx;
+ struct qmem *cq_ctx;
+ struct qmem *rss_ctx;
+ struct qmem *cq_ints_ctx;
+ struct qmem *nix_qints_ctx;
+ unsigned long *sq_bmap;
+ unsigned long *rq_bmap;
+ unsigned long *cq_bmap;
+
+ u16 rx_chan_base;
+ u16 tx_chan_base;
+ u8 rx_chan_cnt; /* total number of RX channels */
+ u8 tx_chan_cnt; /* total number of TX channels */
+
+ u8 mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */
+
+ /* Broadcast pkt replication info */
+ u16 bcast_mce_idx;
+ struct nix_mce_list bcast_mce_list;
+};
+
+struct nix_txsch {
+ struct rsrc_bmap schq;
+ u8 lvl;
+ u16 *pfvf_map;
+};
+
+struct npc_pkind {
+ struct rsrc_bmap rsrc;
+ u32 *pfchan_map;
+};
+
+struct nix_hw {
+ struct nix_txsch txsch[NIX_TXSCH_LVL_CNT]; /* Tx schedulers */
+ struct nix_mcast mcast;
+};
+
+struct rvu_hwinfo {
+ u8 total_pfs; /* MAX RVU PFs HW supports */
+ u16 total_vfs; /* Max RVU VFs HW supports */
+ u16 max_vfs_per_pf; /* Max VFs that can be attached to a PF */
+ u8 cgx;
+ u8 lmac_per_cgx;
+ u8 cgx_links;
+ u8 lbk_links;
+ u8 sdp_links;
+ u8 npc_kpus; /* No of parser units */
+
+
+ struct rvu_block block[BLK_COUNT]; /* Block info */
+ struct nix_hw *nix0;
+ struct npc_pkind pkind;
+ struct npc_mcam mcam;
+};
+
+struct rvu {
+ void __iomem *afreg_base;
+ void __iomem *pfreg_base;
+ struct pci_dev *pdev;
+ struct device *dev;
+ struct rvu_hwinfo *hw;
+ struct rvu_pfvf *pf;
+ struct rvu_pfvf *hwvf;
+ spinlock_t rsrc_lock; /* Serialize resource alloc/free */
+
+ /* Mbox */
+ struct otx2_mbox mbox;
+ struct rvu_work *mbox_wrk;
+ struct otx2_mbox mbox_up;
+ struct rvu_work *mbox_wrk_up;
+ struct workqueue_struct *mbox_wq;
+
+ /* MSI-X */
+ u16 num_vec;
+ char *irq_name;
+ bool *irq_allocated;
+ dma_addr_t msix_base_iova;
+
+ /* CGX */
+#define PF_CGXMAP_BASE 1 /* PF 0 is reserved for RVU PF */
+ u8 cgx_mapped_pfs;
+ u8 cgx_cnt; /* available cgx ports */
+ u8 *pf2cgxlmac_map; /* pf to cgx_lmac map */
+ u16 *cgxlmac2pf_map; /* bitmap of mapped pfs for
+ * every cgx lmac port
+ */
+ unsigned long pf_notify_bmap; /* Flags for PF notification */
+ void **cgx_idmap; /* cgx id to cgx data map table */
+ struct work_struct cgx_evh_work;
+ struct workqueue_struct *cgx_evh_wq;
+ spinlock_t cgx_evq_lock; /* cgx event queue lock */
+ struct list_head cgx_evq_head; /* cgx event queue head */
+};
+
+static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
+{
+ writeq(val, rvu->afreg_base + ((block << 28) | offset));
+}
+
+static inline u64 rvu_read64(struct rvu *rvu, u64 block, u64 offset)
+{
+ return readq(rvu->afreg_base + ((block << 28) | offset));
+}
+
+static inline void rvupf_write64(struct rvu *rvu, u64 offset, u64 val)
+{
+ writeq(val, rvu->pfreg_base + offset);
+}
+
+static inline u64 rvupf_read64(struct rvu *rvu, u64 offset)
+{
+ return readq(rvu->pfreg_base + offset);
+}
+
+/* Function Prototypes
+ * RVU
+ */
+int rvu_alloc_bitmap(struct rsrc_bmap *rsrc);
+int rvu_alloc_rsrc(struct rsrc_bmap *rsrc);
+void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
+int rvu_rsrc_free_count(struct rsrc_bmap *rsrc);
+int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc);
+bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc);
+int rvu_get_pf(u16 pcifunc);
+struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc);
+void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf);
+bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr);
+int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot);
+int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf);
+int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
+int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero);
+
+/* RVU HW reg validation */
+enum regmap_block {
+ TXSCHQ_HWREGMAP = 0,
+ MAX_HWREGMAP,
+};
+
+bool rvu_check_valid_reg(int regmap, int regblk, u64 reg);
+
+/* NPA/NIX AQ APIs */
+int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
+ int qsize, int inst_size, int res_size);
+void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq);
+
+/* CGX APIs */
+static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf)
+{
+ return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs);
+}
+
+static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
+{
+ *cgx_id = (map >> 4) & 0xF;
+ *lmac_id = (map & 0xF);
+}
+
+int rvu_cgx_probe(struct rvu *rvu);
+void rvu_cgx_wq_destroy(struct rvu *rvu);
+void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu);
+int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start);
+int rvu_mbox_handler_CGX_START_RXTX(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_CGX_STOP_RXTX(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_CGX_STATS(struct rvu *rvu, struct msg_req *req,
+ struct cgx_stats_rsp *rsp);
+int rvu_mbox_handler_CGX_MAC_ADDR_SET(struct rvu *rvu,
+ struct cgx_mac_addr_set_or_get *req,
+ struct cgx_mac_addr_set_or_get *rsp);
+int rvu_mbox_handler_CGX_MAC_ADDR_GET(struct rvu *rvu,
+ struct cgx_mac_addr_set_or_get *req,
+ struct cgx_mac_addr_set_or_get *rsp);
+int rvu_mbox_handler_CGX_PROMISC_ENABLE(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_CGX_PROMISC_DISABLE(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_CGX_START_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_CGX_STOP_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_CGX_GET_LINKINFO(struct rvu *rvu, struct msg_req *req,
+ struct cgx_link_info_msg *rsp);
+int rvu_mbox_handler_CGX_INTLBK_ENABLE(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_CGX_INTLBK_DISABLE(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+
+/* NPA APIs */
+int rvu_npa_init(struct rvu *rvu);
+void rvu_npa_freemem(struct rvu *rvu);
+int rvu_mbox_handler_NPA_AQ_ENQ(struct rvu *rvu,
+ struct npa_aq_enq_req *req,
+ struct npa_aq_enq_rsp *rsp);
+int rvu_mbox_handler_NPA_HWCTX_DISABLE(struct rvu *rvu,
+ struct hwctx_disable_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_NPA_LF_ALLOC(struct rvu *rvu,
+ struct npa_lf_alloc_req *req,
+ struct npa_lf_alloc_rsp *rsp);
+int rvu_mbox_handler_NPA_LF_FREE(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+
+/* NIX APIs */
+int rvu_nix_init(struct rvu *rvu);
+void rvu_nix_freemem(struct rvu *rvu);
+int rvu_get_nixlf_count(struct rvu *rvu);
+int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
+ struct nix_lf_alloc_req *req,
+ struct nix_lf_alloc_rsp *rsp);
+int rvu_mbox_handler_NIX_LF_FREE(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_NIX_AQ_ENQ(struct rvu *rvu,
+ struct nix_aq_enq_req *req,
+ struct nix_aq_enq_rsp *rsp);
+int rvu_mbox_handler_NIX_HWCTX_DISABLE(struct rvu *rvu,
+ struct hwctx_disable_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu,
+ struct nix_txsch_alloc_req *req,
+ struct nix_txsch_alloc_rsp *rsp);
+int rvu_mbox_handler_NIX_TXSCH_FREE(struct rvu *rvu,
+ struct nix_txsch_free_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu,
+ struct nix_txschq_config *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_NIX_STATS_RST(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_NIX_VTAG_CFG(struct rvu *rvu,
+ struct nix_vtag_config *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_NIX_RSS_FLOWKEY_CFG(struct rvu *rvu,
+ struct nix_rss_flowkey_cfg *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_NIX_SET_MAC_ADDR(struct rvu *rvu,
+ struct nix_set_mac_addr *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_NIX_SET_RX_MODE(struct rvu *rvu, struct nix_rx_mode *req,
+ struct msg_rsp *rsp);
+
+/* NPC APIs */
+int rvu_npc_init(struct rvu *rvu);
+void rvu_npc_freemem(struct rvu *rvu);
+int rvu_npc_get_pkind(struct rvu *rvu, u16 pf);
+void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf);
+void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, u64 chan, u8 *mac_addr);
+void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, u64 chan, bool allmulti);
+void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
+void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, u64 chan);
+void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
+void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
+ int group, int alg_idx, int mcam_index);
+#endif /* RVU_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
new file mode 100644
index 000000000000..188185c15b4a
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -0,0 +1,515 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu.h"
+#include "cgx.h"
+
+struct cgx_evq_entry {
+ struct list_head evq_node;
+ struct cgx_link_event link_event;
+};
+
+#define M(_name, _id, _req_type, _rsp_type) \
+static struct _req_type __maybe_unused \
+*otx2_mbox_alloc_msg_ ## _name(struct rvu *rvu, int devid) \
+{ \
+ struct _req_type *req; \
+ \
+ req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \
+ &rvu->mbox_up, devid, sizeof(struct _req_type), \
+ sizeof(struct _rsp_type)); \
+ if (!req) \
+ return NULL; \
+ req->hdr.sig = OTX2_MBOX_REQ_SIG; \
+ req->hdr.id = _id; \
+ return req; \
+}
+
+MBOX_UP_CGX_MESSAGES
+#undef M
+
+/* Returns bitmap of mapped PFs */
+static inline u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
+{
+ return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
+}
+
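+/* Pack cgx id into the upper nibble and lmac id into the lower nibble;
+ * rvu_get_cgx_lmac_id() decodes this map byte.
+ */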
+static inline u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
+{
+ return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
+}
+
+void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
+{
+ if (cgx_id >= rvu->cgx_cnt)
+ return NULL;
+
+ return rvu->cgx_idmap[cgx_id];
+}
+
+static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
+{
+ struct npc_pkind *pkind = &rvu->hw->pkind;
+ int cgx_cnt = rvu->cgx_cnt;
+ int cgx, lmac_cnt, lmac;
+ int pf = PF_CGXMAP_BASE;
+ int size, free_pkind;
+
+ if (!cgx_cnt)
+ return 0;
+
+ if (cgx_cnt > 0xF || MAX_LMAC_PER_CGX > 0xF)
+ return -EINVAL;
+
+ /* Alloc map table
+ * An additional entry is required since PF id starts from 1 and
+ * hence entry at offset 0 is invalid.
+ */
+ size = (cgx_cnt * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
+ rvu->pf2cgxlmac_map = devm_kzalloc(rvu->dev, size, GFP_KERNEL);
+ if (!rvu->pf2cgxlmac_map)
+ return -ENOMEM;
+
+ /* Initialize offset 0 with an invalid cgx and lmac id */
+ rvu->pf2cgxlmac_map[0] = 0xFF;
+
+ /* Reverse map table */
+ rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
+ cgx_cnt * MAX_LMAC_PER_CGX * sizeof(u16),
+ GFP_KERNEL);
+ if (!rvu->cgxlmac2pf_map)
+ return -ENOMEM;
+
+ rvu->cgx_mapped_pfs = 0;
+ for (cgx = 0; cgx < cgx_cnt; cgx++) {
+ lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
+ for (lmac = 0; lmac < lmac_cnt; lmac++, pf++) {
+ rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
+ rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
+ free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
+ pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
+ rvu->cgx_mapped_pfs++;
+ }
+ }
+ return 0;
+}
+
+static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu)
+{
+ struct cgx_evq_entry *qentry;
+ unsigned long flags;
+ int err;
+
+ qentry = kmalloc(sizeof(*qentry), GFP_KERNEL);
+ if (!qentry)
+ return -ENOMEM;
+
+ /* Lock the event queue before we read the local link status */
+ spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
+ err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
+ &qentry->link_event.link_uinfo);
+ qentry->link_event.cgx_id = cgx_id;
+ qentry->link_event.lmac_id = lmac_id;
+ if (err)
+ goto skip_add;
+ list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
+skip_add:
+ spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
+
+ /* start worker to process the events */
+ queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);
+
+ return 0;
+}
+
+/* This is called from interrupt context and is expected to be atomic */
+static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
+{
+ struct cgx_evq_entry *qentry;
+ struct rvu *rvu = data;
+
+ /* post event to the event queue */
+ qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
+ if (!qentry)
+ return -ENOMEM;
+ qentry->link_event = *event;
+ spin_lock(&rvu->cgx_evq_lock);
+ list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
+ spin_unlock(&rvu->cgx_evq_lock);
+
+ /* start worker to process the events */
+ queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);
+
+ return 0;
+}
+
+static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
+{
+ struct cgx_link_user_info *linfo;
+ struct cgx_link_info_msg *msg;
+ unsigned long pfmap;
+ int err, pfid;
+
+ linfo = &event->link_uinfo;
+ pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
+
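+ /* pfmap has one bit set for each PF mapped to this CGX LMAC;
+ * walk the bitmap and notify each of them.
+ */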
+ do {
+ pfid = find_first_bit(&pfmap, 16);
+ clear_bit(pfid, &pfmap);
+
+ /* check if notification is enabled */
+ if (!test_bit(pfid, &rvu->pf_notify_bmap)) {
+ dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n",
+ event->cgx_id, event->lmac_id,
+ linfo->link_up ? "UP" : "DOWN");
+ continue;
+ }
+
+ /* Send mbox message to PF */
+ msg = otx2_mbox_alloc_msg_CGX_LINK_EVENT(rvu, pfid);
+ if (!msg)
+ continue;
+ msg->link_info = *linfo;
+ otx2_mbox_msg_send(&rvu->mbox_up, pfid);
+ err = otx2_mbox_wait_for_rsp(&rvu->mbox_up, pfid);
+ if (err)
+ dev_warn(rvu->dev, "notification to pf %d failed\n",
+ pfid);
+ } while (pfmap);
+}
+
+static void cgx_evhandler_task(struct work_struct *work)
+{
+ struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
+ struct cgx_evq_entry *qentry;
+ struct cgx_link_event *event;
+ unsigned long flags;
+
+ do {
+ /* Dequeue an event */
+ spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
+ qentry = list_first_entry_or_null(&rvu->cgx_evq_head,
+ struct cgx_evq_entry,
+ evq_node);
+ if (qentry)
+ list_del(&qentry->evq_node);
+ spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
+ if (!qentry)
+ break; /* nothing more to process */
+
+ event = &qentry->link_event;
+
+ /* process event */
+ cgx_notify_pfs(event, rvu);
+ kfree(qentry);
+ } while (1);
+}
+
+static void cgx_lmac_event_handler_init(struct rvu *rvu)
+{
+ struct cgx_event_cb cb;
+ int cgx, lmac, err;
+ void *cgxd;
+
+ spin_lock_init(&rvu->cgx_evq_lock);
+ INIT_LIST_HEAD(&rvu->cgx_evq_head);
+ INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
+ rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
+ if (!rvu->cgx_evh_wq) {
+ dev_err(rvu->dev, "alloc workqueue failed");
+ return;
+ }
+
+ cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */
+ cb.data = rvu;
+
+ for (cgx = 0; cgx < rvu->cgx_cnt; cgx++) {
+ cgxd = rvu_cgx_pdata(cgx, rvu);
+ for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) {
+ err = cgx_lmac_evh_register(&cb, cgxd, lmac);
+ if (err)
+ dev_err(rvu->dev,
+ "%d:%d handler register failed\n",
+ cgx, lmac);
+ }
+ }
+}
+
+void rvu_cgx_wq_destroy(struct rvu *rvu)
+{
+ if (rvu->cgx_evh_wq) {
+ flush_workqueue(rvu->cgx_evh_wq);
+ destroy_workqueue(rvu->cgx_evh_wq);
+ rvu->cgx_evh_wq = NULL;
+ }
+}
+
+int rvu_cgx_probe(struct rvu *rvu)
+{
+ int i, err;
+
+ /* find available cgx ports */
+ rvu->cgx_cnt = cgx_get_cgx_cnt();
+ if (!rvu->cgx_cnt) {
+ dev_info(rvu->dev, "No CGX devices found!\n");
+ return -ENODEV;
+ }
+
+ rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt * sizeof(void *),
+ GFP_KERNEL);
+ if (!rvu->cgx_idmap)
+ return -ENOMEM;
+
+ /* Initialize the cgxdata table */
+ for (i = 0; i < rvu->cgx_cnt; i++)
+ rvu->cgx_idmap[i] = cgx_get_pdata(i);
+
+ /* Map CGX LMAC interfaces to RVU PFs */
+ err = rvu_map_cgx_lmac_pf(rvu);
+ if (err)
+ return err;
+
+ /* Register for CGX events */
+ cgx_lmac_event_handler_init(rvu);
+ return 0;
+}
+
+int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+
+ /* This msg is expected only from PFs that are mapped to CGX LMACs;
+ * if received from any other PF/VF, simply ACK, nothing to do.
+ */
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ cgx_lmac_rx_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, start);
+
+ return 0;
+}
+
+int rvu_mbox_handler_CGX_START_RXTX(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
+ return 0;
+}
+
+int rvu_mbox_handler_CGX_STOP_RXTX(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
+ return 0;
+}
+
+int rvu_mbox_handler_CGX_STATS(struct rvu *rvu, struct msg_req *req,
+ struct cgx_stats_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ int stat = 0, err = 0;
+ u64 tx_stat, rx_stat;
+ u8 cgx_idx, lmac;
+ void *cgxd;
+
+ if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
+ !is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
+ cgxd = rvu_cgx_pdata(cgx_idx, rvu);
+
+ /* Rx stats */
+ while (stat < CGX_RX_STATS_COUNT) {
+ err = cgx_get_rx_stats(cgxd, lmac, stat, &rx_stat);
+ if (err)
+ return err;
+ rsp->rx_stats[stat] = rx_stat;
+ stat++;
+ }
+
+ /* Tx stats */
+ stat = 0;
+ while (stat < CGX_TX_STATS_COUNT) {
+ err = cgx_get_tx_stats(cgxd, lmac, stat, &tx_stat);
+ if (err)
+ return err;
+ rsp->tx_stats[stat] = tx_stat;
+ stat++;
+ }
+ return 0;
+}
+
+int rvu_mbox_handler_CGX_MAC_ADDR_SET(struct rvu *rvu,
+ struct cgx_mac_addr_set_or_get *req,
+ struct cgx_mac_addr_set_or_get *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);
+
+ return 0;
+}
+
+int rvu_mbox_handler_CGX_MAC_ADDR_GET(struct rvu *rvu,
+ struct cgx_mac_addr_set_or_get *req,
+ struct cgx_mac_addr_set_or_get *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+ int rc = 0, i;
+ u64 cfg;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ rsp->hdr.rc = rc;
+ cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
+ /* copy 48 bit mac address to rsp->mac_addr */
+ for (i = 0; i < ETH_ALEN; i++)
+ rsp->mac_addr[i] = cfg >> (ETH_ALEN - 1 - i) * 8;
+ return 0;
+}
+
+int rvu_mbox_handler_CGX_PROMISC_ENABLE(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+
+ /* This msg is expected only from PFs that are mapped to CGX LMACs;
+ * if received from any other PF/VF, simply ACK, nothing to do.
+ */
+ if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
+ !is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ cgx_lmac_promisc_config(cgx_id, lmac_id, true);
+ return 0;
+}
+
+int rvu_mbox_handler_CGX_PROMISC_DISABLE(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+
+ /* This msg is expected only from PFs that are mapped to CGX LMACs;
+ * if received from any other PF/VF, simply ACK, nothing to do.
+ */
+ if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
+ !is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ cgx_lmac_promisc_config(cgx_id, lmac_id, false);
+ return 0;
+}
+
+static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+
+ /* This msg is expected only from PFs that are mapped to CGX LMACs;
+ * if received from any other PF/VF, simply ACK, nothing to do.
+ */
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ if (en) {
+ set_bit(pf, &rvu->pf_notify_bmap);
+ /* Send the current link status to PF */
+ rvu_cgx_send_link_info(cgx_id, lmac_id, rvu);
+ } else {
+ clear_bit(pf, &rvu->pf_notify_bmap);
+ }
+
+ return 0;
+}
+
+int rvu_mbox_handler_CGX_START_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true);
+ return 0;
+}
+
+int rvu_mbox_handler_CGX_STOP_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false);
+ return 0;
+}
+
+int rvu_mbox_handler_CGX_GET_LINKINFO(struct rvu *rvu, struct msg_req *req,
+ struct cgx_link_info_msg *rsp)
+{
+ u8 cgx_id, lmac_id;
+ int pf, err;
+
+ pf = rvu_get_pf(req->hdr.pcifunc);
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
+ &rsp->link_info);
+ return err;
+}
+
+static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+
+ /* This msg is expected only from PFs that are mapped to CGX LMACs;
+ * if received from any other PF/VF, simply ACK, nothing to do.
+ */
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ return cgx_lmac_internal_loopback(rvu_cgx_pdata(cgx_id, rvu),
+ lmac_id, en);
+}
+
+int rvu_mbox_handler_CGX_INTLBK_ENABLE(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true);
+ return 0;
+}
+
+int rvu_mbox_handler_CGX_INTLBK_DISABLE(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
new file mode 100644
index 000000000000..a5ab7eff2301
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -0,0 +1,1959 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+#include "npc.h"
+#include "cgx.h"
+
+static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add);
+
+enum mc_tbl_sz {
+ MC_TBL_SZ_256,
+ MC_TBL_SZ_512,
+ MC_TBL_SZ_1K,
+ MC_TBL_SZ_2K,
+ MC_TBL_SZ_4K,
+ MC_TBL_SZ_8K,
+ MC_TBL_SZ_16K,
+ MC_TBL_SZ_32K,
+ MC_TBL_SZ_64K,
+};
+
+enum mc_buf_cnt {
+ MC_BUF_CNT_8,
+ MC_BUF_CNT_16,
+ MC_BUF_CNT_32,
+ MC_BUF_CNT_64,
+ MC_BUF_CNT_128,
+ MC_BUF_CNT_256,
+ MC_BUF_CNT_512,
+ MC_BUF_CNT_1024,
+ MC_BUF_CNT_2048,
+};
+
+ /* For now, considering MC resources needed for broadcast
+ * pkt replication only, i.e. 256 HWVFs + 12 PFs.
+ */
+#define MC_TBL_SIZE MC_TBL_SZ_512
+#define MC_BUF_CNT MC_BUF_CNT_128
+
+struct mce {
+ struct hlist_node node;
+ u16 idx;
+ u16 pcifunc;
+};
+
+int rvu_get_nixlf_count(struct rvu *rvu)
+{
+ struct rvu_block *block;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return 0;
+ block = &rvu->hw->block[blkaddr];
+ return block->lf.max;
+}
+
+static void nix_mce_list_init(struct nix_mce_list *list, int max)
+{
+ INIT_HLIST_HEAD(&list->head);
+ list->count = 0;
+ list->max = max;
+}
+
+static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
+{
+ int idx;
+
+ if (!mcast)
+ return 0;
+
+ idx = mcast->next_free_mce;
+ mcast->next_free_mce += count;
+ return idx;
+}
+
+static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
+{
+ if (blkaddr == BLKADDR_NIX0 && hw->nix0)
+ return hw->nix0;
+
+ return NULL;
+}
+
+static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
+ int lvl, u16 pcifunc, u16 schq)
+{
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return false;
+
+ txsch = &nix_hw->txsch[lvl];
+ /* Check out of bounds */
+ if (schq >= txsch->schq.max)
+ return false;
+
+ spin_lock(&rvu->rsrc_lock);
+ if (txsch->pfvf_map[schq] != pcifunc) {
+ spin_unlock(&rvu->rsrc_lock);
+ return false;
+ }
+ spin_unlock(&rvu->rsrc_lock);
+ return true;
+}
+
+static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ u8 cgx_id, lmac_id;
+ int pkind, pf;
+ int err;
+
+ pf = rvu_get_pf(pcifunc);
+ if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
+ return 0;
+
+ switch (type) {
+ case NIX_INTF_TYPE_CGX:
+ pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
+ rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
+
+ pkind = rvu_npc_get_pkind(rvu, pf);
+ if (pkind < 0) {
+ dev_err(rvu->dev,
+ "PF_Func 0x%x: Invalid pkind\n", pcifunc);
+ return -EINVAL;
+ }
+ pfvf->rx_chan_base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0);
+ pfvf->tx_chan_base = pfvf->rx_chan_base;
+ pfvf->rx_chan_cnt = 1;
+ pfvf->tx_chan_cnt = 1;
+ cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
+ rvu_npc_set_pkind(rvu, pkind, pfvf);
+ break;
+ case NIX_INTF_TYPE_LBK:
+ break;
+ }
+
+ /* Add a UCAST forwarding rule in MCAM for the MAC address of the
+ * RVU PF/VF this NIXLF is attached to.
+ */
+ rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
+ pfvf->rx_chan_base, pfvf->mac_addr);
+
+ /* Add this PF_FUNC to bcast pkt replication list */
+ err = nix_update_bcast_mce_list(rvu, pcifunc, true);
+ if (err) {
+ dev_err(rvu->dev,
+ "Bcast list, failed to enable PF_FUNC 0x%x\n",
+ pcifunc);
+ return err;
+ }
+
+ rvu_npc_install_bcast_match_entry(rvu, pcifunc,
+ nixlf, pfvf->rx_chan_base);
+
+ return 0;
+}
+
+static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
+{
+ int err;
+
+ /* Remove this PF_FUNC from bcast pkt replication list */
+ err = nix_update_bcast_mce_list(rvu, pcifunc, false);
+ if (err) {
+ dev_err(rvu->dev,
+ "Bcast list, failed to disable PF_FUNC 0x%x\n",
+ pcifunc);
+ }
+
+ /* Free and disable any MCAM entries used by this NIX LF */
+ rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+}
+
+static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
+ u64 format, bool v4, u64 *fidx)
+{
+ struct nix_lso_format field = {0};
+
+ /* IP's Length field */
+ field.layer = NIX_TXLAYER_OL3;
+ /* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */
+ field.offset = v4 ? 2 : 4;
+ field.sizem1 = 1; /* i.e. 2 bytes */
+ field.alg = NIX_LSOALG_ADD_PAYLEN;
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
+ *(u64 *)&field);
+
+ /* No ID field in IPv6 header */
+ if (!v4)
+ return;
+
+ /* IP's ID field */
+ field.layer = NIX_TXLAYER_OL3;
+ field.offset = 4;
+ field.sizem1 = 1; /* i.e. 2 bytes */
+ field.alg = NIX_LSOALG_ADD_SEGNUM;
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
+ *(u64 *)&field);
+}
+
+static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
+ u64 format, u64 *fidx)
+{
+ struct nix_lso_format field = {0};
+
+ /* TCP's sequence number field */
+ field.layer = NIX_TXLAYER_OL4;
+ field.offset = 4;
+ field.sizem1 = 3; /* i.e. 4 bytes */
+ field.alg = NIX_LSOALG_ADD_OFFSET;
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
+ *(u64 *)&field);
+
+ /* TCP's flags field */
+ field.layer = NIX_TXLAYER_OL4;
+ field.offset = 12;
+ field.sizem1 = 0; /* not needed */
+ field.alg = NIX_LSOALG_TCP_FLAGS;
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
+ *(u64 *)&field);
+}
+
+static void nix_setup_lso(struct rvu *rvu, int blkaddr)
+{
+ u64 cfg, idx, fidx = 0;
+
+ /* Enable LSO */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
+ /* For TSO, set first and middle segment flags to
+ * mask out PSH, RST & FIN flags in TCP packet
+ */
+ cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
+ cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
+ rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
+
+ /* Configure format fields for TCPv4 segmentation offload */
+ idx = NIX_LSO_FORMAT_IDX_TSOV4;
+ nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
+ nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
+
+ /* Set rest of the fields to NOP */
+ for (; fidx < 8; fidx++) {
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
+ }
+
+ /* Configure format fields for TCPv6 segmentation offload */
+ idx = NIX_LSO_FORMAT_IDX_TSOV6;
+ fidx = 0;
+ nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
+ nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
+
+ /* Set rest of the fields to NOP */
+ for (; fidx < 8; fidx++) {
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
+ }
+}
+
+static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
+{
+ kfree(pfvf->rq_bmap);
+ kfree(pfvf->sq_bmap);
+ kfree(pfvf->cq_bmap);
+ if (pfvf->rq_ctx)
+ qmem_free(rvu->dev, pfvf->rq_ctx);
+ if (pfvf->sq_ctx)
+ qmem_free(rvu->dev, pfvf->sq_ctx);
+ if (pfvf->cq_ctx)
+ qmem_free(rvu->dev, pfvf->cq_ctx);
+ if (pfvf->rss_ctx)
+ qmem_free(rvu->dev, pfvf->rss_ctx);
+ if (pfvf->nix_qints_ctx)
+ qmem_free(rvu->dev, pfvf->nix_qints_ctx);
+ if (pfvf->cq_ints_ctx)
+ qmem_free(rvu->dev, pfvf->cq_ints_ctx);
+
+ pfvf->rq_bmap = NULL;
+ pfvf->cq_bmap = NULL;
+ pfvf->sq_bmap = NULL;
+ pfvf->rq_ctx = NULL;
+ pfvf->sq_ctx = NULL;
+ pfvf->cq_ctx = NULL;
+ pfvf->rss_ctx = NULL;
+ pfvf->nix_qints_ctx = NULL;
+ pfvf->cq_ints_ctx = NULL;
+}
+
+static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
+ struct rvu_pfvf *pfvf, int nixlf,
+ int rss_sz, int rss_grps, int hwctx_size)
+{
+ int err, grp, num_indices;
+
+ /* RSS is not requested for this NIXLF */
+ if (!rss_sz)
+ return 0;
+ num_indices = rss_sz * rss_grps;
+
+ /* Alloc NIX RSS HW context memory and config the base */
+ err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
+ if (err)
+ return err;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
+ (u64)pfvf->rss_ctx->iova);
+
+ /* Config full RSS table size, enable RSS and caching */
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
+ BIT_ULL(36) | BIT_ULL(4) |
+ ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE));
+ /* Config RSS group offset and sizes */
+ for (grp = 0; grp < rss_grps; grp++)
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
+ ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
+ return 0;
+}
+
+static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
+ struct nix_aq_inst_s *inst)
+{
+ struct admin_queue *aq = block->aq;
+ struct nix_aq_res_s *result;
+ int timeout = 1000;
+ u64 reg, head;
+
+ result = (struct nix_aq_res_s *)aq->res->base;
+
+ /* Get the current head pointer, where this instruction will be appended */
+ reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
+ head = (reg >> 4) & AQ_PTR_MASK;
+
+ memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
+ (void *)inst, aq->inst->entry_sz);
+ memset(result, 0, sizeof(*result));
+ /* sync into memory */
+ wmb();
+
+ /* Ring the doorbell and wait for result */
+ rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
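+ /* Poll the completion code; with udelay(1) per iteration this busy-waits
+ * for up to ~1000 microseconds before giving up.
+ */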
+ while (result->compcode == NIX_AQ_COMP_NOTDONE) {
+ cpu_relax();
+ udelay(1);
+ timeout--;
+ if (!timeout)
+ return -EBUSY;
+ }
+
+ if (result->compcode != NIX_AQ_COMP_GOOD)
+ /* TODO: Replace this with some error code */
+ return -EBUSY;
+
+ return 0;
+}
+
+static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
+ struct nix_aq_enq_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int nixlf, blkaddr, rc = 0;
+ struct nix_aq_inst_s inst;
+ struct rvu_block *block;
+ struct admin_queue *aq;
+ struct rvu_pfvf *pfvf;
+ void *ctx, *mask;
+ bool ena;
+ u64 cfg;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+ aq = block->aq;
+ if (!aq) {
+ dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
+ return NIX_AF_ERR_AQ_ENQUEUE;
+ }
+
+ nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ switch (req->ctype) {
+ case NIX_AQ_CTYPE_RQ:
+ /* Check if index exceeds max no of queues */
+ if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+ break;
+ case NIX_AQ_CTYPE_SQ:
+ if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+ break;
+ case NIX_AQ_CTYPE_CQ:
+ if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+ break;
+ case NIX_AQ_CTYPE_RSS:
+ /* Check if RSS is enabled and qidx is within range */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
+ if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
+ (req->qidx >= (256UL << (cfg & 0xF))))
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+ break;
+ case NIX_AQ_CTYPE_MCE:
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
+ /* Check if index exceeds MCE list length */
+ if (!hw->nix0->mcast.mce_ctx ||
+ (req->qidx >= (256UL << (cfg & 0xF))))
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+
+ /* Adding multicast lists for requests from PF/VFs is not
+ * yet supported, so ignore this.
+ */
+ if (rsp)
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+ break;
+ default:
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+ }
+
+ if (rc)
+ return rc;
+
+ /* Check if SQ pointed SMQ belongs to this PF/VF or not */
+ if (req->ctype == NIX_AQ_CTYPE_SQ &&
+ req->op != NIX_AQ_INSTOP_WRITE) {
+ if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
+ pcifunc, req->sq.smq))
+ return NIX_AF_ERR_AQ_ENQUEUE;
+ }
+
+ memset(&inst, 0, sizeof(struct nix_aq_inst_s));
+ inst.lf = nixlf;
+ inst.cindex = req->qidx;
+ inst.ctype = req->ctype;
+ inst.op = req->op;
+ /* Currently we are not supporting enqueuing multiple instructions,
+ * so always choose first entry in result memory.
+ */
+ inst.res_addr = (u64)aq->res->iova;
+
+ /* Clean result + context memory */
+ memset(aq->res->base, 0, aq->res->entry_sz);
+ /* Context needs to be written at RES_ADDR + 128 */
+ ctx = aq->res->base + 128;
+ /* Mask needs to be written at RES_ADDR + 256 */
+ mask = aq->res->base + 256;
+
+ switch (req->op) {
+ case NIX_AQ_INSTOP_WRITE:
+ if (req->ctype == NIX_AQ_CTYPE_RQ)
+ memcpy(mask, &req->rq_mask,
+ sizeof(struct nix_rq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_SQ)
+ memcpy(mask, &req->sq_mask,
+ sizeof(struct nix_sq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_CQ)
+ memcpy(mask, &req->cq_mask,
+ sizeof(struct nix_cq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_RSS)
+ memcpy(mask, &req->rss_mask,
+ sizeof(struct nix_rsse_s));
+ else if (req->ctype == NIX_AQ_CTYPE_MCE)
+ memcpy(mask, &req->mce_mask,
+ sizeof(struct nix_rx_mce_s));
+ /* Fall through */
+ case NIX_AQ_INSTOP_INIT:
+ if (req->ctype == NIX_AQ_CTYPE_RQ)
+ memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_SQ)
+ memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_CQ)
+ memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_RSS)
+ memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
+ else if (req->ctype == NIX_AQ_CTYPE_MCE)
+ memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
+ break;
+ case NIX_AQ_INSTOP_NOP:
+ case NIX_AQ_INSTOP_READ:
+ case NIX_AQ_INSTOP_LOCK:
+ case NIX_AQ_INSTOP_UNLOCK:
+ break;
+ default:
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+ return rc;
+ }
+
+ spin_lock(&aq->lock);
+
+ /* Submit the instruction to AQ */
+ rc = nix_aq_enqueue_wait(rvu, block, &inst);
+ if (rc) {
+ spin_unlock(&aq->lock);
+ return rc;
+ }
+
+ /* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
+ if (req->op == NIX_AQ_INSTOP_INIT) {
+ if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
+ __set_bit(req->qidx, pfvf->rq_bmap);
+ if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
+ __set_bit(req->qidx, pfvf->sq_bmap);
+ if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
+ __set_bit(req->qidx, pfvf->cq_bmap);
+ }
+
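+ /* For WRITE ops, recompute the cached enable state: take the new
+ * 'ena' value where the write mask covers it, otherwise keep the
+ * state already tracked in the bitmap.
+ */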
+ if (req->op == NIX_AQ_INSTOP_WRITE) {
+ if (req->ctype == NIX_AQ_CTYPE_RQ) {
+ ena = (req->rq.ena & req->rq_mask.ena) |
+ (test_bit(req->qidx, pfvf->rq_bmap) &
+ ~req->rq_mask.ena);
+ if (ena)
+ __set_bit(req->qidx, pfvf->rq_bmap);
+ else
+ __clear_bit(req->qidx, pfvf->rq_bmap);
+ }
+ if (req->ctype == NIX_AQ_CTYPE_SQ) {
+ ena = (req->sq.ena & req->sq_mask.ena) |
+ (test_bit(req->qidx, pfvf->sq_bmap) &
+ ~req->sq_mask.ena);
+ if (ena)
+ __set_bit(req->qidx, pfvf->sq_bmap);
+ else
+ __clear_bit(req->qidx, pfvf->sq_bmap);
+ }
+ if (req->ctype == NIX_AQ_CTYPE_CQ) {
+ ena = (req->cq.ena & req->cq_mask.ena) |
+ (test_bit(req->qidx, pfvf->cq_bmap) &
+ ~req->cq_mask.ena);
+ if (ena)
+ __set_bit(req->qidx, pfvf->cq_bmap);
+ else
+ __clear_bit(req->qidx, pfvf->cq_bmap);
+ }
+ }
+
+ if (rsp) {
+ /* Copy read context into mailbox */
+ if (req->op == NIX_AQ_INSTOP_READ) {
+ if (req->ctype == NIX_AQ_CTYPE_RQ)
+ memcpy(&rsp->rq, ctx,
+ sizeof(struct nix_rq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_SQ)
+ memcpy(&rsp->sq, ctx,
+ sizeof(struct nix_sq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_CQ)
+ memcpy(&rsp->cq, ctx,
+ sizeof(struct nix_cq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_RSS)
+ memcpy(&rsp->rss, ctx,
+ sizeof(struct nix_rsse_s));
+ else if (req->ctype == NIX_AQ_CTYPE_MCE)
+ memcpy(&rsp->mce, ctx,
+ sizeof(struct nix_rx_mce_s));
+ }
+ }
+
+ spin_unlock(&aq->lock);
+ return 0;
+}
+
+static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+ struct nix_aq_enq_req aq_req;
+ unsigned long *bmap;
+ int qidx, q_cnt = 0;
+ int err = 0, rc;
+
+ if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
+ return NIX_AF_ERR_AQ_ENQUEUE;
+
+ memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
+ aq_req.hdr.pcifunc = req->hdr.pcifunc;
+
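+ /* Prepare a masked WRITE that clears only the 'ena' bit of each
+ * enabled queue context of the requested type.
+ */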
+ if (req->ctype == NIX_AQ_CTYPE_CQ) {
+ aq_req.cq.ena = 0;
+ aq_req.cq_mask.ena = 1;
+ q_cnt = pfvf->cq_ctx->qsize;
+ bmap = pfvf->cq_bmap;
+ }
+ if (req->ctype == NIX_AQ_CTYPE_SQ) {
+ aq_req.sq.ena = 0;
+ aq_req.sq_mask.ena = 1;
+ q_cnt = pfvf->sq_ctx->qsize;
+ bmap = pfvf->sq_bmap;
+ }
+ if (req->ctype == NIX_AQ_CTYPE_RQ) {
+ aq_req.rq.ena = 0;
+ aq_req.rq_mask.ena = 1;
+ q_cnt = pfvf->rq_ctx->qsize;
+ bmap = pfvf->rq_bmap;
+ }
+
+ aq_req.ctype = req->ctype;
+ aq_req.op = NIX_AQ_INSTOP_WRITE;
+
+ for (qidx = 0; qidx < q_cnt; qidx++) {
+ if (!test_bit(qidx, bmap))
+ continue;
+ aq_req.qidx = qidx;
+ rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
+ if (rc) {
+ err = rc;
+ dev_err(rvu->dev, "Failed to disable %s:%d context\n",
+ (req->ctype == NIX_AQ_CTYPE_CQ) ?
+ "CQ" : ((req->ctype == NIX_AQ_CTYPE_RQ) ?
+ "RQ" : "SQ"), qidx);
+ }
+ }
+
+ return err;
+}
+
+int rvu_mbox_handler_NIX_AQ_ENQ(struct rvu *rvu,
+ struct nix_aq_enq_req *req,
+ struct nix_aq_enq_rsp *rsp)
+{
+ return rvu_nix_aq_enq_inst(rvu, req, rsp);
+}
+
+int rvu_mbox_handler_NIX_HWCTX_DISABLE(struct rvu *rvu,
+ struct hwctx_disable_req *req,
+ struct msg_rsp *rsp)
+{
+ return nix_lf_hwctx_disable(rvu, req);
+}
+
+int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
+ struct nix_lf_alloc_req *req,
+ struct nix_lf_alloc_rsp *rsp)
+{
+ int nixlf, qints, hwctx_size, err, rc = 0;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ struct rvu_pfvf *pfvf;
+ u64 cfg, ctx_cfg;
+ int blkaddr;
+
+ if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
+ return NIX_AF_ERR_PARAM;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+ nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ /* If RSS is being enabled, check if the requested config is valid.
+ * RSS table size should be a power of two, otherwise
+ * RSS_GRP::OFFSET + adder might go beyond that group or the
+ * entire table can't be used.
+ */
+ if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
+ !is_power_of_2(req->rss_sz)))
+ return NIX_AF_ERR_RSS_SIZE_INVALID;
+
+ if (req->rss_sz &&
+ (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
+ return NIX_AF_ERR_RSS_GRPS_INVALID;
+
+ /* Reset this NIX LF */
+ err = rvu_lf_reset(rvu, block, nixlf);
+ if (err) {
+ dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
+ block->addr - BLKADDR_NIX0, nixlf);
+ return NIX_AF_ERR_LF_RESET;
+ }
+
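+ /* NIX_AF_CONST3 advertises the size of each HW context type
+ * as a power of two.
+ */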
+ ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
+
+ /* Alloc NIX RQ HW context memory and config the base */
+ hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
+ if (err)
+ goto free_mem;
+
+ pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
+ if (!pfvf->rq_bmap)
+ goto free_mem;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
+ (u64)pfvf->rq_ctx->iova);
+
+ /* Set caching and queue count in HW */
+ cfg = BIT_ULL(36) | (req->rq_cnt - 1);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
+
+ /* Alloc NIX SQ HW context memory and config the base */
+ hwctx_size = 1UL << (ctx_cfg & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
+ if (err)
+ goto free_mem;
+
+ pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
+ if (!pfvf->sq_bmap)
+ goto free_mem;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
+ (u64)pfvf->sq_ctx->iova);
+ cfg = BIT_ULL(36) | (req->sq_cnt - 1);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
+
+ /* Alloc NIX CQ HW context memory and config the base */
+ hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
+ if (err)
+ goto free_mem;
+
+ pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
+ if (!pfvf->cq_bmap)
+ goto free_mem;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
+ (u64)pfvf->cq_ctx->iova);
+ cfg = BIT_ULL(36) | (req->cq_cnt - 1);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
+
+ /* Initialize receive side scaling (RSS) */
+ hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
+ err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf,
+ req->rss_sz, req->rss_grps, hwctx_size);
+ if (err)
+ goto free_mem;
+
+ /* Alloc memory for CQINT's HW contexts */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
+ qints = (cfg >> 24) & 0xFFF;
+ hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
+ if (err)
+ goto free_mem;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
+ (u64)pfvf->cq_ints_ctx->iova);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), BIT_ULL(36));
+
+ /* Alloc memory for QINT's HW contexts */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
+ qints = (cfg >> 12) & 0xFFF;
+ hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
+ if (err)
+ goto free_mem;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
+ (u64)pfvf->nix_qints_ctx->iova);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), BIT_ULL(36));
+
+ /* Enable LMTST for this NIX LF */
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
+
+ /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC.
+ * If the requester has sent 'RVU_DEFAULT_PF_FUNC', use this NIX LF's
+ * own PCIFUNC.
+ */
+ if (req->npa_func == RVU_DEFAULT_PF_FUNC)
+ cfg = pcifunc;
+ else
+ cfg = req->npa_func;
+
+ if (req->sso_func == RVU_DEFAULT_PF_FUNC)
+ cfg |= (u64)pcifunc << 16;
+ else
+ cfg |= (u64)req->sso_func << 16;
+
+ cfg |= (u64)req->xqe_sz << 33;
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
+
+ /* Config Rx pkt length, csum checks and apad enable / disable */
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
+
+ err = nix_interface_init(rvu, pcifunc, NIX_INTF_TYPE_CGX, nixlf);
+ if (err)
+ goto free_mem;
+
+ goto exit;
+
+free_mem:
+ nix_ctx_free(rvu, pfvf);
+ rc = -ENOMEM;
+
+exit:
+ /* Set macaddr of this PF/VF */
+ ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
+
+ /* set SQB size info */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
+ rsp->sqb_size = (cfg >> 34) & 0xFFFF;
+ rsp->rx_chan_base = pfvf->rx_chan_base;
+ rsp->tx_chan_base = pfvf->tx_chan_base;
+ rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
+ rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
+ rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
+ rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
+ return rc;
+}
+
+int rvu_mbox_handler_NIX_LF_FREE(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ int blkaddr, nixlf, err;
+ struct rvu_pfvf *pfvf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+ nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_interface_deinit(rvu, pcifunc, nixlf);
+
+ /* Reset this NIX LF */
+ err = rvu_lf_reset(rvu, block, nixlf);
+ if (err) {
+ dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
+ block->addr - BLKADDR_NIX0, nixlf);
+ return NIX_AF_ERR_LF_RESET;
+ }
+
+ nix_ctx_free(rvu, pfvf);
+
+ return 0;
+}
+
+/* Disable shaping of pkts by a scheduler queue
+ * at a given scheduler level.
+ */
+static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
+ int lvl, int schq)
+{
+ u64 cir_reg = 0, pir_reg = 0;
+ u64 cfg;
+
+ switch (lvl) {
+ case NIX_TXSCH_LVL_TL1:
+ cir_reg = NIX_AF_TL1X_CIR(schq);
+ pir_reg = 0; /* PIR not available at TL1 */
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ cir_reg = NIX_AF_TL2X_CIR(schq);
+ pir_reg = NIX_AF_TL2X_PIR(schq);
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ cir_reg = NIX_AF_TL3X_CIR(schq);
+ pir_reg = NIX_AF_TL3X_PIR(schq);
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ cir_reg = NIX_AF_TL4X_CIR(schq);
+ pir_reg = NIX_AF_TL4X_PIR(schq);
+ break;
+ }
+
+ if (!cir_reg)
+ return;
+ cfg = rvu_read64(rvu, blkaddr, cir_reg);
+ rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
+
+ if (!pir_reg)
+ return;
+ cfg = rvu_read64(rvu, blkaddr, pir_reg);
+ rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
+}
+
+static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
+ int lvl, int schq)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int link;
+
+ /* Reset TL4's SDP link config */
+ if (lvl == NIX_TXSCH_LVL_TL4)
+ rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
+
+ if (lvl != NIX_TXSCH_LVL_TL2)
+ return;
+
+ /* Reset TL2's CGX or LBK link config */
+ for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
+}
+
+int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu,
+ struct nix_txsch_alloc_req *req,
+ struct nix_txsch_alloc_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct nix_txsch *txsch;
+ int lvl, idx, req_schq;
+ struct rvu_pfvf *pfvf;
+ struct nix_hw *nix_hw;
+ int blkaddr, rc = 0;
+ u16 schq;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return -EINVAL;
+
+ spin_lock(&rvu->rsrc_lock);
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ txsch = &nix_hw->txsch[lvl];
+ req_schq = req->schq_contig[lvl] + req->schq[lvl];
+
+ /* There are only 28 TL1s */
+ if (lvl == NIX_TXSCH_LVL_TL1 && req_schq > txsch->schq.max)
+ goto err;
+
+ /* Check if request is valid */
+ if (!req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
+ goto err;
+
+ /* If contiguous queues are needed, check for availability */
+ if (req->schq_contig[lvl] &&
+ !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
+ goto err;
+
+ /* Check if full request can be accommodated */
+ if (req_schq >= rvu_rsrc_free_count(&txsch->schq))
+ goto err;
+ }
+
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ txsch = &nix_hw->txsch[lvl];
+ rsp->schq_contig[lvl] = req->schq_contig[lvl];
+ rsp->schq[lvl] = req->schq[lvl];
+
+ schq = 0;
+ /* Alloc contiguous queues first */
+ if (req->schq_contig[lvl]) {
+ schq = rvu_alloc_rsrc_contig(&txsch->schq,
+ req->schq_contig[lvl]);
+
+ for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
+ txsch->pfvf_map[schq] = pcifunc;
+ nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
+ nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
+ rsp->schq_contig_list[lvl][idx] = schq;
+ schq++;
+ }
+ }
+
+ /* Alloc non-contiguous queues */
+ for (idx = 0; idx < req->schq[lvl]; idx++) {
+ schq = rvu_alloc_rsrc(&txsch->schq);
+ txsch->pfvf_map[schq] = pcifunc;
+ nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
+ nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
+ rsp->schq_list[lvl][idx] = schq;
+ }
+ }
+ goto exit;
+err:
+ rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
+exit:
+ spin_unlock(&rvu->rsrc_lock);
+ return rc;
+}
+
+static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
+{
+ int blkaddr, nixlf, lvl, schq, err;
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+ u64 cfg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return -EINVAL;
+
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ /* Disable TL2/TL4 queue links before SMQ flush */
+ spin_lock(&rvu->rsrc_lock);
+ for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
+ continue;
+
+ txsch = &nix_hw->txsch[lvl];
+ for (schq = 0; schq < txsch->schq.max; schq++) {
+ if (txsch->pfvf_map[schq] != pcifunc)
+ continue;
+ nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
+ }
+ }
+
+ /* Flush SMQs */
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
+ for (schq = 0; schq < txsch->schq.max; schq++) {
+ if (txsch->pfvf_map[schq] != pcifunc)
+ continue;
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
+ /* Do SMQ flush and set enqueue xoff */
+ cfg |= BIT_ULL(50) | BIT_ULL(49);
+ rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
+
+ /* Wait for flush to complete */
+ err = rvu_poll_reg(rvu, blkaddr,
+ NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true);
+ if (err) {
+ dev_err(rvu->dev,
+ "NIXLF%d: SMQ%d flush failed\n", nixlf, schq);
+ }
+ }
+
+ /* Now free scheduler queues to free pool */
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ txsch = &nix_hw->txsch[lvl];
+ for (schq = 0; schq < txsch->schq.max; schq++) {
+ if (txsch->pfvf_map[schq] != pcifunc)
+ continue;
+ rvu_free_rsrc(&txsch->schq, schq);
+ txsch->pfvf_map[schq] = 0;
+ }
+ }
+ spin_unlock(&rvu->rsrc_lock);
+
+ /* Sync cached info for this LF in NDC-TX to LLC/DRAM */
+ rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
+ err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
+ if (err)
+ dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
+
+ return 0;
+}
+
+int rvu_mbox_handler_NIX_TXSCH_FREE(struct rvu *rvu,
+ struct nix_txsch_free_req *req,
+ struct msg_rsp *rsp)
+{
+ return nix_txschq_free(rvu, req->hdr.pcifunc);
+}
+
+static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
+ int lvl, u64 reg, u64 regval)
+{
+ u64 regbase = reg & 0xFFFF;
+ u16 schq, parent;
+
+ if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
+ return false;
+
+ schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
+ /* Check if this schq belongs to this PF/VF or not */
+ if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
+ return false;
+
+ parent = (regval >> 16) & 0x1FF;
+ /* Validate MDQ's TL4 parent */
+ if (regbase == NIX_AF_MDQX_PARENT(0) &&
+ !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
+ return false;
+
+ /* Validate TL4's TL3 parent */
+ if (regbase == NIX_AF_TL4X_PARENT(0) &&
+ !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
+ return false;
+
+ /* Validate TL3's TL2 parent */
+ if (regbase == NIX_AF_TL3X_PARENT(0) &&
+ !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
+ return false;
+
+ /* Validate TL2's TL1 parent */
+ if (regbase == NIX_AF_TL2X_PARENT(0) &&
+ !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
+ return false;
+
+ return true;
+}
+
+int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu,
+ struct nix_txschq_config *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ u64 reg, regval, schq_regbase;
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+ int blkaddr, idx, err;
+ int nixlf;
+
+ if (req->lvl >= NIX_TXSCH_LVL_CNT ||
+ req->num_regs > MAX_REGS_PER_MBOX_MSG)
+ return NIX_AF_INVAL_TXSCHQ_CFG;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return -EINVAL;
+
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ txsch = &nix_hw->txsch[req->lvl];
+ for (idx = 0; idx < req->num_regs; idx++) {
+ reg = req->reg[idx];
+ regval = req->regval[idx];
+ schq_regbase = reg & 0xFFFF;
+
+ if (!is_txschq_config_valid(rvu, pcifunc, blkaddr,
+ txsch->lvl, reg, regval))
+ return NIX_AF_INVAL_TXSCHQ_CFG;
+
+ /* Replace PF/VF visible NIXLF slot with HW NIXLF id */
+ if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
+ pcifunc, 0);
+ regval &= ~(0x7FULL << 24);
+ regval |= ((u64)nixlf << 24);
+ }
+
+ rvu_write64(rvu, blkaddr, reg, regval);
+
+ /* Check for SMQ flush, if so, poll for its completion */
+ if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
+ (regval & BIT_ULL(49))) {
+ err = rvu_poll_reg(rvu, blkaddr,
+ reg, BIT_ULL(49), true);
+ if (err)
+ return NIX_AF_SMQ_FLUSH_FAILED;
+ }
+ }
+ return 0;
+}
+
+static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
+ struct nix_vtag_config *req)
+{
+ u64 regval = 0;
+
+#define NIX_VTAGTYPE_MAX 0x8ull
+#define NIX_VTAGSIZE_MASK 0x7ull
+#define NIX_VTAGSTRIP_CAP_MASK 0x30ull
+
+ if (req->rx.vtag_type >= NIX_VTAGTYPE_MAX ||
+ req->vtag_size > VTAGSIZE_T8)
+ return -EINVAL;
+
+ regval = rvu_read64(rvu, blkaddr,
+ NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type));
+
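+ /* BIT(4) enables vtag stripping; BIT(5) additionally captures the
+ * stripped vtag for this vtag type.
+ */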
+ if (req->rx.strip_vtag && req->rx.capture_vtag)
+ regval |= BIT_ULL(4) | BIT_ULL(5);
+ else if (req->rx.strip_vtag)
+ regval |= BIT_ULL(4);
+ else
+ regval &= ~(BIT_ULL(4) | BIT_ULL(5));
+
+ regval &= ~NIX_VTAGSIZE_MASK;
+ regval |= req->vtag_size & NIX_VTAGSIZE_MASK;
+
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
+ return 0;
+}
+
+int rvu_mbox_handler_NIX_VTAG_CFG(struct rvu *rvu,
+ struct nix_vtag_config *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, nixlf, err;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ if (req->cfg_type) {
+ err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
+ if (err)
+ return NIX_AF_ERR_PARAM;
+ } else {
+ /* TODO: handle tx vtag configuration */
+ return 0;
+ }
+
+ return 0;
+}
+
+static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
+ u16 pcifunc, int next, bool eol)
+{
+ struct nix_aq_enq_req aq_req;
+ int err;
+
+ aq_req.hdr.pcifunc = pcifunc;
+ aq_req.ctype = NIX_AQ_CTYPE_MCE;
+ aq_req.op = op;
+ aq_req.qidx = mce;
+
+ /* Forward bcast pkts to RQ0, RSS not needed */
+ aq_req.mce.op = 0;
+ aq_req.mce.index = 0;
+ aq_req.mce.eol = eol;
+ aq_req.mce.pf_func = pcifunc;
+ aq_req.mce.next = next;
+
+ /* All fields valid */
+ *(u64 *)(&aq_req.mce_mask) = ~0ULL;
+
+ err = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
+ if (err) {
+ dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
+ rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
+ return err;
+ }
+ return 0;
+}
+
+static int nix_update_mce_list(struct nix_mce_list *mce_list,
+ u16 pcifunc, int idx, bool add)
+{
+ struct mce *mce, *tail = NULL;
+ bool delete = false;
+
+ /* Scan through the current list */
+ hlist_for_each_entry(mce, &mce_list->head, node) {
+ /* If already exists, then delete */
+ if (mce->pcifunc == pcifunc && !add) {
+ delete = true;
+ break;
+ }
+ tail = mce;
+ }
+
+ if (delete) {
+ hlist_del(&mce->node);
+ kfree(mce);
+ mce_list->count--;
+ return 0;
+ }
+
+ if (!add)
+ return 0;
+
+ /* Add a new one to the list, at the tail */
+ mce = kzalloc(sizeof(*mce), GFP_ATOMIC);
+ if (!mce)
+ return -ENOMEM;
+ mce->idx = idx;
+ mce->pcifunc = pcifunc;
+ if (!tail)
+ hlist_add_head(&mce->node, &mce_list->head);
+ else
+ hlist_add_behind(&mce->node, &tail->node);
+ mce_list->count++;
+ return 0;
+}
+
+static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
+{
+ int err = 0, idx, next_idx, count;
+ struct nix_mce_list *mce_list;
+ struct mce *mce, *next_mce;
+ struct nix_mcast *mcast;
+ struct nix_hw *nix_hw;
+ struct rvu_pfvf *pfvf;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return 0;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return 0;
+
+ mcast = &nix_hw->mcast;
+
+ /* Get this PF/VF func's MCE index */
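+ /* Each PF owns a contiguous range of bcast MCE entries: index 0 is
+ * the PF itself, followed by its VFs (see nix_setup_bcast_tables).
+ */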
+ pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
+ idx = pfvf->bcast_mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
+
+ mce_list = &pfvf->bcast_mce_list;
+ if (idx > (pfvf->bcast_mce_idx + mce_list->max)) {
+ dev_err(rvu->dev,
+ "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
+ __func__, idx, mce_list->max,
+ pcifunc >> RVU_PFVF_PF_SHIFT);
+ return -EINVAL;
+ }
+
+ spin_lock(&mcast->mce_lock);
+
+ err = nix_update_mce_list(mce_list, pcifunc, idx, add);
+ if (err)
+ goto end;
+
+ /* Disable MCAM entry in NPC */
+
+ if (!mce_list->count)
+ goto end;
+ count = mce_list->count;
+
+ /* Dump the updated list to HW */
+ hlist_for_each_entry(mce, &mce_list->head, node) {
+ next_idx = 0;
+ count--;
+ if (count) {
+ next_mce = hlist_entry(mce->node.next,
+ struct mce, node);
+ next_idx = next_mce->idx;
+ }
+ /* EOL should be set in last MCE */
+ err = nix_setup_mce(rvu, mce->idx,
+ NIX_AQ_INSTOP_WRITE, mce->pcifunc,
+ next_idx, !count);
+ if (err)
+ goto end;
+ }
+
+end:
+ spin_unlock(&mcast->mce_lock);
+ return err;
+}
+
+static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
+{
+ struct nix_mcast *mcast = &nix_hw->mcast;
+ int err, pf, numvfs, idx;
+ struct rvu_pfvf *pfvf;
+ u16 pcifunc;
+ u64 cfg;
+
+ /* Skip PF0 (i.e. AF) */
+ for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+ /* If PF is not enabled, nothing to do */
+ if (!((cfg >> 20) & 0x01))
+ continue;
+ /* Get numVFs attached to this PF */
+ numvfs = (cfg >> 12) & 0xFF;
+
+ pfvf = &rvu->pf[pf];
+ /* Save the start MCE */
+ pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
+
+ nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
+
+ for (idx = 0; idx < (numvfs + 1); idx++) {
+ /* idx-0 is for PF, followed by VFs */
+ pcifunc = (pf << RVU_PFVF_PF_SHIFT);
+ pcifunc |= idx;
+ /* Add dummy entries now, so that we don't have to check
+ * whether AQ_OP should be INIT or WRITE later on.
+ * These will be updated when a NIXLF is attached to or
+ * detached from these PF/VFs.
+ */
+ err = nix_setup_mce(rvu, pfvf->bcast_mce_idx + idx,
+ NIX_AQ_INSTOP_INIT,
+ pcifunc, 0, true);
+ if (err)
+ return err;
+ }
+ }
+ return 0;
+}
+
+static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
+{
+ struct nix_mcast *mcast = &nix_hw->mcast;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int err, size;
+
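+ /* MCE context entry size is advertised as a power of two in
+ * NIX_AF_CONST3.
+ */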
+ size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
+ size = (1ULL << size);
+
+ /* Alloc memory for multicast/mirror replication entries */
+ err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
+ (256UL << MC_TBL_SIZE), size);
+ if (err)
+ return -ENOMEM;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
+ (u64)mcast->mce_ctx->iova);
+
+ /* Set max list length equal to max no of VFs per PF + PF itself */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
+ BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
+
+ /* Alloc memory for multicast replication buffers */
+ size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
+ err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
+ (8UL << MC_BUF_CNT), size);
+ if (err)
+ return -ENOMEM;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
+ (u64)mcast->mcast_buf->iova);
+
+ /* Alloc pkind for NIX internal RX multicast/mirror replay */
+ mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
+ BIT_ULL(63) | (mcast->replay_pkind << 24) |
+ BIT_ULL(20) | MC_BUF_CNT);
+
+ spin_lock_init(&mcast->mce_lock);
+
+ return nix_setup_bcast_tables(rvu, nix_hw);
+}
+
+static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
+{
+ struct nix_txsch *txsch;
+ u64 cfg, reg;
+ int err, lvl;
+
+ /* Get the scheduler queue count of each type and alloc a
+ * bitmap for each, for alloc/free/attach operations.
+ */
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ txsch = &nix_hw->txsch[lvl];
+ txsch->lvl = lvl;
+ switch (lvl) {
+ case NIX_TXSCH_LVL_SMQ:
+ reg = NIX_AF_MDQ_CONST;
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ reg = NIX_AF_TL4_CONST;
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ reg = NIX_AF_TL3_CONST;
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ reg = NIX_AF_TL2_CONST;
+ break;
+ case NIX_TXSCH_LVL_TL1:
+ reg = NIX_AF_TL1_CONST;
+ break;
+ }
+ cfg = rvu_read64(rvu, blkaddr, reg);
+ txsch->schq.max = cfg & 0xFFFF;
+ err = rvu_alloc_bitmap(&txsch->schq);
+ if (err)
+ return err;
+
+ /* Allocate memory for scheduler queues to
+ * PF/VF pcifunc mapping info.
+ */
+ txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!txsch->pfvf_map)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+int rvu_mbox_handler_NIX_STATS_RST(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int i, nixlf, blkaddr;
+ u64 stats;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ /* Get stats count supported by HW */
+ stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+
+ /* Reset tx stats */
+ for (i = 0; i < ((stats >> 24) & 0xFF); i++)
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
+
+ /* Reset rx stats */
+ for (i = 0; i < ((stats >> 32) & 0xFF); i++)
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
+
+ return 0;
+}
+
+/* Returns the ALG index to be set into NPC_RX_ACTION */
+static int get_flowkey_alg_idx(u32 flow_cfg)
+{
+ u32 ip_cfg;
+
+ flow_cfg &= ~FLOW_KEY_TYPE_PORT;
+ ip_cfg = FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_IPV6;
+ if (flow_cfg == ip_cfg)
+ return FLOW_KEY_ALG_IP;
+ else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP))
+ return FLOW_KEY_ALG_TCP;
+ else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_UDP))
+ return FLOW_KEY_ALG_UDP;
+ else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_SCTP))
+ return FLOW_KEY_ALG_SCTP;
+ else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_UDP))
+ return FLOW_KEY_ALG_TCP_UDP;
+ else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_SCTP))
+ return FLOW_KEY_ALG_TCP_SCTP;
+ else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP))
+ return FLOW_KEY_ALG_UDP_SCTP;
+ else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP |
+ FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP))
+ return FLOW_KEY_ALG_TCP_UDP_SCTP;
+
+ return FLOW_KEY_ALG_PORT;
+}
+
+int rvu_mbox_handler_NIX_RSS_FLOWKEY_CFG(struct rvu *rvu,
+ struct nix_rss_flowkey_cfg *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int alg_idx, nixlf, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ alg_idx = get_flowkey_alg_idx(req->flowkey_cfg);
+
+ rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
+ alg_idx, req->mcam_index);
+ return 0;
+}
+
+static void set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
+{
+ struct nix_rx_flowkey_alg *field = NULL;
+ int idx, key_type;
+
+ if (!alg)
+ return;
+
+ /* FIELD0: IPv4
+ * FIELD1: IPv6
+ * FIELD2: TCP/UDP/SCTP/ALL
+ * FIELD3: Unused
+ * FIELD4: Unused
+ *
+ * Each of the 32 possible flow key algorithm definitions should
+ * fall into the above incremental config (except ALG0). Otherwise a
+ * single NPC MCAM entry is not sufficient for supporting RSS.
+ *
+ * If a different definition or combination is needed then the NPC
+ * MCAM has to be programmed to filter such pkts and its action
+ * should point to this definition to calculate the flowtag or hash.
+ */
+ for (idx = 0; idx < 32; idx++) {
+ key_type = flow_cfg & BIT_ULL(idx);
+ if (!key_type)
+ continue;
+ switch (key_type) {
+ case FLOW_KEY_TYPE_PORT:
+ field = &alg[0];
+ field->sel_chan = true;
+ /* This should be set to 1, when SEL_CHAN is set */
+ field->bytesm1 = 1;
+ break;
+ case FLOW_KEY_TYPE_IPV4:
+ field = &alg[0];
+ field->lid = NPC_LID_LC;
+ field->ltype_match = NPC_LT_LC_IP;
+ field->hdr_offset = 12; /* SIP offset */
+ field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
+ field->ltype_mask = 0xF; /* Match only IPv4 */
+ break;
+ case FLOW_KEY_TYPE_IPV6:
+ field = &alg[1];
+ field->lid = NPC_LID_LC;
+ field->ltype_match = NPC_LT_LC_IP6;
+ field->hdr_offset = 8; /* SIP offset */
+ field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
+ field->ltype_mask = 0xF; /* Match only IPv6 */
+ break;
+ case FLOW_KEY_TYPE_TCP:
+ case FLOW_KEY_TYPE_UDP:
+ case FLOW_KEY_TYPE_SCTP:
+ field = &alg[2];
+ field->lid = NPC_LID_LD;
+ field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
+ if (key_type == FLOW_KEY_TYPE_TCP)
+ field->ltype_match |= NPC_LT_LD_TCP;
+ else if (key_type == FLOW_KEY_TYPE_UDP)
+ field->ltype_match |= NPC_LT_LD_UDP;
+ else if (key_type == FLOW_KEY_TYPE_SCTP)
+ field->ltype_match |= NPC_LT_LD_SCTP;
+ field->key_offset = 32; /* After IPv4/v6 SIP, DIP */
+ field->ltype_mask = ~field->ltype_match;
+ break;
+ }
+ if (field)
+ field->ena = 1;
+ field = NULL;
+ }
+}
+
+static void nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
+{
+#define FIELDS_PER_ALG 5
+ u64 field[FLOW_KEY_ALG_MAX][FIELDS_PER_ALG];
+ u32 flowkey_cfg, minkey_cfg;
+ int alg, fid;
+
+ memset(&field, 0, sizeof(u64) * FLOW_KEY_ALG_MAX * FIELDS_PER_ALG);
+
+ /* Only incoming channel number */
+ flowkey_cfg = FLOW_KEY_TYPE_PORT;
+ set_flowkey_fields((void *)&field[FLOW_KEY_ALG_PORT], flowkey_cfg);
+
+ /* For an incoming pkt, if none of the fields match then the flowkey
+ * will be zero and hence the generated tag will also be zero.
+ * The RSS entry at rsse_index = NIX_AF_LF()_RSS_GRP()[OFFSET] will
+ * then be used to queue the packet.
+ */
+
+ /* IPv4/IPv6 SIP/DIPs */
+ flowkey_cfg = FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_IPV6;
+ set_flowkey_fields((void *)&field[FLOW_KEY_ALG_IP], flowkey_cfg);
+
+ /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
+ minkey_cfg = flowkey_cfg;
+ flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP;
+ set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP], flowkey_cfg);
+
+ /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
+ flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_UDP;
+ set_flowkey_fields((void *)&field[FLOW_KEY_ALG_UDP], flowkey_cfg);
+
+ /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
+ flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_SCTP;
+ set_flowkey_fields((void *)&field[FLOW_KEY_ALG_SCTP], flowkey_cfg);
+
+ /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
+ flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_UDP;
+ set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP_UDP], flowkey_cfg);
+
+ /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
+ flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_SCTP;
+ set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP_SCTP], flowkey_cfg);
+
+ /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
+ flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP;
+ set_flowkey_fields((void *)&field[FLOW_KEY_ALG_UDP_SCTP], flowkey_cfg);
+
+ /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
+ flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP |
+ FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP;
+ set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP_UDP_SCTP],
+ flowkey_cfg);
+
+ for (alg = 0; alg < FLOW_KEY_ALG_MAX; alg++) {
+ for (fid = 0; fid < FIELDS_PER_ALG; fid++)
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
+ field[alg][fid]);
+ }
+}
+
+int rvu_mbox_handler_NIX_SET_MAC_ADDR(struct rvu *rvu,
+ struct nix_set_mac_addr *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ int blkaddr, nixlf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ ether_addr_copy(pfvf->mac_addr, req->mac_addr);
+
+ rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
+ pfvf->rx_chan_base, req->mac_addr);
+ return 0;
+}
+
+int rvu_mbox_handler_NIX_SET_RX_MODE(struct rvu *rvu, struct nix_rx_mode *req,
+ struct msg_rsp *rsp)
+{
+ bool allmulti = false, disable_promisc = false;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ int blkaddr, nixlf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ if (req->mode & NIX_RX_MODE_PROMISC)
+ allmulti = false;
+ else if (req->mode & NIX_RX_MODE_ALLMULTI)
+ allmulti = true;
+ else
+ disable_promisc = true;
+
+ if (disable_promisc)
+ rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
+ else
+ rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
+ pfvf->rx_chan_base, allmulti);
+ return 0;
+}
+
+static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
+{
+ int idx, err;
+ u64 status;
+
+ /* Start X2P bus calibration */
+ rvu_write64(rvu, blkaddr, NIX_AF_CFG,
+ rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
+ /* Wait for calibration to complete */
+ err = rvu_poll_reg(rvu, blkaddr,
+ NIX_AF_STATUS, BIT_ULL(10), false);
+ if (err) {
+ dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
+ return err;
+ }
+
+ status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
+ /* Check if CGX devices are ready */
+ for (idx = 0; idx < cgx_get_cgx_cnt(); idx++) {
+ if (status & (BIT_ULL(16 + idx)))
+ continue;
+ dev_err(rvu->dev,
+ "CGX%d didn't respond to NIX X2P calibration\n", idx);
+ err = -EBUSY;
+ }
+
+ /* Check if LBK is ready */
+ if (!(status & BIT_ULL(19))) {
+ dev_err(rvu->dev,
+ "LBK didn't respond to NIX X2P calibration\n");
+ err = -EBUSY;
+ }
+
+ /* Clear 'calibrate_x2p' bit */
+ rvu_write64(rvu, blkaddr, NIX_AF_CFG,
+ rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
+ if (err || (status & 0x3FFULL))
+ dev_err(rvu->dev,
+ "NIX X2P calibration failed, status 0x%llx\n", status);
+ if (err)
+ return err;
+ return 0;
+}
+
+static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
+{
+ u64 cfg;
+ int err;
+
+ /* Set admin queue endianness */
+ cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
+#ifdef __BIG_ENDIAN
+ cfg |= BIT_ULL(1);
+ rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
+#else
+ cfg &= ~BIT_ULL(1);
+ rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
+#endif
+
+ /* Do not bypass NDC cache */
+ cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
+ cfg &= ~0x3FFEULL;
+ rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
+
+ /* Result structure can be followed by RQ/SQ/CQ context at
+ * RES + 128bytes and a write mask at RES + 256 bytes, depending on
+ * operation type. Alloc sufficient result memory for all operations.
+ */
+ err = rvu_aq_alloc(rvu, &block->aq,
+ Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
+ ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
+ if (err)
+ return err;
+
+ rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
+ rvu_write64(rvu, block->addr,
+ NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
+ return 0;
+}
+
+int rvu_nix_init(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkaddr, err;
+ u64 cfg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return 0;
+ block = &hw->block[blkaddr];
+
+ /* Calibrate X2P bus to check if CGX/LBK links are fine */
+ err = nix_calibrate_x2p(rvu, blkaddr);
+ if (err)
+ return err;
+
+ /* Set num of links of each type */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
+ hw->cgx = (cfg >> 12) & 0xF;
+ hw->lmac_per_cgx = (cfg >> 8) & 0xF;
+ hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
+ hw->lbk_links = 1;
+ hw->sdp_links = 1;
+
+ /* Initialize admin queue */
+ err = nix_aq_init(rvu, block);
+ if (err)
+ return err;
+
+ /* Restore CINT timer delay to HW reset values */
+ rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
+
+ /* Configure segmentation offload formats */
+ nix_setup_lso(rvu, blkaddr);
+
+ if (blkaddr == BLKADDR_NIX0) {
+ hw->nix0 = devm_kzalloc(rvu->dev,
+ sizeof(struct nix_hw), GFP_KERNEL);
+ if (!hw->nix0)
+ return -ENOMEM;
+
+ err = nix_setup_txschq(rvu, hw->nix0, blkaddr);
+ if (err)
+ return err;
+
+ err = nix_setup_mcast(rvu, hw->nix0, blkaddr);
+ if (err)
+ return err;
+
+ /* Config Outer L2, IP, TCP and UDP's NPC layer info.
+ * This helps HW protocol checker to identify headers
+ * and validate length and checksums.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
+ (NPC_LID_LA << 8) | (NPC_LT_LA_ETHER << 4) | 0x0F);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
+ (NPC_LID_LD << 8) | (NPC_LT_LD_UDP << 4) | 0x0F);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
+ (NPC_LID_LD << 8) | (NPC_LT_LD_TCP << 4) | 0x0F);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
+ (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
+
+ nix_rx_flowkey_alg_cfg(rvu, blkaddr);
+ }
+ return 0;
+}
+
+void rvu_nix_freemem(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ struct nix_txsch *txsch;
+ struct nix_mcast *mcast;
+ struct nix_hw *nix_hw;
+ int blkaddr, lvl;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return;
+
+ block = &hw->block[blkaddr];
+ rvu_aq_free(rvu, block->aq);
+
+ if (blkaddr == BLKADDR_NIX0) {
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return;
+
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ txsch = &nix_hw->txsch[lvl];
+ kfree(txsch->schq.bmap);
+ }
+
+ mcast = &nix_hw->mcast;
+ qmem_free(rvu->dev, mcast->mce_ctx);
+ qmem_free(rvu->dev, mcast->mcast_buf);
+ }
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
new file mode 100644
index 000000000000..7531fdc54fa1
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
@@ -0,0 +1,472 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+
+static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
+ struct npa_aq_inst_s *inst)
+{
+ struct admin_queue *aq = block->aq;
+ struct npa_aq_res_s *result;
+ int timeout = 1000;
+ u64 reg, head;
+
+ result = (struct npa_aq_res_s *)aq->res->base;
+
+ /* Get the current head pointer, where this instruction will be appended */
+ reg = rvu_read64(rvu, block->addr, NPA_AF_AQ_STATUS);
+ head = (reg >> 4) & AQ_PTR_MASK;
+
+ memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
+ (void *)inst, aq->inst->entry_sz);
+ memset(result, 0, sizeof(*result));
+ /* sync into memory */
+ wmb();
+
+ /* Ring the doorbell and wait for result */
+ rvu_write64(rvu, block->addr, NPA_AF_AQ_DOOR, 1);
+ while (result->compcode == NPA_AQ_COMP_NOTDONE) {
+ cpu_relax();
+ udelay(1);
+ timeout--;
+ if (!timeout)
+ return -EBUSY;
+ }
+
+ if (result->compcode != NPA_AQ_COMP_GOOD)
+ /* TODO: Replace this with some error code */
+ return -EBUSY;
+
+ return 0;
+}
+
+static int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
+ struct npa_aq_enq_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, npalf, rc = 0;
+ struct npa_aq_inst_s inst;
+ struct rvu_block *block;
+ struct admin_queue *aq;
+ struct rvu_pfvf *pfvf;
+ void *ctx, *mask;
+ bool ena;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (!pfvf->aura_ctx || req->aura_id >= pfvf->aura_ctx->qsize)
+ return NPA_AF_ERR_AQ_ENQUEUE;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
+ if (!pfvf->npalf || blkaddr < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+ aq = block->aq;
+ if (!aq) {
+ dev_warn(rvu->dev, "%s: NPA AQ not initialized\n", __func__);
+ return NPA_AF_ERR_AQ_ENQUEUE;
+ }
+
+ npalf = rvu_get_lf(rvu, block, pcifunc, 0);
+ if (npalf < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ memset(&inst, 0, sizeof(struct npa_aq_inst_s));
+ inst.cindex = req->aura_id;
+ inst.lf = npalf;
+ inst.ctype = req->ctype;
+ inst.op = req->op;
+ /* Currently we are not supporting enqueuing multiple instructions,
+ * so always choose first entry in result memory.
+ */
+ inst.res_addr = (u64)aq->res->iova;
+
+ /* Clean result + context memory */
+ memset(aq->res->base, 0, aq->res->entry_sz);
+ /* Context needs to be written at RES_ADDR + 128 */
+ ctx = aq->res->base + 128;
+ /* Mask needs to be written at RES_ADDR + 256 */
+ mask = aq->res->base + 256;
+
+ switch (req->op) {
+ case NPA_AQ_INSTOP_WRITE:
+ /* Copy context and write mask */
+ if (req->ctype == NPA_AQ_CTYPE_AURA) {
+ memcpy(mask, &req->aura_mask,
+ sizeof(struct npa_aura_s));
+ memcpy(ctx, &req->aura, sizeof(struct npa_aura_s));
+ } else {
+ memcpy(mask, &req->pool_mask,
+ sizeof(struct npa_pool_s));
+ memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
+ }
+ break;
+ case NPA_AQ_INSTOP_INIT:
+ if (req->ctype == NPA_AQ_CTYPE_AURA) {
+ if (req->aura.pool_addr >= pfvf->pool_ctx->qsize) {
+ rc = NPA_AF_ERR_AQ_FULL;
+ break;
+ }
+ /* Set pool's context address */
+ req->aura.pool_addr = pfvf->pool_ctx->iova +
+ (req->aura.pool_addr * pfvf->pool_ctx->entry_sz);
+ memcpy(ctx, &req->aura, sizeof(struct npa_aura_s));
+ } else { /* POOL's context */
+ memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
+ }
+ break;
+ case NPA_AQ_INSTOP_NOP:
+ case NPA_AQ_INSTOP_READ:
+ case NPA_AQ_INSTOP_LOCK:
+ case NPA_AQ_INSTOP_UNLOCK:
+ break;
+ default:
+ rc = NPA_AF_ERR_AQ_FULL;
+ break;
+ }
+
+ if (rc)
+ return rc;
+
+ spin_lock(&aq->lock);
+
+ /* Submit the instruction to AQ */
+ rc = npa_aq_enqueue_wait(rvu, block, &inst);
+ if (rc) {
+ spin_unlock(&aq->lock);
+ return rc;
+ }
+
+ /* Set aura bitmap if aura hw context is enabled */
+ if (req->ctype == NPA_AQ_CTYPE_AURA) {
+ if (req->op == NPA_AQ_INSTOP_INIT && req->aura.ena)
+ __set_bit(req->aura_id, pfvf->aura_bmap);
+ if (req->op == NPA_AQ_INSTOP_WRITE) {
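+ /* If the write mask covers the 'ena' bit, take the new state
+ * from the request; otherwise retain the current state.
+ */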
+ ena = (req->aura.ena & req->aura_mask.ena) |
+ (test_bit(req->aura_id, pfvf->aura_bmap) &
+ ~req->aura_mask.ena);
+ if (ena)
+ __set_bit(req->aura_id, pfvf->aura_bmap);
+ else
+ __clear_bit(req->aura_id, pfvf->aura_bmap);
+ }
+ }
+
+ /* Set pool bitmap if pool hw context is enabled */
+ if (req->ctype == NPA_AQ_CTYPE_POOL) {
+ if (req->op == NPA_AQ_INSTOP_INIT && req->pool.ena)
+ __set_bit(req->aura_id, pfvf->pool_bmap);
+ if (req->op == NPA_AQ_INSTOP_WRITE) {
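+ /* Same masked 'ena' update logic as for the aura bitmap above */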
+ ena = (req->pool.ena & req->pool_mask.ena) |
+ (test_bit(req->aura_id, pfvf->pool_bmap) &
+ ~req->pool_mask.ena);
+ if (ena)
+ __set_bit(req->aura_id, pfvf->pool_bmap);
+ else
+ __clear_bit(req->aura_id, pfvf->pool_bmap);
+ }
+ }
+ spin_unlock(&aq->lock);
+
+ if (rsp) {
+ /* Copy read context into mailbox */
+ if (req->op == NPA_AQ_INSTOP_READ) {
+ if (req->ctype == NPA_AQ_CTYPE_AURA)
+ memcpy(&rsp->aura, ctx,
+ sizeof(struct npa_aura_s));
+ else
+ memcpy(&rsp->pool, ctx,
+ sizeof(struct npa_pool_s));
+ }
+ }
+
+ return 0;
+}
+
+static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+ struct npa_aq_enq_req aq_req;
+ unsigned long *bmap;
+ int id, cnt = 0;
+ int err = 0, rc;
+
+ if (!pfvf->pool_ctx || !pfvf->aura_ctx)
+ return NPA_AF_ERR_AQ_ENQUEUE;
+
+ memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
+ aq_req.hdr.pcifunc = req->hdr.pcifunc;
+
+ if (req->ctype == NPA_AQ_CTYPE_POOL) {
+ aq_req.pool.ena = 0;
+ aq_req.pool_mask.ena = 1;
+ cnt = pfvf->pool_ctx->qsize;
+ bmap = pfvf->pool_bmap;
+ } else if (req->ctype == NPA_AQ_CTYPE_AURA) {
+ aq_req.aura.ena = 0;
+ aq_req.aura_mask.ena = 1;
+ cnt = pfvf->aura_ctx->qsize;
+ bmap = pfvf->aura_bmap;
+ }
+
+ aq_req.ctype = req->ctype;
+ aq_req.op = NPA_AQ_INSTOP_WRITE;
+
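+ /* Walk the bitmap of enabled contexts and issue a masked AQ
+ * write clearing the 'ena' bit of each enabled context.
+ */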
+ for (id = 0; id < cnt; id++) {
+ if (!test_bit(id, bmap))
+ continue;
+ aq_req.aura_id = id;
+ rc = rvu_npa_aq_enq_inst(rvu, &aq_req, NULL);
+ if (rc) {
+ err = rc;
+ dev_err(rvu->dev, "Failed to disable %s:%d context\n",
+ (req->ctype == NPA_AQ_CTYPE_AURA) ?
+ "Aura" : "Pool", id);
+ }
+ }
+
+ return err;
+}
+
+int rvu_mbox_handler_NPA_AQ_ENQ(struct rvu *rvu,
+ struct npa_aq_enq_req *req,
+ struct npa_aq_enq_rsp *rsp)
+{
+ return rvu_npa_aq_enq_inst(rvu, req, rsp);
+}
+
+int rvu_mbox_handler_NPA_HWCTX_DISABLE(struct rvu *rvu,
+ struct hwctx_disable_req *req,
+ struct msg_rsp *rsp)
+{
+ return npa_lf_hwctx_disable(rvu, req);
+}
+
+static void npa_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
+{
+ kfree(pfvf->aura_bmap);
+ pfvf->aura_bmap = NULL;
+
+ qmem_free(rvu->dev, pfvf->aura_ctx);
+ pfvf->aura_ctx = NULL;
+
+ kfree(pfvf->pool_bmap);
+ pfvf->pool_bmap = NULL;
+
+ qmem_free(rvu->dev, pfvf->pool_ctx);
+ pfvf->pool_ctx = NULL;
+
+ qmem_free(rvu->dev, pfvf->npa_qints_ctx);
+ pfvf->npa_qints_ctx = NULL;
+}
+
+int rvu_mbox_handler_NPA_LF_ALLOC(struct rvu *rvu,
+ struct npa_lf_alloc_req *req,
+ struct npa_lf_alloc_rsp *rsp)
+{
+ int npalf, qints, hwctx_size, err, rc = 0;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ struct rvu_pfvf *pfvf;
+ u64 cfg, ctx_cfg;
+ int blkaddr;
+
+ if (req->aura_sz > NPA_AURA_SZ_MAX ||
+ req->aura_sz == NPA_AURA_SZ_0 || !req->nr_pools)
+ return NPA_AF_ERR_PARAM;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
+ if (!pfvf->npalf || blkaddr < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+ npalf = rvu_get_lf(rvu, block, pcifunc, 0);
+ if (npalf < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ /* Reset this NPA LF */
+ err = rvu_lf_reset(rvu, block, npalf);
+ if (err) {
+ dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf);
+ return NPA_AF_ERR_LF_RESET;
+ }
+
+ ctx_cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST1);
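+ /* NPA_AF_CONST1 holds the log2 of the aura, pool and qint HW
+ * context sizes in successive nibbles; derive byte sizes from it.
+ */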
+
+ /* Alloc memory for aura HW contexts */
+ hwctx_size = 1UL << (ctx_cfg & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->aura_ctx,
+ NPA_AURA_COUNT(req->aura_sz), hwctx_size);
+ if (err)
+ goto free_mem;
+
+ pfvf->aura_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
+ GFP_KERNEL);
+ if (!pfvf->aura_bmap)
+ goto free_mem;
+
+ /* Alloc memory for pool HW contexts */
+ hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->pool_ctx, req->nr_pools, hwctx_size);
+ if (err)
+ goto free_mem;
+
+ pfvf->pool_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
+ GFP_KERNEL);
+ if (!pfvf->pool_bmap)
+ goto free_mem;
+
+ /* Get the number of queue interrupts supported */
+ cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST);
+ qints = (cfg >> 28) & 0xFFF;
+
+ /* Alloc memory for Qints HW contexts */
+ hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->npa_qints_ctx, qints, hwctx_size);
+ if (err)
+ goto free_mem;
+
+ cfg = rvu_read64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf));
+ /* Clear way partition mask and set aura offset to '0' */
+ cfg &= ~(BIT_ULL(34) - 1);
+ /* Set aura size & enable caching of contexts */
+ cfg |= (req->aura_sz << 16) | BIT_ULL(34);
+ rvu_write64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf), cfg);
+
+ /* Configure aura HW context's base */
+ rvu_write64(rvu, blkaddr, NPA_AF_LFX_LOC_AURAS_BASE(npalf),
+ (u64)pfvf->aura_ctx->iova);
+
+ /* Enable caching of qints hw context */
+ rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_CFG(npalf), BIT_ULL(36));
+ rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_BASE(npalf),
+ (u64)pfvf->npa_qints_ctx->iova);
+
+ goto exit;
+
+free_mem:
+ npa_ctx_free(rvu, pfvf);
+ rc = -ENOMEM;
+
+exit:
+ /* Set stack page info */
+ cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST);
+ rsp->stack_pg_ptrs = (cfg >> 8) & 0xFF;
+ rsp->stack_pg_bytes = cfg & 0xFF;
+ rsp->qints = (cfg >> 28) & 0xFFF;
+ return rc;
+}
+
+int rvu_mbox_handler_NPA_LF_FREE(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ struct rvu_pfvf *pfvf;
+ int npalf, err;
+ int blkaddr;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
+ if (!pfvf->npalf || blkaddr < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+ npalf = rvu_get_lf(rvu, block, pcifunc, 0);
+ if (npalf < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ /* Reset this NPA LF */
+ err = rvu_lf_reset(rvu, block, npalf);
+ if (err) {
+ dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf);
+ return NPA_AF_ERR_LF_RESET;
+ }
+
+ npa_ctx_free(rvu, pfvf);
+
+ return 0;
+}
+
+static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
+{
+ u64 cfg;
+ int err;
+
+ /* Set admin queue endianness */
+ cfg = rvu_read64(rvu, block->addr, NPA_AF_GEN_CFG);
+#ifdef __BIG_ENDIAN
+ cfg |= BIT_ULL(1);
+ rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
+#else
+ cfg &= ~BIT_ULL(1);
+ rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
+#endif
+
+ /* Do not bypass NDC cache */
+ cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG);
+ cfg &= ~0x03DULL;
+ rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg);
+
+ /* The result structure can be followed by an Aura/Pool context at
+ * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
+ * the operation type. Alloc sufficient result memory for all operations.
+ */
+ err = rvu_aq_alloc(rvu, &block->aq,
+ Q_COUNT(AQ_SIZE), sizeof(struct npa_aq_inst_s),
+ ALIGN(sizeof(struct npa_aq_res_s), 128) + 256);
+ if (err)
+ return err;
+
+ rvu_write64(rvu, block->addr, NPA_AF_AQ_CFG, AQ_SIZE);
+ rvu_write64(rvu, block->addr,
+ NPA_AF_AQ_BASE, (u64)block->aq->inst->iova);
+ return 0;
+}
+
+int rvu_npa_init(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int blkaddr, err;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return 0;
+
+ /* Initialize admin queue */
+ err = npa_aq_init(rvu, &hw->block[blkaddr]);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+void rvu_npa_freemem(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return;
+
+ block = &hw->block[blkaddr];
+ rvu_aq_free(rvu, block->aq);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
new file mode 100644
index 000000000000..23ff47f7efc5
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -0,0 +1,816 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+#include "npc.h"
+#include "npc_profile.h"
+
+#define RSVD_MCAM_ENTRIES_PER_PF 2 /* Bcast & Promisc */
+#define RSVD_MCAM_ENTRIES_PER_NIXLF 1 /* Ucast for LFs */
+
+#define NIXLF_UCAST_ENTRY 0
+#define NIXLF_BCAST_ENTRY 1
+#define NIXLF_PROMISC_ENTRY 2
+
+#define NPC_PARSE_RESULT_DMAC_OFFSET 8
+
+struct mcam_entry {
+#define NPC_MAX_KWS_IN_KEY 7 /* Number of keywords in max keywidth */
+ u64 kw[NPC_MAX_KWS_IN_KEY];
+ u64 kw_mask[NPC_MAX_KWS_IN_KEY];
+ u64 action;
+ u64 vtag_action;
+};
+
+void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf)
+{
+ int blkaddr;
+ u64 val = 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Config CPI base for the PKIND */
+ val = pkind | 1ULL << 62;
+ rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_CPI_DEFX(pkind, 0), val);
+}
+
+int rvu_npc_get_pkind(struct rvu *rvu, u16 pf)
+{
+ struct npc_pkind *pkind = &rvu->hw->pkind;
+ u32 map;
+ int i;
+
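+ /* Scan the pkind to PF/channel map and return the first pkind
+ * mapped to this PF, or -1 if none is found.
+ */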
+ for (i = 0; i < pkind->rsrc.max; i++) {
+ map = pkind->pfchan_map[i];
+ if (((map >> 16) & 0x3F) == pf)
+ return i;
+ }
+ return -1;
+}
+
+static int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
+ u16 pcifunc, int nixlf, int type)
+{
+ int pf = rvu_get_pf(pcifunc);
+ int index;
+
+ /* Check if this is for a PF */
+ if (pf && !(pcifunc & RVU_PFVF_FUNC_MASK)) {
+ /* Reserved entries exclude PF0 */
+ pf--;
+ index = mcam->pf_offset + (pf * RSVD_MCAM_ENTRIES_PER_PF);
+ /* Broadcast address matching entry should be first so
+ * that the packet can be replicated to all VFs.
+ */
+ if (type == NIXLF_BCAST_ENTRY)
+ return index;
+ else if (type == NIXLF_PROMISC_ENTRY)
+ return index + 1;
+ }
+
+ return (mcam->nixlf_offset + (nixlf * RSVD_MCAM_ENTRIES_PER_NIXLF));
+}
+
+static int npc_get_bank(struct npc_mcam *mcam, int index)
+{
+ int bank = index / mcam->banksize;
+
+ /* Banks 0,1 and 2,3 are combined for this key size */
+ if (mcam->keysize == NPC_MCAM_KEY_X2)
+ return bank ? 2 : 0;
+
+ return bank;
+}
+
+static bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index)
+{
+ int bank = npc_get_bank(mcam, index);
+ u64 cfg;
+
+ index &= (mcam->banksize - 1);
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(index, bank));
+ return (cfg & 1);
+}
+
+static void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index, bool enable)
+{
+ int bank = npc_get_bank(mcam, index);
+ int actbank = bank;
+
+ index &= (mcam->banksize - 1);
+ for (; bank < (actbank + mcam->banks_per_entry); bank++) {
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CFG(index, bank),
+ enable ? 1 : 0);
+ }
+}
+
+static void npc_get_keyword(struct mcam_entry *entry, int idx,
+ u64 *cam0, u64 *cam1)
+{
+ u64 kw_mask = 0x00;
+
+#define CAM_MASK(n) (BIT_ULL(n) - 1)
+
+ /* 0, 2, 4, 6 indices refer to BANKX_CAMX_W0 and
+ * 1, 3, 5, 7 indices refer to BANKX_CAMX_W1.
+ *
+ * Also, only 48 bits of BANKX_CAMX_W1 are valid.
+ */
+ switch (idx) {
+ case 0:
+ /* BANK(X)_CAM_W0<63:0> = MCAM_KEY[KW0]<63:0> */
+ *cam1 = entry->kw[0];
+ kw_mask = entry->kw_mask[0];
+ break;
+ case 1:
+ /* BANK(X)_CAM_W1<47:0> = MCAM_KEY[KW1]<47:0> */
+ *cam1 = entry->kw[1] & CAM_MASK(48);
+ kw_mask = entry->kw_mask[1] & CAM_MASK(48);
+ break;
+ case 2:
+ /* BANK(X + 1)_CAM_W0<15:0> = MCAM_KEY[KW1]<63:48>
+ * BANK(X + 1)_CAM_W0<63:16> = MCAM_KEY[KW2]<47:0>
+ */
+ *cam1 = (entry->kw[1] >> 48) & CAM_MASK(16);
+ *cam1 |= ((entry->kw[2] & CAM_MASK(48)) << 16);
+ kw_mask = (entry->kw_mask[1] >> 48) & CAM_MASK(16);
+ kw_mask |= ((entry->kw_mask[2] & CAM_MASK(48)) << 16);
+ break;
+ case 3:
+ /* BANK(X + 1)_CAM_W1<15:0> = MCAM_KEY[KW2]<63:48>
+ * BANK(X + 1)_CAM_W1<47:16> = MCAM_KEY[KW3]<31:0>
+ */
+ *cam1 = (entry->kw[2] >> 48) & CAM_MASK(16);
+ *cam1 |= ((entry->kw[3] & CAM_MASK(32)) << 16);
+ kw_mask = (entry->kw_mask[2] >> 48) & CAM_MASK(16);
+ kw_mask |= ((entry->kw_mask[3] & CAM_MASK(32)) << 16);
+ break;
+ case 4:
+ /* BANK(X + 2)_CAM_W0<31:0> = MCAM_KEY[KW3]<63:32>
+ * BANK(X + 2)_CAM_W0<63:32> = MCAM_KEY[KW4]<31:0>
+ */
+ *cam1 = (entry->kw[3] >> 32) & CAM_MASK(32);
+ *cam1 |= ((entry->kw[4] & CAM_MASK(32)) << 32);
+ kw_mask = (entry->kw_mask[3] >> 32) & CAM_MASK(32);
+ kw_mask |= ((entry->kw_mask[4] & CAM_MASK(32)) << 32);
+ break;
+ case 5:
+ /* BANK(X + 2)_CAM_W1<31:0> = MCAM_KEY[KW4]<63:32>
+ * BANK(X + 2)_CAM_W1<47:32> = MCAM_KEY[KW5]<15:0>
+ */
+ *cam1 = (entry->kw[4] >> 32) & CAM_MASK(32);
+ *cam1 |= ((entry->kw[5] & CAM_MASK(16)) << 32);
+ kw_mask = (entry->kw_mask[4] >> 32) & CAM_MASK(32);
+ kw_mask |= ((entry->kw_mask[5] & CAM_MASK(16)) << 32);
+ break;
+ case 6:
+ /* BANK(X + 3)_CAM_W0<47:0> = MCAM_KEY[KW5]<63:16>
+ * BANK(X + 3)_CAM_W0<63:48> = MCAM_KEY[KW6]<15:0>
+ */
+ *cam1 = (entry->kw[5] >> 16) & CAM_MASK(48);
+ *cam1 |= ((entry->kw[6] & CAM_MASK(16)) << 48);
+ kw_mask = (entry->kw_mask[5] >> 16) & CAM_MASK(48);
+ kw_mask |= ((entry->kw_mask[6] & CAM_MASK(16)) << 48);
+ break;
+ case 7:
+ /* BANK(X + 3)_CAM_W1<47:0> = MCAM_KEY[KW6]<63:16> */
+ *cam1 = (entry->kw[6] >> 16) & CAM_MASK(48);
+ kw_mask = (entry->kw_mask[6] >> 16) & CAM_MASK(48);
+ break;
+ }
+
+ *cam1 &= kw_mask;
+ *cam0 = ~*cam1 & kw_mask;
+}
+
+static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index, u8 intf,
+ struct mcam_entry *entry, bool enable)
+{
+ int bank = npc_get_bank(mcam, index);
+ int kw = 0, actbank, actindex;
+ u64 cam0, cam1;
+
+ actbank = bank; /* Save bank id, to set action later on */
+ actindex = index;
+ index &= (mcam->banksize - 1);
+
+ /* CAM1 takes the comparison value and
+ * CAM0 specifies match for a bit in key being '0' or '1' or 'dontcare'.
+ * CAM1<n> = 0 & CAM0<n> = 1 => match if key<n> = 0
+ * CAM1<n> = 1 & CAM0<n> = 0 => match if key<n> = 1
+ * CAM1<n> = 0 & CAM0<n> = 0 => always match, i.e. don't care.
+ */
+ for (; bank < (actbank + mcam->banks_per_entry); bank++, kw = kw + 2) {
+ /* Interface should be set in all banks */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1),
+ intf);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0),
+ ~intf & 0x3);
+
+ /* Set the match key */
+ npc_get_keyword(entry, kw, &cam0, &cam1);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 1), cam1);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 0), cam0);
+
+ npc_get_keyword(entry, kw + 1, &cam0, &cam1);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 1), cam1);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), cam0);
+ }
+
+ /* Set 'action' */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, actbank), entry->action);
+
+ /* Set TAG 'action' */
+ rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_TAG_ACT(index, actbank),
+ entry->vtag_action);
+
+ /* Enable or disable the entry as requested */
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, enable);
+}
+
+static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index)
+{
+ int bank = npc_get_bank(mcam, index);
+
+ index &= (mcam->banksize - 1);
+ return rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
+}
+
+void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, u64 chan, u8 *mac_addr)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct mcam_entry entry = { {0} };
+ struct nix_rx_action action;
+ int blkaddr, index, kwi;
+ u64 mac = 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ for (index = ETH_ALEN - 1; index >= 0; index--)
+ mac |= ((u64)*mac_addr++) << (8 * index);
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+
+ /* Match ingress channel and DMAC */
+ entry.kw[0] = chan;
+ entry.kw_mask[0] = 0xFFFULL;
+
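+ /* The DMAC was extracted to byte offset 8 of the parse result
+ * (NPC_PARSE_RESULT_DMAC_OFFSET), i.e. keyword index 1 of the key.
+ */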
+ kwi = NPC_PARSE_RESULT_DMAC_OFFSET / sizeof(u64);
+ entry.kw[kwi] = mac;
+ entry.kw_mask[kwi] = BIT_ULL(48) - 1;
+
+ /* Don't change the action if the entry is already enabled,
+ * otherwise the RSS action may get overwritten.
+ */
+ if (is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) {
+ *(u64 *)&action = npc_get_mcam_action(rvu, mcam,
+ blkaddr, index);
+ } else {
+ *(u64 *)&action = 0x00;
+ action.op = NIX_RX_ACTIONOP_UCAST;
+ action.pf_func = pcifunc;
+ }
+
+ entry.action = *(u64 *)&action;
+ npc_config_mcam_entry(rvu, mcam, blkaddr, index,
+ NIX_INTF_RX, &entry, true);
+}
+
+void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, u64 chan, bool allmulti)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct mcam_entry entry = { {0} };
+ struct nix_rx_action action;
+ int blkaddr, index, kwi;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Only PF or AF VF can add a promiscuous entry */
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ return;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_PROMISC_ENTRY);
+
+ entry.kw[0] = chan;
+ entry.kw_mask[0] = 0xFFFULL;
+
+ if (allmulti) {
+ kwi = NPC_PARSE_RESULT_DMAC_OFFSET / sizeof(u64);
+ entry.kw[kwi] = BIT_ULL(40); /* LSB of the first byte in DMAC */
+ entry.kw_mask[kwi] = BIT_ULL(40);
+ }
+
+ *(u64 *)&action = 0x00;
+ action.op = NIX_RX_ACTIONOP_UCAST;
+ action.pf_func = pcifunc;
+
+ entry.action = *(u64 *)&action;
+ npc_config_mcam_entry(rvu, mcam, blkaddr, index,
+ NIX_INTF_RX, &entry, true);
+}
+
+void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, index;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Only PFs have a promiscuous entry */
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ return;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_PROMISC_ENTRY);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
+}
+
+void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, u64 chan)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct mcam_entry entry = { {0} };
+ struct nix_rx_action action;
+#ifdef MCAST_MCE
+ struct rvu_pfvf *pfvf;
+#endif
+ int blkaddr, index;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Only PF can add a bcast match entry */
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ return;
+#ifdef MCAST_MCE
+ pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
+#endif
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_BCAST_ENTRY);
+
+ /* Check for L2B bit and LMAC channel */
+ entry.kw[0] = BIT_ULL(25) | chan;
+ entry.kw_mask[0] = BIT_ULL(25) | 0xFFFULL;
+
+ *(u64 *)&action = 0x00;
+#ifdef MCAST_MCE
+ action.op = NIX_RX_ACTIONOP_MCAST;
+ action.pf_func = pcifunc;
+ action.index = pfvf->bcast_mce_idx;
+#else
+ /* Early silicon doesn't support pkt replication, so install
+ * the entry with a UCAST action so that the PF receives all
+ * broadcast packets.
+ */
+ action.op = NIX_RX_ACTIONOP_UCAST;
+ action.pf_func = pcifunc;
+#endif
+
+ entry.action = *(u64 *)&action;
+ npc_config_mcam_entry(rvu, mcam, blkaddr, index,
+ NIX_INTF_RX, &entry, true);
+}
+
+void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
+ int group, int alg_idx, int mcam_index)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct nix_rx_action action;
+ int blkaddr, index, bank;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Check if this is for reserved default entry */
+ if (mcam_index < 0) {
+ if (group != DEFAULT_RSS_CONTEXT_GROUP)
+ return;
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+ } else {
+ /* TODO: validate this mcam index */
+ index = mcam_index;
+ }
+
+ if (index >= mcam->total_entries)
+ return;
+
+ bank = npc_get_bank(mcam, index);
+ index &= (mcam->banksize - 1);
+
+ *(u64 *)&action = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
+ /* Ignore if no action was set earlier */
+ if (!*(u64 *)&action)
+ return;
+
+ action.op = NIX_RX_ACTIONOP_RSS;
+ action.pf_func = pcifunc;
+ action.index = group;
+ action.flow_key_alg = alg_idx;
+
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, bank), *(u64 *)&action);
+}
+
+void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct nix_rx_action action;
+ int blkaddr, index, bank;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Disable ucast MCAM match entry of this PF/VF */
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
+
+ /* For PF, disable promisc and bcast MCAM match entries */
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_BCAST_ENTRY);
+ /* For bcast, disable the entry only if its action is not
+ * packet replication; if the action is replication, this
+ * PF's NIX LF is instead removed from the bcast replication
+ * list.
+ */
+ bank = npc_get_bank(mcam, index);
+ index &= (mcam->banksize - 1);
+ *(u64 *)&action = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
+ if (action.op != NIX_RX_ACTIONOP_MCAST)
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
+
+ rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
+ }
+}
+
+#define LDATA_EXTRACT_CONFIG(intf, lid, ltype, ld, cfg) \
+ rvu_write64(rvu, blkaddr, \
+ NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, ltype, ld), cfg)
+
+#define LDATA_FLAGS_CONFIG(intf, ld, flags, cfg) \
+ rvu_write64(rvu, blkaddr, \
+ NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg)
+
+static void npc_config_ldata_extract(struct rvu *rvu, int blkaddr)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int lid, ltype;
+ int lid_count;
+ u64 cfg;
+
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
+ lid_count = (cfg >> 4) & 0xF;
+
+ /* First clear any existing config, i.e.
+ * disable LDATA and FLAGS extraction.
+ */
+ for (lid = 0; lid < lid_count; lid++) {
+ for (ltype = 0; ltype < 16; ltype++) {
+ LDATA_EXTRACT_CONFIG(NIX_INTF_RX, lid, ltype, 0, 0ULL);
+ LDATA_EXTRACT_CONFIG(NIX_INTF_RX, lid, ltype, 1, 0ULL);
+ LDATA_EXTRACT_CONFIG(NIX_INTF_TX, lid, ltype, 0, 0ULL);
+ LDATA_EXTRACT_CONFIG(NIX_INTF_TX, lid, ltype, 1, 0ULL);
+
+ LDATA_FLAGS_CONFIG(NIX_INTF_RX, 0, ltype, 0ULL);
+ LDATA_FLAGS_CONFIG(NIX_INTF_RX, 1, ltype, 0ULL);
+ LDATA_FLAGS_CONFIG(NIX_INTF_TX, 0, ltype, 0ULL);
+ LDATA_FLAGS_CONFIG(NIX_INTF_TX, 1, ltype, 0ULL);
+ }
+ }
+
+ /* If we plan to extract the outer IPv4 tuple for TCP/UDP pkts,
+ * then a 112bit key is not sufficient.
+ */
+ if (mcam->keysize != NPC_MCAM_KEY_X2)
+ return;
+
+ /* Start placing extracted data/flags from 64bit onwards, for now */
+ /* Extract DMAC from the packet */
+ cfg = (0x05 << 16) | BIT_ULL(7) | NPC_PARSE_RESULT_DMAC_OFFSET;
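+ /* The 6 byte DMAC lands at byte offset NPC_PARSE_RESULT_DMAC_OFFSET
+ * of the parse result, from where it forms MCAM key word 1 (see
+ * rvu_npc_install_ucast_entry()).
+ */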
+ LDATA_EXTRACT_CONFIG(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_ETHER, 0, cfg);
+}
+
+static void npc_config_kpuaction(struct rvu *rvu, int blkaddr,
+ struct npc_kpu_profile_action *kpuaction,
+ int kpu, int entry, bool pkind)
+{
+ struct npc_kpu_action0 action0 = {0};
+ struct npc_kpu_action1 action1 = {0};
+ u64 reg;
+
+ action1.errlev = kpuaction->errlev;
+ action1.errcode = kpuaction->errcode;
+ action1.dp0_offset = kpuaction->dp0_offset;
+ action1.dp1_offset = kpuaction->dp1_offset;
+ action1.dp2_offset = kpuaction->dp2_offset;
+
+ if (pkind)
+ reg = NPC_AF_PKINDX_ACTION1(entry);
+ else
+ reg = NPC_AF_KPUX_ENTRYX_ACTION1(kpu, entry);
+
+ rvu_write64(rvu, blkaddr, reg, *(u64 *)&action1);
+
+ action0.byp_count = kpuaction->bypass_count;
+ action0.capture_ena = kpuaction->cap_ena;
+ action0.parse_done = kpuaction->parse_done;
+ action0.next_state = kpuaction->next_state;
+ action0.capture_lid = kpuaction->lid;
+ action0.capture_ltype = kpuaction->ltype;
+ action0.capture_flags = kpuaction->flags;
+ action0.ptr_advance = kpuaction->ptr_advance;
+ action0.var_len_offset = kpuaction->offset;
+ action0.var_len_mask = kpuaction->mask;
+ action0.var_len_right = kpuaction->right;
+ action0.var_len_shift = kpuaction->shift;
+
+ if (pkind)
+ reg = NPC_AF_PKINDX_ACTION0(entry);
+ else
+ reg = NPC_AF_KPUX_ENTRYX_ACTION0(kpu, entry);
+
+ rvu_write64(rvu, blkaddr, reg, *(u64 *)&action0);
+}
+
+static void npc_config_kpucam(struct rvu *rvu, int blkaddr,
+ struct npc_kpu_profile_cam *kpucam,
+ int kpu, int entry)
+{
+ struct npc_kpu_cam cam0 = {0};
+ struct npc_kpu_cam cam1 = {0};
+
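+ /* As with MCAM entries, CAM1 holds the bits to be matched as '1'
+ * and CAM0 the bits to be matched as '0'; bits clear in both are
+ * don't care.
+ */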
+ cam1.state = kpucam->state & kpucam->state_mask;
+ cam1.dp0_data = kpucam->dp0 & kpucam->dp0_mask;
+ cam1.dp1_data = kpucam->dp1 & kpucam->dp1_mask;
+ cam1.dp2_data = kpucam->dp2 & kpucam->dp2_mask;
+
+ cam0.state = ~kpucam->state & kpucam->state_mask;
+ cam0.dp0_data = ~kpucam->dp0 & kpucam->dp0_mask;
+ cam0.dp1_data = ~kpucam->dp1 & kpucam->dp1_mask;
+ cam0.dp2_data = ~kpucam->dp2 & kpucam->dp2_mask;
+
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_KPUX_ENTRYX_CAMX(kpu, entry, 0), *(u64 *)&cam0);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_KPUX_ENTRYX_CAMX(kpu, entry, 1), *(u64 *)&cam1);
+}
+
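+/* KPU entries are enabled by clearing their bit in the ENTRY_DIS
+ * register; return a mask with the first 'count' bits cleared and
+ * all remaining bits set.
+ */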
+static inline u64 enable_mask(int count)
+{
+ return (((count) < 64) ? ~(BIT_ULL(count) - 1) : (0x00ULL));
+}
+
+static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
+ struct npc_kpu_profile *profile)
+{
+ int entry, num_entries, max_entries;
+
+ if (profile->cam_entries != profile->action_entries) {
+ dev_err(rvu->dev,
+ "KPU%d: CAM and action entries [%d != %d] not equal\n",
+ kpu, profile->cam_entries, profile->action_entries);
+ }
+
+ max_entries = rvu_read64(rvu, blkaddr, NPC_AF_CONST1) & 0xFFF;
+
+ /* Program CAM match entries for previous KPU extracted data */
+ num_entries = min_t(int, profile->cam_entries, max_entries);
+ for (entry = 0; entry < num_entries; entry++)
+ npc_config_kpucam(rvu, blkaddr,
+ &profile->cam[entry], kpu, entry);
+
+ /* Program this KPU's actions */
+ num_entries = min_t(int, profile->action_entries, max_entries);
+ for (entry = 0; entry < num_entries; entry++)
+ npc_config_kpuaction(rvu, blkaddr, &profile->action[entry],
+ kpu, entry, false);
+
+ /* Enable all programmed entries */
+ num_entries = min_t(int, profile->action_entries, profile->cam_entries);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_KPUX_ENTRY_DISX(kpu, 0), enable_mask(num_entries));
+ if (num_entries > 64) {
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_KPUX_ENTRY_DISX(kpu, 1),
+ enable_mask(num_entries - 64));
+ }
+
+ /* Enable this KPU */
+ rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(kpu), 0x01);
+}
+
+static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int num_pkinds, num_kpus, idx;
+ struct npc_pkind *pkind;
+
+ /* Get HW limits */
+ hw->npc_kpus = (rvu_read64(rvu, blkaddr, NPC_AF_CONST) >> 8) & 0x1F;
+
+ /* Disable all KPUs and their entries */
+ for (idx = 0; idx < hw->npc_kpus; idx++) {
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_KPUX_ENTRY_DISX(idx, 0), ~0ULL);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_KPUX_ENTRY_DISX(idx, 1), ~0ULL);
+ rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(idx), 0x00);
+ }
+
+ /* First program the IKPU profile, i.e. the PKIND configs.
+ * Check the HW max count to avoid configuring junk or
+ * writing to unsupported CSR addresses.
+ */
+ pkind = &hw->pkind;
+ num_pkinds = ARRAY_SIZE(ikpu_action_entries);
+ num_pkinds = min_t(int, pkind->rsrc.max, num_pkinds);
+
+ for (idx = 0; idx < num_pkinds; idx++)
+ npc_config_kpuaction(rvu, blkaddr,
+ &ikpu_action_entries[idx], 0, idx, true);
+
+ /* Program KPU CAM and Action profiles */
+ num_kpus = ARRAY_SIZE(npc_kpu_profiles);
+ num_kpus = min_t(int, hw->npc_kpus, num_kpus);
+
+ for (idx = 0; idx < num_kpus; idx++)
+ npc_program_kpu_profile(rvu, blkaddr,
+ idx, &npc_kpu_profiles[idx]);
+}
+
+static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
+{
+ int nixlf_count = rvu_get_nixlf_count(rvu);
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int rsvd;
+ u64 cfg;
+
+ /* Get HW limits */
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
+ mcam->banks = (cfg >> 44) & 0xF;
+ mcam->banksize = (cfg >> 28) & 0xFFFF;
+
+ /* The actual number of MCAM entries varies with the entry size */
+ cfg = (rvu_read64(rvu, blkaddr,
+ NPC_AF_INTFX_KEX_CFG(0)) >> 32) & 0x07;
+ mcam->total_entries = (mcam->banks / BIT_ULL(cfg)) * mcam->banksize;
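+ /* Wider keys consume more banks per entry, e.g. with X2 keys two
+ * banks form one entry, halving the usable entry count.
+ */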
+ mcam->keysize = cfg;
+
+ /* Number of banks combined per MCAM entry */
+ if (cfg == NPC_MCAM_KEY_X4)
+ mcam->banks_per_entry = 4;
+ else if (cfg == NPC_MCAM_KEY_X2)
+ mcam->banks_per_entry = 2;
+ else
+ mcam->banks_per_entry = 1;
+
+ /* Reserve one MCAM entry for each NIX LF to guarantee space
+ * for installing the default matching DMAC rule. Also reserve
+ * two MCAM entries per PF for 'bcast & promisc' channel based
+ * matching, to support BCAST and PROMISC modes of operation.
+ * PF0 is excluded.
+ */
+ rsvd = (nixlf_count * RSVD_MCAM_ENTRIES_PER_NIXLF) +
+ ((rvu->hw->total_pfs - 1) * RSVD_MCAM_ENTRIES_PER_PF);
+ if (mcam->total_entries <= rsvd) {
+ dev_warn(rvu->dev,
+ "Insufficient NPC MCAM size %d for pkt I/O, exiting\n",
+ mcam->total_entries);
+ return -ENOMEM;
+ }
+
+ mcam->entries = mcam->total_entries - rsvd;
+ mcam->nixlf_offset = mcam->entries;
+ mcam->pf_offset = mcam->nixlf_offset + nixlf_count;
+
+ spin_lock_init(&mcam->lock);
+
+ return 0;
+}
+
+int rvu_npc_init(struct rvu *rvu)
+{
+ struct npc_pkind *pkind = &rvu->hw->pkind;
+ u64 keyz = NPC_MCAM_KEY_X2;
+ int blkaddr, err;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -ENODEV;
+ }
+
+ /* Allocate resource bitmap for pkind */
+ pkind->rsrc.max = (rvu_read64(rvu, blkaddr,
+ NPC_AF_CONST1) >> 12) & 0xFF;
+ err = rvu_alloc_bitmap(&pkind->rsrc);
+ if (err)
+ return err;
+
+ /* Allocate mem for pkind to PF and channel mapping info */
+ pkind->pfchan_map = devm_kcalloc(rvu->dev, pkind->rsrc.max,
+ sizeof(u32), GFP_KERNEL);
+ if (!pkind->pfchan_map)
+ return -ENOMEM;
+
+ /* Configure KPU profile */
+ npc_parser_profile_init(rvu, blkaddr);
+
+ /* Config Outer L2, IPv4's NPC layer info */
+ rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OL2,
+ (NPC_LID_LA << 8) | (NPC_LT_LA_ETHER << 4) | 0x0F);
+ rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OIP4,
+ (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
+
+ /* Enable below for Rx pkts.
+ * - Outer IPv4 header checksum validation.
+ * - Detect outer L2 broadcast address and set NPC_RESULT_S[L2M].
+ */
+ rvu_write64(rvu, blkaddr, NPC_AF_PCK_CFG,
+ rvu_read64(rvu, blkaddr, NPC_AF_PCK_CFG) |
+ BIT_ULL(6) | BIT_ULL(2));
+
+ /* Set the RX and TX side MCAM search key size.
+ * Also enable parse key extract nibbles such that, except for
+ * layers E to H, the rest of the key is included in the MCAM search.
+ */
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX),
+ ((keyz & 0x3) << 32) | ((1ULL << 20) - 1));
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX),
+ ((keyz & 0x3) << 32) | ((1ULL << 20) - 1));
+
+ err = npc_mcam_rsrcs_init(rvu, blkaddr);
+ if (err)
+ return err;
+
+ /* Config packet data and flags extraction into PARSE result */
+ npc_config_ldata_extract(rvu, blkaddr);
+
+ /* Set TX miss action to UCAST_DEFAULT, i.e.
+ * transmit the packet on the NIX LF SQ's default channel.
+ */
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(NIX_INTF_TX),
+ NIX_TX_ACTIONOP_UCAST_DEFAULT);
+
+ /* If MCAM lookup doesn't result in a match, drop the received packet */
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(NIX_INTF_RX),
+ NIX_RX_ACTIONOP_DROP);
+
+ return 0;
+}
+
+void rvu_npc_freemem(struct rvu *rvu)
+{
+ struct npc_pkind *pkind = &rvu->hw->pkind;
+
+ kfree(pkind->rsrc.bmap);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
new file mode 100644
index 000000000000..9d7c135c7965
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu_struct.h"
+#include "common.h"
+#include "mbox.h"
+#include "rvu.h"
+
+struct reg_range {
+ u64 start;
+ u64 end;
+};
+
+struct hw_reg_map {
+ u8 regblk;
+ u8 num_ranges;
+ u64 mask;
+#define MAX_REG_RANGES 8
+ struct reg_range range[MAX_REG_RANGES];
+};
+
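+/* Valid NIX AF register offset ranges for each transmit scheduler
+ * level (SMQ/MDQ and TL4..TL1). rvu_check_valid_reg() uses these to
+ * validate scheduler register offsets before they are accessed.
+ */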
+static struct hw_reg_map txsch_reg_map[NIX_TXSCH_LVL_CNT] = {
+ {NIX_TXSCH_LVL_SMQ, 2, 0xFFFF, {{0x0700, 0x0708}, {0x1400, 0x14C8} } },
+ {NIX_TXSCH_LVL_TL4, 3, 0xFFFF, {{0x0B00, 0x0B08}, {0x0B10, 0x0B18},
+ {0x1200, 0x12E0} } },
+ {NIX_TXSCH_LVL_TL3, 3, 0xFFFF, {{0x1000, 0x10E0}, {0x1600, 0x1608},
+ {0x1610, 0x1618} } },
+ {NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x1768} } },
+ {NIX_TXSCH_LVL_TL1, 1, 0xFFFF, {{0x0C00, 0x0D98} } },
+};
+
+bool rvu_check_valid_reg(int regmap, int regblk, u64 reg)
+{
+ int idx;
+ struct hw_reg_map *map;
+
+ /* Only 64bit offsets */
+ if (reg & 0x07)
+ return false;
+
+ if (regmap == TXSCHQ_HWREGMAP) {
+ if (regblk >= NIX_TXSCH_LVL_CNT)
+ return false;
+ map = &txsch_reg_map[regblk];
+ } else {
+ return false;
+ }
+
+ /* Should never happen */
+ if (map->regblk != regblk)
+ return false;
+
+ reg &= map->mask;
+
+ for (idx = 0; idx < map->num_ranges; idx++) {
+ if (reg >= map->range[idx].start &&
+ reg < map->range[idx].end)
+ return true;
+ }
+ return false;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
new file mode 100644
index 000000000000..09a8d61f3144
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -0,0 +1,502 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef RVU_REG_H
+#define RVU_REG_H
+
+/* Admin function registers */
+#define RVU_AF_MSIXTR_BASE (0x10)
+#define RVU_AF_ECO (0x20)
+#define RVU_AF_BLK_RST (0x30)
+#define RVU_AF_PF_BAR4_ADDR (0x40)
+#define RVU_AF_RAS (0x100)
+#define RVU_AF_RAS_W1S (0x108)
+#define RVU_AF_RAS_ENA_W1S (0x110)
+#define RVU_AF_RAS_ENA_W1C (0x118)
+#define RVU_AF_GEN_INT (0x120)
+#define RVU_AF_GEN_INT_W1S (0x128)
+#define RVU_AF_GEN_INT_ENA_W1S (0x130)
+#define RVU_AF_GEN_INT_ENA_W1C (0x138)
+#define RVU_AF_AFPF_MBOX0 (0x02000)
+#define RVU_AF_AFPF_MBOX1 (0x02008)
+#define RVU_AF_AFPFX_MBOXX(a, b) (0x2000 | (a) << 4 | (b) << 3)
+#define RVU_AF_PFME_STATUS (0x2800)
+#define RVU_AF_PFTRPEND (0x2810)
+#define RVU_AF_PFTRPEND_W1S (0x2820)
+#define RVU_AF_PF_RST (0x2840)
+#define RVU_AF_HWVF_RST (0x2850)
+#define RVU_AF_PFAF_MBOX_INT (0x2880)
+#define RVU_AF_PFAF_MBOX_INT_W1S (0x2888)
+#define RVU_AF_PFAF_MBOX_INT_ENA_W1S (0x2890)
+#define RVU_AF_PFAF_MBOX_INT_ENA_W1C (0x2898)
+#define RVU_AF_PFFLR_INT (0x28a0)
+#define RVU_AF_PFFLR_INT_W1S (0x28a8)
+#define RVU_AF_PFFLR_INT_ENA_W1S (0x28b0)
+#define RVU_AF_PFFLR_INT_ENA_W1C (0x28b8)
+#define RVU_AF_PFME_INT (0x28c0)
+#define RVU_AF_PFME_INT_W1S (0x28c8)
+#define RVU_AF_PFME_INT_ENA_W1S (0x28d0)
+#define RVU_AF_PFME_INT_ENA_W1C (0x28d8)
+
+/* Admin function's privileged PF/VF registers */
+#define RVU_PRIV_CONST (0x8000000)
+#define RVU_PRIV_GEN_CFG (0x8000010)
+#define RVU_PRIV_CLK_CFG (0x8000020)
+#define RVU_PRIV_ACTIVE_PC (0x8000030)
+#define RVU_PRIV_PFX_CFG(a) (0x8000100 | (a) << 16)
+#define RVU_PRIV_PFX_MSIX_CFG(a) (0x8000110 | (a) << 16)
+#define RVU_PRIV_PFX_ID_CFG(a) (0x8000120 | (a) << 16)
+#define RVU_PRIV_PFX_INT_CFG(a) (0x8000200 | (a) << 16)
+#define RVU_PRIV_PFX_NIX0_CFG (0x8000300)
+#define RVU_PRIV_PFX_NPA_CFG (0x8000310)
+#define RVU_PRIV_PFX_SSO_CFG (0x8000320)
+#define RVU_PRIV_PFX_SSOW_CFG (0x8000330)
+#define RVU_PRIV_PFX_TIM_CFG (0x8000340)
+#define RVU_PRIV_PFX_CPT0_CFG (0x8000350)
+#define RVU_PRIV_BLOCK_TYPEX_REV(a) (0x8000400 | (a) << 3)
+#define RVU_PRIV_HWVFX_INT_CFG(a) (0x8001280 | (a) << 16)
+#define RVU_PRIV_HWVFX_NIX0_CFG (0x8001300)
+#define RVU_PRIV_HWVFX_NPA_CFG (0x8001310)
+#define RVU_PRIV_HWVFX_SSO_CFG (0x8001320)
+#define RVU_PRIV_HWVFX_SSOW_CFG (0x8001330)
+#define RVU_PRIV_HWVFX_TIM_CFG (0x8001340)
+#define RVU_PRIV_HWVFX_CPT0_CFG (0x8001350)
+
+/* RVU PF registers */
+#define RVU_PF_VFX_PFVF_MBOX0 (0x00000)
+#define RVU_PF_VFX_PFVF_MBOX1 (0x00008)
+#define RVU_PF_VFX_PFVF_MBOXX(a, b) (0x0 | (a) << 12 | (b) << 3)
+#define RVU_PF_VF_BAR4_ADDR (0x10)
+#define RVU_PF_BLOCK_ADDRX_DISC(a) (0x200 | (a) << 3)
+#define RVU_PF_VFME_STATUSX(a) (0x800 | (a) << 3)
+#define RVU_PF_VFTRPENDX(a) (0x820 | (a) << 3)
+#define RVU_PF_VFTRPEND_W1SX(a) (0x840 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INTX(a) (0x880 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_W1SX(a) (0x8A0 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_ENA_W1SX(a) (0x8C0 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_ENA_W1CX(a) (0x8E0 | (a) << 3)
+#define RVU_PF_VFFLR_INTX(a) (0x900 | (a) << 3)
+#define RVU_PF_VFFLR_INT_W1SX(a) (0x920 | (a) << 3)
+#define RVU_PF_VFFLR_INT_ENA_W1SX(a) (0x940 | (a) << 3)
+#define RVU_PF_VFFLR_INT_ENA_W1CX(a) (0x960 | (a) << 3)
+#define RVU_PF_VFME_INTX(a) (0x980 | (a) << 3)
+#define RVU_PF_VFME_INT_W1SX(a) (0x9A0 | (a) << 3)
+#define RVU_PF_VFME_INT_ENA_W1SX(a) (0x9C0 | (a) << 3)
+#define RVU_PF_VFME_INT_ENA_W1CX(a) (0x9E0 | (a) << 3)
+#define RVU_PF_PFAF_MBOX0 (0xC00)
+#define RVU_PF_PFAF_MBOX1 (0xC08)
+#define RVU_PF_PFAF_MBOXX(a) (0xC00 | (a) << 3)
+#define RVU_PF_INT (0xc20)
+#define RVU_PF_INT_W1S (0xc28)
+#define RVU_PF_INT_ENA_W1S (0xc30)
+#define RVU_PF_INT_ENA_W1C (0xc38)
+#define RVU_PF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4)
+#define RVU_PF_MSIX_VECX_CTL(a) (0x008 | (a) << 4)
+#define RVU_PF_MSIX_PBAX(a) (0xF0000 | (a) << 3)
+
+/* RVU VF registers */
+#define RVU_VF_VFPF_MBOX0 (0x00000)
+#define RVU_VF_VFPF_MBOX1 (0x00008)
+
+/* NPA block's admin function registers */
+#define NPA_AF_BLK_RST (0x0000)
+#define NPA_AF_CONST (0x0010)
+#define NPA_AF_CONST1 (0x0018)
+#define NPA_AF_LF_RST (0x0020)
+#define NPA_AF_GEN_CFG (0x0030)
+#define NPA_AF_NDC_CFG (0x0040)
+#define NPA_AF_INP_CTL (0x00D0)
+#define NPA_AF_ACTIVE_CYCLES_PC (0x00F0)
+#define NPA_AF_AVG_DELAY (0x0100)
+#define NPA_AF_GEN_INT (0x0140)
+#define NPA_AF_GEN_INT_W1S (0x0148)
+#define NPA_AF_GEN_INT_ENA_W1S (0x0150)
+#define NPA_AF_GEN_INT_ENA_W1C (0x0158)
+#define NPA_AF_RVU_INT (0x0160)
+#define NPA_AF_RVU_INT_W1S (0x0168)
+#define NPA_AF_RVU_INT_ENA_W1S (0x0170)
+#define NPA_AF_RVU_INT_ENA_W1C (0x0178)
+#define NPA_AF_ERR_INT (0x0180)
+#define NPA_AF_ERR_INT_W1S (0x0188)
+#define NPA_AF_ERR_INT_ENA_W1S (0x0190)
+#define NPA_AF_ERR_INT_ENA_W1C (0x0198)
+#define NPA_AF_RAS (0x01A0)
+#define NPA_AF_RAS_W1S (0x01A8)
+#define NPA_AF_RAS_ENA_W1S (0x01B0)
+#define NPA_AF_RAS_ENA_W1C (0x01B8)
+#define NPA_AF_BP_TEST (0x0200)
+#define NPA_AF_ECO (0x0300)
+#define NPA_AF_AQ_CFG (0x0600)
+#define NPA_AF_AQ_BASE (0x0610)
+#define NPA_AF_AQ_STATUS (0x0620)
+#define NPA_AF_AQ_DOOR (0x0630)
+#define NPA_AF_AQ_DONE_WAIT (0x0640)
+#define NPA_AF_AQ_DONE (0x0650)
+#define NPA_AF_AQ_DONE_ACK (0x0660)
+#define NPA_AF_AQ_DONE_INT (0x0680)
+#define NPA_AF_AQ_DONE_INT_W1S (0x0688)
+#define NPA_AF_AQ_DONE_ENA_W1S (0x0690)
+#define NPA_AF_AQ_DONE_ENA_W1C (0x0698)
+#define NPA_AF_LFX_AURAS_CFG(a) (0x4000 | (a) << 18)
+#define NPA_AF_LFX_LOC_AURAS_BASE(a) (0x4010 | (a) << 18)
+#define NPA_AF_LFX_QINTS_CFG(a) (0x4100 | (a) << 18)
+#define NPA_AF_LFX_QINTS_BASE(a) (0x4110 | (a) << 18)
+#define NPA_PRIV_AF_INT_CFG (0x10000)
+#define NPA_PRIV_LFX_CFG (0x10010)
+#define NPA_PRIV_LFX_INT_CFG (0x10020)
+#define NPA_AF_RVU_LF_CFG_DEBUG (0x10030)
+
+/* NIX block's admin function registers */
+#define NIX_AF_CFG (0x0000)
+#define NIX_AF_STATUS (0x0010)
+#define NIX_AF_NDC_CFG (0x0018)
+#define NIX_AF_CONST (0x0020)
+#define NIX_AF_CONST1 (0x0028)
+#define NIX_AF_CONST2 (0x0030)
+#define NIX_AF_CONST3 (0x0038)
+#define NIX_AF_SQ_CONST (0x0040)
+#define NIX_AF_CQ_CONST (0x0048)
+#define NIX_AF_RQ_CONST (0x0050)
+#define NIX_AF_PSE_CONST (0x0060)
+#define NIX_AF_TL1_CONST (0x0070)
+#define NIX_AF_TL2_CONST (0x0078)
+#define NIX_AF_TL3_CONST (0x0080)
+#define NIX_AF_TL4_CONST (0x0088)
+#define NIX_AF_MDQ_CONST (0x0090)
+#define NIX_AF_MC_MIRROR_CONST (0x0098)
+#define NIX_AF_LSO_CFG (0x00A8)
+#define NIX_AF_BLK_RST (0x00B0)
+#define NIX_AF_TX_TSTMP_CFG (0x00C0)
+#define NIX_AF_RX_CFG (0x00D0)
+#define NIX_AF_AVG_DELAY (0x00E0)
+#define NIX_AF_CINT_DELAY (0x00F0)
+#define NIX_AF_RX_MCAST_BASE (0x0100)
+#define NIX_AF_RX_MCAST_CFG (0x0110)
+#define NIX_AF_RX_MCAST_BUF_BASE (0x0120)
+#define NIX_AF_RX_MCAST_BUF_CFG (0x0130)
+#define NIX_AF_RX_MIRROR_BUF_BASE (0x0140)
+#define NIX_AF_RX_MIRROR_BUF_CFG (0x0148)
+#define NIX_AF_LF_RST (0x0150)
+#define NIX_AF_GEN_INT (0x0160)
+#define NIX_AF_GEN_INT_W1S (0x0168)
+#define NIX_AF_GEN_INT_ENA_W1S (0x0170)
+#define NIX_AF_GEN_INT_ENA_W1C (0x0178)
+#define NIX_AF_ERR_INT (0x0180)
+#define NIX_AF_ERR_INT_W1S (0x0188)
+#define NIX_AF_ERR_INT_ENA_W1S (0x0190)
+#define NIX_AF_ERR_INT_ENA_W1C (0x0198)
+#define NIX_AF_RAS (0x01A0)
+#define NIX_AF_RAS_W1S (0x01A8)
+#define NIX_AF_RAS_ENA_W1S (0x01B0)
+#define NIX_AF_RAS_ENA_W1C (0x01B8)
+#define NIX_AF_RVU_INT (0x01C0)
+#define NIX_AF_RVU_INT_W1S (0x01C8)
+#define NIX_AF_RVU_INT_ENA_W1S (0x01D0)
+#define NIX_AF_RVU_INT_ENA_W1C (0x01D8)
+#define NIX_AF_TCP_TIMER (0x01E0)
+#define NIX_AF_RX_WQE_TAG_CTL (0x01F0)
+#define NIX_AF_RX_DEF_OL2 (0x0200)
+#define NIX_AF_RX_DEF_OIP4 (0x0210)
+#define NIX_AF_RX_DEF_IIP4 (0x0220)
+#define NIX_AF_RX_DEF_OIP6 (0x0230)
+#define NIX_AF_RX_DEF_IIP6 (0x0240)
+#define NIX_AF_RX_DEF_OTCP (0x0250)
+#define NIX_AF_RX_DEF_ITCP (0x0260)
+#define NIX_AF_RX_DEF_OUDP (0x0270)
+#define NIX_AF_RX_DEF_IUDP (0x0280)
+#define NIX_AF_RX_DEF_OSCTP (0x0290)
+#define NIX_AF_RX_DEF_ISCTP (0x02A0)
+#define NIX_AF_RX_DEF_IPSECX (0x02B0)
+#define NIX_AF_RX_IPSEC_GEN_CFG (0x0300)
+#define NIX_AF_RX_CPTX_INST_ADDR (0x0310)
+#define NIX_AF_NDC_TX_SYNC (0x03F0)
+#define NIX_AF_AQ_CFG (0x0400)
+#define NIX_AF_AQ_BASE (0x0410)
+#define NIX_AF_AQ_STATUS (0x0420)
+#define NIX_AF_AQ_DOOR (0x0430)
+#define NIX_AF_AQ_DONE_WAIT (0x0440)
+#define NIX_AF_AQ_DONE (0x0450)
+#define NIX_AF_AQ_DONE_ACK (0x0460)
+#define NIX_AF_AQ_DONE_TIMER (0x0470)
+#define NIX_AF_AQ_DONE_INT (0x0480)
+#define NIX_AF_AQ_DONE_INT_W1S (0x0488)
+#define NIX_AF_AQ_DONE_ENA_W1S (0x0490)
+#define NIX_AF_AQ_DONE_ENA_W1C (0x0498)
+#define NIX_AF_RX_LINKX_SLX_SPKT_CNT (0x0500)
+#define NIX_AF_RX_LINKX_SLX_SXQE_CNT (0x0510)
+#define NIX_AF_RX_MCAST_JOBSX_SW_CNT (0x0520)
+#define NIX_AF_RX_MIRROR_JOBSX_SW_CNT (0x0530)
+#define NIX_AF_RX_LINKX_CFG(a) (0x0540 | (a) << 16)
+#define NIX_AF_RX_SW_SYNC (0x0550)
+#define NIX_AF_RX_SW_SYNC_DONE (0x0560)
+#define NIX_AF_SEB_ECO (0x0600)
+#define NIX_AF_SEB_TEST_BP (0x0610)
+#define NIX_AF_NORM_TX_FIFO_STATUS (0x0620)
+#define NIX_AF_EXPR_TX_FIFO_STATUS (0x0630)
+#define NIX_AF_SDP_TX_FIFO_STATUS (0x0640)
+#define NIX_AF_TX_NPC_CAPTURE_CONFIG (0x0660)
+#define NIX_AF_TX_NPC_CAPTURE_INFO (0x0670)
+
+#define NIX_AF_DEBUG_NPC_RESP_DATAX(a) (0x680 | (a) << 3)
+#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16)
+#define NIX_AF_PSE_CHANNEL_LEVEL (0x800)
+#define NIX_AF_PSE_SHAPER_CFG (0x810)
+#define NIX_AF_TX_EXPR_CREDIT (0x830)
+#define NIX_AF_MARK_FORMATX_CTL(a) (0x900 | (a) << 18)
+#define NIX_AF_TX_LINKX_NORM_CREDIT(a) (0xA00 | (a) << 16)
+#define NIX_AF_TX_LINKX_EXPR_CREDIT(a) (0xA10 | (a) << 16)
+#define NIX_AF_TX_LINKX_SW_XOFF(a) (0xA20 | (a) << 16)
+#define NIX_AF_TX_LINKX_HW_XOFF(a) (0xA30 | (a) << 16)
+#define NIX_AF_SDP_LINK_CREDIT (0xa40)
+#define NIX_AF_SDP_SW_XOFFX(a) (0xA60 | (a) << 3)
+#define NIX_AF_SDP_HW_XOFFX(a) (0xAC0 | (a) << 3)
+#define NIX_AF_TL4X_BP_STATUS(a) (0xB00 | (a) << 16)
+#define NIX_AF_TL4X_SDP_LINK_CFG(a) (0xB10 | (a) << 16)
+#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (a) << 16)
+#define NIX_AF_TL1X_SHAPE(a) (0xC10 | (a) << 16)
+#define NIX_AF_TL1X_CIR(a) (0xC20 | (a) << 16)
+#define NIX_AF_TL1X_SHAPE_STATE(a) (0xC50 | (a) << 16)
+#define NIX_AF_TL1X_SW_XOFF(a) (0xC70 | (a) << 16)
+#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (a) << 16)
+#define NIX_AF_TL1X_GREEN(a) (0xC90 | (a) << 16)
+#define NIX_AF_TL1X_YELLOW(a) (0xCA0 | (a) << 16)
+#define NIX_AF_TL1X_RED(a) (0xCB0 | (a) << 16)
+#define NIX_AF_TL1X_MD_DEBUG0(a) (0xCC0 | (a) << 16)
+#define NIX_AF_TL1X_MD_DEBUG1(a) (0xCC8 | (a) << 16)
+#define NIX_AF_TL1X_MD_DEBUG2(a) (0xCD0 | (a) << 16)
+#define NIX_AF_TL1X_MD_DEBUG3(a) (0xCD8 | (a) << 16)
+#define NIX_AF_TL1A_DEBUG (0xce0)
+#define NIX_AF_TL1B_DEBUG (0xcf0)
+#define NIX_AF_TL1_DEBUG_GREEN (0xd00)
+#define NIX_AF_TL1_DEBUG_NODE (0xd10)
+#define NIX_AF_TL1X_DROPPED_PACKETS(a) (0xD20 | (a) << 16)
+#define NIX_AF_TL1X_DROPPED_BYTES(a) (0xD30 | (a) << 16)
+#define NIX_AF_TL1X_RED_PACKETS(a) (0xD40 | (a) << 16)
+#define NIX_AF_TL1X_RED_BYTES(a) (0xD50 | (a) << 16)
+#define NIX_AF_TL1X_YELLOW_PACKETS(a) (0xD60 | (a) << 16)
+#define NIX_AF_TL1X_YELLOW_BYTES(a) (0xD70 | (a) << 16)
+#define NIX_AF_TL1X_GREEN_PACKETS(a) (0xD80 | (a) << 16)
+#define NIX_AF_TL1X_GREEN_BYTES(a) (0xD90 | (a) << 16)
+#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (a) << 16)
+#define NIX_AF_TL2X_SHAPE(a) (0xE10 | (a) << 16)
+#define NIX_AF_TL2X_CIR(a) (0xE20 | (a) << 16)
+#define NIX_AF_TL2X_PIR(a) (0xE30 | (a) << 16)
+#define NIX_AF_TL2X_SCHED_STATE(a) (0xE40 | (a) << 16)
+#define NIX_AF_TL2X_SHAPE_STATE(a) (0xE50 | (a) << 16)
+#define NIX_AF_TL2X_POINTERS(a) (0xE60 | (a) << 16)
+#define NIX_AF_TL2X_SW_XOFF(a) (0xE70 | (a) << 16)
+#define NIX_AF_TL2X_TOPOLOGY(a) (0xE80 | (a) << 16)
+#define NIX_AF_TL2X_PARENT(a) (0xE88 | (a) << 16)
+#define NIX_AF_TL2X_GREEN(a) (0xE90 | (a) << 16)
+#define NIX_AF_TL2X_YELLOW(a) (0xEA0 | (a) << 16)
+#define NIX_AF_TL2X_RED(a) (0xEB0 | (a) << 16)
+#define NIX_AF_TL2X_MD_DEBUG0(a) (0xEC0 | (a) << 16)
+#define NIX_AF_TL2X_MD_DEBUG1(a) (0xEC8 | (a) << 16)
+#define NIX_AF_TL2X_MD_DEBUG2(a) (0xED0 | (a) << 16)
+#define NIX_AF_TL2X_MD_DEBUG3(a) (0xED8 | (a) << 16)
+#define NIX_AF_TL2A_DEBUG (0xee0)
+#define NIX_AF_TL2B_DEBUG (0xef0)
+#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (a) << 16)
+#define NIX_AF_TL3X_SHAPE(a) (0x1010 | (a) << 16)
+#define NIX_AF_TL3X_CIR(a) (0x1020 | (a) << 16)
+#define NIX_AF_TL3X_PIR(a) (0x1030 | (a) << 16)
+#define NIX_AF_TL3X_SCHED_STATE(a) (0x1040 | (a) << 16)
+#define NIX_AF_TL3X_SHAPE_STATE(a) (0x1050 | (a) << 16)
+#define NIX_AF_TL3X_POINTERS(a) (0x1060 | (a) << 16)
+#define NIX_AF_TL3X_SW_XOFF(a) (0x1070 | (a) << 16)
+#define NIX_AF_TL3X_TOPOLOGY(a) (0x1080 | (a) << 16)
+#define NIX_AF_TL3X_PARENT(a) (0x1088 | (a) << 16)
+#define NIX_AF_TL3X_GREEN(a) (0x1090 | (a) << 16)
+#define NIX_AF_TL3X_YELLOW(a) (0x10A0 | (a) << 16)
+#define NIX_AF_TL3X_RED(a) (0x10B0 | (a) << 16)
+#define NIX_AF_TL3X_MD_DEBUG0(a) (0x10C0 | (a) << 16)
+#define NIX_AF_TL3X_MD_DEBUG1(a) (0x10C8 | (a) << 16)
+#define NIX_AF_TL3X_MD_DEBUG2(a) (0x10D0 | (a) << 16)
+#define NIX_AF_TL3X_MD_DEBUG3(a) (0x10D8 | (a) << 16)
+#define NIX_AF_TL3A_DEBUG (0x10e0)
+#define NIX_AF_TL3B_DEBUG (0x10f0)
+#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (a) << 16)
+#define NIX_AF_TL4X_SHAPE(a) (0x1210 | (a) << 16)
+#define NIX_AF_TL4X_CIR(a) (0x1220 | (a) << 16)
+#define NIX_AF_TL4X_PIR(a) (0x1230 | (a) << 16)
+#define NIX_AF_TL4X_SCHED_STATE(a) (0x1240 | (a) << 16)
+#define NIX_AF_TL4X_SHAPE_STATE(a) (0x1250 | (a) << 16)
+#define NIX_AF_TL4X_POINTERS(a) (0x1260 | (a) << 16)
+#define NIX_AF_TL4X_SW_XOFF(a) (0x1270 | (a) << 16)
+#define NIX_AF_TL4X_TOPOLOGY(a) (0x1280 | (a) << 16)
+#define NIX_AF_TL4X_PARENT(a) (0x1288 | (a) << 16)
+#define NIX_AF_TL4X_GREEN(a) (0x1290 | (a) << 16)
+#define NIX_AF_TL4X_YELLOW(a) (0x12A0 | (a) << 16)
+#define NIX_AF_TL4X_RED(a) (0x12B0 | (a) << 16)
+#define NIX_AF_TL4X_MD_DEBUG0(a) (0x12C0 | (a) << 16)
+#define NIX_AF_TL4X_MD_DEBUG1(a) (0x12C8 | (a) << 16)
+#define NIX_AF_TL4X_MD_DEBUG2(a) (0x12D0 | (a) << 16)
+#define NIX_AF_TL4X_MD_DEBUG3(a) (0x12D8 | (a) << 16)
+#define NIX_AF_TL4A_DEBUG (0x12e0)
+#define NIX_AF_TL4B_DEBUG (0x12f0)
+#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (a) << 16)
+#define NIX_AF_MDQX_SHAPE(a) (0x1410 | (a) << 16)
+#define NIX_AF_MDQX_CIR(a) (0x1420 | (a) << 16)
+#define NIX_AF_MDQX_PIR(a) (0x1430 | (a) << 16)
+#define NIX_AF_MDQX_SCHED_STATE(a) (0x1440 | (a) << 16)
+#define NIX_AF_MDQX_SHAPE_STATE(a) (0x1450 | (a) << 16)
+#define NIX_AF_MDQX_POINTERS(a) (0x1460 | (a) << 16)
+#define NIX_AF_MDQX_SW_XOFF(a) (0x1470 | (a) << 16)
+#define NIX_AF_MDQX_PARENT(a) (0x1480 | (a) << 16)
+#define NIX_AF_MDQX_MD_DEBUG(a) (0x14C0 | (a) << 16)
+#define NIX_AF_MDQX_PTR_FIFO(a) (0x14D0 | (a) << 16)
+#define NIX_AF_MDQA_DEBUG (0x14e0)
+#define NIX_AF_MDQB_DEBUG (0x14f0)
+#define NIX_AF_TL3_TL2X_CFG(a) (0x1600 | (a) << 18)
+#define NIX_AF_TL3_TL2X_BP_STATUS(a) (0x1610 | (a) << 16)
+#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (a) << 16 | (b) << 3)
+#define NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(a, b) (0x1800 | (a) << 18 | (b) << 3)
+#define NIX_AF_TX_MCASTX(a) (0x1900 | (a) << 15)
+#define NIX_AF_TX_VTAG_DEFX_CTL(a) (0x1A00 | (a) << 16)
+#define NIX_AF_TX_VTAG_DEFX_DATA(a) (0x1A10 | (a) << 16)
+#define NIX_AF_RX_BPIDX_STATUS(a) (0x1A20 | (a) << 17)
+#define NIX_AF_RX_CHANX_CFG(a) (0x1A30 | (a) << 15)
+#define NIX_AF_CINT_TIMERX(a) (0x1A40 | (a) << 18)
+#define NIX_AF_LSO_FORMATX_FIELDX(a, b) (0x1B00 | (a) << 16 | (b) << 3)
+#define NIX_AF_LFX_CFG(a) (0x4000 | (a) << 17)
+#define NIX_AF_LFX_SQS_CFG(a) (0x4020 | (a) << 17)
+#define NIX_AF_LFX_TX_CFG2(a) (0x4028 | (a) << 17)
+#define NIX_AF_LFX_SQS_BASE(a) (0x4030 | (a) << 17)
+#define NIX_AF_LFX_RQS_CFG(a) (0x4040 | (a) << 17)
+#define NIX_AF_LFX_RQS_BASE(a) (0x4050 | (a) << 17)
+#define NIX_AF_LFX_CQS_CFG(a) (0x4060 | (a) << 17)
+#define NIX_AF_LFX_CQS_BASE(a) (0x4070 | (a) << 17)
+#define NIX_AF_LFX_TX_CFG(a) (0x4080 | (a) << 17)
+#define NIX_AF_LFX_TX_PARSE_CFG(a) (0x4090 | (a) << 17)
+#define NIX_AF_LFX_RX_CFG(a) (0x40A0 | (a) << 17)
+#define NIX_AF_LFX_RSS_CFG(a) (0x40C0 | (a) << 17)
+#define NIX_AF_LFX_RSS_BASE(a) (0x40D0 | (a) << 17)
+#define NIX_AF_LFX_QINTS_CFG(a) (0x4100 | (a) << 17)
+#define NIX_AF_LFX_QINTS_BASE(a) (0x4110 | (a) << 17)
+#define NIX_AF_LFX_CINTS_CFG(a) (0x4120 | (a) << 17)
+#define NIX_AF_LFX_CINTS_BASE(a) (0x4130 | (a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_CFG0(a) (0x4140 | (a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_CFG1(a) (0x4148 | (a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_DYNO_CFG(a) (0x4150 | (a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_DYNO_BASE(a) (0x4158 | (a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_SA_BASE(a) (0x4170 | (a) << 17)
+#define NIX_AF_LFX_TX_STATUS(a) (0x4180 | (a) << 17)
+#define NIX_AF_LFX_RX_VTAG_TYPEX(a, b) (0x4200 | (a) << 17 | (b) << 3)
+#define NIX_AF_LFX_LOCKX(a, b) (0x4300 | (a) << 17 | (b) << 3)
+#define NIX_AF_LFX_TX_STATX(a, b) (0x4400 | (a) << 17 | (b) << 3)
+#define NIX_AF_LFX_RX_STATX(a, b) (0x4500 | (a) << 17 | (b) << 3)
+#define NIX_AF_LFX_RSS_GRPX(a, b) (0x4600 | (a) << 17 | (b) << 3)
+#define NIX_AF_RX_NPC_MC_RCV (0x4700)
+#define NIX_AF_RX_NPC_MC_DROP (0x4710)
+#define NIX_AF_RX_NPC_MIRROR_RCV (0x4720)
+#define NIX_AF_RX_NPC_MIRROR_DROP (0x4730)
+#define NIX_AF_RX_ACTIVE_CYCLES_PCX(a) (0x4800 | (a) << 16)
+
+#define NIX_PRIV_AF_INT_CFG (0x8000000)
+#define NIX_PRIV_LFX_CFG (0x8000010)
+#define NIX_PRIV_LFX_INT_CFG (0x8000020)
+#define NIX_AF_RVU_LF_CFG_DEBUG (0x8000030)
+
+/* SSO */
+#define SSO_AF_CONST (0x1000)
+#define SSO_AF_CONST1 (0x1008)
+#define SSO_AF_BLK_RST (0x10f8)
+#define SSO_AF_LF_HWGRP_RST (0x10e0)
+#define SSO_AF_RVU_LF_CFG_DEBUG (0x3800)
+#define SSO_PRIV_LFX_HWGRP_CFG (0x10000)
+#define SSO_PRIV_LFX_HWGRP_INT_CFG (0x20000)
+
+/* SSOW */
+#define SSOW_AF_RVU_LF_HWS_CFG_DEBUG (0x0010)
+#define SSOW_AF_LF_HWS_RST (0x0030)
+#define SSOW_PRIV_LFX_HWS_CFG (0x1000)
+#define SSOW_PRIV_LFX_HWS_INT_CFG (0x2000)
+
+/* TIM */
+#define TIM_AF_CONST (0x90)
+#define TIM_PRIV_LFX_CFG (0x20000)
+#define TIM_PRIV_LFX_INT_CFG (0x24000)
+#define TIM_AF_RVU_LF_CFG_DEBUG (0x30000)
+#define TIM_AF_BLK_RST (0x10)
+#define TIM_AF_LF_RST (0x20)
+
+/* CPT */
+#define CPT_AF_CONSTANTS0 (0x0000)
+#define CPT_PRIV_LFX_CFG (0x41000)
+#define CPT_PRIV_LFX_INT_CFG (0x43000)
+#define CPT_AF_RVU_LF_CFG_DEBUG (0x45000)
+#define CPT_AF_LF_RST (0x44000)
+#define CPT_AF_BLK_RST (0x46000)
+
+#define NDC_AF_BLK_RST (0x002F0)
+#define NPC_AF_BLK_RST (0x00040)
+
+/* NPC */
+#define NPC_AF_CFG (0x00000)
+#define NPC_AF_ACTIVE_PC (0x00010)
+#define NPC_AF_CONST (0x00020)
+#define NPC_AF_CONST1 (0x00030)
+#define NPC_AF_BLK_RST (0x00040)
+#define NPC_AF_MCAM_SCRUB_CTL (0x000a0)
+#define NPC_AF_KCAM_SCRUB_CTL (0x000b0)
+#define NPC_AF_KPUX_CFG(a) (0x00500 | (a) << 3)
+#define NPC_AF_PCK_CFG (0x00600)
+#define NPC_AF_PCK_DEF_OL2 (0x00610)
+#define NPC_AF_PCK_DEF_OIP4 (0x00620)
+#define NPC_AF_PCK_DEF_OIP6 (0x00630)
+#define NPC_AF_PCK_DEF_IIP4 (0x00640)
+#define NPC_AF_KEX_LDATAX_FLAGS_CFG(a) (0x00800 | (a) << 3)
+#define NPC_AF_INTFX_KEX_CFG(a) (0x01010 | (a) << 8)
+#define NPC_AF_PKINDX_ACTION0(a) (0x80000ull | (a) << 6)
+#define NPC_AF_PKINDX_ACTION1(a) (0x80008ull | (a) << 6)
+#define NPC_AF_PKINDX_CPI_DEFX(a, b) (0x80020ull | (a) << 6 | (b) << 3)
+#define NPC_AF_KPUX_ENTRYX_CAMX(a, b, c) \
+ (0x100000 | (a) << 14 | (b) << 6 | (c) << 3)
+#define NPC_AF_KPUX_ENTRYX_ACTION0(a, b) \
+ (0x100020 | (a) << 14 | (b) << 6)
+#define NPC_AF_KPUX_ENTRYX_ACTION1(a, b) \
+ (0x100028 | (a) << 14 | (b) << 6)
+#define NPC_AF_KPUX_ENTRY_DISX(a, b) (0x180000 | (a) << 6 | (b) << 3)
+#define NPC_AF_CPIX_CFG(a) (0x200000 | (a) << 3)
+#define NPC_AF_INTFX_LIDX_LTX_LDX_CFG(a, b, c, d) \
+ (0x900000 | (a) << 16 | (b) << 12 | (c) << 5 | (d) << 3)
+#define NPC_AF_INTFX_LDATAX_FLAGSX_CFG(a, b, c) \
+ (0x980000 | (a) << 16 | (b) << 12 | (c) << 3)
+#define NPC_AF_MCAMEX_BANKX_CAMX_INTF(a, b, c) \
+ (0x1000000ull | (a) << 10 | (b) << 6 | (c) << 3)
+#define NPC_AF_MCAMEX_BANKX_CAMX_W0(a, b, c) \
+ (0x1000010ull | (a) << 10 | (b) << 6 | (c) << 3)
+#define NPC_AF_MCAMEX_BANKX_CAMX_W1(a, b, c) \
+ (0x1000020ull | (a) << 10 | (b) << 6 | (c) << 3)
+#define NPC_AF_MCAMEX_BANKX_CFG(a, b) (0x1800000ull | (a) << 8 | (b) << 4)
+#define NPC_AF_MCAMEX_BANKX_STAT_ACT(a, b) \
+ (0x1880000 | (a) << 8 | (b) << 4)
+#define NPC_AF_MATCH_STATX(a) (0x1880008 | (a) << 8)
+#define NPC_AF_INTFX_MISS_STAT_ACT(a) (0x1880040 + (a) * 0x8)
+#define NPC_AF_MCAMEX_BANKX_ACTION(a, b) (0x1900000ull | (a) << 8 | (b) << 4)
+#define NPC_AF_MCAMEX_BANKX_TAG_ACT(a, b) \
+ (0x1900008 | (a) << 8 | (b) << 4)
+#define NPC_AF_INTFX_MISS_ACT(a) (0x1a00000 | (a) << 4)
+#define NPC_AF_INTFX_MISS_TAG_ACT(a) (0x1b00008 | (a) << 4)
+#define NPC_AF_MCAM_BANKX_HITX(a, b) (0x1c80000 | (a) << 8 | (b) << 4)
+#define NPC_AF_LKUP_CTL (0x2000000)
+#define NPC_AF_LKUP_DATAX(a) (0x2000200 | (a) << 4)
+#define NPC_AF_LKUP_RESULTX(a) (0x2000400 | (a) << 4)
+#define NPC_AF_INTFX_STAT(a) (0x2000800 | (a) << 4)
+#define NPC_AF_DBG_CTL (0x3000000)
+#define NPC_AF_DBG_STATUS (0x3000010)
+#define NPC_AF_KPUX_DBG(a) (0x3000020 | (a) << 8)
+#define NPC_AF_IKPU_ERR_CTL (0x3000080)
+#define NPC_AF_KPUX_ERR_CTL(a) (0x30000a0 | (a) << 8)
+#define NPC_AF_MCAM_DBG (0x3001000)
+#define NPC_AF_DBG_DATAX(a) (0x3001400 | (a) << 4)
+#define NPC_AF_DBG_RESULTX(a) (0x3001800 | (a) << 4)
+
+#endif /* RVU_REG_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
new file mode 100644
index 000000000000..f920dac74e6c
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -0,0 +1,917 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef RVU_STRUCT_H
+#define RVU_STRUCT_H
+
+/* RVU Block Address Enumeration */
+enum rvu_block_addr_e {
+ BLKADDR_RVUM = 0x0ULL,
+ BLKADDR_LMT = 0x1ULL,
+ BLKADDR_MSIX = 0x2ULL,
+ BLKADDR_NPA = 0x3ULL,
+ BLKADDR_NIX0 = 0x4ULL,
+ BLKADDR_NIX1 = 0x5ULL,
+ BLKADDR_NPC = 0x6ULL,
+ BLKADDR_SSO = 0x7ULL,
+ BLKADDR_SSOW = 0x8ULL,
+ BLKADDR_TIM = 0x9ULL,
+ BLKADDR_CPT0 = 0xaULL,
+ BLKADDR_CPT1 = 0xbULL,
+ BLKADDR_NDC0 = 0xcULL,
+ BLKADDR_NDC1 = 0xdULL,
+ BLKADDR_NDC2 = 0xeULL,
+ BLK_COUNT = 0xfULL,
+};
+
+/* RVU Block Type Enumeration */
+enum rvu_block_type_e {
+ BLKTYPE_RVUM = 0x0,
+ BLKTYPE_MSIX = 0x1,
+ BLKTYPE_LMT = 0x2,
+ BLKTYPE_NIX = 0x3,
+ BLKTYPE_NPA = 0x4,
+ BLKTYPE_NPC = 0x5,
+ BLKTYPE_SSO = 0x6,
+ BLKTYPE_SSOW = 0x7,
+ BLKTYPE_TIM = 0x8,
+ BLKTYPE_CPT = 0x9,
+ BLKTYPE_NDC = 0xa,
+ BLKTYPE_MAX = 0xa,
+};
+
+/* RVU Admin function Interrupt Vector Enumeration */
+enum rvu_af_int_vec_e {
+ RVU_AF_INT_VEC_POISON = 0x0,
+ RVU_AF_INT_VEC_PFFLR = 0x1,
+ RVU_AF_INT_VEC_PFME = 0x2,
+ RVU_AF_INT_VEC_GEN = 0x3,
+ RVU_AF_INT_VEC_MBOX = 0x4,
+ RVU_AF_INT_VEC_CNT = 0x5,
+};
+
+/**
+ * RVU PF Interrupt Vector Enumeration
+ */
+enum rvu_pf_int_vec_e {
+ RVU_PF_INT_VEC_VFFLR0 = 0x0,
+ RVU_PF_INT_VEC_VFFLR1 = 0x1,
+ RVU_PF_INT_VEC_VFME0 = 0x2,
+ RVU_PF_INT_VEC_VFME1 = 0x3,
+ RVU_PF_INT_VEC_VFPF_MBOX0 = 0x4,
+ RVU_PF_INT_VEC_VFPF_MBOX1 = 0x5,
+ RVU_PF_INT_VEC_AFPF_MBOX = 0x6,
+ RVU_PF_INT_VEC_CNT = 0x7,
+};
+
+/* NPA admin queue completion enumeration */
+enum npa_aq_comp {
+ NPA_AQ_COMP_NOTDONE = 0x0,
+ NPA_AQ_COMP_GOOD = 0x1,
+ NPA_AQ_COMP_SWERR = 0x2,
+ NPA_AQ_COMP_CTX_POISON = 0x3,
+ NPA_AQ_COMP_CTX_FAULT = 0x4,
+ NPA_AQ_COMP_LOCKERR = 0x5,
+};
+
+/* NPA admin queue context types */
+enum npa_aq_ctype {
+ NPA_AQ_CTYPE_AURA = 0x0,
+ NPA_AQ_CTYPE_POOL = 0x1,
+};
+
+/* NPA admin queue instruction opcodes */
+enum npa_aq_instop {
+ NPA_AQ_INSTOP_NOP = 0x0,
+ NPA_AQ_INSTOP_INIT = 0x1,
+ NPA_AQ_INSTOP_WRITE = 0x2,
+ NPA_AQ_INSTOP_READ = 0x3,
+ NPA_AQ_INSTOP_LOCK = 0x4,
+ NPA_AQ_INSTOP_UNLOCK = 0x5,
+};
+
+/* NPA admin queue instruction structure */
+struct npa_aq_inst_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 doneint : 1; /* W0 */
+ u64 reserved_44_62 : 19;
+ u64 cindex : 20;
+ u64 reserved_17_23 : 7;
+ u64 lf : 9;
+ u64 ctype : 4;
+ u64 op : 4;
+#else
+ u64 op : 4;
+ u64 ctype : 4;
+ u64 lf : 9;
+ u64 reserved_17_23 : 7;
+ u64 cindex : 20;
+ u64 reserved_44_62 : 19;
+ u64 doneint : 1;
+#endif
+ u64 res_addr; /* W1 */
+};
+
+/* NPA admin queue result structure */
+struct npa_aq_res_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_17_63 : 47; /* W0 */
+ u64 doneint : 1;
+ u64 compcode : 8;
+ u64 ctype : 4;
+ u64 op : 4;
+#else
+ u64 op : 4;
+ u64 ctype : 4;
+ u64 compcode : 8;
+ u64 doneint : 1;
+ u64 reserved_17_63 : 47;
+#endif
+ u64 reserved_64_127; /* W1 */
+};
+
+struct npa_aura_s {
+ u64 pool_addr; /* W0 */
+#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
+ u64 avg_level : 8;
+ u64 reserved_118_119 : 2;
+ u64 shift : 6;
+ u64 aura_drop : 8;
+ u64 reserved_98_103 : 6;
+ u64 bp_ena : 2;
+ u64 aura_drop_ena : 1;
+ u64 pool_drop_ena : 1;
+ u64 reserved_93 : 1;
+ u64 avg_con : 9;
+ u64 pool_way_mask : 16;
+ u64 pool_caching : 1;
+ u64 reserved_65 : 2;
+ u64 ena : 1;
+#else
+ u64 ena : 1;
+ u64 reserved_65 : 2;
+ u64 pool_caching : 1;
+ u64 pool_way_mask : 16;
+ u64 avg_con : 9;
+ u64 reserved_93 : 1;
+ u64 pool_drop_ena : 1;
+ u64 aura_drop_ena : 1;
+ u64 bp_ena : 2;
+ u64 reserved_98_103 : 6;
+ u64 aura_drop : 8;
+ u64 shift : 6;
+ u64 reserved_118_119 : 2;
+ u64 avg_level : 8;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
+ u64 reserved_189_191 : 3;
+ u64 nix1_bpid : 9;
+ u64 reserved_177_179 : 3;
+ u64 nix0_bpid : 9;
+ u64 reserved_164_167 : 4;
+ u64 count : 36;
+#else
+ u64 count : 36;
+ u64 reserved_164_167 : 4;
+ u64 nix0_bpid : 9;
+ u64 reserved_177_179 : 3;
+ u64 nix1_bpid : 9;
+ u64 reserved_189_191 : 3;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
+ u64 reserved_252_255 : 4;
+ u64 fc_hyst_bits : 4;
+ u64 fc_stype : 2;
+ u64 fc_up_crossing : 1;
+ u64 fc_ena : 1;
+ u64 reserved_240_243 : 4;
+ u64 bp : 8;
+ u64 reserved_228_231 : 4;
+ u64 limit : 36;
+#else
+ u64 limit : 36;
+ u64 reserved_228_231 : 4;
+ u64 bp : 8;
+ u64 reserved_240_243 : 4;
+ u64 fc_ena : 1;
+ u64 fc_up_crossing : 1;
+ u64 fc_stype : 2;
+ u64 fc_hyst_bits : 4;
+ u64 reserved_252_255 : 4;
+#endif
+ u64 fc_addr; /* W4 */
+#if defined(__BIG_ENDIAN_BITFIELD) /* W5 */
+ u64 reserved_379_383 : 5;
+ u64 err_qint_idx : 7;
+ u64 reserved_371 : 1;
+ u64 thresh_qint_idx : 7;
+ u64 reserved_363 : 1;
+ u64 thresh_up : 1;
+ u64 thresh_int_ena : 1;
+ u64 thresh_int : 1;
+ u64 err_int_ena : 8;
+ u64 err_int : 8;
+ u64 update_time : 16;
+ u64 pool_drop : 8;
+#else
+ u64 pool_drop : 8;
+ u64 update_time : 16;
+ u64 err_int : 8;
+ u64 err_int_ena : 8;
+ u64 thresh_int : 1;
+ u64 thresh_int_ena : 1;
+ u64 thresh_up : 1;
+ u64 reserved_363 : 1;
+ u64 thresh_qint_idx : 7;
+ u64 reserved_371 : 1;
+ u64 err_qint_idx : 7;
+ u64 reserved_379_383 : 5;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W6 */
+ u64 reserved_420_447 : 28;
+ u64 thresh : 36;
+#else
+ u64 thresh : 36;
+ u64 reserved_420_447 : 28;
+#endif
+ u64 reserved_448_511; /* W7 */
+};
+
+struct npa_pool_s {
+ u64 stack_base; /* W0 */
+#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
+ u64 reserved_115_127 : 13;
+ u64 buf_size : 11;
+ u64 reserved_100_103 : 4;
+ u64 buf_offset : 12;
+ u64 stack_way_mask : 16;
+ u64 reserved_70_71 : 3;
+ u64 stack_caching : 1;
+ u64 reserved_66_67 : 2;
+ u64 nat_align : 1;
+ u64 ena : 1;
+#else
+ u64 ena : 1;
+ u64 nat_align : 1;
+ u64 reserved_66_67 : 2;
+ u64 stack_caching : 1;
+ u64 reserved_70_71 : 3;
+ u64 stack_way_mask : 16;
+ u64 buf_offset : 12;
+ u64 reserved_100_103 : 4;
+ u64 buf_size : 11;
+ u64 reserved_115_127 : 13;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
+ u64 stack_pages : 32;
+ u64 stack_max_pages : 32;
+#else
+ u64 stack_max_pages : 32;
+ u64 stack_pages : 32;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
+ u64 reserved_240_255 : 16;
+ u64 op_pc : 48;
+#else
+ u64 op_pc : 48;
+ u64 reserved_240_255 : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W4 */
+ u64 reserved_316_319 : 4;
+ u64 update_time : 16;
+ u64 reserved_297_299 : 3;
+ u64 fc_up_crossing : 1;
+ u64 fc_hyst_bits : 4;
+ u64 fc_stype : 2;
+ u64 fc_ena : 1;
+ u64 avg_con : 9;
+ u64 avg_level : 8;
+ u64 reserved_270_271 : 2;
+ u64 shift : 6;
+ u64 reserved_260_263 : 4;
+ u64 stack_offset : 4;
+#else
+ u64 stack_offset : 4;
+ u64 reserved_260_263 : 4;
+ u64 shift : 6;
+ u64 reserved_270_271 : 2;
+ u64 avg_level : 8;
+ u64 avg_con : 9;
+ u64 fc_ena : 1;
+ u64 fc_stype : 2;
+ u64 fc_hyst_bits : 4;
+ u64 fc_up_crossing : 1;
+ u64 reserved_297_299 : 3;
+ u64 update_time : 16;
+ u64 reserved_316_319 : 4;
+#endif
+ u64 fc_addr; /* W5 */
+ u64 ptr_start; /* W6 */
+ u64 ptr_end; /* W7 */
+#if defined(__BIG_ENDIAN_BITFIELD) /* W8 */
+ u64 reserved_571_575 : 5;
+ u64 err_qint_idx : 7;
+ u64 reserved_563 : 1;
+ u64 thresh_qint_idx : 7;
+ u64 reserved_555 : 1;
+ u64 thresh_up : 1;
+ u64 thresh_int_ena : 1;
+ u64 thresh_int : 1;
+ u64 err_int_ena : 8;
+ u64 err_int : 8;
+ u64 reserved_512_535 : 24;
+#else
+ u64 reserved_512_535 : 24;
+ u64 err_int : 8;
+ u64 err_int_ena : 8;
+ u64 thresh_int : 1;
+ u64 thresh_int_ena : 1;
+ u64 thresh_up : 1;
+ u64 reserved_555 : 1;
+ u64 thresh_qint_idx : 7;
+ u64 reserved_563 : 1;
+ u64 err_qint_idx : 7;
+ u64 reserved_571_575 : 5;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W9 */
+ u64 reserved_612_639 : 28;
+ u64 thresh : 36;
+#else
+ u64 thresh : 36;
+ u64 reserved_612_639 : 28;
+#endif
+ u64 reserved_640_703; /* W10 */
+ u64 reserved_704_767; /* W11 */
+ u64 reserved_768_831; /* W12 */
+ u64 reserved_832_895; /* W13 */
+ u64 reserved_896_959; /* W14 */
+ u64 reserved_960_1023; /* W15 */
+};
+
+/* NIX admin queue completion status */
+enum nix_aq_comp {
+ NIX_AQ_COMP_NOTDONE = 0x0,
+ NIX_AQ_COMP_GOOD = 0x1,
+ NIX_AQ_COMP_SWERR = 0x2,
+ NIX_AQ_COMP_CTX_POISON = 0x3,
+ NIX_AQ_COMP_CTX_FAULT = 0x4,
+ NIX_AQ_COMP_LOCKERR = 0x5,
+ NIX_AQ_COMP_SQB_ALLOC_FAIL = 0x6,
+};
+
+/* NIX admin queue context types */
+enum nix_aq_ctype {
+ NIX_AQ_CTYPE_RQ = 0x0,
+ NIX_AQ_CTYPE_SQ = 0x1,
+ NIX_AQ_CTYPE_CQ = 0x2,
+ NIX_AQ_CTYPE_MCE = 0x3,
+ NIX_AQ_CTYPE_RSS = 0x4,
+ NIX_AQ_CTYPE_DYNO = 0x5,
+};
+
+/* NIX admin queue instruction opcodes */
+enum nix_aq_instop {
+ NIX_AQ_INSTOP_NOP = 0x0,
+ NIX_AQ_INSTOP_INIT = 0x1,
+ NIX_AQ_INSTOP_WRITE = 0x2,
+ NIX_AQ_INSTOP_READ = 0x3,
+ NIX_AQ_INSTOP_LOCK = 0x4,
+ NIX_AQ_INSTOP_UNLOCK = 0x5,
+};
+
+/* NIX admin queue instruction structure */
+struct nix_aq_inst_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 doneint : 1; /* W0 */
+ u64 reserved_44_62 : 19;
+ u64 cindex : 20;
+ u64 reserved_15_23 : 9;
+ u64 lf : 7;
+ u64 ctype : 4;
+ u64 op : 4;
+#else
+ u64 op : 4;
+ u64 ctype : 4;
+ u64 lf : 7;
+ u64 reserved_15_23 : 9;
+ u64 cindex : 20;
+ u64 reserved_44_62 : 19;
+ u64 doneint : 1;
+#endif
+ u64 res_addr; /* W1 */
+};
+
+/* NIX admin queue result structure */
+struct nix_aq_res_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_17_63 : 47; /* W0 */
+ u64 doneint : 1;
+ u64 compcode : 8;
+ u64 ctype : 4;
+ u64 op : 4;
+#else
+ u64 op : 4;
+ u64 ctype : 4;
+ u64 compcode : 8;
+ u64 doneint : 1;
+ u64 reserved_17_63 : 47;
+#endif
+ u64 reserved_64_127; /* W1 */
+};
+
+/* NIX Completion queue context structure */
+struct nix_cq_ctx_s {
+ u64 base;
+#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
+ u64 wrptr : 20;
+ u64 avg_con : 9;
+ u64 cint_idx : 7;
+ u64 cq_err : 1;
+ u64 qint_idx : 7;
+ u64 rsvd_81_83 : 3;
+ u64 bpid : 9;
+ u64 rsvd_69_71 : 3;
+ u64 bp_ena : 1;
+ u64 rsvd_64_67 : 4;
+#else
+ u64 rsvd_64_67 : 4;
+ u64 bp_ena : 1;
+ u64 rsvd_69_71 : 3;
+ u64 bpid : 9;
+ u64 rsvd_81_83 : 3;
+ u64 qint_idx : 7;
+ u64 cq_err : 1;
+ u64 cint_idx : 7;
+ u64 avg_con : 9;
+ u64 wrptr : 20;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
+ u64 update_time : 16;
+ u64 avg_level : 8;
+ u64 head : 20;
+ u64 tail : 20;
+#else
+ u64 tail : 20;
+ u64 head : 20;
+ u64 avg_level : 8;
+ u64 update_time : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
+ u64 cq_err_int_ena : 8;
+ u64 cq_err_int : 8;
+ u64 qsize : 4;
+ u64 rsvd_233_235 : 3;
+ u64 caching : 1;
+ u64 substream : 20;
+ u64 rsvd_210_211 : 2;
+ u64 ena : 1;
+ u64 drop_ena : 1;
+ u64 drop : 8;
+ u64 dp : 8;
+#else
+ u64 dp : 8;
+ u64 drop : 8;
+ u64 drop_ena : 1;
+ u64 ena : 1;
+ u64 rsvd_210_211 : 2;
+ u64 substream : 20;
+ u64 caching : 1;
+ u64 rsvd_233_235 : 3;
+ u64 qsize : 4;
+ u64 cq_err_int : 8;
+ u64 cq_err_int_ena : 8;
+#endif
+};
+
+/* NIX Receive queue context structure */
+struct nix_rq_ctx_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */
+ u64 wqe_aura : 20;
+ u64 substream : 20;
+ u64 cq : 20;
+ u64 ena_wqwd : 1;
+ u64 ipsech_ena : 1;
+ u64 sso_ena : 1;
+ u64 ena : 1;
+#else
+ u64 ena : 1;
+ u64 sso_ena : 1;
+ u64 ipsech_ena : 1;
+ u64 ena_wqwd : 1;
+ u64 cq : 20;
+ u64 substream : 20;
+ u64 wqe_aura : 20;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
+ u64 rsvd_127_122 : 6;
+ u64 lpb_drop_ena : 1;
+ u64 spb_drop_ena : 1;
+ u64 xqe_drop_ena : 1;
+ u64 wqe_caching : 1;
+ u64 pb_caching : 2;
+ u64 sso_tt : 2;
+ u64 sso_grp : 10;
+ u64 lpb_aura : 20;
+ u64 spb_aura : 20;
+#else
+ u64 spb_aura : 20;
+ u64 lpb_aura : 20;
+ u64 sso_grp : 10;
+ u64 sso_tt : 2;
+ u64 pb_caching : 2;
+ u64 wqe_caching : 1;
+ u64 xqe_drop_ena : 1;
+ u64 spb_drop_ena : 1;
+ u64 lpb_drop_ena : 1;
+ u64 rsvd_127_122 : 6;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
+ u64 xqe_hdr_split : 1;
+ u64 xqe_imm_copy : 1;
+ u64 rsvd_189_184 : 6;
+ u64 xqe_imm_size : 6;
+ u64 later_skip : 6;
+ u64 rsvd_171 : 1;
+ u64 first_skip : 7;
+ u64 lpb_sizem1 : 12;
+ u64 spb_ena : 1;
+ u64 rsvd_150_148 : 3;
+ u64 wqe_skip : 2;
+ u64 spb_sizem1 : 6;
+ u64 rsvd_139_128 : 12;
+#else
+ u64 rsvd_139_128 : 12;
+ u64 spb_sizem1 : 6;
+ u64 wqe_skip : 2;
+ u64 rsvd_150_148 : 3;
+ u64 spb_ena : 1;
+ u64 lpb_sizem1 : 12;
+ u64 first_skip : 7;
+ u64 rsvd_171 : 1;
+ u64 later_skip : 6;
+ u64 xqe_imm_size : 6;
+ u64 rsvd_189_184 : 6;
+ u64 xqe_imm_copy : 1;
+ u64 xqe_hdr_split : 1;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
+ u64 spb_pool_pass : 8;
+ u64 spb_pool_drop : 8;
+ u64 spb_aura_pass : 8;
+ u64 spb_aura_drop : 8;
+ u64 wqe_pool_pass : 8;
+ u64 wqe_pool_drop : 8;
+ u64 xqe_pass : 8;
+ u64 xqe_drop : 8;
+#else
+ u64 xqe_drop : 8;
+ u64 xqe_pass : 8;
+ u64 wqe_pool_drop : 8;
+ u64 wqe_pool_pass : 8;
+ u64 spb_aura_drop : 8;
+ u64 spb_aura_pass : 8;
+ u64 spb_pool_drop : 8;
+ u64 spb_pool_pass : 8;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W4 */
+ u64 rsvd_319_315 : 5;
+ u64 qint_idx : 7;
+ u64 rq_int_ena : 8;
+ u64 rq_int : 8;
+ u64 rsvd_291_288 : 4;
+ u64 lpb_pool_pass : 8;
+ u64 lpb_pool_drop : 8;
+ u64 lpb_aura_pass : 8;
+ u64 lpb_aura_drop : 8;
+#else
+ u64 lpb_aura_drop : 8;
+ u64 lpb_aura_pass : 8;
+ u64 lpb_pool_drop : 8;
+ u64 lpb_pool_pass : 8;
+ u64 rsvd_291_288 : 4;
+ u64 rq_int : 8;
+ u64 rq_int_ena : 8;
+ u64 qint_idx : 7;
+ u64 rsvd_319_315 : 5;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W5 */
+ u64 rsvd_383_366 : 18;
+ u64 flow_tagw : 6;
+ u64 bad_utag : 8;
+ u64 good_utag : 8;
+ u64 ltag : 24;
+#else
+ u64 ltag : 24;
+ u64 good_utag : 8;
+ u64 bad_utag : 8;
+ u64 flow_tagw : 6;
+ u64 rsvd_383_366 : 18;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W6 */
+ u64 rsvd_447_432 : 16;
+ u64 octs : 48;
+#else
+ u64 octs : 48;
+ u64 rsvd_447_432 : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W7 */
+ u64 rsvd_511_496 : 16;
+ u64 pkts : 48;
+#else
+ u64 pkts : 48;
+ u64 rsvd_511_496 : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W8 */
+ u64 rsvd_575_560 : 16;
+ u64 drop_octs : 48;
+#else
+ u64 drop_octs : 48;
+ u64 rsvd_575_560 : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W9 */
+ u64 rsvd_639_624 : 16;
+ u64 drop_pkts : 48;
+#else
+ u64 drop_pkts : 48;
+ u64 rsvd_639_624 : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W10 */
+ u64 rsvd_703_688 : 16;
+ u64 re_pkts : 48;
+#else
+ u64 re_pkts : 48;
+ u64 rsvd_703_688 : 16;
+#endif
+ u64 rsvd_767_704; /* W11 */
+ u64 rsvd_831_768; /* W12 */
+ u64 rsvd_895_832; /* W13 */
+ u64 rsvd_959_896; /* W14 */
+ u64 rsvd_1023_960; /* W15 */
+};
+
+/* NIX sqe sizes */
+enum nix_maxsqesz {
+ NIX_MAXSQESZ_W16 = 0x0,
+ NIX_MAXSQESZ_W8 = 0x1,
+};
+
+/* NIX SQB caching type */
+enum nix_stype {
+ NIX_STYPE_STF = 0x0,
+ NIX_STYPE_STT = 0x1,
+ NIX_STYPE_STP = 0x2,
+};
+
+/* NIX Send queue context structure */
+struct nix_sq_ctx_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */
+ u64 sqe_way_mask : 16;
+ u64 cq : 20;
+ u64 sdp_mcast : 1;
+ u64 substream : 20;
+ u64 qint_idx : 6;
+ u64 ena : 1;
+#else
+ u64 ena : 1;
+ u64 qint_idx : 6;
+ u64 substream : 20;
+ u64 sdp_mcast : 1;
+ u64 cq : 20;
+ u64 sqe_way_mask : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
+ u64 sqb_count : 16;
+ u64 default_chan : 12;
+ u64 smq_rr_quantum : 24;
+ u64 sso_ena : 1;
+ u64 xoff : 1;
+ u64 cq_ena : 1;
+ u64 smq : 9;
+#else
+ u64 smq : 9;
+ u64 cq_ena : 1;
+ u64 xoff : 1;
+ u64 sso_ena : 1;
+ u64 smq_rr_quantum : 24;
+ u64 default_chan : 12;
+ u64 sqb_count : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
+ u64 rsvd_191 : 1;
+ u64 sqe_stype : 2;
+ u64 sq_int_ena : 8;
+ u64 sq_int : 8;
+ u64 sqb_aura : 20;
+ u64 smq_rr_count : 25;
+#else
+ u64 smq_rr_count : 25;
+ u64 sqb_aura : 20;
+ u64 sq_int : 8;
+ u64 sq_int_ena : 8;
+ u64 sqe_stype : 2;
+ u64 rsvd_191 : 1;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
+ u64 rsvd_255_253 : 3;
+ u64 smq_next_sq_vld : 1;
+ u64 smq_pend : 1;
+ u64 smenq_next_sqb_vld : 1;
+ u64 head_offset : 6;
+ u64 smenq_offset : 6;
+ u64 tail_offset : 6;
+ u64 smq_lso_segnum : 8;
+ u64 smq_next_sq : 20;
+ u64 mnq_dis : 1;
+ u64 lmt_dis : 1;
+ u64 cq_limit : 8;
+ u64 max_sqe_size : 2;
+#else
+ u64 max_sqe_size : 2;
+ u64 cq_limit : 8;
+ u64 lmt_dis : 1;
+ u64 mnq_dis : 1;
+ u64 smq_next_sq : 20;
+ u64 smq_lso_segnum : 8;
+ u64 tail_offset : 6;
+ u64 smenq_offset : 6;
+ u64 head_offset : 6;
+ u64 smenq_next_sqb_vld : 1;
+ u64 smq_pend : 1;
+ u64 smq_next_sq_vld : 1;
+ u64 rsvd_255_253 : 3;
+#endif
+ u64 next_sqb : 64;/* W4 */
+ u64 tail_sqb : 64;/* W5 */
+ u64 smenq_sqb : 64;/* W6 */
+ u64 smenq_next_sqb : 64;/* W7 */
+ u64 head_sqb : 64;/* W8 */
+#if defined(__BIG_ENDIAN_BITFIELD) /* W9 */
+ u64 rsvd_639_630 : 10;
+ u64 vfi_lso_vld : 1;
+ u64 vfi_lso_vlan1_ins_ena : 1;
+ u64 vfi_lso_vlan0_ins_ena : 1;
+ u64 vfi_lso_mps : 14;
+ u64 vfi_lso_sb : 8;
+ u64 vfi_lso_sizem1 : 3;
+ u64 vfi_lso_total : 18;
+ u64 rsvd_583_576 : 8;
+#else
+ u64 rsvd_583_576 : 8;
+ u64 vfi_lso_total : 18;
+ u64 vfi_lso_sizem1 : 3;
+ u64 vfi_lso_sb : 8;
+ u64 vfi_lso_mps : 14;
+ u64 vfi_lso_vlan0_ins_ena : 1;
+ u64 vfi_lso_vlan1_ins_ena : 1;
+ u64 vfi_lso_vld : 1;
+ u64 rsvd_639_630 : 10;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W10 */
+ u64 rsvd_703_658 : 46;
+ u64 scm_lso_rem : 18;
+#else
+ u64 scm_lso_rem : 18;
+ u64 rsvd_703_658 : 46;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W11 */
+ u64 rsvd_767_752 : 16;
+ u64 octs : 48;
+#else
+ u64 octs : 48;
+ u64 rsvd_767_752 : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W12 */
+ u64 rsvd_831_816 : 16;
+ u64 pkts : 48;
+#else
+ u64 pkts : 48;
+ u64 rsvd_831_816 : 16;
+#endif
+ u64 rsvd_895_832 : 64;/* W13 */
+#if defined(__BIG_ENDIAN_BITFIELD) /* W14 */
+ u64 rsvd_959_944 : 16;
+ u64 dropped_octs : 48;
+#else
+ u64 dropped_octs : 48;
+ u64 rsvd_959_944 : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W15 */
+ u64 rsvd_1023_1008 : 16;
+ u64 dropped_pkts : 48;
+#else
+ u64 dropped_pkts : 48;
+ u64 rsvd_1023_1008 : 16;
+#endif
+};
+
+/* NIX Receive side scaling entry structure */
+struct nix_rsse_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ uint32_t reserved_20_31 : 12;
+ uint32_t rq : 20;
+#else
+ uint32_t rq : 20;
+ uint32_t reserved_20_31 : 12;
+#endif
+};
+
+/* NIX receive multicast/mirror entry structure */
+struct nix_rx_mce_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */
+ uint64_t next : 16;
+ uint64_t pf_func : 16;
+ uint64_t rsvd_31_24 : 8;
+ uint64_t index : 20;
+ uint64_t eol : 1;
+ uint64_t rsvd_2 : 1;
+ uint64_t op : 2;
+#else
+ uint64_t op : 2;
+ uint64_t rsvd_2 : 1;
+ uint64_t eol : 1;
+ uint64_t index : 20;
+ uint64_t rsvd_31_24 : 8;
+ uint64_t pf_func : 16;
+ uint64_t next : 16;
+#endif
+};
+
+enum nix_lsoalg {
+ NIX_LSOALG_NOP,
+ NIX_LSOALG_ADD_SEGNUM,
+ NIX_LSOALG_ADD_PAYLEN,
+ NIX_LSOALG_ADD_OFFSET,
+ NIX_LSOALG_TCP_FLAGS,
+};
+
+enum nix_txlayer {
+ NIX_TXLAYER_OL3,
+ NIX_TXLAYER_OL4,
+ NIX_TXLAYER_IL3,
+ NIX_TXLAYER_IL4,
+};
+
+struct nix_lso_format {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd_19_63 : 45;
+ u64 alg : 3;
+ u64 rsvd_14_15 : 2;
+ u64 sizem1 : 2;
+ u64 rsvd_10_11 : 2;
+ u64 layer : 2;
+ u64 offset : 8;
+#else
+ u64 offset : 8;
+ u64 layer : 2;
+ u64 rsvd_10_11 : 2;
+ u64 sizem1 : 2;
+ u64 rsvd_14_15 : 2;
+ u64 alg : 3;
+ u64 rsvd_19_63 : 45;
+#endif
+};
+
+struct nix_rx_flowkey_alg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_35_63 :29;
+ u64 ltype_match :4;
+ u64 ltype_mask :4;
+ u64 sel_chan :1;
+ u64 ena :1;
+ u64 reserved_24_24 :1;
+ u64 lid :3;
+ u64 bytesm1 :5;
+ u64 hdr_offset :8;
+ u64 fn_mask :1;
+ u64 ln_mask :1;
+ u64 key_offset :6;
+#else
+ u64 key_offset :6;
+ u64 ln_mask :1;
+ u64 fn_mask :1;
+ u64 hdr_offset :8;
+ u64 bytesm1 :5;
+ u64 lid :3;
+ u64 reserved_24_24 :1;
+ u64 ena :1;
+ u64 sel_chan :1;
+ u64 ltype_mask :4;
+ u64 ltype_match :4;
+ u64 reserved_35_63 :29;
+#endif
+};
+
+/* NIX VTAG size */
+enum nix_vtag_size {
+ VTAGSIZE_T4 = 0x0,
+ VTAGSIZE_T8 = 0x1,
+};
+#endif /* RVU_STRUCT_H */
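The admin-queue structures above are host-endian bitfield views of the hardware words: software fills an instruction, points res_addr at a DMA-able result structure, and the block writes the completion code back there. A hedged sketch of building an NPA AQ INIT instruction for an aura context; the helper name and the lf/aura_idx/res_iova parameters are illustrative, not taken from the driver:

#include <linux/string.h>
#include <linux/types.h>
#include "rvu_struct.h"	/* struct npa_aq_inst_s and the enums above */

static void npa_build_aura_init(struct npa_aq_inst_s *inst,
				u16 lf, u32 aura_idx, u64 res_iova)
{
	memset(inst, 0, sizeof(*inst));
	inst->op       = NPA_AQ_INSTOP_INIT;	/* initialise a context      */
	inst->ctype    = NPA_AQ_CTYPE_AURA;	/* ...of type aura           */
	inst->lf       = lf;			/* target NPA LF slot        */
	inst->cindex   = aura_idx;		/* aura index within that LF */
	inst->doneint  = 1;			/* raise DONE on completion  */
	inst->res_addr = res_iova;		/* IOVA of npa_aq_res_s      */
}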
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index d25e16d2c319..109472d6b61f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -167,8 +167,13 @@ static void mlx4_en_get_profile(struct mlx4_en_dev *mdev)
params->prof[i].rx_ppp = pfcrx;
params->prof[i].tx_pause = !(pfcrx || pfctx);
params->prof[i].tx_ppp = pfctx;
- params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
- params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
+ if (mlx4_low_memory_profile()) {
+ params->prof[i].tx_ring_size = MLX4_EN_MIN_TX_SIZE;
+ params->prof[i].rx_ring_size = MLX4_EN_MIN_RX_SIZE;
+ } else {
+ params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
+ params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
+ }
params->prof[i].num_up = MLX4_EN_NUM_UP_LOW;
params->prof[i].num_tx_rings_p_up = params->max_num_tx_rings_p_up;
params->prof[i].tx_ring_num[TX] = params->max_num_tx_rings_p_up *
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index fe49384eba48..b744cd49a785 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -39,7 +39,6 @@
#include <linux/slab.h>
#include <linux/hash.h>
#include <net/ip.h>
-#include <net/busy_poll.h>
#include <net/vxlan.h>
#include <net/devlink.h>
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index a1aeeb8094c3..db00bf1c23f5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -31,7 +31,6 @@
*
*/
-#include <net/busy_poll.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mlx4/cq.h>
@@ -44,6 +43,7 @@
#include <linux/vmalloc.h>
#include <linux/irq.h>
+#include <net/ip.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_checksum.h>
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index d2d59444f562..6a046030e873 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -260,47 +260,34 @@ static const struct devlink_param mlx4_devlink_params[] = {
NULL, NULL, NULL),
};
-static void mlx4_devlink_set_init_value(struct devlink *devlink, u32 param_id,
- union devlink_param_value init_val)
-{
- struct mlx4_priv *priv = devlink_priv(devlink);
- struct mlx4_dev *dev = &priv->dev;
- int err;
-
- err = devlink_param_driverinit_value_set(devlink, param_id, init_val);
- if (err)
- mlx4_warn(dev,
- "devlink set parameter %u value failed (err = %d)",
- param_id, err);
-}
-
static void mlx4_devlink_set_params_init_values(struct devlink *devlink)
{
union devlink_param_value value;
value.vbool = !!mlx4_internal_err_reset;
- mlx4_devlink_set_init_value(devlink,
- DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
- value);
+ devlink_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
+ value);
value.vu32 = 1UL << log_num_mac;
- mlx4_devlink_set_init_value(devlink,
- DEVLINK_PARAM_GENERIC_ID_MAX_MACS, value);
+ devlink_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ value);
value.vbool = enable_64b_cqe_eqe;
- mlx4_devlink_set_init_value(devlink,
- MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
- value);
+ devlink_param_driverinit_value_set(devlink,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
+ value);
value.vbool = enable_4k_uar;
- mlx4_devlink_set_init_value(devlink,
- MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
- value);
+ devlink_param_driverinit_value_set(devlink,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
+ value);
value.vbool = false;
- mlx4_devlink_set_init_value(devlink,
- DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
- value);
+ devlink_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
+ value);
}
static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index c3228b89df46..485d856546c6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -72,7 +72,7 @@
#define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT)
#define DEF_RX_RINGS 16
#define MAX_RX_RINGS 128
-#define MIN_RX_RINGS 4
+#define MIN_RX_RINGS 1
#define LOG_TXBB_SIZE 6
#define TXBB_SIZE BIT(LOG_TXBB_SIZE)
#define HEADROOM (2048 / TXBB_SIZE + 1)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index a53736c26c0c..a5a0823e5ada 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -308,10 +308,11 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
- case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER:
+ case MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT:
case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT:
case MLX5_CMD_OP_FPGA_DESTROY_QP:
case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
+ case MLX5_CMD_OP_DEALLOC_MEMIC:
return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -426,7 +427,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
- case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
+ case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
case MLX5_CMD_OP_FPGA_CREATE_QP:
case MLX5_CMD_OP_FPGA_MODIFY_QP:
@@ -435,6 +436,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
+ case MLX5_CMD_OP_ALLOC_MEMIC:
*status = MLX5_DRIVER_STATUS_ABORTED;
*synd = MLX5_DRIVER_SYND;
return -EIO;
@@ -599,8 +601,8 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
- MLX5_COMMAND_STR_CASE(ALLOC_ENCAP_HEADER);
- MLX5_COMMAND_STR_CASE(DEALLOC_ENCAP_HEADER);
+ MLX5_COMMAND_STR_CASE(ALLOC_PACKET_REFORMAT_CONTEXT);
+ MLX5_COMMAND_STR_CASE(DEALLOC_PACKET_REFORMAT_CONTEXT);
MLX5_COMMAND_STR_CASE(ALLOC_MODIFY_HEADER_CONTEXT);
MLX5_COMMAND_STR_CASE(DEALLOC_MODIFY_HEADER_CONTEXT);
MLX5_COMMAND_STR_CASE(FPGA_CREATE_QP);
@@ -617,6 +619,8 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(MODIFY_GENERAL_OBJECT);
MLX5_COMMAND_STR_CASE(QUERY_GENERAL_OBJECT);
MLX5_COMMAND_STR_CASE(QUERY_MODIFY_HEADER_CONTEXT);
+ MLX5_COMMAND_STR_CASE(ALLOC_MEMIC);
+ MLX5_COMMAND_STR_CASE(DEALLOC_MEMIC);
default: return "unknown command opcode";
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index a4179122a279..4b85abb5c9f7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -109,6 +109,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
cq->cons_index = 0;
cq->arm_sn = 0;
cq->eq = eq;
+ cq->uid = MLX5_GET(create_cq_in, in, uid);
refcount_set(&cq->refcount, 1);
init_completion(&cq->free);
if (!cq->comp)
@@ -144,6 +145,7 @@ err_cmd:
memset(dout, 0, sizeof(dout));
MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
MLX5_SET(destroy_cq_in, din, cqn, cq->cqn);
+ MLX5_SET(destroy_cq_in, din, uid, cq->uid);
mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
return err;
}
@@ -165,6 +167,7 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
+ MLX5_SET(destroy_cq_in, in, uid, cq->uid);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
@@ -196,6 +199,7 @@ int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u32 out[MLX5_ST_SZ_DW(modify_cq_out)] = {0};
MLX5_SET(modify_cq_in, in, opcode, MLX5_CMD_OP_MODIFY_CQ);
+ MLX5_SET(modify_cq_in, in, uid, cq->uid);
return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_modify_cq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
index 0240aee9189e..d027ce00c8ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
@@ -133,7 +133,7 @@ TRACE_EVENT(mlx5_fs_del_fg,
{MLX5_FLOW_CONTEXT_ACTION_DROP, "DROP"},\
{MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, "FWD"},\
{MLX5_FLOW_CONTEXT_ACTION_COUNT, "CNT"},\
- {MLX5_FLOW_CONTEXT_ACTION_ENCAP, "ENCAP"},\
+ {MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT, "REFORMAT"},\
{MLX5_FLOW_CONTEXT_ACTION_DECAP, "DECAP"},\
{MLX5_FLOW_CONTEXT_ACTION_MOD_HDR, "MOD_HDR"},\
{MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH, "VLAN_PUSH"},\
@@ -252,10 +252,10 @@ TRACE_EVENT(mlx5_fs_add_rule,
memcpy(__entry->destination,
&rule->dest_attr,
sizeof(__entry->destination));
- if (rule->dest_attr.type & MLX5_FLOW_DESTINATION_TYPE_COUNTER &&
- rule->dest_attr.counter)
+ if (rule->dest_attr.type &
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER)
__entry->counter_id =
- rule->dest_attr.counter->id;
+ rule->dest_attr.counter_id;
),
TP_printk("rule=%p fte=%p index=%u sw_action=<%s> [dst] %s\n",
__entry->rule, __entry->fte, __entry->index,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index ef7a44eb9adb..d7fbd5b6ac95 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -54,6 +54,7 @@
#include "en_stats.h"
#include "en/fs.h"
+extern const struct net_device_ops mlx5e_netdev_ops;
struct page_pool;
#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
@@ -172,6 +173,7 @@ static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
}
}
+/* Use this function to get max num channels (rxqs/txqs) only to create netdev */
static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
{
return is_kdump_kernel() ?
@@ -180,6 +182,13 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
MLX5E_MAX_NUM_CHANNELS);
}
+/* Use this function to get max num channels after netdev was created */
+static inline int mlx5e_get_netdev_max_channels(struct net_device *netdev)
+{
+ return min_t(unsigned int, netdev->num_rx_queues,
+ netdev->num_tx_queues);
+}
+
struct mlx5e_tx_wqe {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_eth_seg eth;
@@ -672,7 +681,7 @@ struct mlx5e_priv {
struct work_struct update_carrier_work;
struct work_struct set_rx_mode_work;
struct work_struct tx_timeout_work;
- struct delayed_work update_stats_work;
+ struct work_struct update_stats_work;
struct mlx5_core_dev *mdev;
struct net_device *netdev;
@@ -697,7 +706,7 @@ struct mlx5e_priv {
};
struct mlx5e_profile {
- void (*init)(struct mlx5_core_dev *mdev,
+ int (*init)(struct mlx5_core_dev *mdev,
struct net_device *netdev,
const struct mlx5e_profile *profile, void *ppriv);
void (*cleanup)(struct mlx5e_priv *priv);
@@ -709,7 +718,6 @@ struct mlx5e_profile {
void (*disable)(struct mlx5e_priv *priv);
void (*update_stats)(struct mlx5e_priv *priv);
void (*update_carrier)(struct mlx5e_priv *priv);
- int (*max_nch)(struct mlx5_core_dev *mdev);
struct {
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
@@ -925,8 +933,8 @@ int mlx5e_create_tises(struct mlx5e_priv *priv);
void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);
-void mlx5e_update_stats_work(struct work_struct *work);
+void mlx5e_queue_update_stats(struct mlx5e_priv *priv);
int mlx5e_bits_invert(unsigned long a, int size);
typedef int (*change_hw_mtu_cb)(struct mlx5e_priv *priv);
@@ -961,9 +969,15 @@ int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
struct ethtool_flash *flash);
/* mlx5e generic netdev management API */
+int mlx5e_netdev_init(struct net_device *netdev,
+ struct mlx5e_priv *priv,
+ struct mlx5_core_dev *mdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv);
+void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv);
struct net_device*
mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
- void *ppriv);
+ int nch, void *ppriv);
int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index bbf69e859b78..1431232c9a09 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -16,6 +16,8 @@ struct mlx5e_tc_table {
DECLARE_HASHTABLE(mod_hdr_tbl, 8);
DECLARE_HASHTABLE(hairpin_tbl, 8);
+
+ struct notifier_block netdevice_nb;
};
struct mlx5e_flow_table {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index 24e3b564964f..023dc4bccd28 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -235,3 +235,211 @@ out:
kfree(out);
return err;
}
+
+static u32 fec_supported_speeds[] = {
+ 10000,
+ 40000,
+ 25000,
+ 50000,
+ 56000,
+ 100000
+};
+
+#define MLX5E_FEC_SUPPORTED_SPEEDS ARRAY_SIZE(fec_supported_speeds)
+
+/* get/set FEC admin field for a given speed */
+static int mlx5e_fec_admin_field(u32 *pplm,
+ u8 *fec_policy,
+ bool write,
+ u32 speed)
+{
+ switch (speed) {
+ case 10000:
+ case 40000:
+ if (!write)
+ *fec_policy = MLX5_GET(pplm_reg, pplm,
+ fec_override_admin_10g_40g);
+ else
+ MLX5_SET(pplm_reg, pplm,
+ fec_override_admin_10g_40g, *fec_policy);
+ break;
+ case 25000:
+ if (!write)
+ *fec_policy = MLX5_GET(pplm_reg, pplm,
+ fec_override_admin_25g);
+ else
+ MLX5_SET(pplm_reg, pplm,
+ fec_override_admin_25g, *fec_policy);
+ break;
+ case 50000:
+ if (!write)
+ *fec_policy = MLX5_GET(pplm_reg, pplm,
+ fec_override_admin_50g);
+ else
+ MLX5_SET(pplm_reg, pplm,
+ fec_override_admin_50g, *fec_policy);
+ break;
+ case 56000:
+ if (!write)
+ *fec_policy = MLX5_GET(pplm_reg, pplm,
+ fec_override_admin_56g);
+ else
+ MLX5_SET(pplm_reg, pplm,
+ fec_override_admin_56g, *fec_policy);
+ break;
+ case 100000:
+ if (!write)
+ *fec_policy = MLX5_GET(pplm_reg, pplm,
+ fec_override_admin_100g);
+ else
+ MLX5_SET(pplm_reg, pplm,
+ fec_override_admin_100g, *fec_policy);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* returns FEC capabilities for a given speed */
+static int mlx5e_get_fec_cap_field(u32 *pplm,
+ u8 *fec_cap,
+ u32 speed)
+{
+ switch (speed) {
+ case 10000:
+ case 40000:
+ *fec_cap = MLX5_GET(pplm_reg, pplm,
+ fec_override_cap_10g_40g);
+ break;
+ case 25000:
+ *fec_cap = MLX5_GET(pplm_reg, pplm,
+ fec_override_cap_25g);
+ break;
+ case 50000:
+ *fec_cap = MLX5_GET(pplm_reg, pplm,
+ fec_override_cap_50g);
+ break;
+ case 56000:
+ *fec_cap = MLX5_GET(pplm_reg, pplm,
+ fec_override_cap_56g);
+ break;
+ case 100000:
+ *fec_cap = MLX5_GET(pplm_reg, pplm,
+ fec_override_cap_100g);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int mlx5e_get_fec_caps(struct mlx5_core_dev *dev, u8 *fec_caps)
+{
+ u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {};
+ int sz = MLX5_ST_SZ_BYTES(pplm_reg);
+ u32 current_fec_speed;
+ int err;
+
+ if (!MLX5_CAP_GEN(dev, pcam_reg))
+ return -EOPNOTSUPP;
+
+ if (!MLX5_CAP_PCAM_REG(dev, pplm))
+ return -EOPNOTSUPP;
+
+ MLX5_SET(pplm_reg, in, local_port, 1);
+ err = mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 0);
+ if (err)
+ return err;
+
+ err = mlx5e_port_linkspeed(dev, &current_fec_speed);
+ if (err)
+ return err;
+
+ return mlx5e_get_fec_cap_field(out, fec_caps, current_fec_speed);
+}
+
+int mlx5e_get_fec_mode(struct mlx5_core_dev *dev, u32 *fec_mode_active,
+ u8 *fec_configured_mode)
+{
+ u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {};
+ int sz = MLX5_ST_SZ_BYTES(pplm_reg);
+ u32 link_speed;
+ int err;
+
+ if (!MLX5_CAP_GEN(dev, pcam_reg))
+ return -EOPNOTSUPP;
+
+ if (!MLX5_CAP_PCAM_REG(dev, pplm))
+ return -EOPNOTSUPP;
+
+ MLX5_SET(pplm_reg, in, local_port, 1);
+ err = mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 0);
+ if (err)
+ return err;
+
+ *fec_mode_active = MLX5_GET(pplm_reg, out, fec_mode_active);
+
+ if (!fec_configured_mode)
+ return 0;
+
+ err = mlx5e_port_linkspeed(dev, &link_speed);
+ if (err)
+ return err;
+
+ return mlx5e_fec_admin_field(out, fec_configured_mode, 0, link_speed);
+}
+
+int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
+{
+ bool fec_mode_not_supp_in_speed = false;
+ u8 no_fec_policy = BIT(MLX5E_FEC_NOFEC);
+ u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {};
+ int sz = MLX5_ST_SZ_BYTES(pplm_reg);
+ u32 current_fec_speed;
+ u8 fec_caps = 0;
+ int err;
+ int i;
+
+ if (!MLX5_CAP_GEN(dev, pcam_reg))
+ return -EOPNOTSUPP;
+
+ if (!MLX5_CAP_PCAM_REG(dev, pplm))
+ return -EOPNOTSUPP;
+
+ MLX5_SET(pplm_reg, in, local_port, 1);
+ err = mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 0);
+ if (err)
+ return err;
+
+ err = mlx5e_port_linkspeed(dev, &current_fec_speed);
+ if (err)
+ return err;
+
+ memset(in, 0, sz);
+ MLX5_SET(pplm_reg, in, local_port, 1);
+ for (i = 0; i < MLX5E_FEC_SUPPORTED_SPEEDS && !!fec_policy; i++) {
+ mlx5e_get_fec_cap_field(out, &fec_caps, fec_supported_speeds[i]);
+ /* policy supported for link speed */
+ if (!!(fec_caps & fec_policy)) {
+ mlx5e_fec_admin_field(in, &fec_policy, 1,
+ fec_supported_speeds[i]);
+ } else {
+ if (fec_supported_speeds[i] == current_fec_speed)
+ return -EOPNOTSUPP;
+ mlx5e_fec_admin_field(in, &no_fec_policy, 1,
+ fec_supported_speeds[i]);
+ fec_mode_not_supp_in_speed = true;
+ }
+ }
+
+ if (fec_mode_not_supp_in_speed)
+ mlx5_core_dbg(dev,
+ "FEC policy 0x%x is not supported for some speeds",
+ fec_policy);
+
+ return mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 1);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
index f8cbd8194179..cd2160b8c9bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
@@ -45,4 +45,16 @@ int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out);
int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in);
int mlx5e_port_query_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer);
int mlx5e_port_set_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer);
+
+int mlx5e_get_fec_caps(struct mlx5_core_dev *dev, u8 *fec_caps);
+int mlx5e_get_fec_mode(struct mlx5_core_dev *dev, u32 *fec_mode_active,
+ u8 *fec_configured_mode);
+int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy);
+
+enum {
+ MLX5E_FEC_NOFEC,
+ MLX5E_FEC_FIRECODE,
+ MLX5E_FEC_RS_528_514,
+};
+
#endif
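The declarations above pair a per-speed override (mlx5e_set_fec_mode) with a query of the active and configured modes (mlx5e_get_fec_mode). A hedged usage sketch; example_force_rs_fec and its error handling are illustrative, not part of the driver:

#include <linux/bitops.h>
#include "en/port.h"

static int example_force_rs_fec(struct mlx5_core_dev *mdev)
{
	u32 fec_active = 0;
	u8 fec_configured = 0;
	int err;

	/* request RS FEC on every link speed that supports it */
	err = mlx5e_set_fec_mode(mdev, BIT(MLX5E_FEC_RS_528_514));
	if (err)
		return err;

	/* read back the mode the port is actually running */
	return mlx5e_get_fec_mode(mdev, &fec_active, &fec_configured);
}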
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 45cdde694d20..8657e0f26995 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -543,8 +543,11 @@ static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- netdev_err(priv->netdev, "%s: add rule(filter id=%d, rq idx=%d) failed, err=%d\n",
- __func__, arfs_rule->filter_id, arfs_rule->rxq, err);
+ priv->channel_stats[arfs_rule->rxq].rq.arfs_err++;
+ mlx5e_dbg(HW, priv,
+ "%s: add rule(filter id=%d, rq idx=%d, ip proto=0x%x) failed,err=%d\n",
+ __func__, arfs_rule->filter_id, arfs_rule->rxq,
+ tuple->ip_proto, err);
}
out:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index db3278cc052b..3078491cc0d0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -153,7 +153,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
if (enable_uc_lb)
MLX5_SET(modify_tir_in, in, ctx.self_lb_block,
- MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_);
+ MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST);
MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index c86fd770c463..3e770abfd802 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -319,7 +319,7 @@ static int mlx5e_set_ringparam(struct net_device *dev,
void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
struct ethtool_channels *ch)
{
- ch->max_combined = priv->profile->max_nch(priv->mdev);
+ ch->max_combined = mlx5e_get_netdev_max_channels(priv->netdev);
ch->combined_count = priv->channels.params.num_channels;
}
@@ -547,6 +547,70 @@ static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
__ETHTOOL_LINK_MODE_MASK_NBITS);
}
+static const u32 pplm_fec_2_ethtool[] = {
+ [MLX5E_FEC_NOFEC] = ETHTOOL_FEC_OFF,
+ [MLX5E_FEC_FIRECODE] = ETHTOOL_FEC_BASER,
+ [MLX5E_FEC_RS_528_514] = ETHTOOL_FEC_RS,
+};
+
+static u32 pplm2ethtool_fec(u_long fec_mode, unsigned long size)
+{
+ int mode = 0;
+
+ if (!fec_mode)
+ return ETHTOOL_FEC_AUTO;
+
+ mode = find_first_bit(&fec_mode, size);
+
+ if (mode < ARRAY_SIZE(pplm_fec_2_ethtool))
+ return pplm_fec_2_ethtool[mode];
+
+ return 0;
+}
+
+/* we use ETHTOOL_FEC_* offset and apply it to ETHTOOL_LINK_MODE_FEC_*_BIT */
+static u32 ethtool_fec2ethtool_caps(u_long ethtool_fec_code)
+{
+ u32 offset;
+
+ offset = find_first_bit(&ethtool_fec_code, sizeof(u32));
+ offset -= ETHTOOL_FEC_OFF_BIT;
+ offset += ETHTOOL_LINK_MODE_FEC_NONE_BIT;
+
+ return offset;
+}
+
+static int get_fec_supported_advertised(struct mlx5_core_dev *dev,
+ struct ethtool_link_ksettings *link_ksettings)
+{
+ u_long fec_caps = 0;
+ u32 active_fec = 0;
+ u32 offset;
+ u32 bitn;
+ int err;
+
+ err = mlx5e_get_fec_caps(dev, (u8 *)&fec_caps);
+ if (err)
+ return (err == -EOPNOTSUPP) ? 0 : err;
+
+ err = mlx5e_get_fec_mode(dev, &active_fec, NULL);
+ if (err)
+ return err;
+
+ for_each_set_bit(bitn, &fec_caps, ARRAY_SIZE(pplm_fec_2_ethtool)) {
+ u_long ethtool_bitmask = pplm_fec_2_ethtool[bitn];
+
+ offset = ethtool_fec2ethtool_caps(ethtool_bitmask);
+ __set_bit(offset, link_ksettings->link_modes.supported);
+ }
+
+ active_fec = pplm2ethtool_fec(active_fec, sizeof(u32) * BITS_PER_BYTE);
+ offset = ethtool_fec2ethtool_caps(active_fec);
+ __set_bit(offset, link_ksettings->link_modes.advertising);
+
+ return 0;
+}
+
static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings *link_ksettings,
u32 eth_proto_cap,
u8 connector_type)
@@ -742,7 +806,7 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
if (err) {
netdev_err(netdev, "%s: query port ptys failed: %d\n",
__func__, err);
- goto err_query_ptys;
+ goto err_query_regs;
}
eth_proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability);
@@ -778,11 +842,17 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
AUTONEG_ENABLE;
ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
Autoneg);
+
+ err = get_fec_supported_advertised(mdev, link_ksettings);
+ if (err)
+ netdev_dbg(netdev, "%s: FEC caps query failed: %d\n",
+ __func__, err);
+
if (!an_disable_admin)
ethtool_link_ksettings_add_link_mode(link_ksettings,
advertising, Autoneg);
-err_query_ptys:
+err_query_regs:
return err;
}
@@ -1277,6 +1347,58 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return mlx5_set_port_wol(mdev, mlx5_wol_mode);
}
+static int mlx5e_get_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u8 fec_configured = 0;
+ u32 fec_active = 0;
+ int err;
+
+ err = mlx5e_get_fec_mode(mdev, &fec_active, &fec_configured);
+
+ if (err)
+ return err;
+
+ fecparam->active_fec = pplm2ethtool_fec((u_long)fec_active,
+ sizeof(u32) * BITS_PER_BYTE);
+
+ if (!fecparam->active_fec)
+ return -EOPNOTSUPP;
+
+ fecparam->fec = pplm2ethtool_fec((u_long)fec_configured,
+ sizeof(u8) * BITS_PER_BYTE);
+
+ return 0;
+}
+
+static int mlx5e_set_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u8 fec_policy = 0;
+ int mode;
+ int err;
+
+ for (mode = 0; mode < ARRAY_SIZE(pplm_fec_2_ethtool); mode++) {
+ if (!(pplm_fec_2_ethtool[mode] & fecparam->fec))
+ continue;
+ fec_policy |= (1 << mode);
+ break;
+ }
+
+ err = mlx5e_set_fec_mode(mdev, fec_policy);
+
+ if (err)
+ return err;
+
+ mlx5_toggle_port_link(mdev);
+
+ return 0;
+}
+
static u32 mlx5e_get_msglevel(struct net_device *dev)
{
return ((struct mlx5e_priv *)netdev_priv(dev))->msglevel;
@@ -1699,4 +1821,6 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.self_test = mlx5e_self_test,
.get_msglevel = mlx5e_get_msglevel,
.set_msglevel = mlx5e_set_msglevel,
+ .get_fecparam = mlx5e_get_fecparam,
+ .set_fecparam = mlx5e_set_fecparam,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 35aca9a8e3d6..1243edbedc9e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -272,10 +272,9 @@ static void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
mlx5e_stats_grps[i].update_stats(priv);
}
-void mlx5e_update_stats_work(struct work_struct *work)
+static void mlx5e_update_stats_work(struct work_struct *work)
{
- struct delayed_work *dwork = to_delayed_work(work);
- struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
+ struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
update_stats_work);
mutex_lock(&priv->state_lock);
@@ -283,6 +282,17 @@ void mlx5e_update_stats_work(struct work_struct *work)
mutex_unlock(&priv->state_lock);
}
+void mlx5e_queue_update_stats(struct mlx5e_priv *priv)
+{
+ if (!priv->profile->update_stats)
+ return;
+
+ if (unlikely(test_bit(MLX5E_STATE_DESTROYING, &priv->state)))
+ return;
+
+ queue_work(priv->wq, &priv->update_stats_work);
+}
+
static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
enum mlx5_dev_event event, unsigned long param)
{
@@ -1789,7 +1799,7 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
struct mlx5e_channel_param *cparam)
{
struct mlx5e_priv *priv = c->priv;
- int err, tc, max_nch = priv->profile->max_nch(priv->mdev);
+ int err, tc, max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
for (tc = 0; tc < params->num_tc; tc++) {
int txq_ix = c->ix + tc * max_nch;
@@ -2429,7 +2439,7 @@ int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
int err;
int ix;
- for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
+ for (ix = 0; ix < mlx5e_get_netdev_max_channels(priv->netdev); ix++) {
rqt = &priv->direct_tir[ix].rqt;
err = mlx5e_create_rqt(priv, 1 /*size */, rqt);
if (err)
@@ -2450,7 +2460,7 @@ void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv)
{
int i;
- for (i = 0; i < priv->profile->max_nch(priv->mdev); i++)
+ for (i = 0; i < mlx5e_get_netdev_max_channels(priv->netdev); i++)
mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
}
@@ -2544,7 +2554,7 @@ static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
}
- for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
+ for (ix = 0; ix < mlx5e_get_netdev_max_channels(priv->netdev); ix++) {
struct mlx5e_redirect_rqt_param direct_rrp = {
.is_rss = false,
{
@@ -2745,7 +2755,7 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
goto free_in;
}
- for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
+ for (ix = 0; ix < mlx5e_get_netdev_max_channels(priv->netdev); ix++) {
err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
in, inlen);
if (err)
@@ -2845,7 +2855,7 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)
static void mlx5e_build_tc2txq_maps(struct mlx5e_priv *priv)
{
- int max_nch = priv->profile->max_nch(priv->mdev);
+ int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
int i, tc;
for (i = 0; i < max_nch; i++)
@@ -2957,9 +2967,7 @@ int mlx5e_open_locked(struct net_device *netdev)
if (priv->profile->update_carrier)
priv->profile->update_carrier(priv);
- if (priv->profile->update_stats)
- queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
-
+ mlx5e_queue_update_stats(priv);
return 0;
err_clear_state_opened_flag:
@@ -3239,7 +3247,7 @@ err_destroy_inner_tirs:
int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
{
- int nch = priv->profile->max_nch(priv->mdev);
+ int nch = mlx5e_get_netdev_max_channels(priv->netdev);
struct mlx5e_tir *tir;
void *tirc;
int inlen;
@@ -3292,7 +3300,7 @@ void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
{
- int nch = priv->profile->max_nch(priv->mdev);
+ int nch = mlx5e_get_netdev_max_channels(priv->netdev);
int i;
for (i = 0; i < nch; i++)
@@ -3384,9 +3392,6 @@ static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
{
struct mlx5e_priv *priv = cb_priv;
- if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
- return -EOPNOTSUPP;
-
switch (type) {
case TC_SETUP_CLSFLOWER:
return mlx5e_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS);
@@ -3441,7 +3446,7 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
/* update HW stats in background for next time */
- queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
+ mlx5e_queue_update_stats(priv);
if (mlx5e_is_uplink_rep(priv)) {
stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
@@ -4318,7 +4323,7 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
}
-static const struct net_device_ops mlx5e_netdev_ops = {
+const struct net_device_ops mlx5e_netdev_ops = {
.ndo_open = mlx5e_open,
.ndo_stop = mlx5e_close,
.ndo_start_xmit = mlx5e_xmit,
@@ -4560,33 +4565,6 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
mlx5e_build_rss_params(params);
}
-static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile,
- void *ppriv)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
-
- priv->mdev = mdev;
- priv->netdev = netdev;
- priv->profile = profile;
- priv->ppriv = ppriv;
- priv->msglevel = MLX5E_MSG_LEVEL;
- priv->max_opened_tc = 1;
-
- mlx5e_build_nic_params(mdev, &priv->channels.params,
- profile->max_nch(mdev), netdev->mtu);
-
- mutex_init(&priv->state_lock);
-
- INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
- INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
- INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
- INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
-
- mlx5e_timestamp_init(priv);
-}
-
static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -4749,15 +4727,23 @@ void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
mlx5_core_dealloc_q_counter(priv->mdev, priv->drop_rq_q_counter);
}
-static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile,
- void *ppriv)
+static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
- mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv);
+ err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
+ if (err)
+ return err;
+
+ mlx5e_build_nic_params(mdev, &priv->channels.params,
+ mlx5e_get_netdev_max_channels(netdev), netdev->mtu);
+
+ mlx5e_timestamp_init(priv);
+
err = mlx5e_ipsec_init(priv);
if (err)
mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
@@ -4766,12 +4752,15 @@ static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
mlx5e_build_nic_netdev(netdev);
mlx5e_build_tc2txq_maps(priv);
+
+ return 0;
}
static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
mlx5e_tls_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
+ mlx5e_netdev_cleanup(priv->netdev, priv);
}
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
@@ -4934,7 +4923,6 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.enable = mlx5e_nic_enable,
.disable = mlx5e_nic_disable,
.update_stats = mlx5e_update_ndo_stats,
- .max_nch = mlx5e_get_max_num_channels,
.update_carrier = mlx5e_update_carrier,
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe,
.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
@@ -4943,13 +4931,53 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
/* mlx5e generic netdev management API (move to en_common.c) */
+/* mlx5e_netdev_init/cleanup must be called from profile->init/cleanup callbacks */
+int mlx5e_netdev_init(struct net_device *netdev,
+ struct mlx5e_priv *priv,
+ struct mlx5_core_dev *mdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
+{
+ /* priv init */
+ priv->mdev = mdev;
+ priv->netdev = netdev;
+ priv->profile = profile;
+ priv->ppriv = ppriv;
+ priv->msglevel = MLX5E_MSG_LEVEL;
+ priv->max_opened_tc = 1;
+
+ mutex_init(&priv->state_lock);
+ INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
+ INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
+ INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
+ INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
+
+ priv->wq = create_singlethread_workqueue("mlx5e");
+ if (!priv->wq)
+ return -ENOMEM;
+
+ /* netdev init */
+ netif_carrier_off(netdev);
+
+#ifdef CONFIG_MLX5_EN_ARFS
+ netdev->rx_cpu_rmap = mdev->rmap;
+#endif
+
+ return 0;
+}
+
+void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv)
+{
+ destroy_workqueue(priv->wq);
+}
+
struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
const struct mlx5e_profile *profile,
+ int nch,
void *ppriv)
{
- int nch = profile->max_nch(mdev);
struct net_device *netdev;
- struct mlx5e_priv *priv;
+ int err;
netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
nch * profile->max_tc,
@@ -4959,25 +4987,15 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
return NULL;
}
-#ifdef CONFIG_MLX5_EN_ARFS
- netdev->rx_cpu_rmap = mdev->rmap;
-#endif
-
- profile->init(mdev, netdev, profile, ppriv);
-
- netif_carrier_off(netdev);
-
- priv = netdev_priv(netdev);
-
- priv->wq = create_singlethread_workqueue("mlx5e");
- if (!priv->wq)
- goto err_cleanup_nic;
+ err = profile->init(mdev, netdev, profile, ppriv);
+ if (err) {
+ mlx5_core_err(mdev, "failed to init mlx5e profile %d\n", err);
+ goto err_free_netdev;
+ }
return netdev;
-err_cleanup_nic:
- if (profile->cleanup)
- profile->cleanup(priv);
+err_free_netdev:
free_netdev(netdev);
return NULL;
@@ -5023,7 +5041,7 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv)
profile->cleanup_rx(priv);
profile->cleanup_tx(priv);
- cancel_delayed_work_sync(&priv->update_stats_work);
+ cancel_work_sync(&priv->update_stats_work);
}
void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
@@ -5031,7 +5049,6 @@ void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
const struct mlx5e_profile *profile = priv->profile;
struct net_device *netdev = priv->netdev;
- destroy_workqueue(priv->wq);
if (profile->cleanup)
profile->cleanup(priv);
free_netdev(netdev);
@@ -5080,6 +5097,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
void *rpriv = NULL;
void *priv;
int err;
+ int nch;
err = mlx5e_check_required_hca_cap(mdev);
if (err)
@@ -5095,7 +5113,8 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
}
#endif
- netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, rpriv);
+ nch = mlx5e_get_max_num_channels(mdev);
+ netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, nch, rpriv);
if (!netdev) {
mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
goto err_free_rpriv;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 9264c3332aa6..c3c657548824 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -853,9 +853,6 @@ static int mlx5e_rep_setup_tc_cb_egdev(enum tc_setup_type type, void *type_data,
{
struct mlx5e_priv *priv = cb_priv;
- if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
- return -EOPNOTSUPP;
-
switch (type) {
case TC_SETUP_CLSFLOWER:
return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_EGRESS);
@@ -869,9 +866,6 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
{
struct mlx5e_priv *priv = cb_priv;
- if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
- return -EOPNOTSUPP;
-
switch (type) {
case TC_SETUP_CLSFLOWER:
return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS);
@@ -992,8 +986,7 @@ mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
struct mlx5e_priv *priv = netdev_priv(dev);
/* update HW stats in background for next time */
- queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
-
+ mlx5e_queue_update_stats(priv);
memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
}
@@ -1078,28 +1071,33 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
}
-static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile,
- void *ppriv)
+static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err;
- priv->mdev = mdev;
- priv->netdev = netdev;
- priv->profile = profile;
- priv->ppriv = ppriv;
-
- mutex_init(&priv->state_lock);
+ err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
+ if (err)
+ return err;
- INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
- priv->channels.params.num_channels = 1;
+ priv->channels.params.num_channels =
+ mlx5e_get_netdev_max_channels(netdev);
mlx5e_build_rep_params(mdev, &priv->channels.params, netdev->mtu);
mlx5e_build_rep_netdev(netdev);
mlx5e_timestamp_init(priv);
+
+ return 0;
+}
+
+static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
+{
+ mlx5e_netdev_cleanup(priv->netdev, priv);
}
static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
@@ -1224,12 +1222,12 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
static const struct mlx5e_profile mlx5e_rep_profile = {
.init = mlx5e_init_rep,
+ .cleanup = mlx5e_cleanup_rep,
.init_rx = mlx5e_init_rep_rx,
.cleanup_rx = mlx5e_cleanup_rep_rx,
.init_tx = mlx5e_init_rep_tx,
.cleanup_tx = mlx5e_cleanup_nic_tx,
.update_stats = mlx5e_rep_update_hw_counters,
- .max_nch = mlx5e_get_max_num_channels,
.update_carrier = NULL,
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
@@ -1292,13 +1290,14 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
struct mlx5e_rep_priv *rpriv;
struct net_device *netdev;
struct mlx5e_priv *upriv;
- int err;
+ int nch, err;
rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
if (!rpriv)
return -ENOMEM;
- netdev = mlx5e_create_netdev(dev, &mlx5e_rep_profile, rpriv);
+ nch = mlx5e_get_max_num_channels(dev);
+ netdev = mlx5e_create_netdev(dev, &mlx5e_rep_profile, nch, rpriv);
if (!netdev) {
pr_warn("Failed to create representor netdev for vport %d\n",
rep->vport);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index f19067c94272..79638dcbae78 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -34,7 +34,6 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
-#include <net/busy_poll.h>
#include <net/ip6_checksum.h>
#include <net/page_pool.h>
#include <net/inet_ecn.h>
@@ -433,10 +432,9 @@ static inline u16 mlx5e_icosq_wrap_cnt(struct mlx5e_icosq *sq)
static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq,
struct mlx5_wq_cyc *wq,
- u16 pi, u16 frag_pi)
+ u16 pi, u16 nnops)
{
struct mlx5e_sq_wqe_info *edge_wi, *wi = &sq->db.ico_wqe[pi];
- u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi;
edge_wi = wi + nnops;
@@ -455,15 +453,14 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5e_umr_wqe *umr_wqe;
u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1);
- u16 pi, frag_pi;
+ u16 pi, contig_wqebbs_room;
int err;
int i;
pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
-
- if (unlikely(frag_pi + MLX5E_UMR_WQEBBS > mlx5_wq_cyc_get_frag_size(wq))) {
- mlx5e_fill_icosq_frag_edge(sq, wq, pi, frag_pi);
+ contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+ if (unlikely(contig_wqebbs_room < MLX5E_UMR_WQEBBS)) {
+ mlx5e_fill_icosq_frag_edge(sq, wq, pi, contig_wqebbs_room);
pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
}
@@ -716,43 +713,15 @@ static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
rq->stats->ecn_mark += !!rc;
}
-static __be32 mlx5e_get_fcs(struct sk_buff *skb)
+static u32 mlx5e_get_fcs(const struct sk_buff *skb)
{
- int last_frag_sz, bytes_in_prev, nr_frags;
- u8 *fcs_p1, *fcs_p2;
- skb_frag_t *last_frag;
- __be32 fcs_bytes;
-
- if (!skb_is_nonlinear(skb))
- return *(__be32 *)(skb->data + skb->len - ETH_FCS_LEN);
-
- nr_frags = skb_shinfo(skb)->nr_frags;
- last_frag = &skb_shinfo(skb)->frags[nr_frags - 1];
- last_frag_sz = skb_frag_size(last_frag);
-
- /* If all FCS data is in last frag */
- if (last_frag_sz >= ETH_FCS_LEN)
- return *(__be32 *)(skb_frag_address(last_frag) +
- last_frag_sz - ETH_FCS_LEN);
-
- fcs_p2 = (u8 *)skb_frag_address(last_frag);
- bytes_in_prev = ETH_FCS_LEN - last_frag_sz;
-
- /* Find where the other part of the FCS is - Linear or another frag */
- if (nr_frags == 1) {
- fcs_p1 = skb_tail_pointer(skb);
- } else {
- skb_frag_t *prev_frag = &skb_shinfo(skb)->frags[nr_frags - 2];
-
- fcs_p1 = skb_frag_address(prev_frag) +
- skb_frag_size(prev_frag);
- }
- fcs_p1 -= bytes_in_prev;
+ const void *fcs_bytes;
+ u32 _fcs_bytes;
- memcpy(&fcs_bytes, fcs_p1, bytes_in_prev);
- memcpy(((u8 *)&fcs_bytes) + bytes_in_prev, fcs_p2, last_frag_sz);
+ fcs_bytes = skb_header_pointer(skb, skb->len - ETH_FCS_LEN,
+ ETH_FCS_LEN, &_fcs_bytes);
- return fcs_bytes;
+ return __get_unaligned_cpu32(fcs_bytes);
}
static u8 get_ip_proto(struct sk_buff *skb, __be16 proto)
@@ -800,8 +769,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
network_depth - ETH_HLEN,
skb->csum);
if (unlikely(netdev->features & NETIF_F_RXFCS))
- skb->csum = csum_add(skb->csum,
- (__force __wsum)mlx5e_get_fcs(skb));
+ skb->csum = csum_block_add(skb->csum,
+ (__force __wsum)mlx5e_get_fcs(skb),
+ skb->len - ETH_FCS_LEN);
stats->csum_complete++;
return;
}
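
The rewritten mlx5e_get_fcs() relies on skb_header_pointer(), which copies the trailing four FCS bytes into a local buffer when they straddle fragments, and the caller now folds them with csum_block_add() so the byte offset of the FCS within the packet is honoured. A standalone sketch of the same pattern (the NULL check is an addition of this sketch, not in the patch):

static u32 example_get_fcs(const struct sk_buff *skb)
{
        u32 _fcs;
        const void *p;

        /* works for linear and fragmented skbs alike */
        p = skb_header_pointer(skb, skb->len - ETH_FCS_LEN,
                               ETH_FCS_LEN, &_fcs);
        return p ? get_unaligned((const u32 *)p) : 0;
}

static void example_fold_fcs(struct sk_buff *skb)
{
        /* csum_block_add() rotates the value when the block starts at an
         * odd offset; plain csum_add() would fold it as if at offset 0.
         */
        skb->csum = csum_block_add(skb->csum,
                                   (__force __wsum)example_get_fcs(skb),
                                   skb->len - ETH_FCS_LEN);
}
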
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 90c7607b1f44..1e55b9c27ffc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -93,6 +93,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
@@ -132,7 +133,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
memset(s, 0, sizeof(*s));
- for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) {
+ for (i = 0; i < mlx5e_get_netdev_max_channels(priv->netdev); i++) {
struct mlx5e_channel_stats *channel_stats =
&priv->channel_stats[i];
struct mlx5e_xdpsq_stats *xdpsq_red_stats = &channel_stats->xdpsq;
@@ -170,6 +171,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->rx_cache_busy += rq_stats->cache_busy;
s->rx_cache_waive += rq_stats->cache_waive;
s->rx_congst_umr += rq_stats->congst_umr;
+ s->rx_arfs_err += rq_stats->arfs_err;
s->ch_events += ch_stats->events;
s->ch_poll += ch_stats->poll;
s->ch_arm += ch_stats->arm;
@@ -612,46 +614,82 @@ static const struct counter_desc pport_phy_statistical_stats_desc[] = {
{ "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
};
-#define NUM_PPORT_PHY_STATISTICAL_COUNTERS ARRAY_SIZE(pport_phy_statistical_stats_desc)
+static const struct counter_desc
+pport_phy_statistical_err_lanes_stats_desc[] = {
+ { "rx_err_lane_0_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane0) },
+ { "rx_err_lane_1_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane1) },
+ { "rx_err_lane_2_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane2) },
+ { "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
+};
+
+#define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
+ ARRAY_SIZE(pport_phy_statistical_stats_desc)
+#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
+ ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)
static int mlx5e_grp_phy_get_num_stats(struct mlx5e_priv *priv)
{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int num_stats;
+
/* "1" for link_down_events special counter */
- return MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group) ?
- NUM_PPORT_PHY_STATISTICAL_COUNTERS + 1 : 1;
+ num_stats = 1;
+
+ num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ?
+ NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0;
+
+ num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ?
+ NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0;
+
+ return num_stats;
}
static int mlx5e_grp_phy_fill_strings(struct mlx5e_priv *priv, u8 *data,
int idx)
{
+ struct mlx5_core_dev *mdev = priv->mdev;
int i;
strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy");
- if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
+ if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
return idx;
for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
pport_phy_statistical_stats_desc[i].format);
+
+ if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
+ for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pport_phy_statistical_err_lanes_stats_desc[i].format);
+
return idx;
}
static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
{
+ struct mlx5_core_dev *mdev = priv->mdev;
int i;
/* link_down_events_phy has special handling since it is not stored in __be64 format */
data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
counter_set.phys_layer_cntrs.link_down_events);
- if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
+ if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
return idx;
for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
data[idx++] =
MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
pport_phy_statistical_stats_desc, i);
+
+ if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
+ for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
+ data[idx++] =
+ MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
+ pport_phy_statistical_err_lanes_stats_desc,
+ i);
return idx;
}
@@ -1161,6 +1199,7 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
};
static const struct counter_desc sq_stats_desc[] = {
@@ -1214,7 +1253,7 @@ static const struct counter_desc ch_stats_desc[] = {
static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
{
- int max_nch = priv->profile->max_nch(priv->mdev);
+ int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
return (NUM_RQ_STATS * max_nch) +
(NUM_CH_STATS * max_nch) +
@@ -1226,7 +1265,7 @@ static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
int idx)
{
- int max_nch = priv->profile->max_nch(priv->mdev);
+ int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
int i, j, tc;
for (i = 0; i < max_nch; i++)
@@ -1261,7 +1300,7 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
int idx)
{
- int max_nch = priv->profile->max_nch(priv->mdev);
+ int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
int i, j, tc;
for (i = 0; i < max_nch; i++)
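
The per-lane PHY error counters added above are gated on the per_lane_error_counters capability in all three stats callbacks. That symmetry is what keeps ethtool -S strings and values aligned; a compact sketch of the pattern, with my_lane_desc[] as a hypothetical descriptor table:

static const struct counter_desc my_lane_desc[] = {
        { "rx_err_lane_0_phy", 0 }, /* real offsets elided in this sketch */
};

static bool my_lane_stats_supported(struct mlx5e_priv *priv)
{
        return MLX5_CAP_PCAM_FEATURE(priv->mdev, per_lane_error_counters);
}

static int my_grp_get_num_stats(struct mlx5e_priv *priv)
{
        /* fill_strings() and fill_stats() must apply the same test */
        return my_lane_stats_supported(priv) ? ARRAY_SIZE(my_lane_desc) : 0;
}
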
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index a5fb3dc27f50..77f74ce11280 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -106,6 +106,7 @@ struct mlx5e_sw_stats {
u64 rx_cache_busy;
u64 rx_cache_waive;
u64 rx_congst_umr;
+ u64 rx_arfs_err;
u64 ch_events;
u64 ch_poll;
u64 ch_arm;
@@ -202,6 +203,7 @@ struct mlx5e_rq_stats {
u64 cache_busy;
u64 cache_waive;
u64 congst_umr;
+ u64 arfs_err;
};
struct mlx5e_sq_stats {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 82723a0e509a..608025ca5c04 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -61,6 +61,7 @@ struct mlx5_nic_flow_attr {
u32 hairpin_tirn;
u8 match_level;
struct mlx5_flow_table *hairpin_ft;
+ struct mlx5_fc *counter;
};
#define MLX5E_TC_FLOW_BASE (MLX5E_TC_LAST_EXPORTED_BIT + 1)
@@ -73,6 +74,7 @@ enum {
MLX5E_TC_FLOW_OFFLOADED = BIT(MLX5E_TC_FLOW_BASE + 2),
MLX5E_TC_FLOW_HAIRPIN = BIT(MLX5E_TC_FLOW_BASE + 3),
MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 4),
+ MLX5E_TC_FLOW_SLOW = BIT(MLX5E_TC_FLOW_BASE + 5),
};
#define MLX5E_TC_MAX_SPLITS 1
@@ -81,7 +83,7 @@ struct mlx5e_tc_flow {
struct rhash_head node;
struct mlx5e_priv *priv;
u64 cookie;
- u8 flags;
+ u16 flags;
struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
struct list_head encap; /* flows sharing the same encap ID */
struct list_head mod_hdr; /* flows sharing the same mod hdr ID */
@@ -100,11 +102,6 @@ struct mlx5e_tc_flow_parse_attr {
int mirred_ifindex;
};
-enum {
- MLX5_HEADER_TYPE_VXLAN = 0x0,
- MLX5_HEADER_TYPE_NVGRE = 0x1,
-};
-
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)
@@ -532,7 +529,8 @@ static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
#define UNKNOWN_MATCH_PRIO 8
static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
- struct mlx5_flow_spec *spec, u8 *match_prio)
+ struct mlx5_flow_spec *spec, u8 *match_prio,
+ struct netlink_ext_ack *extack)
{
void *headers_c, *headers_v;
u8 prio_val, prio_mask = 0;
@@ -540,8 +538,8 @@ static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
#ifdef CONFIG_MLX5_CORE_EN_DCB
if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
- netdev_warn(priv->netdev,
- "only PCP trust state supported for hairpin\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "only PCP trust state supported for hairpin");
return -EOPNOTSUPP;
}
#endif
@@ -557,8 +555,8 @@ static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
if (!vlan_present || !prio_mask) {
prio_val = UNKNOWN_MATCH_PRIO;
} else if (prio_mask != 0x7) {
- netdev_warn(priv->netdev,
- "masked priority match not supported for hairpin\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "masked priority match not supported for hairpin");
return -EOPNOTSUPP;
}
@@ -568,7 +566,8 @@ static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
- struct mlx5e_tc_flow_parse_attr *parse_attr)
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct netlink_ext_ack *extack)
{
int peer_ifindex = parse_attr->mirred_ifindex;
struct mlx5_hairpin_params params;
@@ -583,12 +582,13 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
- netdev_warn(priv->netdev, "hairpin is not supported\n");
+ NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
return -EOPNOTSUPP;
}
peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
- err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio);
+ err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
+ extack);
if (err)
return err;
hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
@@ -674,29 +674,28 @@ static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
}
}
-static struct mlx5_flow_handle *
+static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct mlx5e_tc_flow *flow)
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
{
struct mlx5_nic_flow_attr *attr = flow->nic_attr;
struct mlx5_core_dev *dev = priv->mdev;
struct mlx5_flow_destination dest[2] = {};
struct mlx5_flow_act flow_act = {
.action = attr->action,
- .has_flow_tag = true,
.flow_tag = attr->flow_tag,
- .encap_id = 0,
+ .reformat_id = 0,
+ .flags = FLOW_ACT_HAS_TAG | FLOW_ACT_NO_APPEND,
};
struct mlx5_fc *counter = NULL;
- struct mlx5_flow_handle *rule;
bool table_created = false;
int err, dest_ix = 0;
if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) {
- err = mlx5e_hairpin_flow_add(priv, flow, parse_attr);
+ err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
if (err) {
- rule = ERR_PTR(err);
goto err_add_hairpin_flow;
}
if (flow->flags & MLX5E_TC_FLOW_HAIRPIN_RSS) {
@@ -716,22 +715,21 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
counter = mlx5_fc_create(dev, true);
if (IS_ERR(counter)) {
- rule = ERR_CAST(counter);
+ err = PTR_ERR(counter);
goto err_fc_create;
}
dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[dest_ix].counter = counter;
+ dest[dest_ix].counter_id = mlx5_fc_id(counter);
dest_ix++;
+ attr->counter = counter;
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
flow_act.modify_id = attr->mod_hdr_id;
kfree(parse_attr->mod_hdr_actions);
- if (err) {
- rule = ERR_PTR(err);
+ if (err)
goto err_create_mod_hdr_id;
- }
}
if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
@@ -753,9 +751,11 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
MLX5E_TC_TABLE_NUM_GROUPS,
MLX5E_TC_FT_LEVEL, 0);
if (IS_ERR(priv->fs.tc.t)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to create tc offload table\n");
netdev_err(priv->netdev,
"Failed to create tc offload table\n");
- rule = ERR_CAST(priv->fs.tc.t);
+ err = PTR_ERR(priv->fs.tc.t);
goto err_create_ft;
}
@@ -765,13 +765,15 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
if (attr->match_level != MLX5_MATCH_NONE)
parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
- &flow_act, dest, dest_ix);
+ flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
+ &flow_act, dest, dest_ix);
- if (IS_ERR(rule))
+ if (IS_ERR(flow->rule[0])) {
+ err = PTR_ERR(flow->rule[0]);
goto err_add_rule;
+ }
- return rule;
+ return 0;
err_add_rule:
if (table_created) {
@@ -787,7 +789,7 @@ err_fc_create:
if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
mlx5e_hairpin_flow_del(priv, flow);
err_add_hairpin_flow:
- return rule;
+ return err;
}
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
@@ -796,7 +798,7 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
struct mlx5_nic_flow_attr *attr = flow->nic_attr;
struct mlx5_fc *counter = NULL;
- counter = mlx5_flow_rule_counter(flow->rule[0]);
+ counter = attr->counter;
mlx5_del_flow_rules(flow->rule[0]);
mlx5_fc_destroy(priv->mdev, counter);
@@ -819,30 +821,119 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct ip_tunnel_info *tun_info,
struct net_device *mirred_dev,
struct net_device **encap_dev,
- struct mlx5e_tc_flow *flow);
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack);
+
+static struct mlx5_flow_handle *
+mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_esw_flow_attr *attr)
+{
+ struct mlx5_flow_handle *rule;
+
+ rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
+ if (IS_ERR(rule))
+ return rule;
+
+ if (attr->mirror_count) {
+ flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
+ if (IS_ERR(flow->rule[1])) {
+ mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
+ return flow->rule[1];
+ }
+ }
+
+ flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
+ return rule;
+}
+
+static void
+mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_esw_flow_attr *attr)
+{
+ flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
+
+ if (attr->mirror_count)
+ mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
+
+ mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
+}
static struct mlx5_flow_handle *
+mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_esw_flow_attr *slow_attr)
+{
+ struct mlx5_flow_handle *rule;
+
+ memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
+ slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ slow_attr->mirror_count = 0;
+ slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
+
+ rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
+ if (!IS_ERR(rule))
+ flow->flags |= MLX5E_TC_FLOW_SLOW;
+
+ return rule;
+}
+
+static void
+mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_esw_flow_attr *slow_attr)
+{
+ memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
+ mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
+ flow->flags &= ~MLX5E_TC_FLOW_SLOW;
+}
+
+static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct mlx5e_tc_flow *flow)
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ u32 max_chain = mlx5_eswitch_get_chain_range(esw);
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+ u16 max_prio = mlx5_eswitch_get_prio_range(esw);
struct net_device *out_dev, *encap_dev = NULL;
- struct mlx5_flow_handle *rule = NULL;
+ struct mlx5_fc *counter = NULL;
struct mlx5e_rep_priv *rpriv;
struct mlx5e_priv *out_priv;
- int err;
+ int err = 0, encap_err = 0;
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
+ /* if prios are not supported, keep the old behaviour of using same prio
+ * for all offloaded rules.
+ */
+ if (!mlx5_eswitch_prios_supported(esw))
+ attr->prio = 1;
+
+ if (attr->chain > max_chain) {
+ NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
+ err = -EOPNOTSUPP;
+ goto err_max_prio_chain;
+ }
+
+ if (attr->prio > max_prio) {
+ NL_SET_ERR_MSG(extack, "Requested priority is out of supported range");
+ err = -EOPNOTSUPP;
+ goto err_max_prio_chain;
+ }
+
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
out_dev = __dev_get_by_index(dev_net(priv->netdev),
attr->parse_attr->mirred_ifindex);
- err = mlx5e_attach_encap(priv, &parse_attr->tun_info,
- out_dev, &encap_dev, flow);
- if (err) {
- rule = ERR_PTR(err);
- if (err != -EAGAIN)
- goto err_attach_encap;
+ encap_err = mlx5e_attach_encap(priv, &parse_attr->tun_info,
+ out_dev, &encap_dev, flow,
+ extack);
+ if (encap_err && encap_err != -EAGAIN) {
+ err = encap_err;
+ goto err_attach_encap;
}
out_priv = netdev_priv(encap_dev);
rpriv = out_priv->ppriv;
@@ -851,49 +942,58 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
}
err = mlx5_eswitch_add_vlan_action(esw, attr);
- if (err) {
- rule = ERR_PTR(err);
+ if (err)
goto err_add_vlan;
- }
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
kfree(parse_attr->mod_hdr_actions);
- if (err) {
- rule = ERR_PTR(err);
+ if (err)
goto err_mod_hdr;
+ }
+
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ counter = mlx5_fc_create(esw->dev, true);
+ if (IS_ERR(counter)) {
+ err = PTR_ERR(counter);
+ goto err_create_counter;
}
+
+ attr->counter = counter;
}
- /* we get here if (1) there's no error (rule being null) or when
+ /* we get here if (1) there's no error or when
* (2) there's an encap action and we're on -EAGAIN (no valid neigh)
*/
- if (rule != ERR_PTR(-EAGAIN)) {
- rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
- if (IS_ERR(rule))
- goto err_add_rule;
-
- if (attr->mirror_count) {
- flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, &parse_attr->spec, attr);
- if (IS_ERR(flow->rule[1]))
- goto err_fwd_rule;
- }
+ if (encap_err == -EAGAIN) {
+ /* continue with goto slow path rule instead */
+ struct mlx5_esw_flow_attr slow_attr;
+
+ flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec, &slow_attr);
+ } else {
+ flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
}
- return rule;
-err_fwd_rule:
- mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
- rule = flow->rule[1];
+ if (IS_ERR(flow->rule[0])) {
+ err = PTR_ERR(flow->rule[0]);
+ goto err_add_rule;
+ }
+
+ return 0;
+
err_add_rule:
+ mlx5_fc_destroy(esw->dev, counter);
+err_create_counter:
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
err_mod_hdr:
mlx5_eswitch_del_vlan_action(esw, attr);
err_add_vlan:
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
mlx5e_detach_encap(priv, flow);
err_attach_encap:
- return rule;
+err_max_prio_chain:
+ return err;
}
static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
@@ -901,36 +1001,43 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+ struct mlx5_esw_flow_attr slow_attr;
if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
- flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
- if (attr->mirror_count)
- mlx5_eswitch_del_offloaded_rule(esw, flow->rule[1], attr);
- mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
+ if (flow->flags & MLX5E_TC_FLOW_SLOW)
+ mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
+ else
+ mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
}
mlx5_eswitch_del_vlan_action(esw, attr);
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
mlx5e_detach_encap(priv, flow);
kvfree(attr->parse_attr);
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
+
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
+ mlx5_fc_destroy(esw->dev, attr->counter);
}
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_esw_flow_attr *esw_attr;
+ struct mlx5_esw_flow_attr slow_attr, *esw_attr;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
struct mlx5e_tc_flow *flow;
int err;
- err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
- e->encap_size, e->encap_header,
- &e->encap_id);
+ err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type,
+ e->encap_size, e->encap_header,
+ MLX5_FLOW_NAMESPACE_FDB,
+ &e->encap_id);
if (err) {
mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n",
err);
@@ -942,26 +1049,20 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
list_for_each_entry(flow, &e->flows, encap) {
esw_attr = flow->esw_attr;
esw_attr->encap_id = e->encap_id;
- flow->rule[0] = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
- if (IS_ERR(flow->rule[0])) {
- err = PTR_ERR(flow->rule[0]);
+ spec = &esw_attr->parse_attr->spec;
+
+ /* update from slow path rule to encap rule */
+ rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, esw_attr);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
err);
continue;
}
- if (esw_attr->mirror_count) {
- flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
- if (IS_ERR(flow->rule[1])) {
- mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], esw_attr);
- err = PTR_ERR(flow->rule[1]);
- mlx5_core_warn(priv->mdev, "Failed to update cached mirror flow, %d\n",
- err);
- continue;
- }
- }
-
- flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
+ mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
+ flow->flags |= MLX5E_TC_FLOW_OFFLOADED; /* was unset when slow path rule removed */
+ flow->rule[0] = rule;
}
}
@@ -969,25 +1070,44 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_esw_flow_attr slow_attr;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
struct mlx5e_tc_flow *flow;
+ int err;
list_for_each_entry(flow, &e->flows, encap) {
- if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
- struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+ spec = &flow->esw_attr->parse_attr->spec;
- flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
- if (attr->mirror_count)
- mlx5_eswitch_del_offloaded_rule(esw, flow->rule[1], attr);
- mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
+ /* update from encap rule to slow path rule */
+ rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec, &slow_attr);
+
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n",
+ err);
+ continue;
}
+
+ mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->esw_attr);
+ flow->flags |= MLX5E_TC_FLOW_OFFLOADED; /* was unset when fast path rule removed */
+ flow->rule[0] = rule;
}
if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
- mlx5_encap_dealloc(priv->mdev, e->encap_id);
+ mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
}
}
+static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
+{
+ if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
+ return flow->esw_attr->counter;
+ else
+ return flow->nic_attr->counter;
+}
+
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
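
mlx5e_tc_encap_flows_add()/del() above now move a flow between the fast encap rule and a rule that jumps to the slow-path chain as the tunnel neighbour becomes valid or goes away. A condensed sketch of that transition using the helpers introduced earlier in this patch (error reporting trimmed):

static void example_neigh_update(struct mlx5_eswitch *esw,
                                 struct mlx5e_tc_flow *flow,
                                 struct mlx5_flow_spec *spec,
                                 bool neigh_valid)
{
        struct mlx5_esw_flow_attr slow_attr;
        struct mlx5_flow_handle *rule;

        if (neigh_valid) {
                /* neighbour resolved: install encap rule, drop slow rule */
                rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec,
                                                  flow->esw_attr);
                if (IS_ERR(rule))
                        return;
                mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
        } else {
                /* neighbour gone: fall back to the slow-path chain */
                rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec,
                                                     &slow_attr);
                if (IS_ERR(rule))
                        return;
                mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->esw_attr);
        }
        /* the unoffload helpers clear OFFLOADED; restore it for the new rule */
        flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
        flow->rule[0] = rule;
}
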
@@ -1013,7 +1133,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
continue;
list_for_each_entry(flow, &e->flows, encap) {
if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
- counter = mlx5_flow_rule_counter(flow->rule[0]);
+ counter = mlx5e_tc_get_counter(flow);
mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
neigh_used = true;
@@ -1053,7 +1173,7 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
if (e->flags & MLX5_ENCAP_ENTRY_VALID)
- mlx5_encap_dealloc(priv->mdev, e->encap_id);
+ mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
hash_del_rcu(&e->encap_hlist);
kfree(e->encap_header);
@@ -1105,6 +1225,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f)
{
+ struct netlink_ext_ack *extack = f->common.extack;
void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
outer_headers);
void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
@@ -1133,6 +1254,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
parse_vxlan_attr(spec, f);
else {
+ NL_SET_ERR_MSG_MOD(extack,
+ "port isn't an offloaded vxlan udp dport");
netdev_warn(priv->netdev,
"%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
return -EOPNOTSUPP;
@@ -1149,6 +1272,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
udp_sport, ntohs(key->src));
} else { /* udp dst port must be given */
vxlan_match_offload_err:
+ NL_SET_ERR_MSG_MOD(extack,
+ "IP tunnel decap offload supported only for vxlan, must set UDP dport");
netdev_warn(priv->netdev,
"IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
return -EOPNOTSUPP;
@@ -1225,6 +1350,16 @@ vxlan_match_offload_err:
MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);
+
+ if (mask->ttl &&
+ !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, ft_field_support.outer_ipv4_ttl)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on TTL is not supported");
+ return -EOPNOTSUPP;
+ }
+
}
/* Enforce DMAC when offloading incoming tunneled flows.
@@ -1247,6 +1382,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f,
u8 *match_level)
{
+ struct netlink_ext_ack *extack = f->common.extack;
void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
outer_headers);
void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
@@ -1277,6 +1413,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
BIT(FLOW_DISSECTOR_KEY_TCP) |
BIT(FLOW_DISSECTOR_KEY_IP) |
BIT(FLOW_DISSECTOR_KEY_ENC_IP))) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
f->dissector->used_keys);
return -EOPNOTSUPP;
@@ -1368,6 +1505,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
*match_level = MLX5_MATCH_L2;
}
+ } else {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
}
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
@@ -1550,8 +1690,11 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
if (mask->ttl &&
!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
- ft_field_support.outer_ipv4_ttl))
+ ft_field_support.outer_ipv4_ttl)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on TTL is not supported");
return -EOPNOTSUPP;
+ }
if (mask->tos || mask->ttl)
*match_level = MLX5_MATCH_L3;
@@ -1593,6 +1736,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
udp_dport, ntohs(key->dst));
break;
default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only UDP and TCP transports are supported for L4 matching");
netdev_err(priv->netdev,
"Only UDP and TCP transport are supported\n");
return -EINVAL;
@@ -1629,6 +1774,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f)
{
+ struct netlink_ext_ack *extack = f->common.extack;
struct mlx5_core_dev *dev = priv->mdev;
struct mlx5_eswitch *esw = dev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -1643,6 +1789,8 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
if (rep->vport != FDB_UPLINK_VPORT &&
(esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
esw->offloads.inline_mode < match_level)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Flow is not offloaded due to min inline setting");
netdev_warn(priv->netdev,
"Flow is not offloaded due to min inline setting, required %d actual %d\n",
match_level, esw->offloads.inline_mode);
@@ -1744,7 +1892,8 @@ static struct mlx5_fields fields[] = {
*/
static int offload_pedit_fields(struct pedit_headers *masks,
struct pedit_headers *vals,
- struct mlx5e_tc_flow_parse_attr *parse_attr)
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct netlink_ext_ack *extack)
{
struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
int i, action_size, nactions, max_actions, first, last, next_z;
@@ -1783,11 +1932,15 @@ static int offload_pedit_fields(struct pedit_headers *masks,
continue;
if (s_mask && a_mask) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't set and add to the same HW field");
printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
return -EOPNOTSUPP;
}
if (nactions == max_actions) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "too many pedit actions, can't offload");
printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
return -EOPNOTSUPP;
}
@@ -1820,6 +1973,8 @@ static int offload_pedit_fields(struct pedit_headers *masks,
next_z = find_next_zero_bit(&mask, field_bsize, first);
last = find_last_bit(&mask, field_bsize);
if (first < next_z && next_z < last) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "rewrite of few sub-fields isn't supported");
printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
mask);
return -EOPNOTSUPP;
@@ -1878,7 +2033,8 @@ static const struct pedit_headers zero_masks = {};
static int parse_tc_pedit_action(struct mlx5e_priv *priv,
const struct tc_action *a, int namespace,
- struct mlx5e_tc_flow_parse_attr *parse_attr)
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct netlink_ext_ack *extack)
{
struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks;
int nkeys, i, err = -EOPNOTSUPP;
@@ -1896,12 +2052,13 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
err = -EOPNOTSUPP; /* can't be all optimistic */
if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
- netdev_warn(priv->netdev, "legacy pedit isn't offloaded\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "legacy pedit isn't offloaded");
goto out_err;
}
if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
- netdev_warn(priv->netdev, "pedit cmd %d isn't offloaded\n", cmd);
+ NL_SET_ERR_MSG_MOD(extack, "pedit cmd isn't offloaded");
goto out_err;
}
@@ -1918,13 +2075,15 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
if (err)
goto out_err;
- err = offload_pedit_fields(masks, vals, parse_attr);
+ err = offload_pedit_fields(masks, vals, parse_attr, extack);
if (err < 0)
goto out_dealloc_parsed_actions;
for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
cmd_masks = &masks[cmd];
if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "attempt to offload an unsupported field");
netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
16, 1, cmd_masks, sizeof(zero_masks), true);
@@ -1941,19 +2100,26 @@ out_err:
return err;
}
-static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 update_flags)
+static bool csum_offload_supported(struct mlx5e_priv *priv,
+ u32 action,
+ u32 update_flags,
+ struct netlink_ext_ack *extack)
{
u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
TCA_CSUM_UPDATE_FLAG_UDP;
/* The HW recalcs checksums only if re-writing headers */
if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "TC csum action is only offloaded with pedit");
netdev_warn(priv->netdev,
"TC csum action is only offloaded with pedit\n");
return false;
}
if (update_flags & ~prot_flags) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't offload TC csum action for some header/s");
netdev_warn(priv->netdev,
"can't offload TC csum action for some header/s - flags %#x\n",
update_flags);
@@ -1964,7 +2130,8 @@ static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 upda
}
static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
- struct tcf_exts *exts)
+ struct tcf_exts *exts,
+ struct netlink_ext_ack *extack)
{
const struct tc_action *a;
bool modify_ip_header;
@@ -2002,6 +2169,8 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
if (modify_ip_header && ip_proto != IPPROTO_TCP &&
ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't offload re-write of non TCP/UDP");
pr_info("can't offload re-write of ip proto %d\n", ip_proto);
return false;
}
@@ -2013,7 +2182,8 @@ out_ok:
static bool actions_match_supported(struct mlx5e_priv *priv,
struct tcf_exts *exts,
struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct mlx5e_tc_flow *flow)
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
{
u32 actions;
@@ -2027,7 +2197,8 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
return false;
if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
- return modify_header_match_supported(&parse_attr->spec, exts);
+ return modify_header_match_supported(&parse_attr->spec, exts,
+ extack);
return true;
}
@@ -2048,7 +2219,8 @@ static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct mlx5e_tc_flow *flow)
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
{
struct mlx5_nic_flow_attr *attr = flow->nic_attr;
const struct tc_action *a;
@@ -2072,7 +2244,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (is_tcf_pedit(a)) {
err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL,
- parse_attr);
+ parse_attr, extack);
if (err)
return err;
@@ -2083,7 +2255,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (is_tcf_csum(a)) {
if (csum_offload_supported(priv, action,
- tcf_csum_update_flags(a)))
+ tcf_csum_update_flags(a),
+ extack))
continue;
return -EOPNOTSUPP;
@@ -2099,6 +2272,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
} else {
+ NL_SET_ERR_MSG_MOD(extack,
+ "device is not on same HW, can't offload");
netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
peer_dev->name);
return -EINVAL;
@@ -2110,8 +2285,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
u32 mark = tcf_skbedit_mark(a);
if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
- netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
- mark);
+ NL_SET_ERR_MSG_MOD(extack,
+ "Bad flow mark - only 16 bit is supported");
return -EINVAL;
}
@@ -2124,7 +2299,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
}
attr->action = action;
- if (!actions_match_supported(priv, exts, parse_attr, flow))
+ if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
return -EOPNOTSUPP;
return 0;
@@ -2328,7 +2503,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
return -ENOMEM;
switch (e->tunnel_type) {
- case MLX5_HEADER_TYPE_VXLAN:
+ case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
fl4.flowi4_proto = IPPROTO_UDP;
fl4.fl4_dport = tun_key->tp_dst;
break;
@@ -2372,7 +2547,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
read_unlock_bh(&n->lock);
switch (e->tunnel_type) {
- case MLX5_HEADER_TYPE_VXLAN:
+ case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
gen_vxlan_header_ipv4(out_dev, encap_header,
ipv4_encap_size, e->h_dest, tos, ttl,
fl4.daddr,
@@ -2392,8 +2567,10 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
goto out;
}
- err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
- ipv4_encap_size, encap_header, &e->encap_id);
+ err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type,
+ ipv4_encap_size, encap_header,
+ MLX5_FLOW_NAMESPACE_FDB,
+ &e->encap_id);
if (err)
goto destroy_neigh_entry;
@@ -2437,7 +2614,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
return -ENOMEM;
switch (e->tunnel_type) {
- case MLX5_HEADER_TYPE_VXLAN:
+ case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
fl6.flowi6_proto = IPPROTO_UDP;
fl6.fl6_dport = tun_key->tp_dst;
break;
@@ -2481,7 +2658,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
read_unlock_bh(&n->lock);
switch (e->tunnel_type) {
- case MLX5_HEADER_TYPE_VXLAN:
+ case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
gen_vxlan_header_ipv6(out_dev, encap_header,
ipv6_encap_size, e->h_dest, tos, ttl,
&fl6.daddr,
@@ -2502,8 +2679,10 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
goto out;
}
- err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
- ipv6_encap_size, encap_header, &e->encap_id);
+ err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type,
+ ipv6_encap_size, encap_header,
+ MLX5_FLOW_NAMESPACE_FDB,
+ &e->encap_id);
if (err)
goto destroy_neigh_entry;
@@ -2526,7 +2705,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct ip_tunnel_info *tun_info,
struct net_device *mirred_dev,
struct net_device **encap_dev,
- struct mlx5e_tc_flow *flow)
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
unsigned short family = ip_tunnel_info_af(tun_info);
@@ -2544,6 +2724,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
/* setting udp src port isn't supported */
if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
vxlan_encap_offload_err:
+ NL_SET_ERR_MSG_MOD(extack,
+ "must set udp dst port and not set udp src port");
netdev_warn(priv->netdev,
"must set udp dst port and not set udp src port\n");
return -EOPNOTSUPP;
@@ -2551,8 +2733,10 @@ vxlan_encap_offload_err:
if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->tp_dst)) &&
MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
- tunnel_type = MLX5_HEADER_TYPE_VXLAN;
+ tunnel_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN;
} else {
+ NL_SET_ERR_MSG_MOD(extack,
+ "port isn't an offloaded vxlan udp dport");
netdev_warn(priv->netdev,
"%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
return -EOPNOTSUPP;
@@ -2657,8 +2841,10 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv,
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct mlx5e_tc_flow *flow)
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct ip_tunnel_info *info = NULL;
@@ -2683,7 +2869,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (is_tcf_pedit(a)) {
err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
- parse_attr);
+ parse_attr, extack);
if (err)
return err;
@@ -2694,7 +2880,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (is_tcf_csum(a)) {
if (csum_offload_supported(priv, action,
- tcf_csum_update_flags(a)))
+ tcf_csum_update_flags(a),
+ extack))
continue;
return -EOPNOTSUPP;
@@ -2707,6 +2894,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
out_dev = tcf_mirred_dev(a);
if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't support more output ports, can't offload forwarding");
pr_err("can't support more than %d output ports, can't offload forwarding\n",
attr->out_count);
return -EOPNOTSUPP;
@@ -2725,11 +2914,13 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
parse_attr->mirred_ifindex = out_dev->ifindex;
parse_attr->tun_info = *info;
attr->parse_attr = parse_attr;
- action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
+ action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
/* attr->out_rep is resolved when we handle encap */
} else {
+ NL_SET_ERR_MSG_MOD(extack,
+ "devices are not on same switch HW, can't offload forwarding");
pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
priv->netdev->name, out_dev->name);
return -EINVAL;
@@ -2762,14 +2953,35 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
continue;
}
+ if (is_tcf_gact_goto_chain(a)) {
+ u32 dest_chain = tcf_gact_goto_chain_index(a);
+ u32 max_chain = mlx5_eswitch_get_chain_range(esw);
+
+ if (dest_chain <= attr->chain) {
+ NL_SET_ERR_MSG(extack, "Goto earlier chain isn't supported");
+ return -EOPNOTSUPP;
+ }
+ if (dest_chain > max_chain) {
+ NL_SET_ERR_MSG(extack, "Requested destination chain is out of supported range");
+ return -EOPNOTSUPP;
+ }
+ action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->dest_chain = dest_chain;
+
+ continue;
+ }
+
return -EINVAL;
}
attr->action = action;
- if (!actions_match_supported(priv, exts, parse_attr, flow))
+ if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
return -EOPNOTSUPP;
if (attr->out_count > 1 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "current firmware doesn't support split rule for port mirroring");
netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
return -EOPNOTSUPP;
}
@@ -2777,9 +2989,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
return 0;
}
-static void get_flags(int flags, u8 *flow_flags)
+static void get_flags(int flags, u16 *flow_flags)
{
- u8 __flow_flags = 0;
+ u16 __flow_flags = 0;
if (flags & MLX5E_TC_INGRESS)
__flow_flags |= MLX5E_TC_FLOW_INGRESS;
@@ -2808,31 +3020,15 @@ static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv)
return &priv->fs.tc.ht;
}
-int mlx5e_configure_flower(struct mlx5e_priv *priv,
- struct tc_cls_flower_offload *f, int flags)
+static int
+mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
+ struct tc_cls_flower_offload *f, u16 flow_flags,
+ struct mlx5e_tc_flow_parse_attr **__parse_attr,
+ struct mlx5e_tc_flow **__flow)
{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_tc_flow_parse_attr *parse_attr;
- struct rhashtable *tc_ht = get_tc_ht(priv);
struct mlx5e_tc_flow *flow;
- int attr_size, err = 0;
- u8 flow_flags = 0;
-
- get_flags(flags, &flow_flags);
-
- flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
- if (flow) {
- netdev_warn_once(priv->netdev, "flow cookie %lx already exists, ignoring\n", f->cookie);
- return 0;
- }
-
- if (esw && esw->mode == SRIOV_OFFLOADS) {
- flow_flags |= MLX5E_TC_FLOW_ESWITCH;
- attr_size = sizeof(struct mlx5_esw_flow_attr);
- } else {
- flow_flags |= MLX5E_TC_FLOW_NIC;
- attr_size = sizeof(struct mlx5_nic_flow_attr);
- }
+ int err;
flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
@@ -2846,45 +3042,161 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
flow->priv = priv;
err = parse_cls_flower(priv, flow, &parse_attr->spec, f);
- if (err < 0)
+ if (err)
goto err_free;
- if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
- err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
- if (err < 0)
- goto err_free;
- flow->rule[0] = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
- } else {
- err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
- if (err < 0)
- goto err_free;
- flow->rule[0] = mlx5e_tc_add_nic_flow(priv, parse_attr, flow);
- }
+ *__flow = flow;
+ *__parse_attr = parse_attr;
- if (IS_ERR(flow->rule[0])) {
- err = PTR_ERR(flow->rule[0]);
- if (err != -EAGAIN)
- goto err_free;
- }
+ return 0;
- if (err != -EAGAIN)
- flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
+err_free:
+ kfree(flow);
+ kvfree(parse_attr);
+ return err;
+}
+
+static int
+mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
+ struct tc_cls_flower_offload *f,
+ u16 flow_flags,
+ struct mlx5e_tc_flow **__flow)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5e_tc_flow *flow;
+ int attr_size, err;
+
+ flow_flags |= MLX5E_TC_FLOW_ESWITCH;
+ attr_size = sizeof(struct mlx5_esw_flow_attr);
+ err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
+ &parse_attr, &flow);
+ if (err)
+ goto out;
+
+ flow->esw_attr->chain = f->common.chain_index;
+ flow->esw_attr->prio = TC_H_MAJ(f->common.prio) >> 16;
+ err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow, extack);
+ if (err)
+ goto err_free;
+
+ err = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow, extack);
+ if (err)
+ goto err_free;
- if (!(flow->flags & MLX5E_TC_FLOW_ESWITCH) ||
- !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP))
+ if (!(flow->esw_attr->action &
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT))
kvfree(parse_attr);
- err = rhashtable_insert_fast(tc_ht, &flow->node, tc_ht_params);
- if (err) {
- mlx5e_tc_del_flow(priv, flow);
- kfree(flow);
- }
+ *__flow = flow;
+ return 0;
+
+err_free:
+ kfree(flow);
+ kvfree(parse_attr);
+out:
return err;
+}
+
+static int
+mlx5e_add_nic_flow(struct mlx5e_priv *priv,
+ struct tc_cls_flower_offload *f,
+ u16 flow_flags,
+ struct mlx5e_tc_flow **__flow)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5e_tc_flow *flow;
+ int attr_size, err;
+
+ /* multi-chain not supported for NIC rules */
+ if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
+ return -EOPNOTSUPP;
+
+ flow_flags |= MLX5E_TC_FLOW_NIC;
+ attr_size = sizeof(struct mlx5_nic_flow_attr);
+ err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
+ &parse_attr, &flow);
+ if (err)
+ goto out;
+
+ err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow, extack);
+ if (err)
+ goto err_free;
+
+ err = mlx5e_tc_add_nic_flow(priv, parse_attr, flow, extack);
+ if (err)
+ goto err_free;
+
+ flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
+ kvfree(parse_attr);
+ *__flow = flow;
+
+ return 0;
err_free:
+ kfree(flow);
kvfree(parse_attr);
+out:
+ return err;
+}
+
+static int
+mlx5e_tc_add_flow(struct mlx5e_priv *priv,
+ struct tc_cls_flower_offload *f,
+ int flags,
+ struct mlx5e_tc_flow **flow)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ u16 flow_flags;
+ int err;
+
+ get_flags(flags, &flow_flags);
+
+ if (!tc_can_offload_extack(priv->netdev, f->common.extack))
+ return -EOPNOTSUPP;
+
+ if (esw && esw->mode == SRIOV_OFFLOADS)
+ err = mlx5e_add_fdb_flow(priv, f, flow_flags, flow);
+ else
+ err = mlx5e_add_nic_flow(priv, f, flow_flags, flow);
+
+ return err;
+}
+
+int mlx5e_configure_flower(struct mlx5e_priv *priv,
+ struct tc_cls_flower_offload *f, int flags)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct rhashtable *tc_ht = get_tc_ht(priv);
+ struct mlx5e_tc_flow *flow;
+ int err = 0;
+
+ flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
+ if (flow) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "flow cookie already exists, ignoring");
+ netdev_warn_once(priv->netdev,
+ "flow cookie %lx already exists, ignoring\n",
+ f->cookie);
+ goto out;
+ }
+
+ err = mlx5e_tc_add_flow(priv, f, flags, &flow);
+ if (err)
+ goto out;
+
+ err = rhashtable_insert_fast(tc_ht, &flow->node, tc_ht_params);
+ if (err)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ mlx5e_tc_del_flow(priv, flow);
kfree(flow);
+out:
return err;
}
@@ -2935,7 +3247,7 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED))
return 0;
- counter = mlx5_flow_rule_counter(flow->rule[0]);
+ counter = mlx5e_tc_get_counter(flow);
if (!counter)
return 0;
@@ -2946,14 +3258,71 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
return 0;
}
+static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
+ struct mlx5e_priv *peer_priv)
+{
+ struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
+ struct mlx5e_hairpin_entry *hpe;
+ u16 peer_vhca_id;
+ int bkt;
+
+ if (!same_hw_devs(priv, peer_priv))
+ return;
+
+ peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
+
+ hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist) {
+ if (hpe->peer_vhca_id == peer_vhca_id)
+ hpe->hp->pair->peer_gone = true;
+ }
+}
+
+static int mlx5e_tc_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct mlx5e_flow_steering *fs;
+ struct mlx5e_priv *peer_priv;
+ struct mlx5e_tc_table *tc;
+ struct mlx5e_priv *priv;
+
+ if (ndev->netdev_ops != &mlx5e_netdev_ops ||
+ event != NETDEV_UNREGISTER ||
+ ndev->reg_state == NETREG_REGISTERED)
+ return NOTIFY_DONE;
+
+ tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
+ fs = container_of(tc, struct mlx5e_flow_steering, tc);
+ priv = container_of(fs, struct mlx5e_priv, fs);
+ peer_priv = netdev_priv(ndev);
+ if (priv == peer_priv ||
+ !(priv->netdev->features & NETIF_F_HW_TC))
+ return NOTIFY_DONE;
+
+ mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);
+
+ return NOTIFY_DONE;
+}
+
int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
+ int err;
hash_init(tc->mod_hdr_tbl);
hash_init(tc->hairpin_tbl);
- return rhashtable_init(&tc->ht, &tc_ht_params);
+ err = rhashtable_init(&tc->ht, &tc_ht_params);
+ if (err)
+ return err;
+
+ tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
+ if (register_netdevice_notifier(&tc->netdevice_nb)) {
+ tc->netdevice_nb.notifier_call = NULL;
+ mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
+ }
+
+ return err;
}
static void _mlx5e_tc_del_flow(void *ptr, void *arg)
@@ -2969,6 +3338,9 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
+ if (tc->netdevice_nb.notifier_call)
+ unregister_netdevice_notifier(&tc->netdevice_nb);
+
rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);
if (!IS_ERR_OR_NULL(tc->t)) {
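
Throughout en_tc.c the offload-refusal paths now report the reason through the flow's netlink extack in addition to (or instead of) the kernel log, so the tc command can show it to the user. The pattern in isolation (example_check_cap() is illustrative, not part of the patch):

static int example_check_cap(struct mlx5e_priv *priv, bool supported,
                             struct netlink_ext_ack *extack)
{
        if (!supported) {
                /* reaches userspace via the netlink ack */
                NL_SET_ERR_MSG_MOD(extack, "feature is not supported");
                /* optional copy in the kernel log */
                netdev_warn(priv->netdev, "feature is not supported\n");
                return -EOPNOTSUPP;
        }
        return 0;
}
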
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index ae73ea992845..6dacaeba2fbf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -290,10 +290,9 @@ dma_unmap_wqe_err:
static inline void mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq,
struct mlx5_wq_cyc *wq,
- u16 pi, u16 frag_pi)
+ u16 pi, u16 nnops)
{
struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];
- u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi;
edge_wi = wi + nnops;
@@ -348,8 +347,8 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_wqe_info *wi;
struct mlx5e_sq_stats *stats = sq->stats;
+ u16 headlen, ihs, contig_wqebbs_room;
u16 ds_cnt, ds_cnt_inl = 0;
- u16 headlen, ihs, frag_pi;
u8 num_wqebbs, opcode;
u32 num_bytes;
int num_dma;
@@ -386,9 +385,9 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
}
num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
- frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
- if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) {
- mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi);
+ contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+ if (unlikely(contig_wqebbs_room < num_wqebbs)) {
+ mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
}
@@ -636,7 +635,7 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_wqe_info *wi;
struct mlx5e_sq_stats *stats = sq->stats;
- u16 headlen, ihs, pi, frag_pi;
+ u16 headlen, ihs, pi, contig_wqebbs_room;
u16 ds_cnt, ds_cnt_inl = 0;
u8 num_wqebbs, opcode;
u32 num_bytes;
@@ -672,13 +671,14 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
}
num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
- frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
- if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) {
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+ if (unlikely(contig_wqebbs_room < num_wqebbs)) {
+ mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi);
}
- mlx5i_sq_fetch_wqe(sq, &wqe, &pi);
+ mlx5i_sq_fetch_wqe(sq, &wqe, pi);
/* fill wqe */
wi = &sq->db.wqe_info[pi];
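
The three tx hunks above replace the fragment-based wrap handling with a single contiguous-room check. A minimal sketch of the intent, under the assumption that mlx5_wq_cyc_get_contig_wqebbs(wq, pi) simply returns the number of WQEBB slots left before the ring wraps:

/* Sketch only: models the contiguous-room test used in mlx5e_sq_xmit() and
 * mlx5i_sq_xmit(); the ring size and producer index are example values.
 */
static bool example_needs_nop_fill(unsigned int ring_size, unsigned int pi,
				   unsigned int num_wqebbs)
{
	unsigned int contig_wqebbs_room = ring_size - pi;

	/* e.g. ring_size = 64, pi = 62, num_wqebbs = 4: only 2 contiguous
	 * slots remain, so 2 NOP WQEBBs are posted (mlx5e_fill_sq_frag_edge)
	 * and the real WQE is fetched again starting at slot 0.
	 */
	return contig_wqebbs_room < num_wqebbs;
}
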
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 48864f4988a4..c1e1a16a9b07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -273,7 +273,7 @@ static void eq_pf_process(struct mlx5_eq *eq)
case MLX5_PFAULT_SUBTYPE_WQE:
/* WQE based event */
pfault->type =
- be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24;
+ (be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7;
pfault->token =
be32_to_cpu(pf_eqe->wqe.token);
pfault->wqe.wq_num =
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 2b252cde5cc2..d004957328f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -263,7 +263,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
esw_debug(dev, "Create FDB log_max_size(%d)\n",
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
- root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+ root_ns = mlx5_get_fdb_sub_ns(dev, 0);
if (!root_ns) {
esw_warn(dev, "Failed to get FDB flow namespace\n");
return -EOPNOTSUPP;
@@ -1198,7 +1198,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
if (counter) {
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- drop_ctr_dst.counter = counter;
+ drop_ctr_dst.counter_id = mlx5_fc_id(counter);
dst = &drop_ctr_dst;
dest_num++;
}
@@ -1285,7 +1285,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
if (counter) {
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- drop_ctr_dst.counter = counter;
+ drop_ctr_dst.counter_id = mlx5_fc_id(counter);
dst = &drop_ctr_dst;
dest_num++;
}
@@ -1746,7 +1746,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
esw->enabled_vports = 0;
esw->mode = SRIOV_NONE;
esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
- if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) &&
+ if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) &&
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
else
@@ -2000,7 +2000,7 @@ static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
u32 max_guarantee = 0;
int i;
- for (i = 0; i <= esw->total_vports; i++) {
+ for (i = 0; i < esw->total_vports; i++) {
evport = &esw->vports[i];
if (!evport->enabled || evport->info.min_rate < max_guarantee)
continue;
@@ -2020,7 +2020,7 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
int err;
int i;
- for (i = 0; i <= esw->total_vports; i++) {
+ for (i = 0; i < esw->total_vports; i++) {
evport = &esw->vports[i];
if (!evport->enabled)
continue;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 0b05bf2b91f6..aaafc9f17115 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -59,6 +59,10 @@
#define mlx5_esw_has_fwd_fdb(dev) \
MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)
+#define FDB_MAX_CHAIN 3
+#define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1)
+#define FDB_MAX_PRIO 16
+
struct vport_ingress {
struct mlx5_flow_table *acl;
struct mlx5_flow_group *allow_untagged_spoofchk_grp;
@@ -120,6 +124,13 @@ struct mlx5_vport {
u16 enabled_events;
};
+enum offloads_fdb_flags {
+ ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED = BIT(0),
+};
+
+extern const unsigned int ESW_POOLS[4];
+
+#define PRIO_LEVELS 2
struct mlx5_eswitch_fdb {
union {
struct legacy_fdb {
@@ -130,16 +141,24 @@ struct mlx5_eswitch_fdb {
} legacy;
struct offloads_fdb {
- struct mlx5_flow_table *fast_fdb;
- struct mlx5_flow_table *fwd_fdb;
struct mlx5_flow_table *slow_fdb;
struct mlx5_flow_group *send_to_vport_grp;
struct mlx5_flow_group *miss_grp;
struct mlx5_flow_handle *miss_rule_uni;
struct mlx5_flow_handle *miss_rule_multi;
int vlan_push_pop_refcount;
+
+ struct {
+ struct mlx5_flow_table *fdb;
+ u32 num_rules;
+ } fdb_prio[FDB_MAX_CHAIN + 1][FDB_MAX_PRIO + 1][PRIO_LEVELS];
+ /* Protects fdb_prio table */
+ struct mutex fdb_prio_lock;
+
+ int fdb_left[ARRAY_SIZE(ESW_POOLS)];
} offloads;
};
+ u32 flags;
};
struct mlx5_esw_offload {
@@ -181,6 +200,7 @@ struct mlx5_eswitch {
struct mlx5_esw_offload offloads;
int mode;
+ int nvports;
};
void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports);
@@ -228,6 +248,19 @@ void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *rule,
struct mlx5_esw_flow_attr *attr);
+void
+mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_esw_flow_attr *attr);
+
+bool
+mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw);
+
+u16
+mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw);
+
+u32
+mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw);
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport,
@@ -266,15 +299,22 @@ struct mlx5_esw_flow_attr {
u32 encap_id;
u32 mod_hdr_id;
u8 match_level;
+ struct mlx5_fc *counter;
+ u32 chain;
+ u16 prio;
+ u32 dest_chain;
struct mlx5e_tc_flow_parse_attr *parse_attr;
};
-int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode);
+int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
-int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode);
+int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
+ struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode);
-int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap);
+int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap,
+ struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap);
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
@@ -315,6 +355,11 @@ static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe) {}
static inline int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) { return 0; }
static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) {}
+
+#define FDB_MAX_CHAIN 1
+#define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1)
+#define FDB_MAX_PRIO 1
+
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ESWITCH_H__ */
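
The new helpers declared above expose how many tc chains and priorities the FDB can represent (FDB_MAX_CHAIN/FDB_MAX_PRIO when lazy table creation is supported, otherwise 0/1). A hedged sketch of how a tc-offload caller might validate a requested chain and prio before filling mlx5_esw_flow_attr; the surrounding offload plumbing is assumed:

/* Sketch: reject chain/prio values the eswitch cannot represent. */
static int example_check_chain_prio(struct mlx5_eswitch *esw,
				    u32 chain, u16 prio)
{
	if (chain > mlx5_eswitch_get_chain_range(esw))
		return -EOPNOTSUPP;
	if (prio == 0 || prio > mlx5_eswitch_get_prio_range(esw))
		return -EOPNOTSUPP;	/* prios are 1-based in esw_get_prio_table() */
	return 0;
}
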
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 21e957083f65..9eac137790f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -37,33 +37,59 @@
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
+#include "en.h"
+#include "fs_core.h"
enum {
FDB_FAST_PATH = 0,
FDB_SLOW_PATH
};
+#define fdb_prio_table(esw, chain, prio, level) \
+ (esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)]
+
+static struct mlx5_flow_table *
+esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
+static void
+esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
+
+bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw)
+{
+ return (!!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED));
+}
+
+u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw)
+{
+ if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
+ return FDB_MAX_CHAIN;
+
+ return 0;
+}
+
+u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw)
+{
+ if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
+ return FDB_MAX_PRIO;
+
+ return 1;
+}
+
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_esw_flow_attr *attr)
{
struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
- struct mlx5_flow_act flow_act = {0};
- struct mlx5_flow_table *ft = NULL;
- struct mlx5_fc *counter = NULL;
+ struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
+ bool mirror = !!(attr->mirror_count);
struct mlx5_flow_handle *rule;
+ struct mlx5_flow_table *fdb;
int j, i = 0;
void *misc;
if (esw->mode != SRIOV_OFFLOADS)
return ERR_PTR(-EOPNOTSUPP);
- if (attr->mirror_count)
- ft = esw->fdb_table.offloads.fwd_fdb;
- else
- ft = esw->fdb_table.offloads.fast_fdb;
-
flow_act.action = attr->action;
/* if per flow vlan pop/push is emulated, don't set that into the firmware */
if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
@@ -81,23 +107,33 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
}
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
- for (j = attr->mirror_count; j < attr->out_count; j++) {
- dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest[i].vport.num = attr->out_rep[j]->vport;
- dest[i].vport.vhca_id =
- MLX5_CAP_GEN(attr->out_mdev[j], vhca_id);
- dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch);
+ if (attr->dest_chain) {
+ struct mlx5_flow_table *ft;
+
+ ft = esw_get_prio_table(esw, attr->dest_chain, 1, 0);
+ if (IS_ERR(ft)) {
+ rule = ERR_CAST(ft);
+ goto err_create_goto_table;
+ }
+
+ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[i].ft = ft;
i++;
+ } else {
+ for (j = attr->mirror_count; j < attr->out_count; j++) {
+ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest[i].vport.num = attr->out_rep[j]->vport;
+ dest[i].vport.vhca_id =
+ MLX5_CAP_GEN(attr->out_mdev[j], vhca_id);
+ dest[i].vport.vhca_id_valid =
+ !!MLX5_CAP_ESW(esw->dev, merged_eswitch);
+ i++;
+ }
}
}
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
- counter = mlx5_fc_create(esw->dev, true);
- if (IS_ERR(counter)) {
- rule = ERR_CAST(counter);
- goto err_counter_alloc;
- }
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[i].counter = counter;
+ dest[i].counter_id = mlx5_fc_id(attr->counter);
i++;
}
@@ -127,10 +163,16 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
flow_act.modify_id = attr->mod_hdr_id;
- if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
- flow_act.encap_id = attr->encap_id;
+ if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
+ flow_act.reformat_id = attr->encap_id;
- rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, i);
+ fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!mirror);
+ if (IS_ERR(fdb)) {
+ rule = ERR_CAST(fdb);
+ goto err_esw_get;
+ }
+
+ rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
if (IS_ERR(rule))
goto err_add_rule;
else
@@ -139,8 +181,11 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
return rule;
err_add_rule:
- mlx5_fc_destroy(esw->dev, counter);
-err_counter_alloc:
+ esw_put_prio_table(esw, attr->chain, attr->prio, !!mirror);
+err_esw_get:
+ if (attr->dest_chain)
+ esw_put_prio_table(esw, attr->dest_chain, 1, 0);
+err_create_goto_table:
return rule;
}
@@ -150,11 +195,25 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
struct mlx5_esw_flow_attr *attr)
{
struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
- struct mlx5_flow_act flow_act = {0};
+ struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
+ struct mlx5_flow_table *fast_fdb;
+ struct mlx5_flow_table *fwd_fdb;
struct mlx5_flow_handle *rule;
void *misc;
int i;
+ fast_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 0);
+ if (IS_ERR(fast_fdb)) {
+ rule = ERR_CAST(fast_fdb);
+ goto err_get_fast;
+ }
+
+ fwd_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 1);
+ if (IS_ERR(fwd_fdb)) {
+ rule = ERR_CAST(fwd_fdb);
+ goto err_get_fwd;
+ }
+
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
for (i = 0; i < attr->mirror_count; i++) {
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
@@ -164,7 +223,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch);
}
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[i].ft = esw->fdb_table.offloads.fwd_fdb,
+ dest[i].ft = fwd_fdb,
i++;
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
@@ -187,25 +246,57 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
MLX5_MATCH_MISC_PARAMETERS;
- rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fast_fdb, spec, &flow_act, dest, i);
+ rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);
- if (!IS_ERR(rule))
- esw->offloads.num_flows++;
+ if (IS_ERR(rule))
+ goto add_err;
+ esw->offloads.num_flows++;
+
+ return rule;
+add_err:
+ esw_put_prio_table(esw, attr->chain, attr->prio, 1);
+err_get_fwd:
+ esw_put_prio_table(esw, attr->chain, attr->prio, 0);
+err_get_fast:
return rule;
}
+static void
+__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_esw_flow_attr *attr,
+ bool fwd_rule)
+{
+ bool mirror = (attr->mirror_count > 0);
+
+ mlx5_del_flow_rules(rule);
+ esw->offloads.num_flows--;
+
+ if (fwd_rule) {
+ esw_put_prio_table(esw, attr->chain, attr->prio, 1);
+ esw_put_prio_table(esw, attr->chain, attr->prio, 0);
+ } else {
+ esw_put_prio_table(esw, attr->chain, attr->prio, !!mirror);
+ if (attr->dest_chain)
+ esw_put_prio_table(esw, attr->dest_chain, 1, 0);
+ }
+}
+
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *rule,
struct mlx5_esw_flow_attr *attr)
{
- struct mlx5_fc *counter = NULL;
+ __mlx5_eswitch_del_rule(esw, rule, attr, false);
+}
- counter = mlx5_flow_rule_counter(rule);
- mlx5_del_flow_rules(rule);
- mlx5_fc_destroy(esw->dev, counter);
- esw->offloads.num_flows--;
+void
+mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_esw_flow_attr *attr)
+{
+ __mlx5_eswitch_del_rule(esw, rule, attr, true);
}
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
@@ -294,7 +385,8 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
- fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+ fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
+ !attr->dest_chain);
err = esw_add_vlan_action_check(attr, push, pop, fwd);
if (err)
@@ -501,74 +593,170 @@ out:
#define ESW_OFFLOADS_NUM_GROUPS 4
-static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
+/* Firmware currently supports 4 pool sizes (ESW_POOLS) and a virtual
+ * memory region of 16M (ESW_SIZE); this region is duplicated for each
+ * flow table pool. We can allocate up to 16M from each pool, and we
+ * keep track of how much we have used via put/get_sz_to_pool.
+ * Firmware doesn't report any of this for now.
+ * ESW_POOLS is expected to be sorted from large to small.
+ */
+#define ESW_SIZE (16 * 1024 * 1024)
+const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
+ 64 * 1024, 4 * 1024 };
+
+static int
+get_sz_from_pool(struct mlx5_eswitch *esw)
+{
+ int sz = 0, i;
+
+ for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
+ if (esw->fdb_table.offloads.fdb_left[i]) {
+ --esw->fdb_table.offloads.fdb_left[i];
+ sz = ESW_POOLS[i];
+ break;
+ }
+ }
+
+ return sz;
+}
+
+static void
+put_sz_to_pool(struct mlx5_eswitch *esw, int sz)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
+ if (sz >= ESW_POOLS[i]) {
+ ++esw->fdb_table.offloads.fdb_left[i];
+ break;
+ }
+ }
+}
+
+static struct mlx5_flow_table *
+create_next_size_table(struct mlx5_eswitch *esw,
+ struct mlx5_flow_namespace *ns,
+ u16 table_prio,
+ int level,
+ u32 flags)
+{
+ struct mlx5_flow_table *fdb;
+ int sz;
+
+ sz = get_sz_from_pool(esw);
+ if (!sz)
+ return ERR_PTR(-ENOSPC);
+
+ fdb = mlx5_create_auto_grouped_flow_table(ns,
+ table_prio,
+ sz,
+ ESW_OFFLOADS_NUM_GROUPS,
+ level,
+ flags);
+ if (IS_ERR(fdb)) {
+ esw_warn(esw->dev, "Failed to create FDB Table err %d (table prio: %d, level: %d, size: %d)\n",
+ (int)PTR_ERR(fdb), table_prio, level, sz);
+ put_sz_to_pool(esw, sz);
+ }
+
+ return fdb;
+}
+
+static struct mlx5_flow_table *
+esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
{
struct mlx5_core_dev *dev = esw->dev;
- struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *fdb = NULL;
- int esw_size, err = 0;
+ struct mlx5_flow_namespace *ns;
+ int table_prio, l = 0;
u32 flags = 0;
- u32 max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
- MLX5_CAP_GEN(dev, max_flow_counter_15_0);
- root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
- if (!root_ns) {
- esw_warn(dev, "Failed to get FDB flow namespace\n");
- err = -EOPNOTSUPP;
- goto out_namespace;
- }
+ if (chain == FDB_SLOW_PATH_CHAIN)
+ return esw->fdb_table.offloads.slow_fdb;
- esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n",
- MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
- max_flow_counter, ESW_OFFLOADS_NUM_GROUPS);
+ mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);
- esw_size = min_t(int, max_flow_counter * ESW_OFFLOADS_NUM_GROUPS,
- 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
+ fdb = fdb_prio_table(esw, chain, prio, level).fdb;
+ if (fdb) {
+ /* take ref on earlier levels as well */
+ while (level >= 0)
+ fdb_prio_table(esw, chain, prio, level--).num_rules++;
+ mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
+ return fdb;
+ }
- if (mlx5_esw_has_fwd_fdb(dev))
- esw_size >>= 1;
+ ns = mlx5_get_fdb_sub_ns(dev, chain);
+ if (!ns) {
+ esw_warn(dev, "Failed to get FDB sub namespace\n");
+ mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
- flags |= MLX5_FLOW_TABLE_TUNNEL_EN;
+ flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
+ MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
- fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
- esw_size,
- ESW_OFFLOADS_NUM_GROUPS, 0,
- flags);
- if (IS_ERR(fdb)) {
- err = PTR_ERR(fdb);
- esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
- goto out_namespace;
- }
- esw->fdb_table.offloads.fast_fdb = fdb;
+ table_prio = (chain * FDB_MAX_PRIO) + prio - 1;
+
+ /* create earlier levels for correct fs_core lookup when
+ * connecting tables
+ */
+ for (l = 0; l <= level; l++) {
+ if (fdb_prio_table(esw, chain, prio, l).fdb) {
+ fdb_prio_table(esw, chain, prio, l).num_rules++;
+ continue;
+ }
- if (!mlx5_esw_has_fwd_fdb(dev))
- goto out_namespace;
+ fdb = create_next_size_table(esw, ns, table_prio, l, flags);
+ if (IS_ERR(fdb)) {
+ l--;
+ goto err_create_fdb;
+ }
- fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
- esw_size,
- ESW_OFFLOADS_NUM_GROUPS, 1,
- flags);
- if (IS_ERR(fdb)) {
- err = PTR_ERR(fdb);
- esw_warn(dev, "Failed to create fwd table err %d\n", err);
- goto out_ft;
+ fdb_prio_table(esw, chain, prio, l).fdb = fdb;
+ fdb_prio_table(esw, chain, prio, l).num_rules = 1;
}
- esw->fdb_table.offloads.fwd_fdb = fdb;
- return err;
+ mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
+ return fdb;
-out_ft:
- mlx5_destroy_flow_table(esw->fdb_table.offloads.fast_fdb);
-out_namespace:
- return err;
+err_create_fdb:
+ mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
+ if (l >= 0)
+ esw_put_prio_table(esw, chain, prio, l);
+
+ return fdb;
+}
+
+static void
+esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
+{
+ int l;
+
+ if (chain == FDB_SLOW_PATH_CHAIN)
+ return;
+
+ mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);
+
+ for (l = level; l >= 0; l--) {
+ if (--(fdb_prio_table(esw, chain, prio, l).num_rules) > 0)
+ continue;
+
+ put_sz_to_pool(esw, fdb_prio_table(esw, chain, prio, l).fdb->max_fte);
+ mlx5_destroy_flow_table(fdb_prio_table(esw, chain, prio, l).fdb);
+ fdb_prio_table(esw, chain, prio, l).fdb = NULL;
+ }
+
+ mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
}
-static void esw_destroy_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
+static void esw_destroy_offloads_fast_fdb_tables(struct mlx5_eswitch *esw)
{
- if (mlx5_esw_has_fwd_fdb(esw->dev))
- mlx5_destroy_flow_table(esw->fdb_table.offloads.fwd_fdb);
- mlx5_destroy_flow_table(esw->fdb_table.offloads.fast_fdb);
+ /* If lazy creation isn't supported, deref the fast path tables */
+ if (!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)) {
+ esw_put_prio_table(esw, 0, 1, 1);
+ esw_put_prio_table(esw, 0, 1, 0);
+ }
}
#define MAX_PF_SQ 256
@@ -579,12 +767,13 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_core_dev *dev = esw->dev;
+ u32 *flow_group_in, max_flow_counter;
struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *fdb = NULL;
- int table_size, ix, err = 0;
+ int table_size, ix, err = 0, i;
struct mlx5_flow_group *g;
+ u32 flags = 0, fdb_max;
void *match_criteria;
- u32 *flow_group_in;
u8 *dmac;
esw_debug(esw->dev, "Create offloads FDB Tables\n");
@@ -599,12 +788,29 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
goto ns_err;
}
- err = esw_create_offloads_fast_fdb_table(esw);
- if (err)
- goto fast_fdb_err;
+ max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
+ MLX5_CAP_GEN(dev, max_flow_counter_15_0);
+ fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);
+
+ esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(2^%d))\n",
+ MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
+ max_flow_counter, ESW_OFFLOADS_NUM_GROUPS,
+ fdb_max);
+
+ for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++)
+ esw->fdb_table.offloads.fdb_left[i] =
+ ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;
table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 2;
+ /* Create the slow path fdb with encap enabled, so further table instances
+ * can be created at run time while VFs are probed, if the FW allows that.
+ */
+ if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
+ flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
+ MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
+
+ ft_attr.flags = flags;
ft_attr.max_fte = table_size;
ft_attr.prio = FDB_SLOW_PATH;
@@ -616,6 +822,18 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
}
esw->fdb_table.offloads.slow_fdb = fdb;
+ /* If lazy creation isn't supported, open the fast path tables now */
+ if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) &&
+ esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
+ esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
+ esw_warn(dev, "Lazy creation of flow tables isn't supported, ignoring priorities\n");
+ esw_get_prio_table(esw, 0, 1, 0);
+ esw_get_prio_table(esw, 0, 1, 1);
+ } else {
+ esw_debug(dev, "Lazy creation of flow tables supported, deferring table opening\n");
+ esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
+ }
+
/* create send-to-vport group */
memset(flow_group_in, 0, inlen);
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
@@ -663,6 +881,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
if (err)
goto miss_rule_err;
+ esw->nvports = nvports;
kvfree(flow_group_in);
return 0;
@@ -671,10 +890,9 @@ miss_rule_err:
miss_err:
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
+ esw_destroy_offloads_fast_fdb_tables(esw);
mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
- esw_destroy_offloads_fast_fdb_table(esw);
-fast_fdb_err:
ns_err:
kvfree(flow_group_in);
return err;
@@ -682,7 +900,7 @@ ns_err:
static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
- if (!esw->fdb_table.offloads.fast_fdb)
+ if (!esw->fdb_table.offloads.slow_fdb)
return;
esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
@@ -692,7 +910,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
- esw_destroy_offloads_fast_fdb_table(esw);
+ esw_destroy_offloads_fast_fdb_tables(esw);
}
static int esw_create_offloads_table(struct mlx5_eswitch *esw)
@@ -810,29 +1028,35 @@ out:
return flow_rule;
}
-static int esw_offloads_start(struct mlx5_eswitch *esw)
+static int esw_offloads_start(struct mlx5_eswitch *esw,
+ struct netlink_ext_ack *extack)
{
int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
if (esw->mode != SRIOV_LEGACY) {
- esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't set offloads mode, SRIOV legacy not enabled");
return -EINVAL;
}
mlx5_eswitch_disable_sriov(esw);
err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
if (err) {
- esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed setting eswitch to offloads");
err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
- if (err1)
- esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
+ if (err1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed setting eswitch back to legacy");
+ }
}
if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
if (mlx5_eswitch_inline_mode_get(esw,
num_vfs,
&esw->offloads.inline_mode)) {
esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
- esw_warn(esw->dev, "Inline mode is different between vports\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Inline mode is different between vports");
}
}
return err;
@@ -943,6 +1167,8 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
int err;
+ mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
+
err = esw_create_offloads_fdb_tables(esw, nvports);
if (err)
return err;
@@ -973,17 +1199,20 @@ create_ft_err:
return err;
}
-static int esw_offloads_stop(struct mlx5_eswitch *esw)
+static int esw_offloads_stop(struct mlx5_eswitch *esw,
+ struct netlink_ext_ack *extack)
{
int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
mlx5_eswitch_disable_sriov(esw);
err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
if (err) {
- esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
+ NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
- if (err1)
- esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
+ if (err1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed setting eswitch back to offloads");
+ }
}
/* enable back PF RoCE */
@@ -1092,7 +1321,8 @@ static int mlx5_devlink_eswitch_check(struct devlink *devlink)
return 0;
}
-int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
+int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
u16 cur_mlx5_mode, mlx5_mode = 0;
@@ -1111,9 +1341,9 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
return 0;
if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
- return esw_offloads_start(dev->priv.eswitch);
+ return esw_offloads_start(dev->priv.eswitch, extack);
else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
- return esw_offloads_stop(dev->priv.eswitch);
+ return esw_offloads_stop(dev->priv.eswitch, extack);
else
return -EINVAL;
}
@@ -1130,7 +1360,8 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}
-int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
+int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_eswitch *esw = dev->priv.eswitch;
@@ -1147,14 +1378,15 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
return 0;
/* fall through */
case MLX5_CAP_INLINE_MODE_L2:
- esw_warn(dev, "Inline mode can't be set\n");
+ NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
return -EOPNOTSUPP;
case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
break;
}
if (esw->offloads.num_flows > 0) {
- esw_warn(dev, "Can't set inline mode when flows are configured\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't set inline mode when flows are configured");
return -EOPNOTSUPP;
}
@@ -1165,8 +1397,8 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
for (vport = 1; vport < esw->enabled_vports; vport++) {
err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
if (err) {
- esw_warn(dev, "Failed to set min inline on vport %d\n",
- vport);
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set min inline on vport");
goto revert_inline_mode;
}
}
@@ -1232,7 +1464,8 @@ out:
return 0;
}
-int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
+int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_eswitch *esw = dev->priv.eswitch;
@@ -1243,7 +1476,7 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
return err;
if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
- (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) ||
+ (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
return -EOPNOTSUPP;
@@ -1259,19 +1492,24 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
return 0;
if (esw->offloads.num_flows > 0) {
- esw_warn(dev, "Can't set encapsulation when flows are configured\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't set encapsulation when flows are configured");
return -EOPNOTSUPP;
}
- esw_destroy_offloads_fast_fdb_table(esw);
+ esw_destroy_offloads_fdb_tables(esw);
esw->offloads.encap = encap;
- err = esw_create_offloads_fast_fdb_table(esw);
+
+ err = esw_create_offloads_fdb_tables(esw, esw->nvports);
+
if (err) {
- esw_warn(esw->dev, "Failed re-creating fast FDB table, err %d\n", err);
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed re-creating fast FDB table");
esw->offloads.encap = !encap;
- (void)esw_create_offloads_fast_fdb_table(esw);
+ (void)esw_create_offloads_fdb_tables(esw, esw->nvports);
}
+
return err;
}
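
To put numbers on the pool bookkeeping introduced above: with ESW_SIZE = 16M and ESW_POOLS = {4M, 1M, 64K, 4K}, the initial fdb_left budget is {4, 16, 256, 4096} tables per pool (each capped by log_max_ft_size), and every (chain, prio, level) slot that gets populated draws one table from the largest still-available pool. A small sketch of that arithmetic, reusing the constants from the hunks above:

/* Sketch of the per-pool budget and the chain/prio -> fs_core prio mapping. */
#define EXAMPLE_ESW_SIZE	(16 * 1024 * 1024)

static const unsigned int example_pools[4] = {
	4 * 1024 * 1024, 1 * 1024 * 1024, 64 * 1024, 4 * 1024
};

static unsigned int example_pool_budget(int i, unsigned int fdb_max)
{
	/* mirrors the fdb_left[] init in esw_create_offloads_fdb_tables() */
	return example_pools[i] <= fdb_max ? EXAMPLE_ESW_SIZE / example_pools[i] : 0;
}

static int example_table_prio(u32 chain, u16 prio)
{
	/* mirrors esw_get_prio_table(): chain 0/prio 1 -> 0,
	 * chain 0/prio 16 -> 15, chain 1/prio 1 -> 16, ...
	 */
	return (chain * 16 /* FDB_MAX_PRIO */) + prio - 1;
}
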
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index 5645a4facad2..515e3d6de051 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -245,7 +245,7 @@ static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev,
return ERR_PTR(res);
}
- /* Context will be freed by wait func after completion */
+ /* Context should be freed by the caller after completion. */
return context;
}
@@ -418,10 +418,8 @@ static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags)
cmd.cmd = htonl(MLX5_FPGA_IPSEC_CMD_OP_SET_CAP);
cmd.flags = htonl(flags);
context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd));
- if (IS_ERR(context)) {
- err = PTR_ERR(context);
- goto out;
- }
+ if (IS_ERR(context))
+ return PTR_ERR(context);
err = mlx5_fpga_ipsec_cmd_wait(context);
if (err)
@@ -435,6 +433,7 @@ static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags)
}
out:
+ kfree(context);
return err;
}
@@ -650,7 +649,7 @@ static bool mlx5_is_fpga_egress_ipsec_rule(struct mlx5_core_dev *dev,
(match_criteria_enable &
~(MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS)) ||
(flow_act->action & ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | MLX5_FLOW_CONTEXT_ACTION_ALLOW)) ||
- flow_act->has_flow_tag)
+ (flow_act->flags & FLOW_ACT_HAS_TAG))
return false;
return true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 8e01f818021b..08a891f9aade 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -152,7 +152,8 @@ static int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
struct mlx5_flow_table *next_ft,
unsigned int *table_id, u32 flags)
{
- int en_encap_decap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN);
+ int en_encap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
+ int en_decap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
int err;
@@ -169,9 +170,9 @@ static int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
}
MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
- en_encap_decap);
- MLX5_SET(create_flow_table_in, in, flow_table_context.encap_en,
- en_encap_decap);
+ en_decap);
+ MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
+ en_encap);
switch (op_mod) {
case FS_FT_OP_MOD_NORMAL:
@@ -343,7 +344,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(flow_context, in_flow_context, flow_tag, fte->action.flow_tag);
MLX5_SET(flow_context, in_flow_context, action, fte->action.action);
- MLX5_SET(flow_context, in_flow_context, encap_id, fte->action.encap_id);
+ MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
+ fte->action.reformat_id);
MLX5_SET(flow_context, in_flow_context, modify_header_id,
fte->action.modify_id);
@@ -417,7 +419,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
continue;
MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
- dst->dest_attr.counter->id);
+ dst->dest_attr.counter_id);
in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
list_size++;
}
@@ -594,62 +596,78 @@ void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
*bytes = MLX5_GET64(traffic_counter, stats, octets);
}
-int mlx5_encap_alloc(struct mlx5_core_dev *dev,
- int header_type,
- size_t size,
- void *encap_header,
- u32 *encap_id)
+int mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
+ int reformat_type,
+ size_t size,
+ void *reformat_data,
+ enum mlx5_flow_namespace_type namespace,
+ u32 *packet_reformat_id)
{
- int max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
- u32 out[MLX5_ST_SZ_DW(alloc_encap_header_out)];
- void *encap_header_in;
- void *header;
+ u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)];
+ void *packet_reformat_context_in;
+ int max_encap_size;
+ void *reformat;
int inlen;
int err;
u32 *in;
+ if (namespace == MLX5_FLOW_NAMESPACE_FDB)
+ max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
+ else
+ max_encap_size = MLX5_CAP_FLOWTABLE(dev, max_encap_header_size);
+
if (size > max_encap_size) {
mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n",
size, max_encap_size);
return -EINVAL;
}
- in = kzalloc(MLX5_ST_SZ_BYTES(alloc_encap_header_in) + size,
+ in = kzalloc(MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in) + size,
GFP_KERNEL);
if (!in)
return -ENOMEM;
- encap_header_in = MLX5_ADDR_OF(alloc_encap_header_in, in, encap_header);
- header = MLX5_ADDR_OF(encap_header_in, encap_header_in, encap_header);
- inlen = header - (void *)in + size;
+ packet_reformat_context_in = MLX5_ADDR_OF(alloc_packet_reformat_context_in,
+ in, packet_reformat_context);
+ reformat = MLX5_ADDR_OF(packet_reformat_context_in,
+ packet_reformat_context_in,
+ reformat_data);
+ inlen = reformat - (void *)in + size;
memset(in, 0, inlen);
- MLX5_SET(alloc_encap_header_in, in, opcode,
- MLX5_CMD_OP_ALLOC_ENCAP_HEADER);
- MLX5_SET(encap_header_in, encap_header_in, encap_header_size, size);
- MLX5_SET(encap_header_in, encap_header_in, header_type, header_type);
- memcpy(header, encap_header, size);
+ MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
+ MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
+ reformat_data_size, size);
+ MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
+ reformat_type, reformat_type);
+ memcpy(reformat, reformat_data, size);
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
- *encap_id = MLX5_GET(alloc_encap_header_out, out, encap_id);
+ *packet_reformat_id = MLX5_GET(alloc_packet_reformat_context_out,
+ out, packet_reformat_id);
kfree(in);
return err;
}
+EXPORT_SYMBOL(mlx5_packet_reformat_alloc);
-void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id)
+void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
+ u32 packet_reformat_id)
{
- u32 in[MLX5_ST_SZ_DW(dealloc_encap_header_in)];
- u32 out[MLX5_ST_SZ_DW(dealloc_encap_header_out)];
+ u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)];
+ u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_out)];
memset(in, 0, sizeof(in));
- MLX5_SET(dealloc_encap_header_in, in, opcode,
- MLX5_CMD_OP_DEALLOC_ENCAP_HEADER);
- MLX5_SET(dealloc_encap_header_in, in, encap_id, encap_id);
+ MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
+ MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
+ packet_reformat_id);
mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
+EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);
int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
u8 namespace, u8 num_actions,
@@ -667,9 +685,14 @@ int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
table_type = FS_FT_FDB;
break;
case MLX5_FLOW_NAMESPACE_KERNEL:
+ case MLX5_FLOW_NAMESPACE_BYPASS:
max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions);
table_type = FS_FT_NIC_RX;
break;
+ case MLX5_FLOW_NAMESPACE_EGRESS:
+ max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions);
+ table_type = FS_FT_NIC_TX;
+ break;
default:
return -EOPNOTSUPP;
}
@@ -702,6 +725,7 @@ int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
kfree(in);
return err;
}
+EXPORT_SYMBOL(mlx5_modify_header_alloc);
void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id)
{
@@ -716,6 +740,7 @@ void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id)
mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
+EXPORT_SYMBOL(mlx5_modify_header_dealloc);
static const struct mlx5_flow_cmds mlx5_flow_cmds = {
.create_flow_table = mlx5_cmd_create_flow_table,
@@ -760,8 +785,8 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type typ
case FS_FT_FDB:
case FS_FT_SNIFFER_RX:
case FS_FT_SNIFFER_TX:
- return mlx5_fs_cmd_get_fw_cmds();
case FS_FT_NIC_TX:
+ return mlx5_fs_cmd_get_fw_cmds();
default:
return mlx5_fs_cmd_get_stub_cmds();
}
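
With mlx5_packet_reformat_alloc()/mlx5_packet_reformat_dealloc() now exported, a reformat (encap) context can be allocated outside fs_core and attached to a rule via flow_act.reformat_id. A hedged usage sketch follows; the reformat type value and header bytes are placeholders, not taken from this patch:

/* Sketch: allocate a packet-reformat context, use it, then release it. */
static int example_use_packet_reformat(struct mlx5_core_dev *mdev)
{
	u8 encap_hdr[64] = { 0 };	/* placeholder header bytes */
	u32 reformat_id;
	int err;

	err = mlx5_packet_reformat_alloc(mdev,
					 0 /* placeholder reformat_type */,
					 sizeof(encap_hdr), encap_hdr,
					 MLX5_FLOW_NAMESPACE_FDB,
					 &reformat_id);
	if (err)
		return err;

	/* ... set flow_act.reformat_id = reformat_id on a rule that carries
	 * MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT ...
	 */

	mlx5_packet_reformat_dealloc(mdev, reformat_id);
	return 0;
}
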
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 37d114c668b7..9d73eb955f75 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -40,6 +40,7 @@
#include "diag/fs_tracepoint.h"
#include "accel/ipsec.h"
#include "fpga/ipsec.h"
+#include "eswitch.h"
#define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
sizeof(struct init_tree_node))
@@ -76,6 +77,14 @@
FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
FS_CAP(flow_table_properties_nic_receive.flow_table_modify))
+#define FS_CHAINING_CAPS_EGRESS \
+ FS_REQUIRED_CAPS( \
+ FS_CAP(flow_table_properties_nic_transmit.flow_modify_en), \
+ FS_CAP(flow_table_properties_nic_transmit.modify_root), \
+ FS_CAP(flow_table_properties_nic_transmit \
+ .identified_miss_table_mode), \
+ FS_CAP(flow_table_properties_nic_transmit.flow_table_modify))
+
#define LEFTOVERS_NUM_LEVELS 1
#define LEFTOVERS_NUM_PRIOS 1
@@ -151,6 +160,17 @@ static struct init_tree_node {
}
};
+static struct init_tree_node egress_root_fs = {
+ .type = FS_TYPE_NAMESPACE,
+ .ar_size = 1,
+ .children = (struct init_tree_node[]) {
+ ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
+ FS_CHAINING_CAPS_EGRESS,
+ ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
+ BY_PASS_PRIO_NUM_LEVELS))),
+ }
+};
+
enum fs_i_lock_class {
FS_LOCK_GRANDPARENT,
FS_LOCK_PARENT,
@@ -694,7 +714,7 @@ static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root,
struct fs_node *iter = list_entry(start, struct fs_node, list);
struct mlx5_flow_table *ft = NULL;
- if (!root)
+ if (!root || root->type == FS_TYPE_PRIO_CHAINS)
return NULL;
list_for_each_advance_continue(iter, &root->children, reverse) {
@@ -1388,7 +1408,7 @@ static bool check_conflicting_actions(u32 action1, u32 action2)
return false;
if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP |
- MLX5_FLOW_CONTEXT_ACTION_ENCAP |
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
MLX5_FLOW_CONTEXT_ACTION_DECAP |
MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
@@ -1408,7 +1428,7 @@ static int check_conflicting_ftes(struct fs_fte *fte, const struct mlx5_flow_act
return -EEXIST;
}
- if (flow_act->has_flow_tag &&
+ if ((flow_act->flags & FLOW_ACT_HAS_TAG) &&
fte->action.flow_tag != flow_act->flow_tag) {
mlx5_core_warn(get_dev(&fte->node),
"FTE flow tag %u already exists with different flow tag %u\n",
@@ -1455,29 +1475,8 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
return handle;
}
-struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handle)
+static bool counter_is_valid(u32 action)
{
- struct mlx5_flow_rule *dst;
- struct fs_fte *fte;
-
- fs_get_obj(fte, handle->rule[0]->node.parent);
-
- fs_for_each_dst(dst, fte) {
- if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
- return dst->dest_attr.counter;
- }
-
- return NULL;
-}
-
-static bool counter_is_valid(struct mlx5_fc *counter, u32 action)
-{
- if (!(action & MLX5_FLOW_CONTEXT_ACTION_COUNT))
- return !counter;
-
- if (!counter)
- return false;
-
return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
}
@@ -1487,7 +1486,7 @@ static bool dest_is_valid(struct mlx5_flow_destination *dest,
struct mlx5_flow_table *ft)
{
if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
- return counter_is_valid(dest->counter, action);
+ return counter_is_valid(action);
if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
return true;
@@ -1629,6 +1628,8 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
search_again_locked:
version = matched_fgs_get_version(match_head);
+ if (flow_act->flags & FLOW_ACT_NO_APPEND)
+ goto skip_search;
/* Try to find a fg that already contains a matching fte */
list_for_each_entry(iter, match_head, list) {
struct fs_fte *fte_tmp;
@@ -1645,6 +1646,11 @@ search_again_locked:
return rule;
}
+skip_search:
+ /* No group with matching fte found, or we skipped the search.
+ * Try to add a new fte to any matching fg.
+ */
+
/* Check the ft version, for case that new flow group
* was added while the fgs weren't locked
*/
@@ -1975,12 +1981,24 @@ void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
fg->id);
}
+struct mlx5_flow_namespace *mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev,
+ int n)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+
+ if (!steering || !steering->fdb_sub_ns)
+ return NULL;
+
+ return steering->fdb_sub_ns[n];
+}
+EXPORT_SYMBOL(mlx5_get_fdb_sub_ns);
+
struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
enum mlx5_flow_namespace_type type)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
struct mlx5_flow_root_namespace *root_ns;
- int prio;
+ int prio = 0;
struct fs_prio *fs_prio;
struct mlx5_flow_namespace *ns;
@@ -1988,40 +2006,29 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
return NULL;
switch (type) {
- case MLX5_FLOW_NAMESPACE_BYPASS:
- case MLX5_FLOW_NAMESPACE_LAG:
- case MLX5_FLOW_NAMESPACE_OFFLOADS:
- case MLX5_FLOW_NAMESPACE_ETHTOOL:
- case MLX5_FLOW_NAMESPACE_KERNEL:
- case MLX5_FLOW_NAMESPACE_LEFTOVERS:
- case MLX5_FLOW_NAMESPACE_ANCHOR:
- prio = type;
- break;
case MLX5_FLOW_NAMESPACE_FDB:
if (steering->fdb_root_ns)
return &steering->fdb_root_ns->ns;
- else
- return NULL;
+ return NULL;
case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
if (steering->sniffer_rx_root_ns)
return &steering->sniffer_rx_root_ns->ns;
- else
- return NULL;
+ return NULL;
case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
if (steering->sniffer_tx_root_ns)
return &steering->sniffer_tx_root_ns->ns;
- else
- return NULL;
- case MLX5_FLOW_NAMESPACE_EGRESS:
- if (steering->egress_root_ns)
- return &steering->egress_root_ns->ns;
- else
- return NULL;
- default:
return NULL;
+ default:
+ break;
+ }
+
+ if (type == MLX5_FLOW_NAMESPACE_EGRESS) {
+ root_ns = steering->egress_root_ns;
+ } else { /* Must be NIC RX */
+ root_ns = steering->root_ns;
+ prio = type;
}
- root_ns = steering->root_ns;
if (!root_ns)
return NULL;
@@ -2064,8 +2071,10 @@ struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_d
}
}
-static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
- unsigned int prio, int num_levels)
+static struct fs_prio *_fs_create_prio(struct mlx5_flow_namespace *ns,
+ unsigned int prio,
+ int num_levels,
+ enum fs_node_type type)
{
struct fs_prio *fs_prio;
@@ -2073,7 +2082,7 @@ static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
if (!fs_prio)
return ERR_PTR(-ENOMEM);
- fs_prio->node.type = FS_TYPE_PRIO;
+ fs_prio->node.type = type;
tree_init_node(&fs_prio->node, NULL, del_sw_prio);
tree_add_node(&fs_prio->node, &ns->node);
fs_prio->num_levels = num_levels;
@@ -2083,6 +2092,19 @@ static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
return fs_prio;
}
+static struct fs_prio *fs_create_prio_chained(struct mlx5_flow_namespace *ns,
+ unsigned int prio,
+ int num_levels)
+{
+ return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO_CHAINS);
+}
+
+static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
+ unsigned int prio, int num_levels)
+{
+ return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO);
+}
+
static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
*ns)
{
@@ -2387,6 +2409,9 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
cleanup_egress_acls_root_ns(dev);
cleanup_ingress_acls_root_ns(dev);
cleanup_root_ns(steering->fdb_root_ns);
+ steering->fdb_root_ns = NULL;
+ kfree(steering->fdb_sub_ns);
+ steering->fdb_sub_ns = NULL;
cleanup_root_ns(steering->sniffer_rx_root_ns);
cleanup_root_ns(steering->sniffer_tx_root_ns);
cleanup_root_ns(steering->egress_root_ns);
@@ -2432,27 +2457,64 @@ static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
{
- struct fs_prio *prio;
+ struct mlx5_flow_namespace *ns;
+ struct fs_prio *maj_prio;
+ struct fs_prio *min_prio;
+ int levels;
+ int chain;
+ int prio;
+ int err;
steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
if (!steering->fdb_root_ns)
return -ENOMEM;
- prio = fs_create_prio(&steering->fdb_root_ns->ns, 0, 2);
- if (IS_ERR(prio))
+ steering->fdb_sub_ns = kzalloc(sizeof(steering->fdb_sub_ns) *
+ (FDB_MAX_CHAIN + 1), GFP_KERNEL);
+ if (!steering->fdb_sub_ns)
+ return -ENOMEM;
+
+ levels = 2 * FDB_MAX_PRIO * (FDB_MAX_CHAIN + 1);
+ maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns, 0,
+ levels);
+ if (IS_ERR(maj_prio)) {
+ err = PTR_ERR(maj_prio);
goto out_err;
+ }
+
+ for (chain = 0; chain <= FDB_MAX_CHAIN; chain++) {
+ ns = fs_create_namespace(maj_prio);
+ if (IS_ERR(ns)) {
+ err = PTR_ERR(ns);
+ goto out_err;
+ }
+
+ for (prio = 0; prio < FDB_MAX_PRIO * (chain + 1); prio++) {
+ min_prio = fs_create_prio(ns, prio, 2);
+ if (IS_ERR(min_prio)) {
+ err = PTR_ERR(min_prio);
+ goto out_err;
+ }
+ }
+
+ steering->fdb_sub_ns[chain] = ns;
+ }
- prio = fs_create_prio(&steering->fdb_root_ns->ns, 1, 1);
- if (IS_ERR(prio))
+ maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, 1, 1);
+ if (IS_ERR(maj_prio)) {
+ err = PTR_ERR(maj_prio);
goto out_err;
+ }
set_prio_attrs(steering->fdb_root_ns);
return 0;
out_err:
cleanup_root_ns(steering->fdb_root_ns);
+ kfree(steering->fdb_sub_ns);
+ steering->fdb_sub_ns = NULL;
steering->fdb_root_ns = NULL;
- return PTR_ERR(prio);
+ return err;
}
static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
@@ -2537,16 +2599,23 @@ cleanup_root_ns:
static int init_egress_root_ns(struct mlx5_flow_steering *steering)
{
- struct fs_prio *prio;
+ int err;
steering->egress_root_ns = create_root_ns(steering,
FS_FT_NIC_TX);
if (!steering->egress_root_ns)
return -ENOMEM;
- /* create 1 prio*/
- prio = fs_create_prio(&steering->egress_root_ns->ns, 0, 1);
- return PTR_ERR_OR_ZERO(prio);
+ err = init_root_tree(steering, &egress_root_fs,
+ &steering->egress_root_ns->ns.node);
+ if (err)
+ goto cleanup;
+ set_prio_attrs(steering->egress_root_ns);
+ return 0;
+cleanup:
+ cleanup_root_ns(steering->egress_root_ns);
+ steering->egress_root_ns = NULL;
+ return err;
}
int mlx5_init_fs(struct mlx5_core_dev *dev)
@@ -2614,7 +2683,7 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
goto err;
}
- if (MLX5_IPSEC_DEV(dev)) {
+ if (MLX5_IPSEC_DEV(dev) || MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
err = init_egress_root_ns(steering);
if (err)
goto err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index a06f83c0c2b6..b51ad217da32 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -38,9 +38,21 @@
#include <linux/rhashtable.h>
#include <linux/llist.h>
+/* FS_TYPE_PRIO_CHAINS is a PRIO that contains only namespaces, and those
+ * namespaces are parallel to one another when walking them to connect a
+ * new flow table: the last flow table in a TYPE_PRIO prio of one parallel
+ * namespace will not automatically connect to the first flow table found
+ * in any prio of the next namespace, but will instead skip the entire
+ * containing TYPE_PRIO_CHAINS prio.
+ *
+ * This is used to implement tc chains: each chain of prios is a different
+ * namespace inside a containing TYPE_PRIO_CHAINS prio.
+ */
+
enum fs_node_type {
FS_TYPE_NAMESPACE,
FS_TYPE_PRIO,
+ FS_TYPE_PRIO_CHAINS,
FS_TYPE_FLOW_TABLE,
FS_TYPE_FLOW_GROUP,
FS_TYPE_FLOW_ENTRY,
@@ -73,6 +85,7 @@ struct mlx5_flow_steering {
struct kmem_cache *ftes_cache;
struct mlx5_flow_root_namespace *root_ns;
struct mlx5_flow_root_namespace *fdb_root_ns;
+ struct mlx5_flow_namespace **fdb_sub_ns;
struct mlx5_flow_root_namespace **esw_egress_root_ns;
struct mlx5_flow_root_namespace **esw_ingress_root_ns;
struct mlx5_flow_root_namespace *sniffer_tx_root_ns;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 09206c4acd9a..32accd6b041b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -99,6 +99,18 @@ static void mlx5_fc_stats_insert(struct mlx5_core_dev *dev,
list_add_tail(&counter->list, next);
}
+static void mlx5_fc_stats_remove(struct mlx5_core_dev *dev,
+ struct mlx5_fc *counter)
+{
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+
+ list_del(&counter->list);
+
+ spin_lock(&fc_stats->counters_idr_lock);
+ WARN_ON(!idr_remove(&fc_stats->counters_idr, counter->id));
+ spin_unlock(&fc_stats->counters_idr_lock);
+}
+
/* The function returns the last counter that was queried so the caller
* function can continue calling it till all counters are queried.
*/
@@ -179,20 +191,23 @@ static void mlx5_fc_stats_work(struct work_struct *work)
struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
priv.fc_stats.work.work);
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
- struct llist_node *tmplist = llist_del_all(&fc_stats->addlist);
+ /* Take dellist first to ensure that counters cannot be deleted before
+ * they are inserted.
+ */
+ struct llist_node *dellist = llist_del_all(&fc_stats->dellist);
+ struct llist_node *addlist = llist_del_all(&fc_stats->addlist);
struct mlx5_fc *counter = NULL, *last = NULL, *tmp;
unsigned long now = jiffies;
- if (tmplist || !list_empty(&fc_stats->counters))
+ if (addlist || !list_empty(&fc_stats->counters))
queue_delayed_work(fc_stats->wq, &fc_stats->work,
fc_stats->sampling_interval);
- llist_for_each_entry(counter, tmplist, addlist)
+ llist_for_each_entry(counter, addlist, addlist)
mlx5_fc_stats_insert(dev, counter);
- tmplist = llist_del_all(&fc_stats->dellist);
- llist_for_each_entry_safe(counter, tmp, tmplist, dellist) {
- list_del(&counter->list);
+ llist_for_each_entry_safe(counter, tmp, dellist, dellist) {
+ mlx5_fc_stats_remove(dev, counter);
mlx5_free_fc(dev, counter);
}
@@ -258,6 +273,12 @@ err_out:
}
EXPORT_SYMBOL(mlx5_fc_create);
+u32 mlx5_fc_id(struct mlx5_fc *counter)
+{
+ return counter->id;
+}
+EXPORT_SYMBOL(mlx5_fc_id);
+
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
@@ -266,10 +287,6 @@ void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
return;
if (counter->aging) {
- spin_lock(&fc_stats->counters_idr_lock);
- WARN_ON(!idr_remove(&fc_stats->counters_idr, counter->id));
- spin_unlock(&fc_stats->counters_idr_lock);
-
llist_add(&counter->dellist, &fc_stats->dellist);
mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
return;
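
Since mlx5_fc_id() is exported here, a counter is attached to a rule by its id rather than by passing the mlx5_fc pointer in the destination. A minimal sketch mirroring the eswitch ingress/egress drop-counter hunks earlier in this patch:

/* Sketch: create an aging flow counter and use it as a rule destination. */
static int example_attach_counter(struct mlx5_core_dev *dev,
				  struct mlx5_flow_destination *dest,
				  struct mlx5_flow_act *flow_act,
				  struct mlx5_fc **counter_out)
{
	struct mlx5_fc *counter = mlx5_fc_create(dev, true /* aging */);

	if (IS_ERR(counter))
		return PTR_ERR(counter);

	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
	dest->type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest->counter_id = mlx5_fc_id(counter);

	*counter_out = counter;	/* released later via mlx5_fc_destroy() */
	return 0;
}
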
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 41ad24f0de2c..1ab6f7e3bec6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -250,7 +250,7 @@ int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
if (ret)
return ret;
- force_state = MLX5_GET(teardown_hca_out, out, force_state);
+ force_state = MLX5_GET(teardown_hca_out, out, state);
if (force_state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) {
mlx5_core_warn(dev, "teardown with force mode failed, doing normal teardown\n");
return -EIO;
@@ -259,6 +259,54 @@ int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
return 0;
}
+#define MLX5_FAST_TEARDOWN_WAIT_MS 3000
+int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
+{
+ unsigned long end, delay_ms = MLX5_FAST_TEARDOWN_WAIT_MS;
+ u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
+ int state;
+ int ret;
+
+ if (!MLX5_CAP_GEN(dev, fast_teardown)) {
+ mlx5_core_dbg(dev, "fast teardown is not supported in the firmware\n");
+ return -EOPNOTSUPP;
+ }
+
+ MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
+ MLX5_SET(teardown_hca_in, in, profile,
+ MLX5_TEARDOWN_HCA_IN_PROFILE_PREPARE_FAST_TEARDOWN);
+
+ ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (ret)
+ return ret;
+
+ state = MLX5_GET(teardown_hca_out, out, state);
+ if (state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) {
+ mlx5_core_warn(dev, "teardown with fast mode failed\n");
+ return -EIO;
+ }
+
+ mlx5_set_nic_state(dev, MLX5_NIC_IFC_DISABLED);
+
+ /* Loop until the device state turns to disabled */
+ end = jiffies + msecs_to_jiffies(delay_ms);
+ do {
+ if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+ break;
+
+ cond_resched();
+ } while (!time_after(jiffies, end));
+
+ if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
+ dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n",
+ mlx5_get_nic_state(dev), delay_ms);
+ return -EIO;
+ }
+
+ return 0;
+}
+
enum mlxsw_reg_mcc_instruction {
MLX5_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE = 0x01,
MLX5_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE = 0x02,
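
The fast-teardown path above polls the NIC interface state with a bounded wait. A generic sketch of that pattern, assuming a hypothetical read_state() callback (it is not an mlx5 API):

    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/sched.h>

    static int ex_wait_for_state(u8 (*read_state)(void *ctx), void *ctx,
                                 u8 expected, unsigned long timeout_ms)
    {
            unsigned long end = jiffies + msecs_to_jiffies(timeout_ms);

            do {
                    if (read_state(ctx) == expected)
                            return 0;
                    cond_resched();
            } while (!time_after(jiffies, end));

            /* Re-check once after the deadline, as the code above does. */
            return read_state(ctx) == expected ? 0 : -EIO;
    }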
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 9f39aeca863f..43118de8ee99 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -59,22 +59,25 @@ enum {
};
enum {
- MLX5_NIC_IFC_FULL = 0,
- MLX5_NIC_IFC_DISABLED = 1,
- MLX5_NIC_IFC_NO_DRAM_NIC = 2,
- MLX5_NIC_IFC_INVALID = 3
-};
-
-enum {
MLX5_DROP_NEW_HEALTH_WORK,
MLX5_DROP_NEW_RECOVERY_WORK,
};
-static u8 get_nic_state(struct mlx5_core_dev *dev)
+u8 mlx5_get_nic_state(struct mlx5_core_dev *dev)
{
return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 3;
}
+void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state)
+{
+ u32 cur_cmdq_addr_l_sz;
+
+ cur_cmdq_addr_l_sz = ioread32be(&dev->iseg->cmdq_addr_l_sz);
+ iowrite32be((cur_cmdq_addr_l_sz & 0xFFFFF000) |
+ state << MLX5_NIC_IFC_OFFSET,
+ &dev->iseg->cmdq_addr_l_sz);
+}
+
static void trigger_cmd_completions(struct mlx5_core_dev *dev)
{
unsigned long flags;
@@ -103,7 +106,7 @@ static int in_fatal(struct mlx5_core_dev *dev)
struct mlx5_core_health *health = &dev->priv.health;
struct health_buffer __iomem *h = health->health;
- if (get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+ if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
return 1;
if (ioread32be(&h->fw_ver) == 0xffffffff)
@@ -133,7 +136,7 @@ unlock:
static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
{
- u8 nic_interface = get_nic_state(dev);
+ u8 nic_interface = mlx5_get_nic_state(dev);
switch (nic_interface) {
case MLX5_NIC_IFC_FULL:
@@ -168,7 +171,7 @@ static void health_recover(struct work_struct *work)
priv = container_of(health, struct mlx5_priv, health);
dev = container_of(priv, struct mlx5_core_dev, priv);
- nic_state = get_nic_state(dev);
+ nic_state = mlx5_get_nic_state(dev);
if (nic_state == MLX5_NIC_IFC_INVALID) {
dev_err(&dev->pdev->dev, "health recovery flow aborted since the nic state is invalid\n");
return;
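
A hedged illustration of the bit layout the new setter manipulates (the helpers below are illustrative, not kernel code): the interface state occupies two bits starting at bit 8 of the big-endian cmdq_addr_l_sz word, matching the shift in mlx5_get_nic_state(); the write preserves the upper 20 bits and rewrites the low 12.

    /* Mirrors mlx5_set_nic_state() / mlx5_get_nic_state() above. */
    static u32 ex_nic_state_encode(u32 cmdq_addr_l_sz, u8 state)
    {
            return (cmdq_addr_l_sz & 0xFFFFF000) | ((u32)state << 8);
    }

    static u8 ex_nic_state_decode(u32 cmdq_addr_l_sz)
    {
            return (cmdq_addr_l_sz >> 8) & 3;
    }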
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 299e2a897f7e..b59953daf8b4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -71,27 +71,25 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
}
/* Called directly after IPoIB netdevice was created to initialize SW structs */
-void mlx5i_init(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile,
- void *ppriv)
+int mlx5i_init(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
u16 max_mtu;
+ int err;
- /* priv init */
- priv->mdev = mdev;
- priv->netdev = netdev;
- priv->profile = profile;
- priv->ppriv = ppriv;
- priv->max_opened_tc = 1;
- mutex_init(&priv->state_lock);
+ err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
+ if (err)
+ return err;
mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
netdev->mtu = max_mtu;
mlx5e_build_nic_params(mdev, &priv->channels.params,
- profile->max_nch(mdev), netdev->mtu);
+ mlx5e_get_netdev_max_channels(netdev),
+ netdev->mtu);
mlx5i_build_nic_params(mdev, &priv->channels.params);
mlx5e_timestamp_init(priv);
@@ -108,20 +106,23 @@ void mlx5i_init(struct mlx5_core_dev *mdev,
netdev->netdev_ops = &mlx5i_netdev_ops;
netdev->ethtool_ops = &mlx5i_ethtool_ops;
+
+ return 0;
}
/* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */
-static void mlx5i_cleanup(struct mlx5e_priv *priv)
+void mlx5i_cleanup(struct mlx5e_priv *priv)
{
- /* Do nothing .. */
+ mlx5e_netdev_cleanup(priv->netdev, priv);
}
static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv)
{
+ int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
struct mlx5e_sw_stats s = { 0 };
int i, j;
- for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) {
+ for (i = 0; i < max_nch; i++) {
struct mlx5e_channel_stats *channel_stats;
struct mlx5e_rq_stats *rq_stats;
@@ -418,7 +419,6 @@ static const struct mlx5e_profile mlx5i_nic_profile = {
.enable = NULL, /* mlx5i_enable */
.disable = NULL, /* mlx5i_disable */
.update_stats = NULL, /* mlx5i_update_stats */
- .max_nch = mlx5e_get_max_num_channels,
.update_carrier = NULL, /* no HW update in IB link */
.rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe,
.rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
@@ -650,7 +650,6 @@ static void mlx5_rdma_netdev_free(struct net_device *netdev)
mlx5e_detach_netdev(priv);
profile->cleanup(priv);
- destroy_workqueue(priv->wq);
if (!ipriv->sub_interface) {
mlx5i_pkey_qpn_ht_cleanup(netdev);
@@ -658,58 +657,37 @@ static void mlx5_rdma_netdev_free(struct net_device *netdev)
}
}
-struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
- struct ib_device *ibdev,
- const char *name,
- void (*setup)(struct net_device *))
+static bool mlx5_is_sub_interface(struct mlx5_core_dev *mdev)
+{
+ return mdev->mlx5e_res.pdn != 0;
+}
+
+static const struct mlx5e_profile *mlx5_get_profile(struct mlx5_core_dev *mdev)
{
- const struct mlx5e_profile *profile;
- struct net_device *netdev;
+ if (mlx5_is_sub_interface(mdev))
+ return mlx5i_pkey_get_profile();
+ return &mlx5i_nic_profile;
+}
+
+static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u8 port_num,
+ struct net_device *netdev, void *param)
+{
+ struct mlx5_core_dev *mdev = (struct mlx5_core_dev *)param;
+ const struct mlx5e_profile *prof = mlx5_get_profile(mdev);
struct mlx5i_priv *ipriv;
struct mlx5e_priv *epriv;
struct rdma_netdev *rn;
- bool sub_interface;
- int nch;
int err;
- if (mlx5i_check_required_hca_cap(mdev)) {
- mlx5_core_warn(mdev, "Accelerated mode is not supported\n");
- return ERR_PTR(-EOPNOTSUPP);
- }
-
- /* TODO: Need to find a better way to check if child device*/
- sub_interface = (mdev->mlx5e_res.pdn != 0);
-
- if (sub_interface)
- profile = mlx5i_pkey_get_profile();
- else
- profile = &mlx5i_nic_profile;
-
- nch = profile->max_nch(mdev);
-
- netdev = alloc_netdev_mqs(sizeof(struct mlx5i_priv) + sizeof(struct mlx5e_priv),
- name, NET_NAME_UNKNOWN,
- setup,
- nch * MLX5E_MAX_NUM_TC,
- nch);
- if (!netdev) {
- mlx5_core_warn(mdev, "alloc_netdev_mqs failed\n");
- return NULL;
- }
-
ipriv = netdev_priv(netdev);
epriv = mlx5i_epriv(netdev);
- epriv->wq = create_singlethread_workqueue("mlx5i");
- if (!epriv->wq)
- goto err_free_netdev;
-
- ipriv->sub_interface = sub_interface;
+ ipriv->sub_interface = mlx5_is_sub_interface(mdev);
if (!ipriv->sub_interface) {
err = mlx5i_pkey_qpn_ht_init(netdev);
if (err) {
mlx5_core_warn(mdev, "allocate qpn_to_netdev ht failed\n");
- goto destroy_wq;
+ return err;
}
/* This should only be called once per mdev */
@@ -718,7 +696,7 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
goto destroy_ht;
}
- profile->init(mdev, netdev, profile, ipriv);
+ prof->init(mdev, netdev, prof, ipriv);
mlx5e_attach_netdev(epriv);
netif_carrier_off(netdev);
@@ -734,15 +712,35 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
netdev->priv_destructor = mlx5_rdma_netdev_free;
netdev->needs_free_netdev = 1;
- return netdev;
+ return 0;
destroy_ht:
mlx5i_pkey_qpn_ht_cleanup(netdev);
-destroy_wq:
- destroy_workqueue(epriv->wq);
-err_free_netdev:
- free_netdev(netdev);
+ return err;
+}
- return NULL;
+int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
+ struct ib_device *device,
+ struct rdma_netdev_alloc_params *params)
+{
+ int nch;
+ int rc;
+
+ rc = mlx5i_check_required_hca_cap(mdev);
+ if (rc)
+ return rc;
+
+ nch = mlx5e_get_max_num_channels(mdev);
+
+ *params = (struct rdma_netdev_alloc_params){
+ .sizeof_priv = sizeof(struct mlx5i_priv) +
+ sizeof(struct mlx5e_priv),
+ .txqs = nch * MLX5E_MAX_NUM_TC,
+ .rxqs = nch,
+ .param = mdev,
+ .initialize_rdma_netdev = mlx5_rdma_setup_rn,
+ };
+
+ return 0;
}
-EXPORT_SYMBOL(mlx5_rdma_netdev_alloc);
+EXPORT_SYMBOL(mlx5_rdma_rn_get_params);
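
With mlx5_rdma_netdev_alloc() removed, net_device allocation moves to the caller. A hedged sketch of how a consumer outside this patch might use the new entry point; the function name, interface name format and error handling below are illustrative and assume the driver's usual headers:

    static struct net_device *
    ex_alloc_rdma_netdev(struct mlx5_core_dev *mdev, struct ib_device *ibdev,
                         u8 port, void (*setup)(struct net_device *))
    {
            struct rdma_netdev_alloc_params params;
            struct net_device *netdev;
            int err;

            err = mlx5_rdma_rn_get_params(mdev, ibdev, &params);
            if (err)
                    return ERR_PTR(err);

            netdev = alloc_netdev_mqs(params.sizeof_priv, "ib%d",
                                      NET_NAME_UNKNOWN, setup,
                                      params.txqs, params.rxqs);
            if (!netdev)
                    return ERR_PTR(-ENOMEM);

            err = params.initialize_rdma_netdev(ibdev, port, netdev,
                                                params.param);
            if (err) {
                    free_netdev(netdev);
                    return ERR_PTR(err);
            }
            return netdev;
    }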
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index 2e7fb829e1b0..9165ca567047 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -84,10 +84,11 @@ void mlx5i_dev_cleanup(struct net_device *dev);
int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
/* Parent profile functions */
-void mlx5i_init(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile,
- void *ppriv);
+int mlx5i_init(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv);
+void mlx5i_cleanup(struct mlx5e_priv *priv);
/* Get child interface nic profile */
const struct mlx5e_profile *mlx5i_pkey_get_profile(void);
@@ -109,12 +110,11 @@ struct mlx5i_tx_wqe {
static inline void mlx5i_sq_fetch_wqe(struct mlx5e_txqsq *sq,
struct mlx5i_tx_wqe **wqe,
- u16 *pi)
+ u16 pi)
{
struct mlx5_wq_cyc *wq = &sq->wq;
- *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- *wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
+ *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
memset(*wqe, 0, sizeof(**wqe));
}
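
Since mlx5i_sq_fetch_wqe() now takes the producer index instead of returning it, callers compute the index first. A minimal sketch with an illustrative caller name:

    static void ex_prepare_wqe(struct mlx5e_txqsq *sq)
    {
            struct mlx5i_tx_wqe *wqe;
            u16 pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);

            mlx5i_sq_fetch_wqe(sq, &wqe, pi);
            /* ... fill the WQE and advance sq->pc by the number of WQEBBs ... */
    }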
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index e3e8a5f1ac9b..b491b8f5fd6b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -275,14 +275,17 @@ static int mlx5i_pkey_change_mtu(struct net_device *netdev, int new_mtu)
}
/* Called directly after IPoIB netdevice was created to initialize SW structs */
-static void mlx5i_pkey_init(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile,
- void *ppriv)
+static int mlx5i_pkey_init(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+ int err;
- mlx5i_init(mdev, netdev, profile, ppriv);
+ err = mlx5i_init(mdev, netdev, profile, ppriv);
+ if (err)
+ return err;
/* Override parent ndo */
netdev->netdev_ops = &mlx5i_pkey_netdev_ops;
@@ -292,12 +295,14 @@ static void mlx5i_pkey_init(struct mlx5_core_dev *mdev,
/* Use dummy rqs */
priv->channels.params.log_rq_mtu_frames = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
+
+ return 0;
}
/* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */
static void mlx5i_pkey_cleanup(struct mlx5e_priv *priv)
{
- /* Do nothing .. */
+ mlx5i_cleanup(priv);
}
static int mlx5i_pkey_init_tx(struct mlx5e_priv *priv)
@@ -346,7 +351,6 @@ static const struct mlx5e_profile mlx5i_pkey_nic_profile = {
.enable = NULL,
.disable = NULL,
.update_stats = NULL,
- .max_nch = mlx5e_get_max_num_channels,
.rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe,
.rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
.max_tc = MLX5I_MAX_NUM_TC,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index b5e9f664fc66..28132c7dc05f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1594,12 +1594,17 @@ static const struct pci_error_handlers mlx5_err_handler = {
static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
{
- int ret;
+ bool fast_teardown = false, force_teardown = false;
+ int ret = 1;
+
+ fast_teardown = MLX5_CAP_GEN(dev, fast_teardown);
+ force_teardown = MLX5_CAP_GEN(dev, force_teardown);
+
+ mlx5_core_dbg(dev, "force teardown firmware support=%d\n", force_teardown);
+ mlx5_core_dbg(dev, "fast teardown firmware support=%d\n", fast_teardown);
- if (!MLX5_CAP_GEN(dev, force_teardown)) {
- mlx5_core_dbg(dev, "force teardown is not supported in the firmware\n");
+ if (!fast_teardown && !force_teardown)
return -EOPNOTSUPP;
- }
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
mlx5_core_dbg(dev, "Device in internal error state, giving up\n");
@@ -1612,13 +1617,19 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
mlx5_drain_health_wq(dev);
mlx5_stop_health_poll(dev, false);
+ ret = mlx5_cmd_fast_teardown_hca(dev);
+ if (!ret)
+ goto succeed;
+
ret = mlx5_cmd_force_teardown_hca(dev);
- if (ret) {
- mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret);
- mlx5_start_health_poll(dev);
- return ret;
- }
+ if (!ret)
+ goto succeed;
+
+ mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret);
+ mlx5_start_health_poll(dev);
+ return ret;
+succeed:
mlx5_enter_error_state(dev, true);
/* Some platforms requiring freeing the IRQ's in the shutdown
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index b4134fa0bba3..0594d0961cb3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -39,6 +39,7 @@
#include <linux/if_link.h>
#include <linux/firmware.h>
#include <linux/mlx5/cq.h>
+#include <linux/mlx5/fs.h>
#define DRIVER_NAME "mlx5_core"
#define DRIVER_VERSION "5.0-0"
@@ -95,6 +96,8 @@ int mlx5_query_board_id(struct mlx5_core_dev *dev);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev);
+int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev);
+
void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
unsigned long param);
void mlx5_core_page_fault(struct mlx5_core_dev *dev,
@@ -169,17 +172,6 @@ struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev);
void mlx5_dev_list_lock(void);
void mlx5_dev_list_unlock(void);
int mlx5_dev_list_trylock(void);
-int mlx5_encap_alloc(struct mlx5_core_dev *dev,
- int header_type,
- size_t size,
- void *encap_header,
- u32 *encap_id);
-void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id);
-
-int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
- u8 namespace, u8 num_actions,
- void *modify_actions, u32 *modify_header_id);
-void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id);
bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv);
@@ -214,4 +206,14 @@ int mlx5_lag_allow(struct mlx5_core_dev *dev);
int mlx5_lag_forbid(struct mlx5_core_dev *dev);
void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol);
+
+enum {
+ MLX5_NIC_IFC_FULL = 0,
+ MLX5_NIC_IFC_DISABLED = 1,
+ MLX5_NIC_IFC_NO_DRAM_NIC = 2,
+ MLX5_NIC_IFC_INVALID = 3
+};
+
+u8 mlx5_get_nic_state(struct mlx5_core_dev *dev);
+void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state);
#endif /* __MLX5_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 4ca07bfb6b14..91b8139a388d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -211,6 +211,7 @@ int mlx5_core_create_dct(struct mlx5_core_dev *dev,
}
qp->qpn = MLX5_GET(create_dct_out, out, dctn);
+ qp->uid = MLX5_GET(create_dct_in, in, uid);
err = create_resource_common(dev, qp, MLX5_RES_DCT);
if (err)
goto err_cmd;
@@ -219,6 +220,7 @@ int mlx5_core_create_dct(struct mlx5_core_dev *dev,
err_cmd:
MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
MLX5_SET(destroy_dct_in, din, dctn, qp->qpn);
+ MLX5_SET(destroy_dct_in, din, uid, qp->uid);
mlx5_cmd_exec(dev, (void *)&in, sizeof(din),
(void *)&out, sizeof(dout));
return err;
@@ -240,6 +242,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
if (err)
return err;
+ qp->uid = MLX5_GET(create_qp_in, in, uid);
qp->qpn = MLX5_GET(create_qp_out, out, qpn);
mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
@@ -261,6 +264,7 @@ err_cmd:
memset(dout, 0, sizeof(dout));
MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
+ MLX5_SET(destroy_qp_in, din, uid, qp->uid);
mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
return err;
}
@@ -275,6 +279,7 @@ static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
MLX5_SET(drain_dct_in, in, opcode, MLX5_CMD_OP_DRAIN_DCT);
MLX5_SET(drain_dct_in, in, dctn, qp->qpn);
+ MLX5_SET(drain_dct_in, in, uid, qp->uid);
return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
(void *)&out, sizeof(out));
}
@@ -301,6 +306,7 @@ destroy:
destroy_resource_common(dev, &dct->mqp);
MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
+ MLX5_SET(destroy_dct_in, in, uid, qp->uid);
err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
(void *)&out, sizeof(out));
return err;
@@ -320,6 +326,7 @@ int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
+ MLX5_SET(destroy_qp_in, in, uid, qp->uid);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
@@ -373,7 +380,7 @@ static void mbox_free(struct mbox_info *mbox)
static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
u32 opt_param_mask, void *qpc,
- struct mbox_info *mbox)
+ struct mbox_info *mbox, u16 uid)
{
mbox->out = NULL;
mbox->in = NULL;
@@ -381,26 +388,32 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
#define MBOX_ALLOC(mbox, typ) \
mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out))
-#define MOD_QP_IN_SET(typ, in, _opcode, _qpn) \
- MLX5_SET(typ##_in, in, opcode, _opcode); \
- MLX5_SET(typ##_in, in, qpn, _qpn)
-
-#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc) \
- MOD_QP_IN_SET(typ, in, _opcode, _qpn); \
- MLX5_SET(typ##_in, in, opt_param_mask, _opt_p); \
- memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc, MLX5_ST_SZ_BYTES(qpc))
+#define MOD_QP_IN_SET(typ, in, _opcode, _qpn, _uid) \
+ do { \
+ MLX5_SET(typ##_in, in, opcode, _opcode); \
+ MLX5_SET(typ##_in, in, qpn, _qpn); \
+ MLX5_SET(typ##_in, in, uid, _uid); \
+ } while (0)
+
+#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc, _uid) \
+ do { \
+ MOD_QP_IN_SET(typ, in, _opcode, _qpn, _uid); \
+ MLX5_SET(typ##_in, in, opt_param_mask, _opt_p); \
+ memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc, \
+ MLX5_ST_SZ_BYTES(qpc)); \
+ } while (0)
switch (opcode) {
/* 2RST & 2ERR */
case MLX5_CMD_OP_2RST_QP:
if (MBOX_ALLOC(mbox, qp_2rst))
return -ENOMEM;
- MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn);
+ MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn, uid);
break;
case MLX5_CMD_OP_2ERR_QP:
if (MBOX_ALLOC(mbox, qp_2err))
return -ENOMEM;
- MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn);
+ MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn, uid);
break;
/* MODIFY with QPC */
@@ -408,37 +421,37 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
if (MBOX_ALLOC(mbox, rst2init_qp))
return -ENOMEM;
MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
- opt_param_mask, qpc);
+ opt_param_mask, qpc, uid);
break;
case MLX5_CMD_OP_INIT2RTR_QP:
if (MBOX_ALLOC(mbox, init2rtr_qp))
return -ENOMEM;
MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
- opt_param_mask, qpc);
+ opt_param_mask, qpc, uid);
break;
case MLX5_CMD_OP_RTR2RTS_QP:
if (MBOX_ALLOC(mbox, rtr2rts_qp))
return -ENOMEM;
MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
- opt_param_mask, qpc);
+ opt_param_mask, qpc, uid);
break;
case MLX5_CMD_OP_RTS2RTS_QP:
if (MBOX_ALLOC(mbox, rts2rts_qp))
return -ENOMEM;
MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
- opt_param_mask, qpc);
+ opt_param_mask, qpc, uid);
break;
case MLX5_CMD_OP_SQERR2RTS_QP:
if (MBOX_ALLOC(mbox, sqerr2rts_qp))
return -ENOMEM;
MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn,
- opt_param_mask, qpc);
+ opt_param_mask, qpc, uid);
break;
case MLX5_CMD_OP_INIT2INIT_QP:
if (MBOX_ALLOC(mbox, init2init_qp))
return -ENOMEM;
MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
- opt_param_mask, qpc);
+ opt_param_mask, qpc, uid);
break;
default:
mlx5_core_err(dev, "Unknown transition for modify QP: OP(0x%x) QPN(0x%x)\n",
@@ -456,7 +469,7 @@ int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
int err;
err = modify_qp_mbox_alloc(dev, opcode, qp->qpn,
- opt_param_mask, qpc, &mbox);
+ opt_param_mask, qpc, &mbox, qp->uid);
if (err)
return err;
@@ -531,6 +544,17 @@ int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
+static void destroy_rq_tracked(struct mlx5_core_dev *dev, u32 rqn, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(destroy_rq_out)] = {};
+
+ MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
+ MLX5_SET(destroy_rq_in, in, rqn, rqn);
+ MLX5_SET(destroy_rq_in, in, uid, uid);
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
struct mlx5_core_qp *rq)
{
@@ -541,6 +565,7 @@ int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
if (err)
return err;
+ rq->uid = MLX5_GET(create_rq_in, in, uid);
rq->qpn = rqn;
err = create_resource_common(dev, rq, MLX5_RES_RQ);
if (err)
@@ -549,7 +574,7 @@ int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
return 0;
err_destroy_rq:
- mlx5_core_destroy_rq(dev, rq->qpn);
+ destroy_rq_tracked(dev, rq->qpn, rq->uid);
return err;
}
@@ -559,10 +584,21 @@ void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
struct mlx5_core_qp *rq)
{
destroy_resource_common(dev, rq);
- mlx5_core_destroy_rq(dev, rq->qpn);
+ destroy_rq_tracked(dev, rq->qpn, rq->uid);
}
EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);
+static void destroy_sq_tracked(struct mlx5_core_dev *dev, u32 sqn, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(destroy_sq_out)] = {};
+
+ MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
+ MLX5_SET(destroy_sq_in, in, sqn, sqn);
+ MLX5_SET(destroy_sq_in, in, uid, uid);
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
struct mlx5_core_qp *sq)
{
@@ -573,6 +609,7 @@ int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
if (err)
return err;
+ sq->uid = MLX5_GET(create_sq_in, in, uid);
sq->qpn = sqn;
err = create_resource_common(dev, sq, MLX5_RES_SQ);
if (err)
@@ -581,7 +618,7 @@ int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
return 0;
err_destroy_sq:
- mlx5_core_destroy_sq(dev, sq->qpn);
+ destroy_sq_tracked(dev, sq->qpn, sq->uid);
return err;
}
@@ -591,7 +628,7 @@ void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
struct mlx5_core_qp *sq)
{
destroy_resource_common(dev, sq);
- mlx5_core_destroy_sq(dev, sq->qpn);
+ destroy_sq_tracked(dev, sq->qpn, sq->uid);
}
EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
index 7e20666ce5ae..6a6fc9be01e6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
@@ -166,6 +166,7 @@ static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
if (!create_in)
return -ENOMEM;
+ MLX5_SET(create_srq_in, create_in, uid, in->uid);
srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry);
pas = MLX5_ADDR_OF(create_srq_in, create_in, pas);
@@ -178,8 +179,10 @@ static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
sizeof(create_out));
kvfree(create_in);
- if (!err)
+ if (!err) {
srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);
+ srq->uid = in->uid;
+ }
return err;
}
@@ -193,6 +196,7 @@ static int destroy_srq_cmd(struct mlx5_core_dev *dev,
MLX5_SET(destroy_srq_in, srq_in, opcode,
MLX5_CMD_OP_DESTROY_SRQ);
MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn);
+ MLX5_SET(destroy_srq_in, srq_in, uid, srq->uid);
return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
srq_out, sizeof(srq_out));
@@ -208,6 +212,7 @@ static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
MLX5_SET(arm_rq_in, srq_in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ);
MLX5_SET(arm_rq_in, srq_in, srq_number, srq->srqn);
MLX5_SET(arm_rq_in, srq_in, lwm, lwm);
+ MLX5_SET(arm_rq_in, srq_in, uid, srq->uid);
return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
srq_out, sizeof(srq_out));
@@ -260,6 +265,7 @@ static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
if (!create_in)
return -ENOMEM;
+ MLX5_SET(create_xrc_srq_in, create_in, uid, in->uid);
xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in,
xrc_srq_context_entry);
pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);
@@ -277,6 +283,7 @@ static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
goto out;
srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn);
+ srq->uid = in->uid;
out:
kvfree(create_in);
return err;
@@ -291,6 +298,7 @@ static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode,
MLX5_CMD_OP_DESTROY_XRC_SRQ);
MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
+ MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, uid, srq->uid);
return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in),
xrcsrq_out, sizeof(xrcsrq_out));
@@ -306,6 +314,7 @@ static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod, MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm, lwm);
+ MLX5_SET(arm_xrc_srq_in, xrcsrq_in, uid, srq->uid);
return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in),
xrcsrq_out, sizeof(xrcsrq_out));
@@ -365,10 +374,13 @@ static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
+ MLX5_SET(create_rmp_in, create_in, uid, in->uid);
set_wq(wq, in);
memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);
err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn);
+ if (!err)
+ srq->uid = in->uid;
kvfree(create_in);
return err;
@@ -377,7 +389,13 @@ static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
static int destroy_rmp_cmd(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq)
{
- return mlx5_core_destroy_rmp(dev, srq->srqn);
+ u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)] = {};
+
+ MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
+ MLX5_SET(destroy_rmp_in, in, rmpn, srq->srqn);
+ MLX5_SET(destroy_rmp_in, in, uid, srq->uid);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
static int arm_rmp_cmd(struct mlx5_core_dev *dev,
@@ -400,6 +418,7 @@ static int arm_rmp_cmd(struct mlx5_core_dev *dev,
MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY);
MLX5_SET(modify_rmp_in, in, rmpn, srq->srqn);
+ MLX5_SET(modify_rmp_in, in, uid, srq->uid);
MLX5_SET(wq, wq, lwm, lwm);
MLX5_SET(rmp_bitmask, bitmask, lwm, 1);
MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
@@ -469,11 +488,14 @@ static int create_xrq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
MLX5_SET(xrqc, xrqc, user_index, in->user_index);
MLX5_SET(xrqc, xrqc, cqn, in->cqn);
MLX5_SET(create_xrq_in, create_in, opcode, MLX5_CMD_OP_CREATE_XRQ);
+ MLX5_SET(create_xrq_in, create_in, uid, in->uid);
err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
sizeof(create_out));
kvfree(create_in);
- if (!err)
+ if (!err) {
srq->srqn = MLX5_GET(create_xrq_out, create_out, xrqn);
+ srq->uid = in->uid;
+ }
return err;
}
@@ -485,6 +507,7 @@ static int destroy_xrq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
MLX5_SET(destroy_xrq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRQ);
MLX5_SET(destroy_xrq_in, in, xrqn, srq->srqn);
+ MLX5_SET(destroy_xrq_in, in, uid, srq->uid);
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
@@ -500,6 +523,7 @@ static int arm_xrq_cmd(struct mlx5_core_dev *dev,
MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_XRQ);
MLX5_SET(arm_rq_in, in, srq_number, srq->srqn);
MLX5_SET(arm_rq_in, in, lwm, lwm);
+ MLX5_SET(arm_rq_in, in, uid, srq->uid);
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index d2f76070ea7c..a1ee9a8a769e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -475,7 +475,8 @@ static void mlx5_hairpin_destroy_queues(struct mlx5_hairpin *hp)
for (i = 0; i < hp->num_channels; i++) {
mlx5_core_destroy_rq(hp->func_mdev, hp->rqn[i]);
- mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
+ if (!hp->peer_gone)
+ mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
}
}
@@ -567,6 +568,8 @@ static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
MLX5_RQC_STATE_RST, 0, 0);
/* unset peer SQs */
+ if (hp->peer_gone)
+ return;
for (i = 0; i < hp->num_channels; i++)
mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
MLX5_SQC_STATE_RST, 0, 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index 68e7f8df2a6d..2dcbf1ebfd6a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -39,11 +39,6 @@ u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
return (u32)wq->fbc.sz_m1 + 1;
}
-u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq)
-{
- return wq->fbc.frag_sz_m1 + 1;
-}
-
u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
{
return wq->fbc.sz_m1 + 1;
@@ -54,54 +49,37 @@ u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
return (u32)wq->fbc.sz_m1 + 1;
}
-static u32 mlx5_wq_cyc_get_byte_size(struct mlx5_wq_cyc *wq)
-{
- return mlx5_wq_cyc_get_size(wq) << wq->fbc.log_stride;
-}
-
-static u32 mlx5_wq_qp_get_byte_size(struct mlx5_wq_qp *wq)
-{
- return mlx5_wq_cyc_get_byte_size(&wq->rq) +
- mlx5_wq_cyc_get_byte_size(&wq->sq);
-}
-
-static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq)
+static u32 wq_get_byte_sz(u8 log_sz, u8 log_stride)
{
- return mlx5_cqwq_get_size(wq) << wq->fbc.log_stride;
-}
-
-static u32 mlx5_wq_ll_get_byte_size(struct mlx5_wq_ll *wq)
-{
- return mlx5_wq_ll_get_size(wq) << wq->fbc.log_stride;
+ return ((u32)1 << log_sz) << log_stride;
}
int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *wqc, struct mlx5_wq_cyc *wq,
struct mlx5_wq_ctrl *wq_ctrl)
{
+ u8 log_wq_stride = MLX5_GET(wq, wqc, log_wq_stride);
+ u8 log_wq_sz = MLX5_GET(wq, wqc, log_wq_sz);
struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
int err;
- mlx5_fill_fbc(MLX5_GET(wq, wqc, log_wq_stride),
- MLX5_GET(wq, wqc, log_wq_sz),
- fbc);
- wq->sz = wq->fbc.sz_m1 + 1;
-
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
return err;
}
- err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
+ wq->db = wq_ctrl->db.db;
+
+ err = mlx5_frag_buf_alloc_node(mdev, wq_get_byte_sz(log_wq_sz, log_wq_stride),
&wq_ctrl->buf, param->buf_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
goto err_db_free;
}
- fbc->frag_buf = wq_ctrl->buf;
- wq->db = wq_ctrl->db.db;
+ mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, fbc);
+ wq->sz = mlx5_wq_cyc_get_size(wq);
wq_ctrl->mdev = mdev;
@@ -113,46 +91,19 @@ err_db_free:
return err;
}
-static void mlx5_qp_set_frag_buf(struct mlx5_frag_buf *buf,
- struct mlx5_wq_qp *qp)
-{
- struct mlx5_frag_buf_ctrl *sq_fbc;
- struct mlx5_frag_buf *rqb, *sqb;
-
- rqb = &qp->rq.fbc.frag_buf;
- *rqb = *buf;
- rqb->size = mlx5_wq_cyc_get_byte_size(&qp->rq);
- rqb->npages = DIV_ROUND_UP(rqb->size, PAGE_SIZE);
-
- sq_fbc = &qp->sq.fbc;
- sqb = &sq_fbc->frag_buf;
- *sqb = *buf;
- sqb->size = mlx5_wq_cyc_get_byte_size(&qp->sq);
- sqb->npages = DIV_ROUND_UP(sqb->size, PAGE_SIZE);
- sqb->frags += rqb->npages; /* first part is for the rq */
- if (sq_fbc->strides_offset)
- sqb->frags--;
-}
-
int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *qpc, struct mlx5_wq_qp *wq,
struct mlx5_wq_ctrl *wq_ctrl)
{
- u16 sq_strides_offset;
- u32 rq_pg_remainder;
- int err;
+ u8 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride) + 4;
+ u8 log_rq_sz = MLX5_GET(qpc, qpc, log_rq_size);
+ u8 log_sq_stride = ilog2(MLX5_SEND_WQE_BB);
+ u8 log_sq_sz = MLX5_GET(qpc, qpc, log_sq_size);
- mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4,
- MLX5_GET(qpc, qpc, log_rq_size),
- &wq->rq.fbc);
+ u32 rq_byte_size;
+ int err;
- rq_pg_remainder = mlx5_wq_cyc_get_byte_size(&wq->rq) % PAGE_SIZE;
- sq_strides_offset = rq_pg_remainder / MLX5_SEND_WQE_BB;
- mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB),
- MLX5_GET(qpc, qpc, log_sq_size),
- sq_strides_offset,
- &wq->sq.fbc);
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
@@ -160,14 +111,32 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
return err;
}
- err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_qp_get_byte_size(wq),
+ err = mlx5_frag_buf_alloc_node(mdev,
+ wq_get_byte_sz(log_rq_sz, log_rq_stride) +
+ wq_get_byte_sz(log_sq_sz, log_sq_stride),
&wq_ctrl->buf, param->buf_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
goto err_db_free;
}
- mlx5_qp_set_frag_buf(&wq_ctrl->buf, wq);
+ mlx5_init_fbc(wq_ctrl->buf.frags, log_rq_stride, log_rq_sz, &wq->rq.fbc);
+
+ rq_byte_size = wq_get_byte_sz(log_rq_sz, log_rq_stride);
+
+ if (rq_byte_size < PAGE_SIZE) {
+ /* SQ starts within the same page of the RQ */
+ u16 sq_strides_offset = rq_byte_size / MLX5_SEND_WQE_BB;
+
+ mlx5_init_fbc_offset(wq_ctrl->buf.frags,
+ log_sq_stride, log_sq_sz, sq_strides_offset,
+ &wq->sq.fbc);
+ } else {
+ u16 rq_npages = rq_byte_size >> PAGE_SHIFT;
+
+ mlx5_init_fbc(wq_ctrl->buf.frags + rq_npages,
+ log_sq_stride, log_sq_sz, &wq->sq.fbc);
+ }
wq->rq.db = &wq_ctrl->db.db[MLX5_RCV_DBR];
wq->sq.db = &wq_ctrl->db.db[MLX5_SND_DBR];
@@ -186,17 +155,19 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *cqc, struct mlx5_cqwq *wq,
struct mlx5_wq_ctrl *wq_ctrl)
{
+ u8 log_wq_stride = MLX5_GET(cqc, cqc, cqe_sz) + 6;
+ u8 log_wq_sz = MLX5_GET(cqc, cqc, log_cq_size);
int err;
- mlx5_core_init_cq_frag_buf(&wq->fbc, cqc);
-
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
return err;
}
- err = mlx5_frag_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
+ wq->db = wq_ctrl->db.db;
+
+ err = mlx5_frag_buf_alloc_node(mdev, wq_get_byte_sz(log_wq_sz, log_wq_stride),
&wq_ctrl->buf,
param->buf_numa_node);
if (err) {
@@ -205,8 +176,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
goto err_db_free;
}
- wq->fbc.frag_buf = wq_ctrl->buf;
- wq->db = wq_ctrl->db.db;
+ mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, &wq->fbc);
wq_ctrl->mdev = mdev;
@@ -222,30 +192,29 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *wqc, struct mlx5_wq_ll *wq,
struct mlx5_wq_ctrl *wq_ctrl)
{
+ u8 log_wq_stride = MLX5_GET(wq, wqc, log_wq_stride);
+ u8 log_wq_sz = MLX5_GET(wq, wqc, log_wq_sz);
struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
struct mlx5_wqe_srq_next_seg *next_seg;
int err;
int i;
- mlx5_fill_fbc(MLX5_GET(wq, wqc, log_wq_stride),
- MLX5_GET(wq, wqc, log_wq_sz),
- fbc);
-
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
return err;
}
- err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq),
+ wq->db = wq_ctrl->db.db;
+
+ err = mlx5_frag_buf_alloc_node(mdev, wq_get_byte_sz(log_wq_sz, log_wq_stride),
&wq_ctrl->buf, param->buf_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
goto err_db_free;
}
- wq->fbc.frag_buf = wq_ctrl->buf;
- wq->db = wq_ctrl->db.db;
+ mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, fbc);
for (i = 0; i < fbc->sz_m1; i++) {
next_seg = mlx5_wq_ll_get_wqe(wq, i);
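
The QP WQ creation above now derives buffer sizes directly from the log parameters and decides where the SQ starts. A plain-C sketch of that decision with an illustrative worked value (not driver code): with log_rq_stride = 6 (64 B strides) and log_rq_size = 4, the RQ occupies 1024 bytes, smaller than a 4 KiB page, so the SQ shares the RQ's fragment at stride offset 1024 / 64 = 16; a page-sized or larger RQ would instead push the SQ to the fragment right after the RQ pages.

    #define EX_PAGE_SIZE    4096u
    #define EX_SEND_WQE_BB  64u             /* MLX5_SEND_WQE_BB */

    struct ex_sq_placement {
            unsigned int frag_index;        /* fragment the SQ starts in */
            unsigned int strides_offset;    /* SQ stride offset in that fragment */
    };

    static struct ex_sq_placement ex_place_sq(unsigned char log_rq_sz,
                                              unsigned char log_rq_stride)
    {
            unsigned int rq_bytes = (1u << log_rq_sz) << log_rq_stride;
            struct ex_sq_placement p = { 0, 0 };

            if (rq_bytes < EX_PAGE_SIZE)
                    p.strides_offset = rq_bytes / EX_SEND_WQE_BB;   /* same frag */
            else
                    p.frag_index = rq_bytes / EX_PAGE_SIZE;         /* rq_npages */
            return p;
    }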
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index 3a1a170bb2d7..b1293d153a58 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -80,7 +80,6 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *wqc, struct mlx5_wq_cyc *wq,
struct mlx5_wq_ctrl *wq_ctrl);
u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
-u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq);
int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *qpc, struct mlx5_wq_qp *wq,
@@ -140,11 +139,6 @@ static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
return ctr & wq->fbc.sz_m1;
}
-static inline u16 mlx5_wq_cyc_ctr2fragix(struct mlx5_wq_cyc *wq, u16 ctr)
-{
- return ctr & wq->fbc.frag_sz_m1;
-}
-
static inline u16 mlx5_wq_cyc_get_head(struct mlx5_wq_cyc *wq)
{
return mlx5_wq_cyc_ctr2ix(wq, wq->wqe_ctr);
@@ -160,6 +154,11 @@ static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix)
return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
}
+static inline u16 mlx5_wq_cyc_get_contig_wqebbs(struct mlx5_wq_cyc *wq, u16 ix)
+{
+ return mlx5_frag_buf_get_idx_last_contig_stride(&wq->fbc, ix) - ix + 1;
+}
+
static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
{
int equal = (cc1 == cc2);
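
The new mlx5_wq_cyc_get_contig_wqebbs() helper reports how many basic blocks remain before the current fragment ends. A hedged usage sketch (the caller name is illustrative; the real users live in the mlx5e data path, which is not part of this hunk):

    static bool ex_wqe_fits_contig(struct mlx5_wq_cyc *wq, u16 pc, u16 wqebbs)
    {
            u16 pi = mlx5_wq_cyc_ctr2ix(wq, pc);

            /* If this returns false, the producer would typically pad with
             * NOP WQEs up to the fragment edge before posting the real WQE. */
            return wqebbs <= mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
    }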
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 68fa44a41485..1f77e97e2d7a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -27,7 +27,8 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_acl_flex_keys.o \
spectrum1_mr_tcam.o spectrum2_mr_tcam.o \
spectrum_mr_tcam.o spectrum_mr.o \
- spectrum_qdisc.o spectrum_span.o
+ spectrum_qdisc.o spectrum_span.o \
+ spectrum_nve.o spectrum_nve_vxlan.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
mlxsw_spectrum-$(CONFIG_NET_DEVLINK) += spectrum_dpipe.o
obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 81533d7f395c..30f751e69698 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -943,8 +943,8 @@ static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink,
mlxsw_core->bus,
mlxsw_core->bus_priv, true,
devlink);
- if (err)
- mlxsw_core->reload_fail = true;
+ mlxsw_core->reload_fail = !!err;
+
return err;
}
@@ -1055,6 +1055,7 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
err_driver_init:
mlxsw_thermal_fini(mlxsw_core->thermal);
err_thermal_init:
+ mlxsw_hwmon_fini(mlxsw_core->hwmon);
err_hwmon_init:
if (!reload)
devlink_unregister(devlink);
@@ -1082,12 +1083,20 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
{
struct devlink *devlink = priv_to_devlink(mlxsw_core);
- if (mlxsw_core->reload_fail)
- goto reload_fail;
+ if (mlxsw_core->reload_fail) {
+ if (!reload)
+ /* Only the parts that were not de-initialized in the
+ * failed reload attempt need to be de-initialized.
+ */
+ goto reload_fail_deinit;
+ else
+ return;
+ }
if (mlxsw_core->driver->fini)
mlxsw_core->driver->fini(mlxsw_core);
mlxsw_thermal_fini(mlxsw_core->thermal);
+ mlxsw_hwmon_fini(mlxsw_core->hwmon);
if (!reload)
devlink_unregister(devlink);
mlxsw_emad_fini(mlxsw_core);
@@ -1096,9 +1105,12 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
if (!reload)
devlink_resources_unregister(devlink, NULL);
mlxsw_core->bus->fini(mlxsw_core->bus_priv);
- if (reload)
- return;
-reload_fail:
+
+ return;
+
+reload_fail_deinit:
+ devlink_unregister(devlink);
+ devlink_resources_unregister(devlink, NULL);
devlink_free(devlink);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 655ddd204ab2..c35be477856f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -359,6 +359,10 @@ static inline int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
return 0;
}
+static inline void mlxsw_hwmon_fini(struct mlxsw_hwmon *mlxsw_hwmon)
+{
+}
+
#endif
struct mlxsw_thermal;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
index f6cf2896d337..e04e8162aa14 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -303,8 +303,7 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
struct device *hwmon_dev;
int err;
- mlxsw_hwmon = devm_kzalloc(mlxsw_bus_info->dev, sizeof(*mlxsw_hwmon),
- GFP_KERNEL);
+ mlxsw_hwmon = kzalloc(sizeof(*mlxsw_hwmon), GFP_KERNEL);
if (!mlxsw_hwmon)
return -ENOMEM;
mlxsw_hwmon->core = mlxsw_core;
@@ -321,10 +320,9 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
mlxsw_hwmon->groups[0] = &mlxsw_hwmon->group;
mlxsw_hwmon->group.attrs = mlxsw_hwmon->attrs;
- hwmon_dev = devm_hwmon_device_register_with_groups(mlxsw_bus_info->dev,
- "mlxsw",
- mlxsw_hwmon,
- mlxsw_hwmon->groups);
+ hwmon_dev = hwmon_device_register_with_groups(mlxsw_bus_info->dev,
+ "mlxsw", mlxsw_hwmon,
+ mlxsw_hwmon->groups);
if (IS_ERR(hwmon_dev)) {
err = PTR_ERR(hwmon_dev);
goto err_hwmon_register;
@@ -337,5 +335,12 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
err_hwmon_register:
err_fans_init:
err_temp_init:
+ kfree(mlxsw_hwmon);
return err;
}
+
+void mlxsw_hwmon_fini(struct mlxsw_hwmon *mlxsw_hwmon)
+{
+ hwmon_device_unregister(mlxsw_hwmon->hwmon_dev);
+ kfree(mlxsw_hwmon);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 4d271fb3de3d..5890fdfd62c3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -718,14 +718,17 @@ static void mlxsw_pci_eq_tasklet(unsigned long data)
memset(&active_cqns, 0, sizeof(active_cqns));
while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {
- u8 event_type = mlxsw_pci_eqe_event_type_get(eqe);
- switch (event_type) {
- case MLXSW_PCI_EQE_EVENT_TYPE_CMD:
+ /* Command interface completion events are always received on
+ * queue MLXSW_PCI_EQ_ASYNC_NUM (EQ0) and completion events
+ * are mapped to queue MLXSW_PCI_EQ_COMP_NUM (EQ1).
+ */
+ switch (q->num) {
+ case MLXSW_PCI_EQ_ASYNC_NUM:
mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
q->u.eq.ev_cmd_count++;
break;
- case MLXSW_PCI_EQE_EVENT_TYPE_COMP:
+ case MLXSW_PCI_EQ_COMP_NUM:
cqn = mlxsw_pci_eqe_cqn_get(eqe);
set_bit(cqn, active_cqns);
cq_handle = true;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index 83f452b7ccbb..bb99f6d41fe0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -221,7 +221,7 @@ MLXSW_ITEM32(pci, eqe, event_type, 0x0C, 24, 8);
MLXSW_ITEM32(pci, eqe, event_sub_type, 0x0C, 16, 8);
/* pci_eqe_cqn
- * Completion Queue that triggeret this EQE.
+ * Completion Queue that triggered this EQE.
*/
MLXSW_ITEM32(pci, eqe, cqn, 0x0C, 8, 7);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index df81e0a1eb64..db3d2790aeec 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -295,6 +295,7 @@ enum mlxsw_reg_sfd_rec_type {
MLXSW_REG_SFD_REC_TYPE_UNICAST = 0x0,
MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG = 0x1,
MLXSW_REG_SFD_REC_TYPE_MULTICAST = 0x2,
+ MLXSW_REG_SFD_REC_TYPE_UNICAST_TUNNEL = 0xC,
};
/* reg_sfd_rec_type
@@ -525,6 +526,61 @@ mlxsw_reg_sfd_mc_pack(char *payload, int rec_index,
mlxsw_reg_sfd_mc_mid_set(payload, rec_index, mid);
}
+/* reg_sfd_uc_tunnel_uip_msb
+ * When protocol is IPv4, the most significant byte of the underlay IPv4
+ * destination IP.
+ * When protocol is IPv6, reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_tunnel_uip_msb, MLXSW_REG_SFD_BASE_LEN, 24,
+ 8, MLXSW_REG_SFD_REC_LEN, 0x08, false);
+
+/* reg_sfd_uc_tunnel_fid
+ * Filtering ID.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_tunnel_fid, MLXSW_REG_SFD_BASE_LEN, 0, 16,
+ MLXSW_REG_SFD_REC_LEN, 0x08, false);
+
+enum mlxsw_reg_sfd_uc_tunnel_protocol {
+ MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4,
+ MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV6,
+};
+
+/* reg_sfd_uc_tunnel_protocol
+ * IP protocol.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_tunnel_protocol, MLXSW_REG_SFD_BASE_LEN, 27,
+ 1, MLXSW_REG_SFD_REC_LEN, 0x0C, false);
+
+/* reg_sfd_uc_tunnel_uip_lsb
+ * When protocol is IPv4, the least significant bytes of the underlay
+ * IPv4 destination IP.
+ * When protocol is IPv6, pointer to the underlay IPv6 destination IP
+ * which is configured by RIPS.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_tunnel_uip_lsb, MLXSW_REG_SFD_BASE_LEN, 0,
+ 24, MLXSW_REG_SFD_REC_LEN, 0x0C, false);
+
+static inline void
+mlxsw_reg_sfd_uc_tunnel_pack(char *payload, int rec_index,
+ enum mlxsw_reg_sfd_rec_policy policy,
+ const char *mac, u16 fid,
+ enum mlxsw_reg_sfd_rec_action action, u32 uip,
+ enum mlxsw_reg_sfd_uc_tunnel_protocol proto)
+{
+ mlxsw_reg_sfd_rec_pack(payload, rec_index,
+ MLXSW_REG_SFD_REC_TYPE_UNICAST_TUNNEL, mac,
+ action);
+ mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy);
+ mlxsw_reg_sfd_uc_tunnel_uip_msb_set(payload, rec_index, uip >> 24);
+ mlxsw_reg_sfd_uc_tunnel_uip_lsb_set(payload, rec_index, uip);
+ mlxsw_reg_sfd_uc_tunnel_fid_set(payload, rec_index, fid);
+ mlxsw_reg_sfd_uc_tunnel_protocol_set(payload, rec_index, proto);
+}
+
/* SFN - Switch FDB Notification Register
* -------------------------------------------
* The switch provides notifications on newly learned FDB entries and
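
A hedged usage sketch for the new record type: program a unicast-tunnel FDB entry for a MAC/FID pair pointing at an underlay IPv4 VTEP. Everything outside this hunk (mlxsw_reg_sfd_pack(), the op/policy/action enums, MLXSW_REG_SFD_LEN and mlxsw_reg_write()) is assumed from the existing SFD register code.

    static int ex_fdb_uc_tunnel_write(struct mlxsw_core *core, const char *mac,
                                      u16 fid, u32 uip)
    {
            char *sfd_pl;
            int err;

            sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
            if (!sfd_pl)
                    return -ENOMEM;

            mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_WRITE_EDIT, 0);
            mlxsw_reg_sfd_uc_tunnel_pack(sfd_pl, 0,
                                         MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY,
                                         mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
                                         uip,
                                         MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4);
            err = mlxsw_reg_write(core, MLXSW_REG(sfd), sfd_pl);
            kfree(sfd_pl);
            return err;
    }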
@@ -1069,6 +1125,8 @@ enum mlxsw_reg_sfdf_flush_type {
MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID,
MLXSW_REG_SFDF_FLUSH_PER_LAG,
MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID,
+ MLXSW_REG_SFDF_FLUSH_PER_NVE,
+ MLXSW_REG_SFDF_FLUSH_PER_NVE_AND_FID,
};
/* reg_sfdf_flush_type
@@ -1079,6 +1137,10 @@ enum mlxsw_reg_sfdf_flush_type {
* 3 - All FID dynamic entries pointing to port are flushed.
* 4 - All dynamic entries pointing to LAG are flushed.
* 5 - All FID dynamic entries pointing to LAG are flushed.
+ * 6 - All entries of type "Unicast Tunnel" or "Multicast Tunnel" are
+ * flushed.
+ * 7 - All entries of type "Unicast Tunnel" or "Multicast Tunnel" are
+ * flushed, per FID.
* Access: RW
*/
MLXSW_ITEM32(reg, sfdf, flush_type, 0x04, 28, 4);
@@ -1315,12 +1377,19 @@ MLXSW_ITEM32(reg, slcr, type, 0x00, 0, 4);
*/
MLXSW_ITEM32(reg, slcr, lag_hash, 0x04, 0, 20);
-static inline void mlxsw_reg_slcr_pack(char *payload, u16 lag_hash)
+/* reg_slcr_seed
+ * LAG seed value. The seed is the same for all ports.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, slcr, seed, 0x08, 0, 32);
+
+static inline void mlxsw_reg_slcr_pack(char *payload, u16 lag_hash, u32 seed)
{
MLXSW_REG_ZERO(slcr, payload);
mlxsw_reg_slcr_pp_set(payload, MLXSW_REG_SLCR_PP_GLOBAL);
mlxsw_reg_slcr_type_set(payload, MLXSW_REG_SLCR_TYPE_CRC);
mlxsw_reg_slcr_lag_hash_set(payload, lag_hash);
+ mlxsw_reg_slcr_seed_set(payload, seed);
}
/* SLCOR - Switch LAG Collector Register
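
A hedged sketch of a caller for the extended pack helper (the real caller is in the spectrum driver and is not shown here; the hash-field constants and mlxsw_reg_write() are assumed from the existing code). Deriving the seed from the base MAC keeps the LAG hash stable per device while differing across devices:

    static int ex_lag_hash_init(struct mlxsw_core *core, const u8 *base_mac)
    {
            char slcr_pl[MLXSW_REG_SLCR_LEN];
            u32 seed = jhash(base_mac, ETH_ALEN, 0);

            mlxsw_reg_slcr_pack(slcr_pl,
                                MLXSW_REG_SLCR_LAG_HASH_SMAC |
                                MLXSW_REG_SLCR_LAG_HASH_DMAC,
                                seed);
            return mlxsw_reg_write(core, MLXSW_REG(slcr), slcr_pl);
    }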
@@ -3215,7 +3284,7 @@ static inline void mlxsw_reg_qtct_pack(char *payload, u8 local_port,
* Configures the ETS elements.
*/
#define MLXSW_REG_QEEC_ID 0x400D
-#define MLXSW_REG_QEEC_LEN 0x1C
+#define MLXSW_REG_QEEC_LEN 0x20
MLXSW_REG_DEFINE(qeec, MLXSW_REG_QEEC_ID, MLXSW_REG_QEEC_LEN);
@@ -3257,6 +3326,15 @@ MLXSW_ITEM32(reg, qeec, element_index, 0x04, 0, 8);
*/
MLXSW_ITEM32(reg, qeec, next_element_index, 0x08, 0, 8);
+/* reg_qeec_mise
+ * Min shaper configuration enable. Enables configuration of the min
+ * shaper on this ETS element
+ * 0 - Disable
+ * 1 - Enable
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, mise, 0x0C, 31, 1);
+
enum {
MLXSW_REG_QEEC_BYTES_MODE,
MLXSW_REG_QEEC_PACKETS_MODE,
@@ -3273,6 +3351,17 @@ enum {
*/
MLXSW_ITEM32(reg, qeec, pb, 0x0C, 28, 1);
+/* The smallest permitted min shaper rate. */
+#define MLXSW_REG_QEEC_MIS_MIN 200000 /* Kbps */
+
+/* reg_qeec_min_shaper_rate
+ * Min shaper information rate.
+ * For CPU port, can only be configured for port hierarchy.
+ * When in bytes mode, value is specified in units of 1000bps.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, min_shaper_rate, 0x0C, 0, 28);
+
/* reg_qeec_mase
* Max shaper configuration enable. Enables configuration of the max
* shaper on this ETS element.
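
A minimal sketch of enabling the new min shaper fields on an already-packed QEEC payload; the packing of the local port and ETS element by the existing helpers is assumed and not shown:

    static void ex_enable_min_shaper(char *qeec_pl)
    {
            /* qeec_pl is assumed to have been packed for the desired port and
             * ETS element by the existing QEEC helpers. */
            mlxsw_reg_qeec_mise_set(qeec_pl, true);
            /* Lowest permitted rate: 200000 Kbps per the new define. */
            mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, MLXSW_REG_QEEC_MIS_MIN);
    }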
@@ -8279,6 +8368,508 @@ static inline void mlxsw_reg_mgpc_pack(char *payload, u32 counter_index,
mlxsw_reg_mgpc_opcode_set(payload, opcode);
}
+/* MPRS - Monitoring Parsing State Register
+ * ----------------------------------------
+ * The MPRS register is used for setting up the parsing for hash,
+ * policy-engine and routing.
+ */
+#define MLXSW_REG_MPRS_ID 0x9083
+#define MLXSW_REG_MPRS_LEN 0x14
+
+MLXSW_REG_DEFINE(mprs, MLXSW_REG_MPRS_ID, MLXSW_REG_MPRS_LEN);
+
+/* reg_mprs_parsing_depth
+ * Minimum parsing depth.
+ * Need to enlarge parsing depth according to L3, MPLS, tunnels, ACL
+ * rules, traps, hash, etc. Default is 96 bytes. Reserved when SwitchX-2.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mprs, parsing_depth, 0x00, 0, 16);
+
+/* reg_mprs_parsing_en
+ * Parsing enable.
+ * Bit 0 - Enable parsing of NVE of types VxLAN, VxLAN-GPE, GENEVE and
+ * NVGRE. Default is enabled. Reserved when SwitchX-2.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mprs, parsing_en, 0x04, 0, 16);
+
+/* reg_mprs_vxlan_udp_dport
+ * VxLAN UDP destination port.
+ * Used for identifying VxLAN packets and for dport field in
+ * encapsulation. Default is 4789.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mprs, vxlan_udp_dport, 0x10, 0, 16);
+
+static inline void mlxsw_reg_mprs_pack(char *payload, u16 parsing_depth,
+ u16 vxlan_udp_dport)
+{
+ MLXSW_REG_ZERO(mprs, payload);
+ mlxsw_reg_mprs_parsing_depth_set(payload, parsing_depth);
+ mlxsw_reg_mprs_parsing_en_set(payload, true);
+ mlxsw_reg_mprs_vxlan_udp_dport_set(payload, vxlan_udp_dport);
+}
+
+/* TNGCR - Tunneling NVE General Configuration Register
+ * ----------------------------------------------------
+ * The TNGCR register is used for setting up the NVE Tunneling configuration.
+ */
+#define MLXSW_REG_TNGCR_ID 0xA001
+#define MLXSW_REG_TNGCR_LEN 0x44
+
+MLXSW_REG_DEFINE(tngcr, MLXSW_REG_TNGCR_ID, MLXSW_REG_TNGCR_LEN);
+
+enum mlxsw_reg_tngcr_type {
+ MLXSW_REG_TNGCR_TYPE_VXLAN,
+ MLXSW_REG_TNGCR_TYPE_VXLAN_GPE,
+ MLXSW_REG_TNGCR_TYPE_GENEVE,
+ MLXSW_REG_TNGCR_TYPE_NVGRE,
+};
+
+/* reg_tngcr_type
+ * Tunnel type for encapsulation and decapsulation. The types are mutually
+ * exclusive.
+ * Note: For Spectrum the NVE parsing must be enabled in MPRS.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, type, 0x00, 0, 4);
+
+/* reg_tngcr_nve_valid
+ * The VTEP is valid. Allows adding FDB entries for tunnel encapsulation.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_valid, 0x04, 31, 1);
+
+/* reg_tngcr_nve_ttl_uc
+ * The TTL for NVE tunnel encapsulation underlay unicast packets.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_ttl_uc, 0x04, 0, 8);
+
+/* reg_tngcr_nve_ttl_mc
+ * The TTL for NVE tunnel encapsulation underlay multicast packets.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_ttl_mc, 0x08, 0, 8);
+
+enum {
+ /* Do not copy flow label. Calculate flow label using nve_flh. */
+ MLXSW_REG_TNGCR_FL_NO_COPY,
+ /* Copy flow label from inner packet if packet is IPv6 and
+ * encapsulation is by IPv6. Otherwise, calculate flow label using
+ * nve_flh.
+ */
+ MLXSW_REG_TNGCR_FL_COPY,
+};
+
+/* reg_tngcr_nve_flc
+ * For NVE tunnel encapsulation: Flow label copy from inner packet.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_flc, 0x0C, 25, 1);
+
+enum {
+ /* Flow label is static. In Spectrum this means '0'. Spectrum-2
+ * uses {nve_fl_prefix, nve_fl_suffix}.
+ */
+ MLXSW_REG_TNGCR_FL_NO_HASH,
+ /* 8 LSBs of the flow label are calculated from ECMP hash of the
+ * inner packet. 12 MSBs are configured by nve_fl_prefix.
+ */
+ MLXSW_REG_TNGCR_FL_HASH,
+};
+
+/* reg_tngcr_nve_flh
+ * NVE flow label hash.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_flh, 0x0C, 24, 1);
+
+/* reg_tngcr_nve_fl_prefix
+ * NVE flow label prefix. Constant 12 MSBs of the flow label.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_fl_prefix, 0x0C, 8, 12);
+
+/* reg_tngcr_nve_fl_suffix
+ * NVE flow label suffix. Constant 8 LSBs of the flow label.
+ * Reserved when nve_flh=1 and for Spectrum.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_fl_suffix, 0x0C, 0, 8);
+
+enum {
+ /* Source UDP port is fixed (default '0') */
+ MLXSW_REG_TNGCR_UDP_SPORT_NO_HASH,
+ /* Source UDP port is calculated based on hash */
+ MLXSW_REG_TNGCR_UDP_SPORT_HASH,
+};
+
+/* reg_tngcr_nve_udp_sport_type
+ * NVE UDP source port type.
+ * Spectrum uses LAG hash (SLCRv2). Spectrum-2 uses ECMP hash (RECRv2).
+ * When the source UDP port is calculated based on hash, then the 8 LSBs
+ * are calculated from the hash and the 8 MSBs are configured by
+ * nve_udp_sport_prefix.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_udp_sport_type, 0x10, 24, 1);
+
+/* reg_tngcr_nve_udp_sport_prefix
+ * NVE UDP source port prefix. Constant 8 MSBs of the UDP source port.
+ * Reserved when NVE type is NVGRE.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_udp_sport_prefix, 0x10, 8, 8);
+
+/* reg_tngcr_nve_group_size_mc
+ * The number of sequential linked lists of MC entries. The first linked
+ * list is configured by SFD.underlay_mc_ptr.
+ * Valid values: 1, 2, 4, 8, 16, 32, 64
+ * The linked lists are configured by TNUMT.
+ * The hash is set by LAG hash.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_group_size_mc, 0x18, 0, 8);
+
+/* reg_tngcr_nve_group_size_flood
+ * The number of sequential linked lists of flooding entries. The first
+ * linked list is configured by SFMR.nve_tunnel_flood_ptr.
+ * Valid values: 1, 2, 4, 8, 16, 32, 64
+ * The linked lists are configured by TNUMT.
+ * The hash is set by LAG hash.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_group_size_flood, 0x1C, 0, 8);
+
+/* reg_tngcr_learn_enable
+ * During decapsulation, whether to learn from NVE port.
+ * Reserved when Spectrum-2. See TNPC.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, learn_enable, 0x20, 31, 1);
+
+/* reg_tngcr_underlay_virtual_router
+ * Underlay virtual router.
+ * Reserved when Spectrum-2.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, underlay_virtual_router, 0x20, 0, 16);
+
+/* reg_tngcr_underlay_rif
+ * Underlay ingress router interface. RIF type should be loopback generic.
+ * Reserved when Spectrum.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, underlay_rif, 0x24, 0, 16);
+
+/* reg_tngcr_usipv4
+ * Underlay source IPv4 address of the NVE.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, usipv4, 0x28, 0, 32);
+
+/* reg_tngcr_usipv6
+ * Underlay source IPv6 address of the NVE. For Spectrum, must not be
+ * modified under traffic of NVE tunneling encapsulation.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, tngcr, usipv6, 0x30, 16);
+
+static inline void mlxsw_reg_tngcr_pack(char *payload,
+ enum mlxsw_reg_tngcr_type type,
+ bool valid, u8 ttl)
+{
+ MLXSW_REG_ZERO(tngcr, payload);
+ mlxsw_reg_tngcr_type_set(payload, type);
+ mlxsw_reg_tngcr_nve_valid_set(payload, valid);
+ mlxsw_reg_tngcr_nve_ttl_uc_set(payload, ttl);
+ mlxsw_reg_tngcr_nve_ttl_mc_set(payload, ttl);
+ mlxsw_reg_tngcr_nve_flc_set(payload, MLXSW_REG_TNGCR_FL_NO_COPY);
+ mlxsw_reg_tngcr_nve_flh_set(payload, 0);
+ mlxsw_reg_tngcr_nve_udp_sport_type_set(payload,
+ MLXSW_REG_TNGCR_UDP_SPORT_HASH);
+ mlxsw_reg_tngcr_nve_udp_sport_prefix_set(payload, 0);
+ mlxsw_reg_tngcr_nve_group_size_mc_set(payload, 1);
+ mlxsw_reg_tngcr_nve_group_size_flood_set(payload, 1);
+}
+
+/* TNUMT - Tunneling NVE Underlay Multicast Table Register
+ * -------------------------------------------------------
+ * The TNUMT register is for building the underlay MC table. It is used
+ * for MC, flooding and BC traffic into the NVE tunnel.
+ */
+#define MLXSW_REG_TNUMT_ID 0xA003
+#define MLXSW_REG_TNUMT_LEN 0x20
+
+MLXSW_REG_DEFINE(tnumt, MLXSW_REG_TNUMT_ID, MLXSW_REG_TNUMT_LEN);
+
+enum mlxsw_reg_tnumt_record_type {
+ MLXSW_REG_TNUMT_RECORD_TYPE_IPV4,
+ MLXSW_REG_TNUMT_RECORD_TYPE_IPV6,
+ MLXSW_REG_TNUMT_RECORD_TYPE_LABEL,
+};
+
+/* reg_tnumt_record_type
+ * Record type.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tnumt, record_type, 0x00, 28, 4);
+
+enum mlxsw_reg_tnumt_tunnel_port {
+ MLXSW_REG_TNUMT_TUNNEL_PORT_NVE,
+ MLXSW_REG_TNUMT_TUNNEL_PORT_VPLS,
+ MLXSW_REG_TNUMT_TUNNEL_FLEX_TUNNEL0,
+ MLXSW_REG_TNUMT_TUNNEL_FLEX_TUNNEL1,
+};
+
+/* reg_tnumt_tunnel_port
+ * Tunnel port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tnumt, tunnel_port, 0x00, 24, 4);
+
+/* reg_tnumt_underlay_mc_ptr
+ * Index to the underlay multicast table.
+ * For Spectrum the index is to the KVD linear.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, tnumt, underlay_mc_ptr, 0x00, 0, 24);
+
+/* reg_tnumt_vnext
+ * The next_underlay_mc_ptr is valid.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tnumt, vnext, 0x04, 31, 1);
+
+/* reg_tnumt_next_underlay_mc_ptr
+ * The next index to the underlay multicast table.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tnumt, next_underlay_mc_ptr, 0x04, 0, 24);
+
+/* reg_tnumt_record_size
+ * Number of IP addresses in the record.
+ * Range is 1..cap_max_nve_mc_entries_ipv{4,6}
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tnumt, record_size, 0x08, 0, 3);
+
+/* reg_tnumt_udip
+ * The underlay IPv4 addresses. udip[i] is reserved if i >= size.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, tnumt, udip, 0x0C, 0, 32, 0x04, 0x00, false);
+
+/* reg_tnumt_udip_ptr
+ * The pointer to the underlay IPv6 addresses. udip_ptr[i] is reserved if
+ * i >= size. The IPv6 addresses are configured by RIPS.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, tnumt, udip_ptr, 0x0C, 0, 24, 0x04, 0x00, false);
+
+static inline void mlxsw_reg_tnumt_pack(char *payload,
+ enum mlxsw_reg_tnumt_record_type type,
+ enum mlxsw_reg_tnumt_tunnel_port tport,
+ u32 underlay_mc_ptr, bool vnext,
+ u32 next_underlay_mc_ptr,
+ u8 record_size)
+{
+ MLXSW_REG_ZERO(tnumt, payload);
+ mlxsw_reg_tnumt_record_type_set(payload, type);
+ mlxsw_reg_tnumt_tunnel_port_set(payload, tport);
+ mlxsw_reg_tnumt_underlay_mc_ptr_set(payload, underlay_mc_ptr);
+ mlxsw_reg_tnumt_vnext_set(payload, vnext);
+ mlxsw_reg_tnumt_next_underlay_mc_ptr_set(payload, next_underlay_mc_ptr);
+ mlxsw_reg_tnumt_record_size_set(payload, record_size);
+}
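+
+/* A minimal TNUMT usage sketch (illustrative only): writing a single IPv4
+ * record at KVD linear index 'kvdl_index' holding one underlay destination
+ * IP ('udip', big endian), with no next record chained:
+ *
+ *	char tnumt_pl[MLXSW_REG_TNUMT_LEN];
+ *	int err;
+ *
+ *	mlxsw_reg_tnumt_pack(tnumt_pl, MLXSW_REG_TNUMT_RECORD_TYPE_IPV4,
+ *			     MLXSW_REG_TNUMT_TUNNEL_PORT_NVE, kvdl_index,
+ *			     false, 0, 1);
+ *	mlxsw_reg_tnumt_udip_set(tnumt_pl, 0, be32_to_cpu(udip));
+ *	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnumt), tnumt_pl);
+ */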
+
+/* TNQCR - Tunneling NVE QoS Configuration Register
+ * ------------------------------------------------
+ * The TNQCR register configures how the QoS fields are set on encapsulation
+ * into the underlay network.
+ */
+#define MLXSW_REG_TNQCR_ID 0xA010
+#define MLXSW_REG_TNQCR_LEN 0x0C
+
+MLXSW_REG_DEFINE(tnqcr, MLXSW_REG_TNQCR_ID, MLXSW_REG_TNQCR_LEN);
+
+/* reg_tnqcr_enc_set_dscp
+ * For encapsulation: How to set DSCP field:
+ * 0 - Copy the DSCP from the overlay (inner) IP header to the underlay
+ * (outer) IP header. If there is no IP header, use TNQDR.dscp
+ * 1 - Set the DSCP field as TNQDR.dscp
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tnqcr, enc_set_dscp, 0x04, 28, 1);
+
+static inline void mlxsw_reg_tnqcr_pack(char *payload)
+{
+ MLXSW_REG_ZERO(tnqcr, payload);
+ mlxsw_reg_tnqcr_enc_set_dscp_set(payload, 0);
+}
+
+/* TNQDR - Tunneling NVE QoS Default Register
+ * ------------------------------------------
+ * The TNQDR register configures the default QoS settings for NVE
+ * encapsulation.
+ */
+#define MLXSW_REG_TNQDR_ID 0xA011
+#define MLXSW_REG_TNQDR_LEN 0x08
+
+MLXSW_REG_DEFINE(tnqdr, MLXSW_REG_TNQDR_ID, MLXSW_REG_TNQDR_LEN);
+
+/* reg_tnqdr_local_port
+ * Local port number (receive port). CPU port is supported.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, tnqdr, local_port, 0x00, 16, 8);
+
+/* reg_tnqdr_dscp
+ * For encapsulation, the default DSCP.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tnqdr, dscp, 0x04, 0, 6);
+
+static inline void mlxsw_reg_tnqdr_pack(char *payload, u8 local_port)
+{
+ MLXSW_REG_ZERO(tnqdr, payload);
+ mlxsw_reg_tnqdr_local_port_set(payload, local_port);
+ mlxsw_reg_tnqdr_dscp_set(payload, 0);
+}
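+
+/* A minimal TNQDR usage sketch (illustrative only): programming a default
+ * encapsulation DSCP of 0 for a given port:
+ *
+ *	char tnqdr_pl[MLXSW_REG_TNQDR_LEN];
+ *
+ *	mlxsw_reg_tnqdr_pack(tnqdr_pl, mlxsw_sp_port->local_port);
+ *	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnqdr), tnqdr_pl);
+ */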
+
+/* TNEEM - Tunneling NVE Encapsulation ECN Mapping Register
+ * --------------------------------------------------------
+ * The TNEEM register maps the ECN of the overlay IP header, as seen at the
+ * ingress to the encapsulation, to the ECN of the underlay network.
+ */
+#define MLXSW_REG_TNEEM_ID 0xA012
+#define MLXSW_REG_TNEEM_LEN 0x0C
+
+MLXSW_REG_DEFINE(tneem, MLXSW_REG_TNEEM_ID, MLXSW_REG_TNEEM_LEN);
+
+/* reg_tneem_overlay_ecn
+ * ECN of the IP header in the overlay network.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, tneem, overlay_ecn, 0x04, 24, 2);
+
+/* reg_tneem_underlay_ecn
+ * ECN of the IP header in the underlay network.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tneem, underlay_ecn, 0x04, 16, 2);
+
+static inline void mlxsw_reg_tneem_pack(char *payload, u8 overlay_ecn,
+ u8 underlay_ecn)
+{
+ MLXSW_REG_ZERO(tneem, payload);
+ mlxsw_reg_tneem_overlay_ecn_set(payload, overlay_ecn);
+ mlxsw_reg_tneem_underlay_ecn_set(payload, underlay_ecn);
+}
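+
+/* A minimal TNEEM usage sketch (illustrative only): RFC 6040 encapsulation
+ * copies the inner ECN field to the outer header, which corresponds to an
+ * identity mapping in TNEEM:
+ *
+ *	for (ecn = 0; ecn < 4; ecn++) {
+ *		char tneem_pl[MLXSW_REG_TNEEM_LEN];
+ *
+ *		mlxsw_reg_tneem_pack(tneem_pl, ecn, ecn);
+ *		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tneem),
+ *				      tneem_pl);
+ *		if (err)
+ *			return err;
+ *	}
+ */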
+
+/* TNDEM - Tunneling NVE Decapsulation ECN Mapping Register
+ * --------------------------------------------------------
+ * The TNDEM register configures the actions performed during
+ * decapsulation.
+ */
+#define MLXSW_REG_TNDEM_ID 0xA013
+#define MLXSW_REG_TNDEM_LEN 0x0C
+
+MLXSW_REG_DEFINE(tndem, MLXSW_REG_TNDEM_ID, MLXSW_REG_TNDEM_LEN);
+
+/* reg_tndem_underlay_ecn
+ * ECN field of the IP header in the underlay network.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, tndem, underlay_ecn, 0x04, 24, 2);
+
+/* reg_tndem_overlay_ecn
+ * ECN field of the IP header in the overlay network.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, tndem, overlay_ecn, 0x04, 16, 2);
+
+/* reg_tndem_eip_ecn
+ * Egress IP ECN. ECN field of the IP header of the packet that exits the
+ * decapsulation.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tndem, eip_ecn, 0x04, 8, 2);
+
+/* reg_tndem_trap_en
+ * Trap enable:
+ * 0 - No trap due to decap ECN
+ * 1 - Trap enable with trap_id
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tndem, trap_en, 0x08, 28, 4);
+
+/* reg_tndem_trap_id
+ * Trap ID. Either DECAP_ECN0 or DECAP_ECN1.
+ * Reserved when trap_en is '0'.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tndem, trap_id, 0x08, 0, 9);
+
+static inline void mlxsw_reg_tndem_pack(char *payload, u8 underlay_ecn,
+ u8 overlay_ecn, u8 ecn, bool trap_en,
+ u16 trap_id)
+{
+ MLXSW_REG_ZERO(tndem, payload);
+ mlxsw_reg_tndem_underlay_ecn_set(payload, underlay_ecn);
+ mlxsw_reg_tndem_overlay_ecn_set(payload, overlay_ecn);
+ mlxsw_reg_tndem_eip_ecn_set(payload, ecn);
+ mlxsw_reg_tndem_trap_en_set(payload, trap_en);
+ mlxsw_reg_tndem_trap_id_set(payload, trap_id);
+}
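+
+/* A minimal TNDEM usage sketch (illustrative only): one TNDEM entry maps a
+ * single (underlay ECN, overlay ECN) pair. A full RFC 6040 decapsulation
+ * table could be derived with __INET_ECN_decapsulate() from
+ * include/net/inet_ecn.h, trapping the invalid combinations:
+ *
+ *	bool set_ce = false;
+ *	bool trap_en;
+ *	u8 new_ecn;
+ *
+ *	trap_en = !!__INET_ECN_decapsulate(underlay_ecn, overlay_ecn, &set_ce);
+ *	new_ecn = set_ce ? INET_ECN_CE : overlay_ecn;
+ *	mlxsw_reg_tndem_pack(tndem_pl, underlay_ecn, overlay_ecn, new_ecn,
+ *			     trap_en, trap_en ? MLXSW_TRAP_ID_DECAP_ECN0 : 0);
+ *	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tndem), tndem_pl);
+ */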
+
+/* TNPC - Tunnel Port Configuration Register
+ * -----------------------------------------
+ * The TNPC register is used for tunnel port configuration.
+ * Reserved when Spectrum.
+ */
+#define MLXSW_REG_TNPC_ID 0xA020
+#define MLXSW_REG_TNPC_LEN 0x18
+
+MLXSW_REG_DEFINE(tnpc, MLXSW_REG_TNPC_ID, MLXSW_REG_TNPC_LEN);
+
+enum mlxsw_reg_tnpc_tunnel_port {
+ MLXSW_REG_TNPC_TUNNEL_PORT_NVE,
+ MLXSW_REG_TNPC_TUNNEL_PORT_VPLS,
+ MLXSW_REG_TNPC_TUNNEL_FLEX_TUNNEL0,
+ MLXSW_REG_TNPC_TUNNEL_FLEX_TUNNEL1,
+};
+
+/* reg_tnpc_tunnel_port
+ * Tunnel port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, tnpc, tunnel_port, 0x00, 0, 4);
+
+/* reg_tnpc_learn_enable_v6
+ * During IPv6 underlay decapsulation, whether to learn from tunnel port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tnpc, learn_enable_v6, 0x04, 1, 1);
+
+/* reg_tnpc_learn_enable_v4
+ * During IPv4 underlay decapsulation, whether to learn from tunnel port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tnpc, learn_enable_v4, 0x04, 0, 1);
+
+static inline void mlxsw_reg_tnpc_pack(char *payload,
+ enum mlxsw_reg_tnpc_tunnel_port tport,
+ bool learn_enable)
+{
+ MLXSW_REG_ZERO(tnpc, payload);
+ mlxsw_reg_tnpc_tunnel_port_set(payload, tport);
+ mlxsw_reg_tnpc_learn_enable_v4_set(payload, learn_enable);
+ mlxsw_reg_tnpc_learn_enable_v6_set(payload, learn_enable);
+}
+
/* TIGCR - Tunneling IPinIP General Configuration Register
* -------------------------------------------------------
* The TIGCR register is used for setting up the IPinIP Tunnel configuration.
@@ -8828,6 +9419,14 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(mcc),
MLXSW_REG(mcda),
MLXSW_REG(mgpc),
+ MLXSW_REG(mprs),
+ MLXSW_REG(tngcr),
+ MLXSW_REG(tnumt),
+ MLXSW_REG(tnqcr),
+ MLXSW_REG(tnqdr),
+ MLXSW_REG(tneem),
+ MLXSW_REG(tndem),
+ MLXSW_REG(tnpc),
MLXSW_REG(tigcr),
MLXSW_REG(sbpr),
MLXSW_REG(sbcm),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index 79a31de7c825..99b341539870 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -46,6 +46,8 @@ enum mlxsw_res_id {
MLXSW_RES_ID_MAX_RIFS,
MLXSW_RES_ID_MC_ERIF_LIST_ENTRIES,
MLXSW_RES_ID_MAX_LPM_TREES,
+ MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV4,
+ MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV6,
/* Internal resources.
* Determined by the SW, not queried from the HW.
@@ -96,6 +98,8 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_MAX_RIFS] = 0x2C02,
[MLXSW_RES_ID_MC_ERIF_LIST_ENTRIES] = 0x2C10,
[MLXSW_RES_ID_MAX_LPM_TREES] = 0x2C30,
+ [MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV4] = 0x2E02,
+ [MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV6] = 0x2E03,
};
struct mlxsw_res {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 88c33a8474eb..a2df12b79f8e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -21,6 +21,7 @@
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <linux/netlink.h>
+#include <linux/random.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
@@ -2739,6 +2740,21 @@ int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
+static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_reg_qeec_hr hr, u8 index,
+ u8 next_index, u32 minrate)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qeec_pl[MLXSW_REG_QEEC_LEN];
+
+ mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
+ next_index);
+ mlxsw_reg_qeec_mise_set(qeec_pl, true);
+ mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
+}
+
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
u8 switch_prio, u8 tclass)
{
@@ -2816,6 +2832,16 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
return err;
}
+ /* Configure the min shaper for multicast TCs. */
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HIERARCY_TC,
+ i + 8, i,
+ MLXSW_REG_QEEC_MIS_MIN);
+ if (err)
+ return err;
+ }
+
/* Map all priorities to traffic class 0. */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
@@ -2993,6 +3019,13 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_port_qdiscs_init;
}
+ err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_nve_init;
+ }
+
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
if (IS_ERR(mlxsw_sp_port_vlan)) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
@@ -3021,6 +3054,8 @@ err_register_netdev:
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
err_port_vlan_get:
+ mlxsw_sp_port_nve_fini(mlxsw_sp_port);
+err_port_nve_init:
mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
mlxsw_sp_port_fids_fini(mlxsw_sp_port);
@@ -3060,6 +3095,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
+ mlxsw_sp_port_nve_fini(mlxsw_sp_port);
mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
mlxsw_sp_port_fids_fini(mlxsw_sp_port);
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
@@ -3469,6 +3505,7 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
+ MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
/* PKT Sample trap */
@@ -3482,6 +3519,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false),
MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
MLXSW_SP_RXL_MR_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
+ /* NVE traps */
+ MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false),
};
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
@@ -3666,8 +3705,10 @@ static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
char slcr_pl[MLXSW_REG_SLCR_LEN];
+ u32 seed;
int err;
+ get_random_bytes(&seed, sizeof(seed));
mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
MLXSW_REG_SLCR_LAG_HASH_DMAC |
MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
@@ -3676,7 +3717,7 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
MLXSW_REG_SLCR_LAG_HASH_DIP |
MLXSW_REG_SLCR_LAG_HASH_SPORT |
MLXSW_REG_SLCR_LAG_HASH_DPORT |
- MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
+ MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
if (err)
return err;
@@ -3789,6 +3830,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_afa_init;
}
+ err = mlxsw_sp_nve_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
+ goto err_nve_init;
+ }
+
err = mlxsw_sp_router_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
@@ -3835,6 +3882,8 @@ err_acl_init:
err_netdev_notifier:
mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
+ mlxsw_sp_nve_fini(mlxsw_sp);
+err_nve_init:
mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
mlxsw_sp_counter_pool_fini(mlxsw_sp);
@@ -3867,6 +3916,7 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
+ mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
}
@@ -3881,6 +3931,7 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
+ mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
}
@@ -3894,6 +3945,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_acl_fini(mlxsw_sp);
unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
mlxsw_sp_router_fini(mlxsw_sp);
+ mlxsw_sp_nve_fini(mlxsw_sp);
mlxsw_sp_afa_fini(mlxsw_sp);
mlxsw_sp_counter_pool_fini(mlxsw_sp);
mlxsw_sp_switchdev_fini(mlxsw_sp);
@@ -4560,6 +4612,41 @@ static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}
+static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
+{
+ unsigned int num_vxlans = 0;
+ struct net_device *dev;
+ struct list_head *iter;
+
+ netdev_for_each_lower_dev(br_dev, dev, iter) {
+ if (netif_is_vxlan(dev))
+ num_vxlans++;
+ }
+
+ return num_vxlans > 1;
+}
+
+static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
+ struct netlink_ext_ack *extack)
+{
+ if (br_multicast_enabled(br_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
+ return false;
+ }
+
+ if (br_vlan_enabled(br_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "VLAN filtering can not be enabled on a bridge with a VxLAN device");
+ return false;
+ }
+
+ if (mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
+ return false;
+ }
+
+ return true;
+}
+
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
struct net_device *dev,
unsigned long event, void *ptr)
@@ -4589,6 +4676,11 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
}
if (!info->linking)
break;
+ if (netif_is_bridge_master(upper_dev) &&
+ !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
+ mlxsw_sp_bridge_has_vxlan(upper_dev) &&
+ !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
+ return -EOPNOTSUPP;
if (netdev_has_any_upper_dev(upper_dev) &&
(!netif_is_bridge_master(upper_dev) ||
!mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
@@ -4746,6 +4838,11 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
}
if (!info->linking)
break;
+ if (netif_is_bridge_master(upper_dev) &&
+ !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
+ mlxsw_sp_bridge_has_vxlan(upper_dev) &&
+ !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
+ return -EOPNOTSUPP;
if (netdev_has_any_upper_dev(upper_dev) &&
(!netif_is_bridge_master(upper_dev) ||
!mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
@@ -4855,6 +4952,8 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
upper_dev = info->upper_dev;
if (info->linking)
break;
+ if (is_vlan_dev(upper_dev))
+ mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
if (netif_is_macvlan(upper_dev))
mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
break;
@@ -4890,6 +4989,63 @@ static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
return netif_is_l3_master(info->upper_dev);
}
+static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *dev,
+ unsigned long event, void *ptr)
+{
+ struct netdev_notifier_changeupper_info *cu_info;
+ struct netdev_notifier_info *info = ptr;
+ struct netlink_ext_ack *extack;
+ struct net_device *upper_dev;
+
+ extack = netdev_notifier_info_to_extack(info);
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ cu_info = container_of(info,
+ struct netdev_notifier_changeupper_info,
+ info);
+ upper_dev = cu_info->upper_dev;
+ if (!netif_is_bridge_master(upper_dev))
+ return 0;
+ if (!mlxsw_sp_lower_get(upper_dev))
+ return 0;
+ if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
+ return -EOPNOTSUPP;
+ if (cu_info->linking) {
+ if (!netif_running(dev))
+ return 0;
+ return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
+ dev, extack);
+ } else {
+ mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, upper_dev, dev);
+ }
+ break;
+ case NETDEV_PRE_UP:
+ upper_dev = netdev_master_upper_dev_get(dev);
+ if (!upper_dev)
+ return 0;
+ if (!netif_is_bridge_master(upper_dev))
+ return 0;
+ if (!mlxsw_sp_lower_get(upper_dev))
+ return 0;
+ return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev,
+ extack);
+ case NETDEV_DOWN:
+ upper_dev = netdev_master_upper_dev_get(dev);
+ if (!upper_dev)
+ return 0;
+ if (!netif_is_bridge_master(upper_dev))
+ return 0;
+ if (!mlxsw_sp_lower_get(upper_dev))
+ return 0;
+ mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, upper_dev, dev);
+ break;
+ }
+
+ return 0;
+}
+
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
@@ -4906,6 +5062,8 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
}
mlxsw_sp_span_respin(mlxsw_sp);
+ if (netif_is_vxlan(dev))
+ err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
event, ptr);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 3cdb7aca90b7..0875a79cbe7b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -16,6 +16,7 @@
#include <net/psample.h>
#include <net/pkt_cls.h>
#include <net/red.h>
+#include <net/vxlan.h>
#include "port.h"
#include "core.h"
@@ -55,6 +56,8 @@ enum mlxsw_sp_resource_id {
struct mlxsw_sp_port;
struct mlxsw_sp_rif;
struct mlxsw_sp_span_entry;
+enum mlxsw_sp_l3proto;
+union mlxsw_sp_l3addr;
struct mlxsw_sp_upper {
struct net_device *dev;
@@ -113,9 +116,11 @@ struct mlxsw_sp_acl;
struct mlxsw_sp_counter_pool;
struct mlxsw_sp_fid_core;
struct mlxsw_sp_kvdl;
+struct mlxsw_sp_nve;
struct mlxsw_sp_kvdl_ops;
struct mlxsw_sp_mr_tcam_ops;
struct mlxsw_sp_acl_tcam_ops;
+struct mlxsw_sp_nve_ops;
struct mlxsw_sp {
struct mlxsw_sp_port **ports;
@@ -132,6 +137,7 @@ struct mlxsw_sp {
struct mlxsw_sp_acl *acl;
struct mlxsw_sp_fid_core *fid_core;
struct mlxsw_sp_kvdl *kvdl;
+ struct mlxsw_sp_nve *nve;
struct notifier_block netdevice_nb;
struct mlxsw_sp_counter_pool *counter_pool;
@@ -146,6 +152,7 @@ struct mlxsw_sp {
const struct mlxsw_afk_ops *afk_ops;
const struct mlxsw_sp_mr_tcam_ops *mr_tcam_ops;
const struct mlxsw_sp_acl_tcam_ops *acl_tcam_ops;
+ const struct mlxsw_sp_nve_ops **nve_ops_arr;
};
static inline struct mlxsw_sp_upper *
@@ -235,6 +242,25 @@ struct mlxsw_sp_port {
struct mlxsw_sp_acl_block *eg_acl_block;
};
+static inline struct net_device *
+mlxsw_sp_bridge_vxlan_dev_find(struct net_device *br_dev)
+{
+ struct net_device *dev;
+ struct list_head *iter;
+
+ netdev_for_each_lower_dev(br_dev, dev, iter) {
+ if (netif_is_vxlan(dev))
+ return dev;
+ }
+
+ return NULL;
+}
+
+static inline bool mlxsw_sp_bridge_has_vxlan(struct net_device *br_dev)
+{
+ return !!mlxsw_sp_bridge_vxlan_dev_find(br_dev);
+}
+
static inline bool
mlxsw_sp_port_is_pause_en(const struct mlxsw_sp_port *mlxsw_sp_port)
{
@@ -330,6 +356,13 @@ void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *br_dev);
bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *br_dev);
+int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev,
+ const struct net_device *vxlan_dev,
+ struct netlink_ext_ack *extack);
+void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev,
+ const struct net_device *vxlan_dev);
/* spectrum.c */
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -383,6 +416,17 @@ static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
#endif
/* spectrum_router.c */
+enum mlxsw_sp_l3proto {
+ MLXSW_SP_L3_PROTO_IPV4,
+ MLXSW_SP_L3_PROTO_IPV6,
+#define MLXSW_SP_L3_PROTO_MAX (MLXSW_SP_L3_PROTO_IPV6 + 1)
+};
+
+union mlxsw_sp_l3addr {
+ __be32 addr4;
+ struct in6_addr addr6;
+};
+
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev);
@@ -416,6 +460,19 @@ mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev);
+struct mlxsw_sp_rif *mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev);
+u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp);
+struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif);
+int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
+ enum mlxsw_sp_l3proto ul_proto,
+ const union mlxsw_sp_l3addr *ul_sip,
+ u32 tunnel_index);
+void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
+ enum mlxsw_sp_l3proto ul_proto,
+ const union mlxsw_sp_l3addr *ul_sip);
+int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
+ u16 *vr_id);
/* spectrum_kvdl.c */
enum mlxsw_sp_kvdl_entry_type {
@@ -423,6 +480,7 @@ enum mlxsw_sp_kvdl_entry_type {
MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
MLXSW_SP_KVDL_ENTRY_TYPE_PBS,
MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR,
+ MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT,
};
static inline unsigned int
@@ -433,6 +491,7 @@ mlxsw_sp_kvdl_entry_size(enum mlxsw_sp_kvdl_entry_type type)
case MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET: /* fall through */
case MLXSW_SP_KVDL_ENTRY_TYPE_PBS: /* fall through */
case MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR: /* fall through */
+ case MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT: /* fall through */
default:
return 1;
}
@@ -662,6 +721,16 @@ int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_prio_qopt_offload *p);
/* spectrum_fid.c */
+struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_vni(struct mlxsw_sp *mlxsw_sp,
+ __be32 vni);
+int mlxsw_sp_fid_vni(const struct mlxsw_sp_fid *fid, __be32 *vni);
+int mlxsw_sp_fid_nve_flood_index_set(struct mlxsw_sp_fid *fid,
+ u32 nve_flood_index);
+void mlxsw_sp_fid_nve_flood_index_clear(struct mlxsw_sp_fid *fid);
+bool mlxsw_sp_fid_nve_flood_index_is_set(const struct mlxsw_sp_fid *fid);
+int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, __be32 vni);
+void mlxsw_sp_fid_vni_clear(struct mlxsw_sp_fid *fid);
+bool mlxsw_sp_fid_vni_is_set(const struct mlxsw_sp_fid *fid);
int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
enum mlxsw_sp_flood_type packet_type, u8 local_port,
bool member);
@@ -680,6 +749,8 @@ u16 mlxsw_sp_fid_8021q_vid(const struct mlxsw_sp_fid *fid);
struct mlxsw_sp_fid *mlxsw_sp_fid_8021q_get(struct mlxsw_sp *mlxsw_sp, u16 vid);
struct mlxsw_sp_fid *mlxsw_sp_fid_8021d_get(struct mlxsw_sp *mlxsw_sp,
int br_ifindex);
+struct mlxsw_sp_fid *mlxsw_sp_fid_8021d_lookup(struct mlxsw_sp *mlxsw_sp,
+ int br_ifindex);
struct mlxsw_sp_fid *mlxsw_sp_fid_rfid_get(struct mlxsw_sp *mlxsw_sp,
u16 rif_index);
struct mlxsw_sp_fid *mlxsw_sp_fid_dummy_get(struct mlxsw_sp *mlxsw_sp);
@@ -725,4 +796,39 @@ extern const struct mlxsw_sp_mr_tcam_ops mlxsw_sp1_mr_tcam_ops;
/* spectrum2_mr_tcam.c */
extern const struct mlxsw_sp_mr_tcam_ops mlxsw_sp2_mr_tcam_ops;
+/* spectrum_nve.c */
+enum mlxsw_sp_nve_type {
+ MLXSW_SP_NVE_TYPE_VXLAN,
+};
+
+struct mlxsw_sp_nve_params {
+ enum mlxsw_sp_nve_type type;
+ __be32 vni;
+ const struct net_device *dev;
+};
+
+extern const struct mlxsw_sp_nve_ops *mlxsw_sp1_nve_ops_arr[];
+extern const struct mlxsw_sp_nve_ops *mlxsw_sp2_nve_ops_arr[];
+
+int mlxsw_sp_nve_flood_ip_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fid *fid,
+ enum mlxsw_sp_l3proto proto,
+ union mlxsw_sp_l3addr *addr);
+void mlxsw_sp_nve_flood_ip_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fid *fid,
+ enum mlxsw_sp_l3proto proto,
+ union mlxsw_sp_l3addr *addr);
+u32 mlxsw_sp_nve_decap_tunnel_index_get(const struct mlxsw_sp *mlxsw_sp);
+bool mlxsw_sp_nve_ipv4_route_is_decap(const struct mlxsw_sp *mlxsw_sp,
+ u32 tb_id, __be32 addr);
+int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_nve_params *params,
+ struct netlink_ext_ack *extack);
+void mlxsw_sp_nve_fid_disable(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fid *fid);
+int mlxsw_sp_port_nve_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_port_nve_fini(struct mlxsw_sp_port *mlxsw_sp_port);
+int mlxsw_sp_nve_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_nve_fini(struct mlxsw_sp *mlxsw_sp);
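+
+/* A minimal usage sketch (illustrative only): enabling VXLAN offload on a
+ * bridge FID, where 'vxlan_dev', 'vni' and 'fid' are assumed to be provided
+ * by the caller:
+ *
+ *	struct mlxsw_sp_nve_params params = {
+ *		.type = MLXSW_SP_NVE_TYPE_VXLAN,
+ *		.vni = cpu_to_be32(vni),
+ *		.dev = vxlan_dev,
+ *	};
+ *
+ *	err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
+ */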
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
index 68c8b148bef2..8d14770766b4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
@@ -35,6 +35,7 @@ static const struct mlxsw_sp2_kvdl_part_info mlxsw_sp2_kvdl_parts_info[] = {
MAX_KVD_ACTION_SETS),
MLXSW_SP2_KVDL_PART_INFO(PBS, 0x24, KVD_SIZE, KVD_SIZE),
MLXSW_SP2_KVDL_PART_INFO(MCRIGR, 0x26, KVD_SIZE, KVD_SIZE),
+ MLXSW_SP2_KVDL_PART_INFO(TNUMT, 0x29, KVD_SIZE, KVD_SIZE),
};
#define MLXSW_SP2_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp2_kvdl_parts_info)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index 715d24ff937e..a3db033d7399 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -6,6 +6,7 @@
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/netdevice.h>
+#include <linux/rhashtable.h>
#include <linux/rtnetlink.h>
#include "spectrum.h"
@@ -14,6 +15,7 @@
struct mlxsw_sp_fid_family;
struct mlxsw_sp_fid_core {
+ struct rhashtable vni_ht;
struct mlxsw_sp_fid_family *fid_family_arr[MLXSW_SP_FID_TYPE_MAX];
unsigned int *port_fid_mappings;
};
@@ -24,6 +26,12 @@ struct mlxsw_sp_fid {
unsigned int ref_count;
u16 fid_index;
struct mlxsw_sp_fid_family *fid_family;
+
+ struct rhash_head vni_ht_node;
+ __be32 vni;
+ u32 nve_flood_index;
+ u8 vni_valid:1,
+ nve_flood_index_valid:1;
};
struct mlxsw_sp_fid_8021q {
@@ -36,6 +44,12 @@ struct mlxsw_sp_fid_8021d {
int br_ifindex;
};
+static const struct rhashtable_params mlxsw_sp_fid_vni_ht_params = {
+ .key_len = sizeof_field(struct mlxsw_sp_fid, vni),
+ .key_offset = offsetof(struct mlxsw_sp_fid, vni),
+ .head_offset = offsetof(struct mlxsw_sp_fid, vni_ht_node),
+};
+
struct mlxsw_sp_flood_table {
enum mlxsw_sp_flood_type packet_type;
enum mlxsw_reg_sfgc_bridge_type bridge_type;
@@ -56,6 +70,11 @@ struct mlxsw_sp_fid_ops {
struct mlxsw_sp_port *port, u16 vid);
void (*port_vid_unmap)(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *port, u16 vid);
+ int (*vni_set)(struct mlxsw_sp_fid *fid, __be32 vni);
+ void (*vni_clear)(struct mlxsw_sp_fid *fid);
+ int (*nve_flood_index_set)(struct mlxsw_sp_fid *fid,
+ u32 nve_flood_index);
+ void (*nve_flood_index_clear)(struct mlxsw_sp_fid *fid);
};
struct mlxsw_sp_fid_family {
@@ -94,6 +113,117 @@ static const int *mlxsw_sp_packet_type_sfgc_types[] = {
[MLXSW_SP_FLOOD_TYPE_MC] = mlxsw_sp_sfgc_mc_packet_types,
};
+struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_vni(struct mlxsw_sp *mlxsw_sp,
+ __be32 vni)
+{
+ struct mlxsw_sp_fid *fid;
+
+ fid = rhashtable_lookup_fast(&mlxsw_sp->fid_core->vni_ht, &vni,
+ mlxsw_sp_fid_vni_ht_params);
+ if (fid)
+ fid->ref_count++;
+
+ return fid;
+}
+
+int mlxsw_sp_fid_vni(const struct mlxsw_sp_fid *fid, __be32 *vni)
+{
+ if (!fid->vni_valid)
+ return -EINVAL;
+
+ *vni = fid->vni;
+
+ return 0;
+}
+
+int mlxsw_sp_fid_nve_flood_index_set(struct mlxsw_sp_fid *fid,
+ u32 nve_flood_index)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+ const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
+ int err;
+
+ if (WARN_ON(!ops->nve_flood_index_set || fid->nve_flood_index_valid))
+ return -EINVAL;
+
+ err = ops->nve_flood_index_set(fid, nve_flood_index);
+ if (err)
+ return err;
+
+ fid->nve_flood_index = nve_flood_index;
+ fid->nve_flood_index_valid = true;
+
+ return 0;
+}
+
+void mlxsw_sp_fid_nve_flood_index_clear(struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+ const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
+
+ if (WARN_ON(!ops->nve_flood_index_clear || !fid->nve_flood_index_valid))
+ return;
+
+ fid->nve_flood_index_valid = false;
+ ops->nve_flood_index_clear(fid);
+}
+
+bool mlxsw_sp_fid_nve_flood_index_is_set(const struct mlxsw_sp_fid *fid)
+{
+ return fid->nve_flood_index_valid;
+}
+
+int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, __be32 vni)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+ const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
+ struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
+ int err;
+
+ if (WARN_ON(!ops->vni_set || fid->vni_valid))
+ return -EINVAL;
+
+ fid->vni = vni;
+ err = rhashtable_lookup_insert_fast(&mlxsw_sp->fid_core->vni_ht,
+ &fid->vni_ht_node,
+ mlxsw_sp_fid_vni_ht_params);
+ if (err)
+ return err;
+
+ err = ops->vni_set(fid, vni);
+ if (err)
+ goto err_vni_set;
+
+ fid->vni_valid = true;
+
+ return 0;
+
+err_vni_set:
+ rhashtable_remove_fast(&mlxsw_sp->fid_core->vni_ht, &fid->vni_ht_node,
+ mlxsw_sp_fid_vni_ht_params);
+ return err;
+}
+
+void mlxsw_sp_fid_vni_clear(struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+ const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
+ struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
+
+ if (WARN_ON(!ops->vni_clear || !fid->vni_valid))
+ return;
+
+ fid->vni_valid = false;
+ ops->vni_clear(fid);
+ rhashtable_remove_fast(&mlxsw_sp->fid_core->vni_ht, &fid->vni_ht_node,
+ mlxsw_sp_fid_vni_ht_params);
+}
+
+bool mlxsw_sp_fid_vni_is_set(const struct mlxsw_sp_fid *fid)
+{
+ return fid->vni_valid;
+}
+
static const struct mlxsw_sp_flood_table *
mlxsw_sp_fid_flood_table_lookup(const struct mlxsw_sp_fid *fid,
enum mlxsw_sp_flood_type packet_type)
@@ -217,6 +347,21 @@ static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
+static int mlxsw_sp_fid_vni_op(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
+ __be32 vni, bool vni_valid, u32 nve_flood_index,
+ bool nve_flood_index_valid)
+{
+ char sfmr_pl[MLXSW_REG_SFMR_LEN];
+
+ mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid_index,
+ 0);
+ mlxsw_reg_sfmr_vv_set(sfmr_pl, vni_valid);
+ mlxsw_reg_sfmr_vni_set(sfmr_pl, be32_to_cpu(vni));
+ mlxsw_reg_sfmr_vtfp_set(sfmr_pl, nve_flood_index_valid);
+ mlxsw_reg_sfmr_nve_tunnel_flood_ptr_set(sfmr_pl, nve_flood_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+}
+
static int mlxsw_sp_fid_vid_map(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
u16 vid, bool valid)
{
@@ -393,6 +538,8 @@ static int mlxsw_sp_fid_8021d_configure(struct mlxsw_sp_fid *fid)
static void mlxsw_sp_fid_8021d_deconfigure(struct mlxsw_sp_fid *fid)
{
+ if (fid->vni_valid)
+ mlxsw_sp_nve_fid_disable(fid->fid_family->mlxsw_sp, fid);
mlxsw_sp_fid_op(fid->fid_family->mlxsw_sp, fid->fid_index, 0, false);
}
@@ -531,6 +678,41 @@ mlxsw_sp_fid_8021d_port_vid_unmap(struct mlxsw_sp_fid *fid,
mlxsw_sp_port->local_port, vid, false);
}
+static int mlxsw_sp_fid_8021d_vni_set(struct mlxsw_sp_fid *fid, __be32 vni)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+
+ return mlxsw_sp_fid_vni_op(fid_family->mlxsw_sp, fid->fid_index, vni,
+ true, fid->nve_flood_index,
+ fid->nve_flood_index_valid);
+}
+
+static void mlxsw_sp_fid_8021d_vni_clear(struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+
+ mlxsw_sp_fid_vni_op(fid_family->mlxsw_sp, fid->fid_index, 0, false,
+ fid->nve_flood_index, fid->nve_flood_index_valid);
+}
+
+static int mlxsw_sp_fid_8021d_nve_flood_index_set(struct mlxsw_sp_fid *fid,
+ u32 nve_flood_index)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+
+ return mlxsw_sp_fid_vni_op(fid_family->mlxsw_sp, fid->fid_index,
+ fid->vni, fid->vni_valid, nve_flood_index,
+ true);
+}
+
+static void mlxsw_sp_fid_8021d_nve_flood_index_clear(struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+
+ mlxsw_sp_fid_vni_op(fid_family->mlxsw_sp, fid->fid_index, fid->vni,
+ fid->vni_valid, 0, false);
+}
+
static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops = {
.setup = mlxsw_sp_fid_8021d_setup,
.configure = mlxsw_sp_fid_8021d_configure,
@@ -540,6 +722,10 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops = {
.flood_index = mlxsw_sp_fid_8021d_flood_index,
.port_vid_map = mlxsw_sp_fid_8021d_port_vid_map,
.port_vid_unmap = mlxsw_sp_fid_8021d_port_vid_unmap,
+ .vni_set = mlxsw_sp_fid_8021d_vni_set,
+ .vni_clear = mlxsw_sp_fid_8021d_vni_clear,
+ .nve_flood_index_set = mlxsw_sp_fid_8021d_nve_flood_index_set,
+ .nve_flood_index_clear = mlxsw_sp_fid_8021d_nve_flood_index_clear,
};
static const struct mlxsw_sp_flood_table mlxsw_sp_fid_8021d_flood_tables[] = {
@@ -708,14 +894,12 @@ static const struct mlxsw_sp_fid_family *mlxsw_sp_fid_family_arr[] = {
[MLXSW_SP_FID_TYPE_DUMMY] = &mlxsw_sp_fid_dummy_family,
};
-static struct mlxsw_sp_fid *mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp,
- enum mlxsw_sp_fid_type type,
- const void *arg)
+static struct mlxsw_sp_fid *mlxsw_sp_fid_lookup(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_fid_type type,
+ const void *arg)
{
struct mlxsw_sp_fid_family *fid_family;
struct mlxsw_sp_fid *fid;
- u16 fid_index;
- int err;
fid_family = mlxsw_sp->fid_core->fid_family_arr[type];
list_for_each_entry(fid, &fid_family->fids_list, list) {
@@ -725,6 +909,23 @@ static struct mlxsw_sp_fid *mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp,
return fid;
}
+ return NULL;
+}
+
+static struct mlxsw_sp_fid *mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_fid_type type,
+ const void *arg)
+{
+ struct mlxsw_sp_fid_family *fid_family;
+ struct mlxsw_sp_fid *fid;
+ u16 fid_index;
+ int err;
+
+ fid = mlxsw_sp_fid_lookup(mlxsw_sp, type, arg);
+ if (fid)
+ return fid;
+
+ fid_family = mlxsw_sp->fid_core->fid_family_arr[type];
fid = kzalloc(fid_family->fid_size, GFP_KERNEL);
if (!fid)
return ERR_PTR(-ENOMEM);
@@ -784,6 +985,13 @@ struct mlxsw_sp_fid *mlxsw_sp_fid_8021d_get(struct mlxsw_sp *mlxsw_sp,
return mlxsw_sp_fid_get(mlxsw_sp, MLXSW_SP_FID_TYPE_8021D, &br_ifindex);
}
+struct mlxsw_sp_fid *mlxsw_sp_fid_8021d_lookup(struct mlxsw_sp *mlxsw_sp,
+ int br_ifindex)
+{
+ return mlxsw_sp_fid_lookup(mlxsw_sp, MLXSW_SP_FID_TYPE_8021D,
+ &br_ifindex);
+}
+
struct mlxsw_sp_fid *mlxsw_sp_fid_rfid_get(struct mlxsw_sp *mlxsw_sp,
u16 rif_index)
{
@@ -918,6 +1126,10 @@ int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp)
return -ENOMEM;
mlxsw_sp->fid_core = fid_core;
+ err = rhashtable_init(&fid_core->vni_ht, &mlxsw_sp_fid_vni_ht_params);
+ if (err)
+ goto err_rhashtable_init;
+
fid_core->port_fid_mappings = kcalloc(max_ports, sizeof(unsigned int),
GFP_KERNEL);
if (!fid_core->port_fid_mappings) {
@@ -944,6 +1156,8 @@ err_fid_ops_register:
}
kfree(fid_core->port_fid_mappings);
err_alloc_port_fid_mappings:
+ rhashtable_destroy(&fid_core->vni_ht);
+err_rhashtable_init:
kfree(fid_core);
return err;
}
@@ -957,5 +1171,6 @@ void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_fid_family_unregister(mlxsw_sp,
fid_core->fid_family_arr[i]);
kfree(fid_core->port_fid_mappings);
+ rhashtable_destroy(&fid_core->vni_ht);
kfree(fid_core);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
new file mode 100644
index 000000000000..ad06d9969bc1
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
@@ -0,0 +1,982 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/netlink.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+#include <net/inet_ecn.h>
+#include <net/ipv6.h>
+
+#include "reg.h"
+#include "spectrum.h"
+#include "spectrum_nve.h"
+
+const struct mlxsw_sp_nve_ops *mlxsw_sp1_nve_ops_arr[] = {
+ [MLXSW_SP_NVE_TYPE_VXLAN] = &mlxsw_sp1_nve_vxlan_ops,
+};
+
+const struct mlxsw_sp_nve_ops *mlxsw_sp2_nve_ops_arr[] = {
+ [MLXSW_SP_NVE_TYPE_VXLAN] = &mlxsw_sp2_nve_vxlan_ops,
+};
+
+struct mlxsw_sp_nve_mc_entry;
+struct mlxsw_sp_nve_mc_record;
+struct mlxsw_sp_nve_mc_list;
+
+struct mlxsw_sp_nve_mc_record_ops {
+ enum mlxsw_reg_tnumt_record_type type;
+ int (*entry_add)(struct mlxsw_sp_nve_mc_record *mc_record,
+ struct mlxsw_sp_nve_mc_entry *mc_entry,
+ const union mlxsw_sp_l3addr *addr);
+ void (*entry_del)(const struct mlxsw_sp_nve_mc_record *mc_record,
+ const struct mlxsw_sp_nve_mc_entry *mc_entry);
+ void (*entry_set)(const struct mlxsw_sp_nve_mc_record *mc_record,
+ const struct mlxsw_sp_nve_mc_entry *mc_entry,
+ char *tnumt_pl, unsigned int entry_index);
+ bool (*entry_compare)(const struct mlxsw_sp_nve_mc_record *mc_record,
+ const struct mlxsw_sp_nve_mc_entry *mc_entry,
+ const union mlxsw_sp_l3addr *addr);
+};
+
+struct mlxsw_sp_nve_mc_list_key {
+ u16 fid_index;
+};
+
+struct mlxsw_sp_nve_mc_ipv6_entry {
+ struct in6_addr addr6;
+ u32 addr6_kvdl_index;
+};
+
+struct mlxsw_sp_nve_mc_entry {
+ union {
+ __be32 addr4;
+ struct mlxsw_sp_nve_mc_ipv6_entry ipv6_entry;
+ };
+ u8 valid:1;
+};
+
+struct mlxsw_sp_nve_mc_record {
+ struct list_head list;
+ enum mlxsw_sp_l3proto proto;
+ unsigned int num_entries;
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_nve_mc_list *mc_list;
+ const struct mlxsw_sp_nve_mc_record_ops *ops;
+ u32 kvdl_index;
+ struct mlxsw_sp_nve_mc_entry entries[0];
+};
+
+struct mlxsw_sp_nve_mc_list {
+ struct list_head records_list;
+ struct rhash_head ht_node;
+ struct mlxsw_sp_nve_mc_list_key key;
+};
+
+static const struct rhashtable_params mlxsw_sp_nve_mc_list_ht_params = {
+ .key_len = sizeof(struct mlxsw_sp_nve_mc_list_key),
+ .key_offset = offsetof(struct mlxsw_sp_nve_mc_list, key),
+ .head_offset = offsetof(struct mlxsw_sp_nve_mc_list, ht_node),
+};
+
+static int
+mlxsw_sp_nve_mc_record_ipv4_entry_add(struct mlxsw_sp_nve_mc_record *mc_record,
+ struct mlxsw_sp_nve_mc_entry *mc_entry,
+ const union mlxsw_sp_l3addr *addr)
+{
+ mc_entry->addr4 = addr->addr4;
+
+ return 0;
+}
+
+static void
+mlxsw_sp_nve_mc_record_ipv4_entry_del(const struct mlxsw_sp_nve_mc_record *mc_record,
+ const struct mlxsw_sp_nve_mc_entry *mc_entry)
+{
+}
+
+static void
+mlxsw_sp_nve_mc_record_ipv4_entry_set(const struct mlxsw_sp_nve_mc_record *mc_record,
+ const struct mlxsw_sp_nve_mc_entry *mc_entry,
+ char *tnumt_pl, unsigned int entry_index)
+{
+ u32 udip = be32_to_cpu(mc_entry->addr4);
+
+ mlxsw_reg_tnumt_udip_set(tnumt_pl, entry_index, udip);
+}
+
+static bool
+mlxsw_sp_nve_mc_record_ipv4_entry_compare(const struct mlxsw_sp_nve_mc_record *mc_record,
+ const struct mlxsw_sp_nve_mc_entry *mc_entry,
+ const union mlxsw_sp_l3addr *addr)
+{
+ return mc_entry->addr4 == addr->addr4;
+}
+
+static const struct mlxsw_sp_nve_mc_record_ops
+mlxsw_sp_nve_mc_record_ipv4_ops = {
+ .type = MLXSW_REG_TNUMT_RECORD_TYPE_IPV4,
+ .entry_add = &mlxsw_sp_nve_mc_record_ipv4_entry_add,
+ .entry_del = &mlxsw_sp_nve_mc_record_ipv4_entry_del,
+ .entry_set = &mlxsw_sp_nve_mc_record_ipv4_entry_set,
+ .entry_compare = &mlxsw_sp_nve_mc_record_ipv4_entry_compare,
+};
+
+static int
+mlxsw_sp_nve_mc_record_ipv6_entry_add(struct mlxsw_sp_nve_mc_record *mc_record,
+ struct mlxsw_sp_nve_mc_entry *mc_entry,
+ const union mlxsw_sp_l3addr *addr)
+{
+ WARN_ON(1);
+
+ return -EINVAL;
+}
+
+static void
+mlxsw_sp_nve_mc_record_ipv6_entry_del(const struct mlxsw_sp_nve_mc_record *mc_record,
+ const struct mlxsw_sp_nve_mc_entry *mc_entry)
+{
+}
+
+static void
+mlxsw_sp_nve_mc_record_ipv6_entry_set(const struct mlxsw_sp_nve_mc_record *mc_record,
+ const struct mlxsw_sp_nve_mc_entry *mc_entry,
+ char *tnumt_pl, unsigned int entry_index)
+{
+ u32 udip_ptr = mc_entry->ipv6_entry.addr6_kvdl_index;
+
+ mlxsw_reg_tnumt_udip_ptr_set(tnumt_pl, entry_index, udip_ptr);
+}
+
+static bool
+mlxsw_sp_nve_mc_record_ipv6_entry_compare(const struct mlxsw_sp_nve_mc_record *mc_record,
+ const struct mlxsw_sp_nve_mc_entry *mc_entry,
+ const union mlxsw_sp_l3addr *addr)
+{
+ return ipv6_addr_equal(&mc_entry->ipv6_entry.addr6, &addr->addr6);
+}
+
+static const struct mlxsw_sp_nve_mc_record_ops
+mlxsw_sp_nve_mc_record_ipv6_ops = {
+ .type = MLXSW_REG_TNUMT_RECORD_TYPE_IPV6,
+ .entry_add = &mlxsw_sp_nve_mc_record_ipv6_entry_add,
+ .entry_del = &mlxsw_sp_nve_mc_record_ipv6_entry_del,
+ .entry_set = &mlxsw_sp_nve_mc_record_ipv6_entry_set,
+ .entry_compare = &mlxsw_sp_nve_mc_record_ipv6_entry_compare,
+};
+
+static const struct mlxsw_sp_nve_mc_record_ops *
+mlxsw_sp_nve_mc_record_ops_arr[] = {
+ [MLXSW_SP_L3_PROTO_IPV4] = &mlxsw_sp_nve_mc_record_ipv4_ops,
+ [MLXSW_SP_L3_PROTO_IPV6] = &mlxsw_sp_nve_mc_record_ipv6_ops,
+};
+
+static struct mlxsw_sp_nve_mc_list *
+mlxsw_sp_nve_mc_list_find(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_nve_mc_list_key *key)
+{
+ struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
+
+ return rhashtable_lookup_fast(&nve->mc_list_ht, key,
+ mlxsw_sp_nve_mc_list_ht_params);
+}
+
+static struct mlxsw_sp_nve_mc_list *
+mlxsw_sp_nve_mc_list_create(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_nve_mc_list_key *key)
+{
+ struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
+ struct mlxsw_sp_nve_mc_list *mc_list;
+ int err;
+
+ mc_list = kmalloc(sizeof(*mc_list), GFP_KERNEL);
+ if (!mc_list)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&mc_list->records_list);
+ mc_list->key = *key;
+
+ err = rhashtable_insert_fast(&nve->mc_list_ht, &mc_list->ht_node,
+ mlxsw_sp_nve_mc_list_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ return mc_list;
+
+err_rhashtable_insert:
+ kfree(mc_list);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_nve_mc_list_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nve_mc_list *mc_list)
+{
+ struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
+
+ rhashtable_remove_fast(&nve->mc_list_ht, &mc_list->ht_node,
+ mlxsw_sp_nve_mc_list_ht_params);
+ WARN_ON(!list_empty(&mc_list->records_list));
+ kfree(mc_list);
+}
+
+static struct mlxsw_sp_nve_mc_list *
+mlxsw_sp_nve_mc_list_get(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_nve_mc_list_key *key)
+{
+ struct mlxsw_sp_nve_mc_list *mc_list;
+
+ mc_list = mlxsw_sp_nve_mc_list_find(mlxsw_sp, key);
+ if (mc_list)
+ return mc_list;
+
+ return mlxsw_sp_nve_mc_list_create(mlxsw_sp, key);
+}
+
+static void
+mlxsw_sp_nve_mc_list_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nve_mc_list *mc_list)
+{
+ if (!list_empty(&mc_list->records_list))
+ return;
+ mlxsw_sp_nve_mc_list_destroy(mlxsw_sp, mc_list);
+}
+
+static struct mlxsw_sp_nve_mc_record *
+mlxsw_sp_nve_mc_record_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nve_mc_list *mc_list,
+ enum mlxsw_sp_l3proto proto)
+{
+ unsigned int num_max_entries = mlxsw_sp->nve->num_max_mc_entries[proto];
+ struct mlxsw_sp_nve_mc_record *mc_record;
+ int err;
+
+ mc_record = kzalloc(sizeof(*mc_record) + num_max_entries *
+ sizeof(struct mlxsw_sp_nve_mc_entry), GFP_KERNEL);
+ if (!mc_record)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT, 1,
+ &mc_record->kvdl_index);
+ if (err)
+ goto err_kvdl_alloc;
+
+ mc_record->ops = mlxsw_sp_nve_mc_record_ops_arr[proto];
+ mc_record->mlxsw_sp = mlxsw_sp;
+ mc_record->mc_list = mc_list;
+ mc_record->proto = proto;
+ list_add_tail(&mc_record->list, &mc_list->records_list);
+
+ return mc_record;
+
+err_kvdl_alloc:
+ kfree(mc_record);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_nve_mc_record_destroy(struct mlxsw_sp_nve_mc_record *mc_record)
+{
+ struct mlxsw_sp *mlxsw_sp = mc_record->mlxsw_sp;
+
+ list_del(&mc_record->list);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT, 1,
+ mc_record->kvdl_index);
+ WARN_ON(mc_record->num_entries);
+ kfree(mc_record);
+}
+
+static struct mlxsw_sp_nve_mc_record *
+mlxsw_sp_nve_mc_record_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nve_mc_list *mc_list,
+ enum mlxsw_sp_l3proto proto)
+{
+ struct mlxsw_sp_nve_mc_record *mc_record;
+
+ list_for_each_entry_reverse(mc_record, &mc_list->records_list, list) {
+ unsigned int num_entries = mc_record->num_entries;
+ struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
+
+ if (mc_record->proto == proto &&
+ num_entries < nve->num_max_mc_entries[proto])
+ return mc_record;
+ }
+
+ return mlxsw_sp_nve_mc_record_create(mlxsw_sp, mc_list, proto);
+}
+
+static void
+mlxsw_sp_nve_mc_record_put(struct mlxsw_sp_nve_mc_record *mc_record)
+{
+ if (mc_record->num_entries != 0)
+ return;
+
+ mlxsw_sp_nve_mc_record_destroy(mc_record);
+}
+
+static struct mlxsw_sp_nve_mc_entry *
+mlxsw_sp_nve_mc_free_entry_find(struct mlxsw_sp_nve_mc_record *mc_record)
+{
+ struct mlxsw_sp_nve *nve = mc_record->mlxsw_sp->nve;
+ unsigned int num_max_entries;
+ int i;
+
+ num_max_entries = nve->num_max_mc_entries[mc_record->proto];
+ for (i = 0; i < num_max_entries; i++) {
+ if (mc_record->entries[i].valid)
+ continue;
+ return &mc_record->entries[i];
+ }
+
+ return NULL;
+}
+
+static int
+mlxsw_sp_nve_mc_record_refresh(struct mlxsw_sp_nve_mc_record *mc_record)
+{
+ enum mlxsw_reg_tnumt_record_type type = mc_record->ops->type;
+ struct mlxsw_sp_nve_mc_list *mc_list = mc_record->mc_list;
+ struct mlxsw_sp *mlxsw_sp = mc_record->mlxsw_sp;
+ char tnumt_pl[MLXSW_REG_TNUMT_LEN];
+ unsigned int num_max_entries;
+ unsigned int num_entries = 0;
+ u32 next_kvdl_index = 0;
+ bool next_valid = false;
+ int i;
+
+ if (!list_is_last(&mc_record->list, &mc_list->records_list)) {
+ struct mlxsw_sp_nve_mc_record *next_record;
+
+ next_record = list_next_entry(mc_record, list);
+ next_kvdl_index = next_record->kvdl_index;
+ next_valid = true;
+ }
+
+ mlxsw_reg_tnumt_pack(tnumt_pl, type, MLXSW_REG_TNUMT_TUNNEL_PORT_NVE,
+ mc_record->kvdl_index, next_valid,
+ next_kvdl_index, mc_record->num_entries);
+
+ num_max_entries = mlxsw_sp->nve->num_max_mc_entries[mc_record->proto];
+ for (i = 0; i < num_max_entries; i++) {
+ struct mlxsw_sp_nve_mc_entry *mc_entry;
+
+ mc_entry = &mc_record->entries[i];
+ if (!mc_entry->valid)
+ continue;
+ mc_record->ops->entry_set(mc_record, mc_entry, tnumt_pl,
+ num_entries++);
+ }
+
+ WARN_ON(num_entries != mc_record->num_entries);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnumt), tnumt_pl);
+}
+
+static bool
+mlxsw_sp_nve_mc_record_is_first(struct mlxsw_sp_nve_mc_record *mc_record)
+{
+ struct mlxsw_sp_nve_mc_list *mc_list = mc_record->mc_list;
+ struct mlxsw_sp_nve_mc_record *first_record;
+
+ first_record = list_first_entry(&mc_list->records_list,
+ struct mlxsw_sp_nve_mc_record, list);
+
+ return mc_record == first_record;
+}
+
+static struct mlxsw_sp_nve_mc_entry *
+mlxsw_sp_nve_mc_entry_find(struct mlxsw_sp_nve_mc_record *mc_record,
+ union mlxsw_sp_l3addr *addr)
+{
+ struct mlxsw_sp_nve *nve = mc_record->mlxsw_sp->nve;
+ unsigned int num_max_entries;
+ int i;
+
+ num_max_entries = nve->num_max_mc_entries[mc_record->proto];
+ for (i = 0; i < num_max_entries; i++) {
+ struct mlxsw_sp_nve_mc_entry *mc_entry;
+
+ mc_entry = &mc_record->entries[i];
+ if (!mc_entry->valid)
+ continue;
+ if (mc_record->ops->entry_compare(mc_record, mc_entry, addr))
+ return mc_entry;
+ }
+
+ return NULL;
+}
+
+static int
+mlxsw_sp_nve_mc_record_ip_add(struct mlxsw_sp_nve_mc_record *mc_record,
+ union mlxsw_sp_l3addr *addr)
+{
+ struct mlxsw_sp_nve_mc_entry *mc_entry = NULL;
+ int err;
+
+ mc_entry = mlxsw_sp_nve_mc_free_entry_find(mc_record);
+ if (WARN_ON(!mc_entry))
+ return -EINVAL;
+
+ err = mc_record->ops->entry_add(mc_record, mc_entry, addr);
+ if (err)
+ return err;
+ mc_record->num_entries++;
+ mc_entry->valid = true;
+
+ err = mlxsw_sp_nve_mc_record_refresh(mc_record);
+ if (err)
+ goto err_record_refresh;
+
+ /* If this is a new record and not the first one, then we need to
+ * update the next pointer of the previous entry
+ */
+ if (mc_record->num_entries != 1 ||
+ mlxsw_sp_nve_mc_record_is_first(mc_record))
+ return 0;
+
+ err = mlxsw_sp_nve_mc_record_refresh(list_prev_entry(mc_record, list));
+ if (err)
+ goto err_prev_record_refresh;
+
+ return 0;
+
+err_prev_record_refresh:
+err_record_refresh:
+ mc_entry->valid = false;
+ mc_record->num_entries--;
+ mc_record->ops->entry_del(mc_record, mc_entry);
+ return err;
+}
+
+static void
+mlxsw_sp_nve_mc_record_entry_del(struct mlxsw_sp_nve_mc_record *mc_record,
+ struct mlxsw_sp_nve_mc_entry *mc_entry)
+{
+ struct mlxsw_sp_nve_mc_list *mc_list = mc_record->mc_list;
+
+ mc_entry->valid = false;
+ mc_record->num_entries--;
+
+ /* When the record continues to exist we only need to invalidate
+ * the requested entry
+ */
+ if (mc_record->num_entries != 0) {
+ mlxsw_sp_nve_mc_record_refresh(mc_record);
+ mc_record->ops->entry_del(mc_record, mc_entry);
+ return;
+ }
+
+ /* If the record needs to be deleted, but it is not the first,
+ * then we need to make sure that the previous record no longer
+ * points to it. Remove deleted record from the list to reflect
+ * that and then re-add it at the end, so that it could be
+ * properly removed by the record destruction code
+ */
+ if (!mlxsw_sp_nve_mc_record_is_first(mc_record)) {
+ struct mlxsw_sp_nve_mc_record *prev_record;
+
+ prev_record = list_prev_entry(mc_record, list);
+ list_del(&mc_record->list);
+ mlxsw_sp_nve_mc_record_refresh(prev_record);
+ list_add_tail(&mc_record->list, &mc_list->records_list);
+ mc_record->ops->entry_del(mc_record, mc_entry);
+ return;
+ }
+
+ /* If the first record needs to be deleted, but the list is not
+ * singular, then the second record needs to be written in the
+ * first record's address, as this address is stored as a property
+ * of the FID
+ */
+ if (mlxsw_sp_nve_mc_record_is_first(mc_record) &&
+ !list_is_singular(&mc_list->records_list)) {
+ struct mlxsw_sp_nve_mc_record *next_record;
+
+ next_record = list_next_entry(mc_record, list);
+ swap(mc_record->kvdl_index, next_record->kvdl_index);
+ mlxsw_sp_nve_mc_record_refresh(next_record);
+ mc_record->ops->entry_del(mc_record, mc_entry);
+ return;
+ }
+
+ /* This is the last case where the last remaining record needs to
+ * be deleted. Simply delete the entry
+ */
+ mc_record->ops->entry_del(mc_record, mc_entry);
+}
+
+static struct mlxsw_sp_nve_mc_record *
+mlxsw_sp_nve_mc_record_find(struct mlxsw_sp_nve_mc_list *mc_list,
+ enum mlxsw_sp_l3proto proto,
+ union mlxsw_sp_l3addr *addr,
+ struct mlxsw_sp_nve_mc_entry **mc_entry)
+{
+ struct mlxsw_sp_nve_mc_record *mc_record;
+
+ list_for_each_entry(mc_record, &mc_list->records_list, list) {
+ if (mc_record->proto != proto)
+ continue;
+
+ *mc_entry = mlxsw_sp_nve_mc_entry_find(mc_record, addr);
+ if (*mc_entry)
+ return mc_record;
+ }
+
+ return NULL;
+}
+
+static int mlxsw_sp_nve_mc_list_ip_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nve_mc_list *mc_list,
+ enum mlxsw_sp_l3proto proto,
+ union mlxsw_sp_l3addr *addr)
+{
+ struct mlxsw_sp_nve_mc_record *mc_record;
+ int err;
+
+ mc_record = mlxsw_sp_nve_mc_record_get(mlxsw_sp, mc_list, proto);
+ if (IS_ERR(mc_record))
+ return PTR_ERR(mc_record);
+
+ err = mlxsw_sp_nve_mc_record_ip_add(mc_record, addr);
+ if (err)
+ goto err_ip_add;
+
+ return 0;
+
+err_ip_add:
+ mlxsw_sp_nve_mc_record_put(mc_record);
+ return err;
+}
+
+static void mlxsw_sp_nve_mc_list_ip_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nve_mc_list *mc_list,
+ enum mlxsw_sp_l3proto proto,
+ union mlxsw_sp_l3addr *addr)
+{
+ struct mlxsw_sp_nve_mc_record *mc_record;
+ struct mlxsw_sp_nve_mc_entry *mc_entry;
+
+ mc_record = mlxsw_sp_nve_mc_record_find(mc_list, proto, addr,
+ &mc_entry);
+ if (WARN_ON(!mc_record))
+ return;
+
+ mlxsw_sp_nve_mc_record_entry_del(mc_record, mc_entry);
+ mlxsw_sp_nve_mc_record_put(mc_record);
+}
+
+static int
+mlxsw_sp_nve_fid_flood_index_set(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_nve_mc_list *mc_list)
+{
+ struct mlxsw_sp_nve_mc_record *mc_record;
+
+ /* The address of the first record in the list is a property of
+ * the FID and we never change it. It only needs to be set when
+ * a new list is created
+ */
+ if (mlxsw_sp_fid_nve_flood_index_is_set(fid))
+ return 0;
+
+ mc_record = list_first_entry(&mc_list->records_list,
+ struct mlxsw_sp_nve_mc_record, list);
+
+ return mlxsw_sp_fid_nve_flood_index_set(fid, mc_record->kvdl_index);
+}
+
+static void
+mlxsw_sp_nve_fid_flood_index_clear(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_nve_mc_list *mc_list)
+{
+ struct mlxsw_sp_nve_mc_record *mc_record;
+
+ /* The address of the first record needs to be invalidated only when
+ * the last record is about to be removed
+ */
+ if (!list_is_singular(&mc_list->records_list))
+ return;
+
+ mc_record = list_first_entry(&mc_list->records_list,
+ struct mlxsw_sp_nve_mc_record, list);
+ if (mc_record->num_entries != 1)
+ return;
+
+ return mlxsw_sp_fid_nve_flood_index_clear(fid);
+}
+
+int mlxsw_sp_nve_flood_ip_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fid *fid,
+ enum mlxsw_sp_l3proto proto,
+ union mlxsw_sp_l3addr *addr)
+{
+ struct mlxsw_sp_nve_mc_list_key key = { 0 };
+ struct mlxsw_sp_nve_mc_list *mc_list;
+ int err;
+
+ key.fid_index = mlxsw_sp_fid_index(fid);
+ mc_list = mlxsw_sp_nve_mc_list_get(mlxsw_sp, &key);
+ if (IS_ERR(mc_list))
+ return PTR_ERR(mc_list);
+
+ err = mlxsw_sp_nve_mc_list_ip_add(mlxsw_sp, mc_list, proto, addr);
+ if (err)
+ goto err_add_ip;
+
+ err = mlxsw_sp_nve_fid_flood_index_set(fid, mc_list);
+ if (err)
+ goto err_fid_flood_index_set;
+
+ return 0;
+
+err_fid_flood_index_set:
+ mlxsw_sp_nve_mc_list_ip_del(mlxsw_sp, mc_list, proto, addr);
+err_add_ip:
+ mlxsw_sp_nve_mc_list_put(mlxsw_sp, mc_list);
+ return err;
+}
+
+void mlxsw_sp_nve_flood_ip_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fid *fid,
+ enum mlxsw_sp_l3proto proto,
+ union mlxsw_sp_l3addr *addr)
+{
+ struct mlxsw_sp_nve_mc_list_key key = { 0 };
+ struct mlxsw_sp_nve_mc_list *mc_list;
+
+ key.fid_index = mlxsw_sp_fid_index(fid);
+ mc_list = mlxsw_sp_nve_mc_list_find(mlxsw_sp, &key);
+ if (WARN_ON(!mc_list))
+ return;
+
+ mlxsw_sp_nve_fid_flood_index_clear(fid, mc_list);
+ mlxsw_sp_nve_mc_list_ip_del(mlxsw_sp, mc_list, proto, addr);
+ mlxsw_sp_nve_mc_list_put(mlxsw_sp, mc_list);
+}
+
+static void
+mlxsw_sp_nve_mc_record_delete(struct mlxsw_sp_nve_mc_record *mc_record)
+{
+ struct mlxsw_sp_nve *nve = mc_record->mlxsw_sp->nve;
+ unsigned int num_max_entries;
+ int i;
+
+ num_max_entries = nve->num_max_mc_entries[mc_record->proto];
+ for (i = 0; i < num_max_entries; i++) {
+ struct mlxsw_sp_nve_mc_entry *mc_entry = &mc_record->entries[i];
+
+ if (!mc_entry->valid)
+ continue;
+ mlxsw_sp_nve_mc_record_entry_del(mc_record, mc_entry);
+ }
+
+ WARN_ON(mc_record->num_entries);
+ mlxsw_sp_nve_mc_record_put(mc_record);
+}
+
+static void mlxsw_sp_nve_flood_ip_flush(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp_nve_mc_record *mc_record, *tmp;
+ struct mlxsw_sp_nve_mc_list_key key = { 0 };
+ struct mlxsw_sp_nve_mc_list *mc_list;
+
+ if (!mlxsw_sp_fid_nve_flood_index_is_set(fid))
+ return;
+
+ mlxsw_sp_fid_nve_flood_index_clear(fid);
+
+ key.fid_index = mlxsw_sp_fid_index(fid);
+ mc_list = mlxsw_sp_nve_mc_list_find(mlxsw_sp, &key);
+ if (WARN_ON(!mc_list))
+ return;
+
+ list_for_each_entry_safe(mc_record, tmp, &mc_list->records_list, list)
+ mlxsw_sp_nve_mc_record_delete(mc_record);
+
+ WARN_ON(!list_empty(&mc_list->records_list));
+ mlxsw_sp_nve_mc_list_put(mlxsw_sp, mc_list);
+}
+
+u32 mlxsw_sp_nve_decap_tunnel_index_get(const struct mlxsw_sp *mlxsw_sp)
+{
+ WARN_ON(mlxsw_sp->nve->num_nve_tunnels == 0);
+
+ return mlxsw_sp->nve->tunnel_index;
+}
+
+bool mlxsw_sp_nve_ipv4_route_is_decap(const struct mlxsw_sp *mlxsw_sp,
+ u32 tb_id, __be32 addr)
+{
+ struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
+ struct mlxsw_sp_nve_config *config = &nve->config;
+
+ if (nve->num_nve_tunnels &&
+ config->ul_proto == MLXSW_SP_L3_PROTO_IPV4 &&
+ config->ul_sip.addr4 == addr && config->ul_tb_id == tb_id)
+ return true;
+
+ return false;
+}
+
+static int mlxsw_sp_nve_tunnel_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nve_config *config)
+{
+ struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
+ const struct mlxsw_sp_nve_ops *ops;
+ int err;
+
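+ /* All offloaded VNIs share a single NVE tunnel: only the first user
+ * allocates the KVDL entry and initializes the hardware, later users
+ * only bump the reference count
+ */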
+ if (nve->num_nve_tunnels++ != 0)
+ return 0;
+
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
+ &nve->tunnel_index);
+ if (err)
+ goto err_kvdl_alloc;
+
+ ops = nve->nve_ops_arr[config->type];
+ err = ops->init(nve, config);
+ if (err)
+ goto err_ops_init;
+
+ return 0;
+
+err_ops_init:
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
+ nve->tunnel_index);
+err_kvdl_alloc:
+ nve->num_nve_tunnels--;
+ return err;
+}
+
+static void mlxsw_sp_nve_tunnel_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
+ const struct mlxsw_sp_nve_ops *ops;
+
+ ops = nve->nve_ops_arr[nve->config.type];
+
+ if (mlxsw_sp->nve->num_nve_tunnels == 1) {
+ ops->fini(nve);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
+ nve->tunnel_index);
+ }
+ nve->num_nve_tunnels--;
+}
+
+static void mlxsw_sp_nve_fdb_flush_by_fid(struct mlxsw_sp *mlxsw_sp,
+ u16 fid_index)
+{
+ char sfdf_pl[MLXSW_REG_SFDF_LEN];
+
+ mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_NVE_AND_FID);
+ mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
+}
+
+int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_nve_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
+ const struct mlxsw_sp_nve_ops *ops;
+ struct mlxsw_sp_nve_config config;
+ int err;
+
+ ops = nve->nve_ops_arr[params->type];
+
+ if (!ops->can_offload(nve, params->dev, extack))
+ return -EOPNOTSUPP;
+
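+ /* Zeroing the whole struct matters here: the memcmp() below compares
+ * padding bytes as well, so both copies must start out identical
+ */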
+ memset(&config, 0, sizeof(config));
+ ops->nve_config(nve, params->dev, &config);
+ if (nve->num_nve_tunnels &&
+ memcmp(&config, &nve->config, sizeof(config))) {
+ NL_SET_ERR_MSG_MOD(extack, "Conflicting NVE tunnels configuration");
+ return -EOPNOTSUPP;
+ }
+
+ err = mlxsw_sp_nve_tunnel_init(mlxsw_sp, &config);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to initialize NVE tunnel");
+ return err;
+ }
+
+ err = mlxsw_sp_fid_vni_set(fid, params->vni);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set VNI on FID");
+ goto err_fid_vni_set;
+ }
+
+ nve->config = config;
+
+ return 0;
+
+err_fid_vni_set:
+ mlxsw_sp_nve_tunnel_fini(mlxsw_sp);
+ return err;
+}
+
+void mlxsw_sp_nve_fid_disable(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fid *fid)
+{
+ u16 fid_index = mlxsw_sp_fid_index(fid);
+
+ mlxsw_sp_nve_flood_ip_flush(mlxsw_sp, fid);
+ mlxsw_sp_nve_fdb_flush_by_fid(mlxsw_sp, fid_index);
+ mlxsw_sp_fid_vni_clear(fid);
+ mlxsw_sp_nve_tunnel_fini(mlxsw_sp);
+}
+
+int mlxsw_sp_port_nve_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char tnqdr_pl[MLXSW_REG_TNQDR_LEN];
+
+ mlxsw_reg_tnqdr_pack(tnqdr_pl, mlxsw_sp_port->local_port);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnqdr), tnqdr_pl);
+}
+
+void mlxsw_sp_port_nve_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+}
+
+static int mlxsw_sp_nve_qos_init(struct mlxsw_sp *mlxsw_sp)
+{
+ char tnqcr_pl[MLXSW_REG_TNQCR_LEN];
+
+ mlxsw_reg_tnqcr_pack(tnqcr_pl);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnqcr), tnqcr_pl);
+}
+
+static int mlxsw_sp_nve_ecn_encap_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int i;
+
+ /* Iterate over inner ECN values */
+ for (i = INET_ECN_NOT_ECT; i <= INET_ECN_CE; i++) {
+ u8 outer_ecn = INET_ECN_encapsulate(0, i);
+ char tneem_pl[MLXSW_REG_TNEEM_LEN];
+ int err;
+
+ mlxsw_reg_tneem_pack(tneem_pl, i, outer_ecn);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tneem),
+ tneem_pl);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int __mlxsw_sp_nve_ecn_decap_init(struct mlxsw_sp *mlxsw_sp,
+ u8 inner_ecn, u8 outer_ecn)
+{
+ char tndem_pl[MLXSW_REG_TNDEM_LEN];
+ bool trap_en, set_ce = false;
+ u8 new_inner_ecn;
+
+ trap_en = !!__INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce);
+ new_inner_ecn = set_ce ? INET_ECN_CE : inner_ecn;
+
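+ /* __INET_ECN_decapsulate() returns non-zero for outer/inner ECN
+ * combinations that RFC 6040 considers invalid, so such packets are
+ * trapped to the CPU rather than decapsulated in hardware; set_ce
+ * means the inner ECN field is rewritten to CE
+ */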
+ mlxsw_reg_tndem_pack(tndem_pl, outer_ecn, inner_ecn, new_inner_ecn,
+ trap_en, trap_en ? MLXSW_TRAP_ID_DECAP_ECN0 : 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tndem), tndem_pl);
+}
+
+static int mlxsw_sp_nve_ecn_decap_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int i;
+
+ /* Iterate over inner ECN values */
+ for (i = INET_ECN_NOT_ECT; i <= INET_ECN_CE; i++) {
+ int j;
+
+ /* Iterate over outer ECN values */
+ for (j = INET_ECN_NOT_ECT; j <= INET_ECN_CE; j++) {
+ int err;
+
+ err = __mlxsw_sp_nve_ecn_decap_init(mlxsw_sp, i, j);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_nve_ecn_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
+
+ err = mlxsw_sp_nve_ecn_encap_init(mlxsw_sp);
+ if (err)
+ return err;
+
+ return mlxsw_sp_nve_ecn_decap_init(mlxsw_sp);
+}
+
+static int mlxsw_sp_nve_resources_query(struct mlxsw_sp *mlxsw_sp)
+{
+ unsigned int max;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_NVE_MC_ENTRIES_IPV4) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_NVE_MC_ENTRIES_IPV6))
+ return -EIO;
+ max = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_NVE_MC_ENTRIES_IPV4);
+ mlxsw_sp->nve->num_max_mc_entries[MLXSW_SP_L3_PROTO_IPV4] = max;
+ max = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_NVE_MC_ENTRIES_IPV6);
+ mlxsw_sp->nve->num_max_mc_entries[MLXSW_SP_L3_PROTO_IPV6] = max;
+
+ return 0;
+}
+
+int mlxsw_sp_nve_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_nve *nve;
+ int err;
+
+ nve = kzalloc(sizeof(*mlxsw_sp->nve), GFP_KERNEL);
+ if (!nve)
+ return -ENOMEM;
+ mlxsw_sp->nve = nve;
+ nve->mlxsw_sp = mlxsw_sp;
+ nve->nve_ops_arr = mlxsw_sp->nve_ops_arr;
+
+ err = rhashtable_init(&nve->mc_list_ht,
+ &mlxsw_sp_nve_mc_list_ht_params);
+ if (err)
+ goto err_rhashtable_init;
+
+ err = mlxsw_sp_nve_qos_init(mlxsw_sp);
+ if (err)
+ goto err_nve_qos_init;
+
+ err = mlxsw_sp_nve_ecn_init(mlxsw_sp);
+ if (err)
+ goto err_nve_ecn_init;
+
+ err = mlxsw_sp_nve_resources_query(mlxsw_sp);
+ if (err)
+ goto err_nve_resources_query;
+
+ return 0;
+
+err_nve_resources_query:
+err_nve_ecn_init:
+err_nve_qos_init:
+ rhashtable_destroy(&nve->mc_list_ht);
+err_rhashtable_init:
+ mlxsw_sp->nve = NULL;
+ kfree(nve);
+ return err;
+}
+
+void mlxsw_sp_nve_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ WARN_ON(mlxsw_sp->nve->num_nve_tunnels);
+ rhashtable_destroy(&mlxsw_sp->nve->mc_list_ht);
+ kfree(mlxsw_sp->nve);
+ mlxsw_sp->nve = NULL;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
new file mode 100644
index 000000000000..4cc3297e13d6
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#ifndef _MLXSW_SPECTRUM_NVE_H
+#define _MLXSW_SPECTRUM_NVE_H
+
+#include <linux/netlink.h>
+#include <linux/rhashtable.h>
+
+#include "spectrum.h"
+
+struct mlxsw_sp_nve_config {
+ enum mlxsw_sp_nve_type type;
+ u8 ttl;
+ u8 learning_en:1;
+ __be16 udp_dport;
+ __be32 flowlabel;
+ u32 ul_tb_id;
+ enum mlxsw_sp_l3proto ul_proto;
+ union mlxsw_sp_l3addr ul_sip;
+};
+
+struct mlxsw_sp_nve {
+ struct mlxsw_sp_nve_config config;
+ struct rhashtable mc_list_ht;
+ struct mlxsw_sp *mlxsw_sp;
+ const struct mlxsw_sp_nve_ops **nve_ops_arr;
+ unsigned int num_nve_tunnels; /* Protected by RTNL */
+ unsigned int num_max_mc_entries[MLXSW_SP_L3_PROTO_MAX];
+ u32 tunnel_index;
+};
+
+struct mlxsw_sp_nve_ops {
+ enum mlxsw_sp_nve_type type;
+ bool (*can_offload)(const struct mlxsw_sp_nve *nve,
+ const struct net_device *dev,
+ struct netlink_ext_ack *extack);
+ void (*nve_config)(const struct mlxsw_sp_nve *nve,
+ const struct net_device *dev,
+ struct mlxsw_sp_nve_config *config);
+ int (*init)(struct mlxsw_sp_nve *nve,
+ const struct mlxsw_sp_nve_config *config);
+ void (*fini)(struct mlxsw_sp_nve *nve);
+};
+
+extern const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops;
+extern const struct mlxsw_sp_nve_ops mlxsw_sp2_nve_vxlan_ops;
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
new file mode 100644
index 000000000000..d21c7be5b1c9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/netdevice.h>
+#include <linux/netlink.h>
+#include <linux/random.h>
+#include <net/vxlan.h>
+
+#include "reg.h"
+#include "spectrum_nve.h"
+
+/* Eth (18B) | IPv6 (40B) | UDP (8B) | VxLAN (8B) | Eth (14B) | IPv6 (40B)
+ *
+ * In the worst case - where we have a VLAN tag on the outer Ethernet
+ * header and IPv6 in overlay and underlay - we need to parse 128 bytes
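+ * (18 + 40 + 8 + 8 + 14 + 40 = 128)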
+ */
+#define MLXSW_SP_NVE_VXLAN_PARSING_DEPTH 128
+#define MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH 96
+
+#define MLXSW_SP_NVE_VXLAN_SUPPORTED_FLAGS VXLAN_F_UDP_ZERO_CSUM_TX
+
+static bool mlxsw_sp1_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
+ const struct net_device *dev,
+ struct netlink_ext_ack *extack)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_config *cfg = &vxlan->cfg;
+
+ if (cfg->saddr.sa.sa_family != AF_INET) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Only IPv4 underlay is supported");
+ return false;
+ }
+
+ if (vxlan_addr_multicast(&cfg->remote_ip)) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Multicast destination IP is not supported");
+ return false;
+ }
+
+ if (vxlan_addr_any(&cfg->saddr)) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Source address must be specified");
+ return false;
+ }
+
+ if (cfg->remote_ifindex) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Local interface is not supported");
+ return false;
+ }
+
+ if (cfg->port_min || cfg->port_max) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Only default UDP source port range is supported");
+ return false;
+ }
+
+ if (cfg->tos != 1) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: TOS must be configured to inherit");
+ return false;
+ }
+
+ if (cfg->flags & VXLAN_F_TTL_INHERIT) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: TTL must not be configured to inherit");
+ return false;
+ }
+
+ if (cfg->flags & VXLAN_F_LEARN) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Learning is not supported");
+ return false;
+ }
+
+ if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: UDP checksum is not supported");
+ return false;
+ }
+
+ if (cfg->flags & ~MLXSW_SP_NVE_VXLAN_SUPPORTED_FLAGS) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Unsupported flag");
+ return false;
+ }
+
+ if (cfg->ttl == 0) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: TTL must not be configured to 0");
+ return false;
+ }
+
+ if (cfg->label != 0) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Flow label must be configured to 0");
+ return false;
+ }
+
+ return true;
+}
+
+static void mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve *nve,
+ const struct net_device *dev,
+ struct mlxsw_sp_nve_config *config)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_config *cfg = &vxlan->cfg;
+
+ config->type = MLXSW_SP_NVE_TYPE_VXLAN;
+ config->ttl = cfg->ttl;
+ config->flowlabel = cfg->label;
+ config->learning_en = cfg->flags & VXLAN_F_LEARN ? 1 : 0;
+ config->ul_tb_id = RT_TABLE_MAIN;
+ config->ul_proto = MLXSW_SP_L3_PROTO_IPV4;
+ config->ul_sip.addr4 = cfg->saddr.sin.sin_addr.s_addr;
+ config->udp_dport = cfg->dst_port;
+}
+
+static int mlxsw_sp_nve_parsing_set(struct mlxsw_sp *mlxsw_sp,
+ unsigned int parsing_depth,
+ __be16 udp_dport)
+{
+ char mprs_pl[MLXSW_REG_MPRS_LEN];
+
+ mlxsw_reg_mprs_pack(mprs_pl, parsing_depth, be16_to_cpu(udp_dport));
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
+}
+
+static int
+mlxsw_sp1_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_nve_config *config)
+{
+ char tngcr_pl[MLXSW_REG_TNGCR_LEN];
+ u16 ul_vr_id;
+ u8 udp_sport;
+ int err;
+
+ err = mlxsw_sp_router_tb_id_vr_id(mlxsw_sp, config->ul_tb_id,
+ &ul_vr_id);
+ if (err)
+ return err;
+
+ mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, true,
+ config->ttl);
+ /* VxLAN driver's default UDP source port range is 32768 (0x8000)
+ * to 60999 (0xee47). Set the upper 8 bits of the UDP source port
+ * to a random number between 0x80 and 0xee
+ */
+ get_random_bytes(&udp_sport, sizeof(udp_sport));
+ udp_sport = (udp_sport % (0xee - 0x80 + 1)) + 0x80;
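+ /* 0x8000 >> 8 = 0x80 and 0xee47 >> 8 = 0xee, so the randomized
+ * prefix stays within the default source port range noted above
+ */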
+ mlxsw_reg_tngcr_nve_udp_sport_prefix_set(tngcr_pl, udp_sport);
+ mlxsw_reg_tngcr_learn_enable_set(tngcr_pl, config->learning_en);
+ mlxsw_reg_tngcr_underlay_virtual_router_set(tngcr_pl, ul_vr_id);
+ mlxsw_reg_tngcr_usipv4_set(tngcr_pl, be32_to_cpu(config->ul_sip.addr4));
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
+}
+
+static void mlxsw_sp1_nve_vxlan_config_clear(struct mlxsw_sp *mlxsw_sp)
+{
+ char tngcr_pl[MLXSW_REG_TNGCR_LEN];
+
+ mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, false, 0);
+
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
+}
+
+static int mlxsw_sp1_nve_vxlan_rtdp_set(struct mlxsw_sp *mlxsw_sp,
+ unsigned int tunnel_index)
+{
+ char rtdp_pl[MLXSW_REG_RTDP_LEN];
+
+ mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_NVE, tunnel_index);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtdp), rtdp_pl);
+}
+
+static int mlxsw_sp1_nve_vxlan_init(struct mlxsw_sp_nve *nve,
+ const struct mlxsw_sp_nve_config *config)
+{
+ struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;
+ int err;
+
+ err = mlxsw_sp_nve_parsing_set(mlxsw_sp,
+ MLXSW_SP_NVE_VXLAN_PARSING_DEPTH,
+ config->udp_dport);
+ if (err)
+ return err;
+
+ err = mlxsw_sp1_nve_vxlan_config_set(mlxsw_sp, config);
+ if (err)
+ goto err_config_set;
+
+ err = mlxsw_sp1_nve_vxlan_rtdp_set(mlxsw_sp, nve->tunnel_index);
+ if (err)
+ goto err_rtdp_set;
+
+ err = mlxsw_sp_router_nve_promote_decap(mlxsw_sp, config->ul_tb_id,
+ config->ul_proto,
+ &config->ul_sip,
+ nve->tunnel_index);
+ if (err)
+ goto err_promote_decap;
+
+ return 0;
+
+err_promote_decap:
+err_rtdp_set:
+ mlxsw_sp1_nve_vxlan_config_clear(mlxsw_sp);
+err_config_set:
+ mlxsw_sp_nve_parsing_set(mlxsw_sp, MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH,
+ config->udp_dport);
+ return err;
+}
+
+static void mlxsw_sp1_nve_vxlan_fini(struct mlxsw_sp_nve *nve)
+{
+ struct mlxsw_sp_nve_config *config = &nve->config;
+ struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;
+
+ mlxsw_sp_router_nve_demote_decap(mlxsw_sp, config->ul_tb_id,
+ config->ul_proto, &config->ul_sip);
+ mlxsw_sp1_nve_vxlan_config_clear(mlxsw_sp);
+ mlxsw_sp_nve_parsing_set(mlxsw_sp, MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH,
+ config->udp_dport);
+}
+
+const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops = {
+ .type = MLXSW_SP_NVE_TYPE_VXLAN,
+ .can_offload = mlxsw_sp1_nve_vxlan_can_offload,
+ .nve_config = mlxsw_sp_nve_vxlan_config,
+ .init = mlxsw_sp1_nve_vxlan_init,
+ .fini = mlxsw_sp1_nve_vxlan_fini,
+};
+
+static bool mlxsw_sp2_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
+ const struct net_device *dev,
+ struct netlink_ext_ack *extack)
+{
+ return false;
+}
+
+static int mlxsw_sp2_nve_vxlan_init(struct mlxsw_sp_nve *nve,
+ const struct mlxsw_sp_nve_config *config)
+{
+ return -EOPNOTSUPP;
+}
+
+static void mlxsw_sp2_nve_vxlan_fini(struct mlxsw_sp_nve *nve)
+{
+}
+
+const struct mlxsw_sp_nve_ops mlxsw_sp2_nve_vxlan_ops = {
+ .type = MLXSW_SP_NVE_TYPE_VXLAN,
+ .can_offload = mlxsw_sp2_nve_vxlan_can_offload,
+ .nve_config = mlxsw_sp_nve_vxlan_config,
+ .init = mlxsw_sp2_nve_vxlan_init,
+ .fini = mlxsw_sp2_nve_vxlan_fini,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 2ab9cf25a08a..9e9bb57134f2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -366,6 +366,7 @@ enum mlxsw_sp_fib_entry_type {
* encapsulating entries.)
*/
MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
+ MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP,
};
struct mlxsw_sp_nexthop_group;
@@ -741,6 +742,19 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
return NULL;
}
+int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
+ u16 *vr_id)
+{
+ struct mlxsw_sp_vr *vr;
+
+ vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
+ if (!vr)
+ return -ESRCH;
+ *vr_id = vr->id;
+
+ return 0;
+}
+
static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
enum mlxsw_sp_l3proto proto)
{
@@ -1128,6 +1142,52 @@ mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}
+static struct mlxsw_sp_fib_entry *
+mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
+ enum mlxsw_sp_l3proto proto,
+ const union mlxsw_sp_l3addr *addr,
+ enum mlxsw_sp_fib_entry_type type)
+{
+ struct mlxsw_sp_fib_entry *fib_entry;
+ struct mlxsw_sp_fib_node *fib_node;
+ unsigned char addr_prefix_len;
+ struct mlxsw_sp_fib *fib;
+ struct mlxsw_sp_vr *vr;
+ const void *addrp;
+ size_t addr_len;
+ u32 addr4;
+
+ vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
+ if (!vr)
+ return NULL;
+ fib = mlxsw_sp_vr_fib(vr, proto);
+
+ switch (proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ addr4 = be32_to_cpu(addr->addr4);
+ addrp = &addr4;
+ addr_len = 4;
+ addr_prefix_len = 32;
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6: /* fall through */
+ default:
+ WARN_ON(1);
+ return NULL;
+ }
+
+ fib_node = mlxsw_sp_fib_node_lookup(fib, addrp, addr_len,
+ addr_prefix_len);
+ if (!fib_node || list_empty(&fib_node->entry_list))
+ return NULL;
+
+ fib_entry = list_first_entry(&fib_node->entry_list,
+ struct mlxsw_sp_fib_entry, list);
+ if (fib_entry->type != type)
+ return NULL;
+
+ return fib_entry;
+}
+
/* Given an IPIP entry, find the corresponding decap route. */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
@@ -1765,6 +1825,56 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
return 0;
}
+int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
+ enum mlxsw_sp_l3proto ul_proto,
+ const union mlxsw_sp_l3addr *ul_sip,
+ u32 tunnel_index)
+{
+ enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
+ struct mlxsw_sp_fib_entry *fib_entry;
+ int err;
+
+ /* It is valid to create a tunnel with a local IP and only later
+ * assign this IP address to a local interface
+ */
+ fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
+ ul_proto, ul_sip,
+ type);
+ if (!fib_entry)
+ return 0;
+
+ fib_entry->decap.tunnel_index = tunnel_index;
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
+
+ err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
+ if (err)
+ goto err_fib_entry_update;
+
+ return 0;
+
+err_fib_entry_update:
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
+ mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
+ return err;
+}
+
+void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
+ enum mlxsw_sp_l3proto ul_proto,
+ const union mlxsw_sp_l3addr *ul_sip)
+{
+ enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
+ struct mlxsw_sp_fib_entry *fib_entry;
+
+ fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
+ ul_proto, ul_sip,
+ type);
+ if (!fib_entry)
+ return;
+
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
+ mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
+}
+
struct mlxsw_sp_neigh_key {
struct neighbour *n;
};
@@ -3815,6 +3925,7 @@ mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
return !!nh_group->nh_rif;
case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
+ case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
return true;
default:
return false;
@@ -3848,7 +3959,8 @@ mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
int i;
if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
- fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP) {
+ fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP ||
+ fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP) {
nh_grp->nexthops->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
return;
}
@@ -4072,6 +4184,18 @@ mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
fib_entry->decap.tunnel_index);
}
+static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
+{
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
+
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
+ fib_entry->decap.tunnel_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+}
+
static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
enum mlxsw_reg_ralue_op op)
@@ -4086,6 +4210,8 @@ static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
fib_entry, op);
+ case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
+ return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, fib_entry, op);
}
return -EINVAL;
}
@@ -4121,6 +4247,7 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
{
union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
+ u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
struct net_device *dev = fen_info->fi->fib_dev;
struct mlxsw_sp_ipip_entry *ipip_entry;
struct fib_info *fi = fen_info->fi;
@@ -4135,6 +4262,15 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
fib_entry,
ipip_entry);
}
+ if (mlxsw_sp_nve_ipv4_route_is_decap(mlxsw_sp, tb_id,
+ dip.addr4)) {
+ u32 t_index;
+
+ t_index = mlxsw_sp_nve_decap_tunnel_index_get(mlxsw_sp);
+ fib_entry->decap.tunnel_index = t_index;
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
+ return 0;
+ }
/* fall through */
case RTN_BROADCAST:
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index 1a60391daafa..3dbafdeaab2b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -7,17 +7,6 @@
#include "spectrum.h"
#include "reg.h"
-enum mlxsw_sp_l3proto {
- MLXSW_SP_L3_PROTO_IPV4,
- MLXSW_SP_L3_PROTO_IPV6,
-#define MLXSW_SP_L3_PROTO_MAX (MLXSW_SP_L3_PROTO_IPV6 + 1)
-};
-
-union mlxsw_sp_l3addr {
- __be32 addr4;
- struct in6_addr addr6;
-};
-
struct mlxsw_sp_rif_ipip_lb;
struct mlxsw_sp_rif_ipip_lb_config {
enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt;
@@ -35,8 +24,6 @@ struct mlxsw_sp_neigh_entry;
struct mlxsw_sp_nexthop;
struct mlxsw_sp_ipip_entry;
-struct mlxsw_sp_rif *mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
- const struct net_device *dev);
struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
u16 rif_index);
u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
@@ -44,9 +31,7 @@ u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *rif);
u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *rif);
u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev);
int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif);
-u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp);
const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif);
-struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif);
int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif,
enum mlxsw_sp_rif_counter_dir dir,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index db715da7bab7..739a51f0a366 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -15,9 +15,9 @@
#include <linux/rtnetlink.h>
#include <linux/netlink.h>
#include <net/switchdev.h>
+#include <net/vxlan.h>
#include "spectrum_span.h"
-#include "spectrum_router.h"
#include "spectrum_switchdev.h"
#include "spectrum.h"
#include "core.h"
@@ -84,9 +84,19 @@ struct mlxsw_sp_bridge_ops {
void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp_bridge_port *bridge_port,
struct mlxsw_sp_port *mlxsw_sp_port);
+ int (*vxlan_join)(struct mlxsw_sp_bridge_device *bridge_device,
+ const struct net_device *vxlan_dev,
+ struct netlink_ext_ack *extack);
+ void (*vxlan_leave)(struct mlxsw_sp_bridge_device *bridge_device,
+ const struct net_device *vxlan_dev);
struct mlxsw_sp_fid *
(*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
u16 vid);
+ struct mlxsw_sp_fid *
+ (*fid_lookup)(struct mlxsw_sp_bridge_device *bridge_device,
+ u16 vid);
+ u16 (*fid_vid)(struct mlxsw_sp_bridge_device *bridge_device,
+ const struct mlxsw_sp_fid *fid);
};
static int
@@ -1237,6 +1247,51 @@ static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
MLXSW_REG_SFD_OP_WRITE_REMOVE;
}
+static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
+ const char *mac, u16 fid,
+ enum mlxsw_sp_l3proto proto,
+ const union mlxsw_sp_l3addr *addr,
+ bool adding, bool dynamic)
+{
+ enum mlxsw_reg_sfd_uc_tunnel_protocol sfd_proto;
+ char *sfd_pl;
+ u8 num_rec;
+ u32 uip;
+ int err;
+
+ switch (proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ uip = be32_to_cpu(addr->addr4);
+ sfd_proto = MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4;
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6: /* fall through */
+ default:
+ WARN_ON(1);
+ return -EOPNOTSUPP;
+ }
+
+ sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
+ if (!sfd_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
+ mlxsw_reg_sfd_uc_tunnel_pack(sfd_pl, 0,
+ mlxsw_sp_sfd_rec_policy(dynamic), mac, fid,
+ MLXSW_REG_SFD_REC_ACTION_NOP, uip,
+ sfd_proto);
+ num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
+ if (err)
+ goto out;
+
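+ /* The response carries the number of records the device actually
+ * processed; a mismatch with the request means the entry was not
+ * committed
+ */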
+ if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+ err = -EBUSY;
+
+out:
+ kfree(sfd_pl);
+ return err;
+}
+
static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
const char *mac, u16 fid, bool adding,
enum mlxsw_reg_sfd_rec_action action,
@@ -1950,6 +2005,21 @@ mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
}
+static int
+mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
+ const struct net_device *vxlan_dev,
+ struct netlink_ext_ack *extack)
+{
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+static void
+mlxsw_sp_bridge_8021q_vxlan_leave(struct mlxsw_sp_bridge_device *bridge_device,
+ const struct net_device *vxlan_dev)
+{
+}
+
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
u16 vid)
@@ -1959,10 +2029,29 @@ mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
}
+static struct mlxsw_sp_fid *
+mlxsw_sp_bridge_8021q_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
+ u16 vid)
+{
+ WARN_ON(1);
+ return NULL;
+}
+
+static u16
+mlxsw_sp_bridge_8021q_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
+ const struct mlxsw_sp_fid *fid)
+{
+ return mlxsw_sp_fid_8021q_vid(fid);
+}
+
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
.port_join = mlxsw_sp_bridge_8021q_port_join,
.port_leave = mlxsw_sp_bridge_8021q_port_leave,
+ .vxlan_join = mlxsw_sp_bridge_8021q_vxlan_join,
+ .vxlan_leave = mlxsw_sp_bridge_8021q_vxlan_leave,
.fid_get = mlxsw_sp_bridge_8021q_fid_get,
+ .fid_lookup = mlxsw_sp_bridge_8021q_fid_lookup,
+ .fid_vid = mlxsw_sp_bridge_8021q_fid_vid,
};
static bool
@@ -2026,19 +2115,126 @@ mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
}
+static int
+mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
+ const struct net_device *vxlan_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
+ struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
+ struct mlxsw_sp_nve_params params = {
+ .type = MLXSW_SP_NVE_TYPE_VXLAN,
+ .vni = vxlan->cfg.vni,
+ .dev = vxlan_dev,
+ };
+ struct mlxsw_sp_fid *fid;
+ int err;
+
+ fid = mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
+ if (!fid)
+ return -EINVAL;
+
+ if (mlxsw_sp_fid_vni_is_set(fid))
+ return -EINVAL;
+
+ err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
+ if (err)
+ goto err_nve_fid_enable;
+
+ /* The tunnel port does not hold a reference on the FID. Only
+ * local ports and the router port
+ */
+ mlxsw_sp_fid_put(fid);
+
+ return 0;
+
+err_nve_fid_enable:
+ mlxsw_sp_fid_put(fid);
+ return err;
+}
+
+static void
+mlxsw_sp_bridge_8021d_vxlan_leave(struct mlxsw_sp_bridge_device *bridge_device,
+ const struct net_device *vxlan_dev)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
+ struct mlxsw_sp_fid *fid;
+
+ fid = mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
+ if (WARN_ON(!fid))
+ return;
+
+ /* If the VxLAN device is down, then the FID does not have a VNI */
+ if (!mlxsw_sp_fid_vni_is_set(fid))
+ goto out;
+
+ mlxsw_sp_nve_fid_disable(mlxsw_sp, fid);
+out:
+ mlxsw_sp_fid_put(fid);
+}
+
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
u16 vid)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
+ struct net_device *vxlan_dev;
+ struct mlxsw_sp_fid *fid;
+ int err;
+
+ fid = mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
+ if (IS_ERR(fid))
+ return fid;
+
+ if (mlxsw_sp_fid_vni_is_set(fid))
+ return fid;
+
+ vxlan_dev = mlxsw_sp_bridge_vxlan_dev_find(bridge_device->dev);
+ if (!vxlan_dev)
+ return fid;
+
+ if (!netif_running(vxlan_dev))
+ return fid;
+
+ err = mlxsw_sp_bridge_8021d_vxlan_join(bridge_device, vxlan_dev, NULL);
+ if (err)
+ goto err_vxlan_join;
+
+ return fid;
+
+err_vxlan_join:
+ mlxsw_sp_fid_put(fid);
+ return ERR_PTR(err);
+}
+
+static struct mlxsw_sp_fid *
+mlxsw_sp_bridge_8021d_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
+ u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
- return mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
+ /* The only valid VLAN for a VLAN-unaware bridge is 0 */
+ if (vid)
+ return NULL;
+
+ return mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
+}
+
+static u16
+mlxsw_sp_bridge_8021d_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
+ const struct mlxsw_sp_fid *fid)
+{
+ return 0;
}
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
.port_join = mlxsw_sp_bridge_8021d_port_join,
.port_leave = mlxsw_sp_bridge_8021d_port_leave,
+ .vxlan_join = mlxsw_sp_bridge_8021d_vxlan_join,
+ .vxlan_leave = mlxsw_sp_bridge_8021d_vxlan_leave,
.fid_get = mlxsw_sp_bridge_8021d_fid_get,
+ .fid_lookup = mlxsw_sp_bridge_8021d_fid_lookup,
+ .fid_vid = mlxsw_sp_bridge_8021d_fid_vid,
};
int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -2088,15 +2284,43 @@ void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
}
+int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev,
+ const struct net_device *vxlan_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_bridge_device *bridge_device;
+
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+ if (WARN_ON(!bridge_device))
+ return -EINVAL;
+
+ return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, extack);
+}
+
+void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev,
+ const struct net_device *vxlan_dev)
+{
+ struct mlxsw_sp_bridge_device *bridge_device;
+
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+ if (WARN_ON(!bridge_device))
+ return;
+
+ bridge_device->ops->vxlan_leave(bridge_device, vxlan_dev);
+}
+
static void
mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
const char *mac, u16 vid,
- struct net_device *dev)
+ struct net_device *dev, bool offloaded)
{
struct switchdev_notifier_fdb_info info;
info.addr = mac;
info.vid = vid;
+ info.offloaded = offloaded;
call_switchdev_notifiers(type, dev, &info.info);
}
@@ -2148,7 +2372,7 @@ do_fdb_op:
if (!do_notification)
return;
type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
- mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev);
+ mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);
return;
@@ -2208,7 +2432,7 @@ do_fdb_op:
if (!do_notification)
return;
type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
- mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev);
+ mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);
return;
@@ -2284,12 +2508,127 @@ out:
struct mlxsw_sp_switchdev_event_work {
struct work_struct work;
- struct switchdev_notifier_fdb_info fdb_info;
+ union {
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
+ };
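+ /* only one of the union members is valid, depending on 'event' */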
struct net_device *dev;
unsigned long event;
};
-static void mlxsw_sp_switchdev_event_work(struct work_struct *work)
+static void
+mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
+ enum mlxsw_sp_l3proto *proto,
+ union mlxsw_sp_l3addr *addr)
+{
+ if (vxlan_addr->sa.sa_family == AF_INET) {
+ addr->addr4 = vxlan_addr->sin.sin_addr.s_addr;
+ *proto = MLXSW_SP_L3_PROTO_IPV4;
+ } else {
+ addr->addr6 = vxlan_addr->sin6.sin6_addr;
+ *proto = MLXSW_SP_L3_PROTO_IPV6;
+ }
+}
+
+static void
+mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_switchdev_event_work *
+ switchdev_work,
+ struct mlxsw_sp_fid *fid, __be32 vni)
+{
+ struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct net_device *dev = switchdev_work->dev;
+ enum mlxsw_sp_l3proto proto;
+ union mlxsw_sp_l3addr addr;
+ int err;
+
+ fdb_info = &switchdev_work->fdb_info;
+ err = vxlan_fdb_find_uc(dev, fdb_info->addr, vni, &vxlan_fdb_info);
+ if (err)
+ return;
+
+ mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info.remote_ip,
+ &proto, &addr);
+
+ switch (switchdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
+ vxlan_fdb_info.eth_addr,
+ mlxsw_sp_fid_index(fid),
+ proto, &addr, true, false);
+ if (err)
+ return;
+ vxlan_fdb_info.offloaded = true;
+ call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
+ &vxlan_fdb_info.info);
+ mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
+ vxlan_fdb_info.eth_addr,
+ fdb_info->vid, dev, true);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
+ vxlan_fdb_info.eth_addr,
+ mlxsw_sp_fid_index(fid),
+ proto, &addr, false,
+ false);
+ vxlan_fdb_info.offloaded = false;
+ call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
+ &vxlan_fdb_info.info);
+ break;
+ }
+}
+
+static void
+mlxsw_sp_switchdev_bridge_nve_fdb_event(struct mlxsw_sp_switchdev_event_work *
+ switchdev_work)
+{
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct net_device *dev = switchdev_work->dev;
+ struct net_device *br_dev;
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_fid *fid;
+ __be32 vni;
+ int err;
+
+ if (switchdev_work->event != SWITCHDEV_FDB_ADD_TO_DEVICE &&
+ switchdev_work->event != SWITCHDEV_FDB_DEL_TO_DEVICE)
+ return;
+
+ if (!switchdev_work->fdb_info.added_by_user)
+ return;
+
+ if (!netif_running(dev))
+ return;
+ br_dev = netdev_master_upper_dev_get(dev);
+ if (!br_dev)
+ return;
+ if (!netif_is_bridge_master(br_dev))
+ return;
+ mlxsw_sp = mlxsw_sp_lower_get(br_dev);
+ if (!mlxsw_sp)
+ return;
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+ if (!bridge_device)
+ return;
+
+ fid = bridge_device->ops->fid_lookup(bridge_device,
+ switchdev_work->fdb_info.vid);
+ if (!fid)
+ return;
+
+ err = mlxsw_sp_fid_vni(fid, &vni);
+ if (err)
+ goto out;
+
+ mlxsw_sp_switchdev_bridge_vxlan_fdb_event(mlxsw_sp, switchdev_work, fid,
+ vni);
+
+out:
+ mlxsw_sp_fid_put(fid);
+}
+
+static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work)
{
struct mlxsw_sp_switchdev_event_work *switchdev_work =
container_of(work, struct mlxsw_sp_switchdev_event_work, work);
@@ -2299,6 +2638,11 @@ static void mlxsw_sp_switchdev_event_work(struct work_struct *work)
int err;
rtnl_lock();
+ if (netif_is_vxlan(dev)) {
+ mlxsw_sp_switchdev_bridge_nve_fdb_event(switchdev_work);
+ goto out;
+ }
+
mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
if (!mlxsw_sp_port)
goto out;
@@ -2313,12 +2657,10 @@ static void mlxsw_sp_switchdev_event_work(struct work_struct *work)
break;
mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
fdb_info->addr,
- fdb_info->vid, dev);
+ fdb_info->vid, dev, true);
break;
case SWITCHDEV_FDB_DEL_TO_DEVICE:
fdb_info = &switchdev_work->fdb_info;
- if (!fdb_info->added_by_user)
- break;
mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
break;
case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
@@ -2338,22 +2680,213 @@ out:
dev_put(dev);
}
+static void
+mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_switchdev_event_work *
+ switchdev_work)
+{
+ struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct net_device *dev = switchdev_work->dev;
+ u8 all_zeros_mac[ETH_ALEN] = { 0 };
+ enum mlxsw_sp_l3proto proto;
+ union mlxsw_sp_l3addr addr;
+ struct net_device *br_dev;
+ struct mlxsw_sp_fid *fid;
+ u16 vid;
+ int err;
+
+ vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
+ br_dev = netdev_master_upper_dev_get(dev);
+
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+ if (!bridge_device)
+ return;
+
+ fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
+ if (!fid)
+ return;
+
+ mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
+ &proto, &addr);
+
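+ /* An all-zeros MAC in the VxLAN FDB represents the default remote
+ * used for BUM traffic, so it is programmed as a flood IP for the
+ * FID rather than as a unicast FDB record
+ */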
+ if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
+ err = mlxsw_sp_nve_flood_ip_add(mlxsw_sp, fid, proto, &addr);
+ if (err) {
+ mlxsw_sp_fid_put(fid);
+ return;
+ }
+ vxlan_fdb_info->offloaded = true;
+ call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
+ &vxlan_fdb_info->info);
+ mlxsw_sp_fid_put(fid);
+ return;
+ }
+
+ /* The device has a single FDB table, whereas Linux has two - one
+ * in the bridge driver and another in the VxLAN driver. We only
+ * program an entry to the device if the MAC points to the VxLAN
+ * device in the bridge's FDB table
+ */
+ vid = bridge_device->ops->fid_vid(bridge_device, fid);
+ if (br_fdb_find_port(br_dev, vxlan_fdb_info->eth_addr, vid) != dev)
+ goto err_br_fdb_find;
+
+ err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
+ mlxsw_sp_fid_index(fid), proto,
+ &addr, true, false);
+ if (err)
+ goto err_fdb_tunnel_uc_op;
+ vxlan_fdb_info->offloaded = true;
+ call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
+ &vxlan_fdb_info->info);
+ mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
+ vxlan_fdb_info->eth_addr, vid, dev, true);
+
+ mlxsw_sp_fid_put(fid);
+
+ return;
+
+err_fdb_tunnel_uc_op:
+err_br_fdb_find:
+ mlxsw_sp_fid_put(fid);
+}
+
+static void
+mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_switchdev_event_work *
+ switchdev_work)
+{
+ struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct net_device *dev = switchdev_work->dev;
+ struct net_device *br_dev = netdev_master_upper_dev_get(dev);
+ u8 all_zeros_mac[ETH_ALEN] = { 0 };
+ enum mlxsw_sp_l3proto proto;
+ union mlxsw_sp_l3addr addr;
+ struct mlxsw_sp_fid *fid;
+ u16 vid;
+
+ vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
+
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+ if (!bridge_device)
+ return;
+
+ fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
+ if (!fid)
+ return;
+
+ mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
+ &proto, &addr);
+
+ if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
+ mlxsw_sp_nve_flood_ip_del(mlxsw_sp, fid, proto, &addr);
+ mlxsw_sp_fid_put(fid);
+ return;
+ }
+
+ mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
+ mlxsw_sp_fid_index(fid), proto, &addr,
+ false, false);
+ vid = bridge_device->ops->fid_vid(bridge_device, fid);
+ mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
+ vxlan_fdb_info->eth_addr, vid, dev, false);
+
+ mlxsw_sp_fid_put(fid);
+}
+
+static void mlxsw_sp_switchdev_vxlan_fdb_event_work(struct work_struct *work)
+{
+ struct mlxsw_sp_switchdev_event_work *switchdev_work =
+ container_of(work, struct mlxsw_sp_switchdev_event_work, work);
+ struct net_device *dev = switchdev_work->dev;
+ struct mlxsw_sp *mlxsw_sp;
+ struct net_device *br_dev;
+
+ rtnl_lock();
+
+ if (!netif_running(dev))
+ goto out;
+ br_dev = netdev_master_upper_dev_get(dev);
+ if (!br_dev)
+ goto out;
+ if (!netif_is_bridge_master(br_dev))
+ goto out;
+ mlxsw_sp = mlxsw_sp_lower_get(br_dev);
+ if (!mlxsw_sp)
+ goto out;
+
+ switch (switchdev_work->event) {
+ case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
+ mlxsw_sp_switchdev_vxlan_fdb_add(mlxsw_sp, switchdev_work);
+ break;
+ case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
+ mlxsw_sp_switchdev_vxlan_fdb_del(mlxsw_sp, switchdev_work);
+ break;
+ }
+
+out:
+ rtnl_unlock();
+ kfree(switchdev_work);
+ dev_put(dev);
+}
+
+static int
+mlxsw_sp_switchdev_vxlan_work_prepare(struct mlxsw_sp_switchdev_event_work *
+ switchdev_work,
+ struct switchdev_notifier_info *info)
+{
+ struct vxlan_dev *vxlan = netdev_priv(switchdev_work->dev);
+ struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
+ struct vxlan_config *cfg = &vxlan->cfg;
+
+ vxlan_fdb_info = container_of(info,
+ struct switchdev_notifier_vxlan_fdb_info,
+ info);
+
+ if (vxlan_fdb_info->remote_port != cfg->dst_port)
+ return -EOPNOTSUPP;
+ if (vxlan_fdb_info->remote_vni != cfg->vni)
+ return -EOPNOTSUPP;
+ if (vxlan_fdb_info->vni != cfg->vni)
+ return -EOPNOTSUPP;
+ if (vxlan_fdb_info->remote_ifindex)
+ return -EOPNOTSUPP;
+ if (is_multicast_ether_addr(vxlan_fdb_info->eth_addr))
+ return -EOPNOTSUPP;
+ if (vxlan_addr_multicast(&vxlan_fdb_info->remote_ip))
+ return -EOPNOTSUPP;
+
+ switchdev_work->vxlan_fdb_info = *vxlan_fdb_info;
+
+ return 0;
+}
+
/* Called under rcu_read_lock() */
static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
struct mlxsw_sp_switchdev_event_work *switchdev_work;
- struct switchdev_notifier_fdb_info *fdb_info = ptr;
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct switchdev_notifier_info *info = ptr;
+ struct net_device *br_dev;
+ int err;
- if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
+ /* Tunnel devices are not our uppers, so check their master instead */
+ br_dev = netdev_master_upper_dev_get_rcu(dev);
+ if (!br_dev)
+ return NOTIFY_DONE;
+ if (!netif_is_bridge_master(br_dev))
+ return NOTIFY_DONE;
+ if (!mlxsw_sp_port_dev_lower_find_rcu(br_dev))
return NOTIFY_DONE;
switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
if (!switchdev_work)
return NOTIFY_BAD;
- INIT_WORK(&switchdev_work->work, mlxsw_sp_switchdev_event_work);
switchdev_work->dev = dev;
switchdev_work->event = event;
@@ -2362,6 +2895,11 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
case SWITCHDEV_FDB_DEL_TO_DEVICE: /* fall through */
case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
case SWITCHDEV_FDB_DEL_TO_BRIDGE:
+ fdb_info = container_of(info,
+ struct switchdev_notifier_fdb_info,
+ info);
+ INIT_WORK(&switchdev_work->work,
+ mlxsw_sp_switchdev_bridge_fdb_event_work);
memcpy(&switchdev_work->fdb_info, ptr,
sizeof(switchdev_work->fdb_info));
switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
@@ -2375,6 +2913,16 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
*/
dev_hold(dev);
break;
+ case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE: /* fall through */
+ case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
+ INIT_WORK(&switchdev_work->work,
+ mlxsw_sp_switchdev_vxlan_fdb_event_work);
+ err = mlxsw_sp_switchdev_vxlan_work_prepare(switchdev_work,
+ info);
+ if (err)
+ goto err_vxlan_work_prepare;
+ dev_hold(dev);
+ break;
default:
kfree(switchdev_work);
return NOTIFY_DONE;
@@ -2384,6 +2932,7 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
return NOTIFY_DONE;
+err_vxlan_work_prepare:
err_addr_alloc:
kfree(switchdev_work);
return NOTIFY_BAD;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 53020724c2f6..6f18f4d3322a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -24,6 +24,7 @@ enum {
MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34,
MLXSW_TRAP_ID_PKT_SAMPLE = 0x38,
MLXSW_TRAP_ID_FID_MISS = 0x3D,
+ MLXSW_TRAP_ID_DECAP_ECN0 = 0x40,
MLXSW_TRAP_ID_ARPBC = 0x50,
MLXSW_TRAP_ID_ARPUC = 0x51,
MLXSW_TRAP_ID_MTUERROR = 0x52,
@@ -59,6 +60,7 @@ enum {
MLXSW_TRAP_ID_IPV6_MC_LINK_LOCAL_DEST = 0x91,
MLXSW_TRAP_ID_HOST_MISS_IPV6 = 0x92,
MLXSW_TRAP_ID_IPIP_DECAP_ERROR = 0xB1,
+ MLXSW_TRAP_ID_NVE_ENCAP_ARP = 0xBD,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7,
MLXSW_TRAP_ID_ACL0 = 0x1C0,
diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig
index 16bd3f44dbe8..cf1d49149cc8 100644
--- a/drivers/net/ethernet/microchip/Kconfig
+++ b/drivers/net/ethernet/microchip/Kconfig
@@ -5,7 +5,6 @@
config NET_VENDOR_MICROCHIP
bool "Microchip devices"
default y
- depends on SPI
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
diff --git a/drivers/net/ethernet/mscc/Kconfig b/drivers/net/ethernet/mscc/Kconfig
index 36c84625d54e..bcec0587cf61 100644
--- a/drivers/net/ethernet/mscc/Kconfig
+++ b/drivers/net/ethernet/mscc/Kconfig
@@ -23,6 +23,8 @@ config MSCC_OCELOT_SWITCH
config MSCC_OCELOT_SWITCH_OCELOT
tristate "Ocelot switch driver on Ocelot"
depends on MSCC_OCELOT_SWITCH
+ depends on GENERIC_PHY
+ depends on OF_NET
help
This driver supports the Ocelot network switch device as present on
the Ocelot SoCs.
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 1a4f2bb48ead..3238b9ee42f3 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -133,9 +133,9 @@ static inline int ocelot_vlant_wait_for_completion(struct ocelot *ocelot)
{
unsigned int val, timeout = 10;
- /* Wait for the issued mac table command to be completed, or timeout.
- * When the command read from ANA_TABLES_MACACCESS is
- * MACACCESS_CMD_IDLE, the issued command completed successfully.
+ /* Wait for the issued vlan table command to be completed, or timeout.
+ * When the command read from ANA_TABLES_VLANACCESS is
+ * VLANACCESS_CMD_IDLE, the issued command completed successfully.
*/
do {
val = ocelot_read(ocelot, ANA_TABLES_VLANACCESS);
@@ -472,6 +472,7 @@ static int ocelot_port_open(struct net_device *dev)
{
struct ocelot_port *port = netdev_priv(dev);
struct ocelot *ocelot = port->ocelot;
+ enum phy_mode phy_mode;
int err;
/* Enable receiving frames on the port, and activate auto-learning of
@@ -482,8 +483,21 @@ static int ocelot_port_open(struct net_device *dev)
ANA_PORT_PORT_CFG_PORTID_VAL(port->chip_port),
ANA_PORT_PORT_CFG, port->chip_port);
+ if (port->serdes) {
+ if (port->phy_mode == PHY_INTERFACE_MODE_SGMII)
+ phy_mode = PHY_MODE_SGMII;
+ else
+ phy_mode = PHY_MODE_QSGMII;
+
+ err = phy_set_mode(port->serdes, phy_mode);
+ if (err) {
+ netdev_err(dev, "Could not set mode of SerDes\n");
+ return err;
+ }
+ }
+
err = phy_connect_direct(dev, port->phy, &ocelot_port_adjust_link,
- PHY_INTERFACE_MODE_NA);
+ port->phy_mode);
if (err) {
netdev_err(dev, "Could not attach to PHY\n");
return err;
@@ -1606,7 +1620,7 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port,
dev->ethtool_ops = &ocelot_ethtool_ops;
dev->switchdev_ops = &ocelot_port_switchdev_ops;
- dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXFCS;
dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
memcpy(dev->dev_addr, ocelot->base_mac, ETH_ALEN);
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index 616bec30dfa3..62c7c8eb00d9 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -11,12 +11,13 @@
#include <linux/bitops.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
+#include <linux/phy.h>
+#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include "ocelot_ana.h"
#include "ocelot_dev.h"
-#include "ocelot_hsio.h"
#include "ocelot_qsys.h"
#include "ocelot_rew.h"
#include "ocelot_sys.h"
@@ -333,79 +334,6 @@ enum ocelot_reg {
SYS_CM_DATA_RD,
SYS_CM_OP,
SYS_CM_DATA,
- HSIO_PLL5G_CFG0 = HSIO << TARGET_OFFSET,
- HSIO_PLL5G_CFG1,
- HSIO_PLL5G_CFG2,
- HSIO_PLL5G_CFG3,
- HSIO_PLL5G_CFG4,
- HSIO_PLL5G_CFG5,
- HSIO_PLL5G_CFG6,
- HSIO_PLL5G_STATUS0,
- HSIO_PLL5G_STATUS1,
- HSIO_PLL5G_BIST_CFG0,
- HSIO_PLL5G_BIST_CFG1,
- HSIO_PLL5G_BIST_CFG2,
- HSIO_PLL5G_BIST_STAT0,
- HSIO_PLL5G_BIST_STAT1,
- HSIO_RCOMP_CFG0,
- HSIO_RCOMP_STATUS,
- HSIO_SYNC_ETH_CFG,
- HSIO_SYNC_ETH_PLL_CFG,
- HSIO_S1G_DES_CFG,
- HSIO_S1G_IB_CFG,
- HSIO_S1G_OB_CFG,
- HSIO_S1G_SER_CFG,
- HSIO_S1G_COMMON_CFG,
- HSIO_S1G_PLL_CFG,
- HSIO_S1G_PLL_STATUS,
- HSIO_S1G_DFT_CFG0,
- HSIO_S1G_DFT_CFG1,
- HSIO_S1G_DFT_CFG2,
- HSIO_S1G_TP_CFG,
- HSIO_S1G_RC_PLL_BIST_CFG,
- HSIO_S1G_MISC_CFG,
- HSIO_S1G_DFT_STATUS,
- HSIO_S1G_MISC_STATUS,
- HSIO_MCB_S1G_ADDR_CFG,
- HSIO_S6G_DIG_CFG,
- HSIO_S6G_DFT_CFG0,
- HSIO_S6G_DFT_CFG1,
- HSIO_S6G_DFT_CFG2,
- HSIO_S6G_TP_CFG0,
- HSIO_S6G_TP_CFG1,
- HSIO_S6G_RC_PLL_BIST_CFG,
- HSIO_S6G_MISC_CFG,
- HSIO_S6G_OB_ANEG_CFG,
- HSIO_S6G_DFT_STATUS,
- HSIO_S6G_ERR_CNT,
- HSIO_S6G_MISC_STATUS,
- HSIO_S6G_DES_CFG,
- HSIO_S6G_IB_CFG,
- HSIO_S6G_IB_CFG1,
- HSIO_S6G_IB_CFG2,
- HSIO_S6G_IB_CFG3,
- HSIO_S6G_IB_CFG4,
- HSIO_S6G_IB_CFG5,
- HSIO_S6G_OB_CFG,
- HSIO_S6G_OB_CFG1,
- HSIO_S6G_SER_CFG,
- HSIO_S6G_COMMON_CFG,
- HSIO_S6G_PLL_CFG,
- HSIO_S6G_ACJTAG_CFG,
- HSIO_S6G_GP_CFG,
- HSIO_S6G_IB_STATUS0,
- HSIO_S6G_IB_STATUS1,
- HSIO_S6G_ACJTAG_STATUS,
- HSIO_S6G_PLL_STATUS,
- HSIO_S6G_REVID,
- HSIO_MCB_S6G_ADDR_CFG,
- HSIO_HW_CFG,
- HSIO_HW_QSGMII_CFG,
- HSIO_HW_QSGMII_STAT,
- HSIO_CLK_CFG,
- HSIO_TEMP_SENSOR_CTRL,
- HSIO_TEMP_SENSOR_CFG,
- HSIO_TEMP_SENSOR_STAT,
};
enum ocelot_regfield {
@@ -527,6 +455,9 @@ struct ocelot_port {
u8 vlan_aware;
u64 *stats;
+
+ phy_interface_t phy_mode;
+ struct phy *serdes;
};
u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset);
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
index 3cdf63e35b53..4c23d18bbf44 100644
--- a/drivers/net/ethernet/mscc/ocelot_board.c
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -6,9 +6,11 @@
*/
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/of_net.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
+#include <linux/mfd/syscon.h>
#include <linux/skbuff.h>
#include "ocelot.h"
@@ -126,11 +128,16 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
len += sz;
} while (len < buf_len);
- /* Read the FCS and discard it */
+ /* Read the FCS */
sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
/* Update the statistics if part of the FCS was read before */
len -= ETH_FCS_LEN - sz;
+ if (unlikely(dev->features & NETIF_F_RXFCS)) {
+ buf = (u32 *)skb_put(skb, ETH_FCS_LEN);
+ *buf = val;
+ }
+
if (sz < 0) {
err = sz;
break;
@@ -168,6 +175,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct device_node *ports, *portnp;
struct ocelot *ocelot;
+ struct regmap *hsio;
u32 val;
struct {
@@ -179,7 +187,6 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
{ QSYS, "qsys" },
{ ANA, "ana" },
{ QS, "qs" },
- { HSIO, "hsio" },
};
if (!np && !pdev->dev.platform_data)
@@ -202,6 +209,14 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ocelot->targets[res[i].id] = target;
}
+ hsio = syscon_regmap_lookup_by_compatible("mscc,ocelot-hsio");
+ if (IS_ERR(hsio)) {
+ dev_err(&pdev->dev, "missing hsio syscon\n");
+ return PTR_ERR(hsio);
+ }
+
+ ocelot->targets[HSIO] = hsio;
+
err = ocelot_chip_init(ocelot);
if (err)
return err;
@@ -244,18 +259,11 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&ocelot->multicast);
ocelot_init(ocelot);
- ocelot_rmw(ocelot, HSIO_HW_CFG_DEV1G_4_MODE |
- HSIO_HW_CFG_DEV1G_6_MODE |
- HSIO_HW_CFG_DEV1G_9_MODE,
- HSIO_HW_CFG_DEV1G_4_MODE |
- HSIO_HW_CFG_DEV1G_6_MODE |
- HSIO_HW_CFG_DEV1G_9_MODE,
- HSIO_HW_CFG);
-
for_each_available_child_of_node(ports, portnp) {
struct device_node *phy_node;
struct phy_device *phy;
struct resource *res;
+ struct phy *serdes;
void __iomem *regs;
char res_name[8];
u32 port;
@@ -280,10 +288,43 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
continue;
err = ocelot_probe_port(ocelot, port, regs, phy);
- if (err) {
- dev_err(&pdev->dev, "failed to probe ports\n");
+ if (err)
+ return err;
+
+ err = of_get_phy_mode(portnp);
+ if (err < 0)
+ ocelot->ports[port]->phy_mode = PHY_INTERFACE_MODE_NA;
+ else
+ ocelot->ports[port]->phy_mode = err;
+
+ switch (ocelot->ports[port]->phy_mode) {
+ case PHY_INTERFACE_MODE_NA:
+ continue;
+ case PHY_INTERFACE_MODE_SGMII:
+ break;
+ case PHY_INTERFACE_MODE_QSGMII:
+ break;
+ default:
+ dev_err(ocelot->dev,
+ "invalid phy mode for port%d, (Q)SGMII only\n",
+ port);
+ return -EINVAL;
+ }
+
+ serdes = devm_of_phy_get(ocelot->dev, portnp, NULL);
+ if (IS_ERR(serdes)) {
+ err = PTR_ERR(serdes);
+ if (err == -EPROBE_DEFER)
+ dev_dbg(ocelot->dev, "deferring probe\n");
+ else
+ dev_err(ocelot->dev,
+ "missing SerDes phys for port%d\n",
+ port);
+
goto err_probe_ports;
}
+
+ ocelot->ports[port]->serdes = serdes;
}
register_netdevice_notifier(&ocelot_netdevice_nb);
diff --git a/drivers/net/ethernet/mscc/ocelot_hsio.h b/drivers/net/ethernet/mscc/ocelot_hsio.h
deleted file mode 100644
index d93ddec3931b..000000000000
--- a/drivers/net/ethernet/mscc/ocelot_hsio.h
+++ /dev/null
@@ -1,785 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
-/*
- * Microsemi Ocelot Switch driver
- *
- * Copyright (c) 2017 Microsemi Corporation
- */
-
-#ifndef _MSCC_OCELOT_HSIO_H_
-#define _MSCC_OCELOT_HSIO_H_
-
-#define HSIO_PLL5G_CFG0_ENA_ROT BIT(31)
-#define HSIO_PLL5G_CFG0_ENA_LANE BIT(30)
-#define HSIO_PLL5G_CFG0_ENA_CLKTREE BIT(29)
-#define HSIO_PLL5G_CFG0_DIV4 BIT(28)
-#define HSIO_PLL5G_CFG0_ENA_LOCK_FINE BIT(27)
-#define HSIO_PLL5G_CFG0_SELBGV820(x) (((x) << 23) & GENMASK(26, 23))
-#define HSIO_PLL5G_CFG0_SELBGV820_M GENMASK(26, 23)
-#define HSIO_PLL5G_CFG0_SELBGV820_X(x) (((x) & GENMASK(26, 23)) >> 23)
-#define HSIO_PLL5G_CFG0_LOOP_BW_RES(x) (((x) << 18) & GENMASK(22, 18))
-#define HSIO_PLL5G_CFG0_LOOP_BW_RES_M GENMASK(22, 18)
-#define HSIO_PLL5G_CFG0_LOOP_BW_RES_X(x) (((x) & GENMASK(22, 18)) >> 18)
-#define HSIO_PLL5G_CFG0_SELCPI(x) (((x) << 16) & GENMASK(17, 16))
-#define HSIO_PLL5G_CFG0_SELCPI_M GENMASK(17, 16)
-#define HSIO_PLL5G_CFG0_SELCPI_X(x) (((x) & GENMASK(17, 16)) >> 16)
-#define HSIO_PLL5G_CFG0_ENA_VCO_CONTRH BIT(15)
-#define HSIO_PLL5G_CFG0_ENA_CP1 BIT(14)
-#define HSIO_PLL5G_CFG0_ENA_VCO_BUF BIT(13)
-#define HSIO_PLL5G_CFG0_ENA_BIAS BIT(12)
-#define HSIO_PLL5G_CFG0_CPU_CLK_DIV(x) (((x) << 6) & GENMASK(11, 6))
-#define HSIO_PLL5G_CFG0_CPU_CLK_DIV_M GENMASK(11, 6)
-#define HSIO_PLL5G_CFG0_CPU_CLK_DIV_X(x) (((x) & GENMASK(11, 6)) >> 6)
-#define HSIO_PLL5G_CFG0_CORE_CLK_DIV(x) ((x) & GENMASK(5, 0))
-#define HSIO_PLL5G_CFG0_CORE_CLK_DIV_M GENMASK(5, 0)
-
-#define HSIO_PLL5G_CFG1_ENA_DIRECT BIT(18)
-#define HSIO_PLL5G_CFG1_ROT_SPEED BIT(17)
-#define HSIO_PLL5G_CFG1_ROT_DIR BIT(16)
-#define HSIO_PLL5G_CFG1_READBACK_DATA_SEL BIT(15)
-#define HSIO_PLL5G_CFG1_RC_ENABLE BIT(14)
-#define HSIO_PLL5G_CFG1_RC_CTRL_DATA(x) (((x) << 6) & GENMASK(13, 6))
-#define HSIO_PLL5G_CFG1_RC_CTRL_DATA_M GENMASK(13, 6)
-#define HSIO_PLL5G_CFG1_RC_CTRL_DATA_X(x) (((x) & GENMASK(13, 6)) >> 6)
-#define HSIO_PLL5G_CFG1_QUARTER_RATE BIT(5)
-#define HSIO_PLL5G_CFG1_PWD_TX BIT(4)
-#define HSIO_PLL5G_CFG1_PWD_RX BIT(3)
-#define HSIO_PLL5G_CFG1_OUT_OF_RANGE_RECAL_ENA BIT(2)
-#define HSIO_PLL5G_CFG1_HALF_RATE BIT(1)
-#define HSIO_PLL5G_CFG1_FORCE_SET_ENA BIT(0)
-
-#define HSIO_PLL5G_CFG2_ENA_TEST_MODE BIT(30)
-#define HSIO_PLL5G_CFG2_ENA_PFD_IN_FLIP BIT(29)
-#define HSIO_PLL5G_CFG2_ENA_VCO_NREF_TESTOUT BIT(28)
-#define HSIO_PLL5G_CFG2_ENA_FBTESTOUT BIT(27)
-#define HSIO_PLL5G_CFG2_ENA_RCPLL BIT(26)
-#define HSIO_PLL5G_CFG2_ENA_CP2 BIT(25)
-#define HSIO_PLL5G_CFG2_ENA_CLK_BYPASS1 BIT(24)
-#define HSIO_PLL5G_CFG2_AMPC_SEL(x) (((x) << 16) & GENMASK(23, 16))
-#define HSIO_PLL5G_CFG2_AMPC_SEL_M GENMASK(23, 16)
-#define HSIO_PLL5G_CFG2_AMPC_SEL_X(x) (((x) & GENMASK(23, 16)) >> 16)
-#define HSIO_PLL5G_CFG2_ENA_CLK_BYPASS BIT(15)
-#define HSIO_PLL5G_CFG2_PWD_AMPCTRL_N BIT(14)
-#define HSIO_PLL5G_CFG2_ENA_AMPCTRL BIT(13)
-#define HSIO_PLL5G_CFG2_ENA_AMP_CTRL_FORCE BIT(12)
-#define HSIO_PLL5G_CFG2_FRC_FSM_POR BIT(11)
-#define HSIO_PLL5G_CFG2_DISABLE_FSM_POR BIT(10)
-#define HSIO_PLL5G_CFG2_GAIN_TEST(x) (((x) << 5) & GENMASK(9, 5))
-#define HSIO_PLL5G_CFG2_GAIN_TEST_M GENMASK(9, 5)
-#define HSIO_PLL5G_CFG2_GAIN_TEST_X(x) (((x) & GENMASK(9, 5)) >> 5)
-#define HSIO_PLL5G_CFG2_EN_RESET_OVERRUN BIT(4)
-#define HSIO_PLL5G_CFG2_EN_RESET_LIM_DET BIT(3)
-#define HSIO_PLL5G_CFG2_EN_RESET_FRQ_DET BIT(2)
-#define HSIO_PLL5G_CFG2_DISABLE_FSM BIT(1)
-#define HSIO_PLL5G_CFG2_ENA_GAIN_TEST BIT(0)
-
-#define HSIO_PLL5G_CFG3_TEST_ANA_OUT_SEL(x) (((x) << 22) & GENMASK(23, 22))
-#define HSIO_PLL5G_CFG3_TEST_ANA_OUT_SEL_M GENMASK(23, 22)
-#define HSIO_PLL5G_CFG3_TEST_ANA_OUT_SEL_X(x) (((x) & GENMASK(23, 22)) >> 22)
-#define HSIO_PLL5G_CFG3_TESTOUT_SEL(x) (((x) << 19) & GENMASK(21, 19))
-#define HSIO_PLL5G_CFG3_TESTOUT_SEL_M GENMASK(21, 19)
-#define HSIO_PLL5G_CFG3_TESTOUT_SEL_X(x) (((x) & GENMASK(21, 19)) >> 19)
-#define HSIO_PLL5G_CFG3_ENA_ANA_TEST_OUT BIT(18)
-#define HSIO_PLL5G_CFG3_ENA_TEST_OUT BIT(17)
-#define HSIO_PLL5G_CFG3_SEL_FBDCLK BIT(16)
-#define HSIO_PLL5G_CFG3_SEL_CML_CMOS_PFD BIT(15)
-#define HSIO_PLL5G_CFG3_RST_FB_N BIT(14)
-#define HSIO_PLL5G_CFG3_FORCE_VCO_CONTRH BIT(13)
-#define HSIO_PLL5G_CFG3_FORCE_LO BIT(12)
-#define HSIO_PLL5G_CFG3_FORCE_HI BIT(11)
-#define HSIO_PLL5G_CFG3_FORCE_ENA BIT(10)
-#define HSIO_PLL5G_CFG3_FORCE_CP BIT(9)
-#define HSIO_PLL5G_CFG3_FBDIVSEL_TST_ENA BIT(8)
-#define HSIO_PLL5G_CFG3_FBDIVSEL(x) ((x) & GENMASK(7, 0))
-#define HSIO_PLL5G_CFG3_FBDIVSEL_M GENMASK(7, 0)
-
-#define HSIO_PLL5G_CFG4_IB_BIAS_CTRL(x) (((x) << 16) & GENMASK(23, 16))
-#define HSIO_PLL5G_CFG4_IB_BIAS_CTRL_M GENMASK(23, 16)
-#define HSIO_PLL5G_CFG4_IB_BIAS_CTRL_X(x) (((x) & GENMASK(23, 16)) >> 16)
-#define HSIO_PLL5G_CFG4_IB_CTRL(x) ((x) & GENMASK(15, 0))
-#define HSIO_PLL5G_CFG4_IB_CTRL_M GENMASK(15, 0)
-
-#define HSIO_PLL5G_CFG5_OB_BIAS_CTRL(x) (((x) << 16) & GENMASK(23, 16))
-#define HSIO_PLL5G_CFG5_OB_BIAS_CTRL_M GENMASK(23, 16)
-#define HSIO_PLL5G_CFG5_OB_BIAS_CTRL_X(x) (((x) & GENMASK(23, 16)) >> 16)
-#define HSIO_PLL5G_CFG5_OB_CTRL(x) ((x) & GENMASK(15, 0))
-#define HSIO_PLL5G_CFG5_OB_CTRL_M GENMASK(15, 0)
-
-#define HSIO_PLL5G_CFG6_REFCLK_SEL_SRC BIT(23)
-#define HSIO_PLL5G_CFG6_REFCLK_SEL(x) (((x) << 20) & GENMASK(22, 20))
-#define HSIO_PLL5G_CFG6_REFCLK_SEL_M GENMASK(22, 20)
-#define HSIO_PLL5G_CFG6_REFCLK_SEL_X(x) (((x) & GENMASK(22, 20)) >> 20)
-#define HSIO_PLL5G_CFG6_REFCLK_SRC BIT(19)
-#define HSIO_PLL5G_CFG6_POR_DEL_SEL(x) (((x) << 16) & GENMASK(17, 16))
-#define HSIO_PLL5G_CFG6_POR_DEL_SEL_M GENMASK(17, 16)
-#define HSIO_PLL5G_CFG6_POR_DEL_SEL_X(x) (((x) & GENMASK(17, 16)) >> 16)
-#define HSIO_PLL5G_CFG6_DIV125REF_SEL(x) (((x) << 8) & GENMASK(15, 8))
-#define HSIO_PLL5G_CFG6_DIV125REF_SEL_M GENMASK(15, 8)
-#define HSIO_PLL5G_CFG6_DIV125REF_SEL_X(x) (((x) & GENMASK(15, 8)) >> 8)
-#define HSIO_PLL5G_CFG6_ENA_REFCLKC2 BIT(7)
-#define HSIO_PLL5G_CFG6_ENA_FBCLKC2 BIT(6)
-#define HSIO_PLL5G_CFG6_DDR_CLK_DIV(x) ((x) & GENMASK(5, 0))
-#define HSIO_PLL5G_CFG6_DDR_CLK_DIV_M GENMASK(5, 0)
-
-#define HSIO_PLL5G_STATUS0_RANGE_LIM BIT(12)
-#define HSIO_PLL5G_STATUS0_OUT_OF_RANGE_ERR BIT(11)
-#define HSIO_PLL5G_STATUS0_CALIBRATION_ERR BIT(10)
-#define HSIO_PLL5G_STATUS0_CALIBRATION_DONE BIT(9)
-#define HSIO_PLL5G_STATUS0_READBACK_DATA(x) (((x) << 1) & GENMASK(8, 1))
-#define HSIO_PLL5G_STATUS0_READBACK_DATA_M GENMASK(8, 1)
-#define HSIO_PLL5G_STATUS0_READBACK_DATA_X(x) (((x) & GENMASK(8, 1)) >> 1)
-#define HSIO_PLL5G_STATUS0_LOCK_STATUS BIT(0)
-
-#define HSIO_PLL5G_STATUS1_SIG_DEL(x) (((x) << 21) & GENMASK(28, 21))
-#define HSIO_PLL5G_STATUS1_SIG_DEL_M GENMASK(28, 21)
-#define HSIO_PLL5G_STATUS1_SIG_DEL_X(x) (((x) & GENMASK(28, 21)) >> 21)
-#define HSIO_PLL5G_STATUS1_GAIN_STAT(x) (((x) << 16) & GENMASK(20, 16))
-#define HSIO_PLL5G_STATUS1_GAIN_STAT_M GENMASK(20, 16)
-#define HSIO_PLL5G_STATUS1_GAIN_STAT_X(x) (((x) & GENMASK(20, 16)) >> 16)
-#define HSIO_PLL5G_STATUS1_FBCNT_DIF(x) (((x) << 4) & GENMASK(13, 4))
-#define HSIO_PLL5G_STATUS1_FBCNT_DIF_M GENMASK(13, 4)
-#define HSIO_PLL5G_STATUS1_FBCNT_DIF_X(x) (((x) & GENMASK(13, 4)) >> 4)
-#define HSIO_PLL5G_STATUS1_FSM_STAT(x) (((x) << 1) & GENMASK(3, 1))
-#define HSIO_PLL5G_STATUS1_FSM_STAT_M GENMASK(3, 1)
-#define HSIO_PLL5G_STATUS1_FSM_STAT_X(x) (((x) & GENMASK(3, 1)) >> 1)
-#define HSIO_PLL5G_STATUS1_FSM_LOCK BIT(0)
-
-#define HSIO_PLL5G_BIST_CFG0_PLLB_START_BIST BIT(31)
-#define HSIO_PLL5G_BIST_CFG0_PLLB_MEAS_MODE BIT(30)
-#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_REPEAT(x) (((x) << 20) & GENMASK(23, 20))
-#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_REPEAT_M GENMASK(23, 20)
-#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_REPEAT_X(x) (((x) & GENMASK(23, 20)) >> 20)
-#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_UNCERT(x) (((x) << 16) & GENMASK(19, 16))
-#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_UNCERT_M GENMASK(19, 16)
-#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_UNCERT_X(x) (((x) & GENMASK(19, 16)) >> 16)
-#define HSIO_PLL5G_BIST_CFG0_PLLB_DIV_FACTOR_PRE(x) ((x) & GENMASK(15, 0))
-#define HSIO_PLL5G_BIST_CFG0_PLLB_DIV_FACTOR_PRE_M GENMASK(15, 0)
-
-#define HSIO_PLL5G_BIST_STAT0_PLLB_FSM_STAT(x) (((x) << 4) & GENMASK(7, 4))
-#define HSIO_PLL5G_BIST_STAT0_PLLB_FSM_STAT_M GENMASK(7, 4)
-#define HSIO_PLL5G_BIST_STAT0_PLLB_FSM_STAT_X(x) (((x) & GENMASK(7, 4)) >> 4)
-#define HSIO_PLL5G_BIST_STAT0_PLLB_BUSY BIT(2)
-#define HSIO_PLL5G_BIST_STAT0_PLLB_DONE_N BIT(1)
-#define HSIO_PLL5G_BIST_STAT0_PLLB_FAIL BIT(0)
-
-#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_OUT(x) (((x) << 16) & GENMASK(31, 16))
-#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_OUT_M GENMASK(31, 16)
-#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_OUT_X(x) (((x) & GENMASK(31, 16)) >> 16)
-#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_REF_DIFF(x) ((x) & GENMASK(15, 0))
-#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_REF_DIFF_M GENMASK(15, 0)
-
-#define HSIO_RCOMP_CFG0_PWD_ENA BIT(13)
-#define HSIO_RCOMP_CFG0_RUN_CAL BIT(12)
-#define HSIO_RCOMP_CFG0_SPEED_SEL(x) (((x) << 10) & GENMASK(11, 10))
-#define HSIO_RCOMP_CFG0_SPEED_SEL_M GENMASK(11, 10)
-#define HSIO_RCOMP_CFG0_SPEED_SEL_X(x) (((x) & GENMASK(11, 10)) >> 10)
-#define HSIO_RCOMP_CFG0_MODE_SEL(x) (((x) << 8) & GENMASK(9, 8))
-#define HSIO_RCOMP_CFG0_MODE_SEL_M GENMASK(9, 8)
-#define HSIO_RCOMP_CFG0_MODE_SEL_X(x) (((x) & GENMASK(9, 8)) >> 8)
-#define HSIO_RCOMP_CFG0_FORCE_ENA BIT(4)
-#define HSIO_RCOMP_CFG0_RCOMP_VAL(x) ((x) & GENMASK(3, 0))
-#define HSIO_RCOMP_CFG0_RCOMP_VAL_M GENMASK(3, 0)
-
-#define HSIO_RCOMP_STATUS_BUSY BIT(12)
-#define HSIO_RCOMP_STATUS_DELTA_ALERT BIT(7)
-#define HSIO_RCOMP_STATUS_RCOMP(x) ((x) & GENMASK(3, 0))
-#define HSIO_RCOMP_STATUS_RCOMP_M GENMASK(3, 0)
-
-#define HSIO_SYNC_ETH_CFG_RSZ 0x4
-
-#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_SRC(x) (((x) << 4) & GENMASK(7, 4))
-#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_SRC_M GENMASK(7, 4)
-#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_SRC_X(x) (((x) & GENMASK(7, 4)) >> 4)
-#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_DIV(x) (((x) << 1) & GENMASK(3, 1))
-#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_DIV_M GENMASK(3, 1)
-#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_DIV_X(x) (((x) & GENMASK(3, 1)) >> 1)
-#define HSIO_SYNC_ETH_CFG_RECO_CLK_ENA BIT(0)
-
-#define HSIO_SYNC_ETH_PLL_CFG_PLL_AUTO_SQUELCH_ENA BIT(0)
-
-#define HSIO_S1G_DES_CFG_DES_PHS_CTRL(x) (((x) << 13) & GENMASK(16, 13))
-#define HSIO_S1G_DES_CFG_DES_PHS_CTRL_M GENMASK(16, 13)
-#define HSIO_S1G_DES_CFG_DES_PHS_CTRL_X(x) (((x) & GENMASK(16, 13)) >> 13)
-#define HSIO_S1G_DES_CFG_DES_CPMD_SEL(x) (((x) << 11) & GENMASK(12, 11))
-#define HSIO_S1G_DES_CFG_DES_CPMD_SEL_M GENMASK(12, 11)
-#define HSIO_S1G_DES_CFG_DES_CPMD_SEL_X(x) (((x) & GENMASK(12, 11)) >> 11)
-#define HSIO_S1G_DES_CFG_DES_MBTR_CTRL(x) (((x) << 8) & GENMASK(10, 8))
-#define HSIO_S1G_DES_CFG_DES_MBTR_CTRL_M GENMASK(10, 8)
-#define HSIO_S1G_DES_CFG_DES_MBTR_CTRL_X(x) (((x) & GENMASK(10, 8)) >> 8)
-#define HSIO_S1G_DES_CFG_DES_BW_ANA(x) (((x) << 5) & GENMASK(7, 5))
-#define HSIO_S1G_DES_CFG_DES_BW_ANA_M GENMASK(7, 5)
-#define HSIO_S1G_DES_CFG_DES_BW_ANA_X(x) (((x) & GENMASK(7, 5)) >> 5)
-#define HSIO_S1G_DES_CFG_DES_SWAP_ANA BIT(4)
-#define HSIO_S1G_DES_CFG_DES_BW_HYST(x) (((x) << 1) & GENMASK(3, 1))
-#define HSIO_S1G_DES_CFG_DES_BW_HYST_M GENMASK(3, 1)
-#define HSIO_S1G_DES_CFG_DES_BW_HYST_X(x) (((x) & GENMASK(3, 1)) >> 1)
-#define HSIO_S1G_DES_CFG_DES_SWAP_HYST BIT(0)
-
-#define HSIO_S1G_IB_CFG_IB_FX100_ENA BIT(27)
-#define HSIO_S1G_IB_CFG_ACJTAG_HYST(x) (((x) << 24) & GENMASK(26, 24))
-#define HSIO_S1G_IB_CFG_ACJTAG_HYST_M GENMASK(26, 24)
-#define HSIO_S1G_IB_CFG_ACJTAG_HYST_X(x) (((x) & GENMASK(26, 24)) >> 24)
-#define HSIO_S1G_IB_CFG_IB_DET_LEV(x) (((x) << 19) & GENMASK(21, 19))
-#define HSIO_S1G_IB_CFG_IB_DET_LEV_M GENMASK(21, 19)
-#define HSIO_S1G_IB_CFG_IB_DET_LEV_X(x) (((x) & GENMASK(21, 19)) >> 19)
-#define HSIO_S1G_IB_CFG_IB_HYST_LEV BIT(14)
-#define HSIO_S1G_IB_CFG_IB_ENA_CMV_TERM BIT(13)
-#define HSIO_S1G_IB_CFG_IB_ENA_DC_COUPLING BIT(12)
-#define HSIO_S1G_IB_CFG_IB_ENA_DETLEV BIT(11)
-#define HSIO_S1G_IB_CFG_IB_ENA_HYST BIT(10)
-#define HSIO_S1G_IB_CFG_IB_ENA_OFFSET_COMP BIT(9)
-#define HSIO_S1G_IB_CFG_IB_EQ_GAIN(x) (((x) << 6) & GENMASK(8, 6))
-#define HSIO_S1G_IB_CFG_IB_EQ_GAIN_M GENMASK(8, 6)
-#define HSIO_S1G_IB_CFG_IB_EQ_GAIN_X(x) (((x) & GENMASK(8, 6)) >> 6)
-#define HSIO_S1G_IB_CFG_IB_SEL_CORNER_FREQ(x) (((x) << 4) & GENMASK(5, 4))
-#define HSIO_S1G_IB_CFG_IB_SEL_CORNER_FREQ_M GENMASK(5, 4)
-#define HSIO_S1G_IB_CFG_IB_SEL_CORNER_FREQ_X(x) (((x) & GENMASK(5, 4)) >> 4)
-#define HSIO_S1G_IB_CFG_IB_RESISTOR_CTRL(x) ((x) & GENMASK(3, 0))
-#define HSIO_S1G_IB_CFG_IB_RESISTOR_CTRL_M GENMASK(3, 0)
-
-#define HSIO_S1G_OB_CFG_OB_SLP(x) (((x) << 17) & GENMASK(18, 17))
-#define HSIO_S1G_OB_CFG_OB_SLP_M GENMASK(18, 17)
-#define HSIO_S1G_OB_CFG_OB_SLP_X(x) (((x) & GENMASK(18, 17)) >> 17)
-#define HSIO_S1G_OB_CFG_OB_AMP_CTRL(x) (((x) << 13) & GENMASK(16, 13))
-#define HSIO_S1G_OB_CFG_OB_AMP_CTRL_M GENMASK(16, 13)
-#define HSIO_S1G_OB_CFG_OB_AMP_CTRL_X(x) (((x) & GENMASK(16, 13)) >> 13)
-#define HSIO_S1G_OB_CFG_OB_CMM_BIAS_CTRL(x) (((x) << 10) & GENMASK(12, 10))
-#define HSIO_S1G_OB_CFG_OB_CMM_BIAS_CTRL_M GENMASK(12, 10)
-#define HSIO_S1G_OB_CFG_OB_CMM_BIAS_CTRL_X(x) (((x) & GENMASK(12, 10)) >> 10)
-#define HSIO_S1G_OB_CFG_OB_DIS_VCM_CTRL BIT(9)
-#define HSIO_S1G_OB_CFG_OB_EN_MEAS_VREG BIT(8)
-#define HSIO_S1G_OB_CFG_OB_VCM_CTRL(x) (((x) << 4) & GENMASK(7, 4))
-#define HSIO_S1G_OB_CFG_OB_VCM_CTRL_M GENMASK(7, 4)
-#define HSIO_S1G_OB_CFG_OB_VCM_CTRL_X(x) (((x) & GENMASK(7, 4)) >> 4)
-#define HSIO_S1G_OB_CFG_OB_RESISTOR_CTRL(x) ((x) & GENMASK(3, 0))
-#define HSIO_S1G_OB_CFG_OB_RESISTOR_CTRL_M GENMASK(3, 0)
-
-#define HSIO_S1G_SER_CFG_SER_IDLE BIT(9)
-#define HSIO_S1G_SER_CFG_SER_DEEMPH BIT(8)
-#define HSIO_S1G_SER_CFG_SER_CPMD_SEL BIT(7)
-#define HSIO_S1G_SER_CFG_SER_SWAP_CPMD BIT(6)
-#define HSIO_S1G_SER_CFG_SER_ALISEL(x) (((x) << 4) & GENMASK(5, 4))
-#define HSIO_S1G_SER_CFG_SER_ALISEL_M GENMASK(5, 4)
-#define HSIO_S1G_SER_CFG_SER_ALISEL_X(x) (((x) & GENMASK(5, 4)) >> 4)
-#define HSIO_S1G_SER_CFG_SER_ENHYS BIT(3)
-#define HSIO_S1G_SER_CFG_SER_BIG_WIN BIT(2)
-#define HSIO_S1G_SER_CFG_SER_EN_WIN BIT(1)
-#define HSIO_S1G_SER_CFG_SER_ENALI BIT(0)
-
-#define HSIO_S1G_COMMON_CFG_SYS_RST BIT(31)
-#define HSIO_S1G_COMMON_CFG_SE_AUTO_SQUELCH_ENA BIT(21)
-#define HSIO_S1G_COMMON_CFG_ENA_LANE BIT(18)
-#define HSIO_S1G_COMMON_CFG_PWD_RX BIT(17)
-#define HSIO_S1G_COMMON_CFG_PWD_TX BIT(16)
-#define HSIO_S1G_COMMON_CFG_LANE_CTRL(x) (((x) << 13) & GENMASK(15, 13))
-#define HSIO_S1G_COMMON_CFG_LANE_CTRL_M GENMASK(15, 13)
-#define HSIO_S1G_COMMON_CFG_LANE_CTRL_X(x) (((x) & GENMASK(15, 13)) >> 13)
-#define HSIO_S1G_COMMON_CFG_ENA_DIRECT BIT(12)
-#define HSIO_S1G_COMMON_CFG_ENA_ELOOP BIT(11)
-#define HSIO_S1G_COMMON_CFG_ENA_FLOOP BIT(10)
-#define HSIO_S1G_COMMON_CFG_ENA_ILOOP BIT(9)
-#define HSIO_S1G_COMMON_CFG_ENA_PLOOP BIT(8)
-#define HSIO_S1G_COMMON_CFG_HRATE BIT(7)
-#define HSIO_S1G_COMMON_CFG_IF_MODE BIT(0)
-
-#define HSIO_S1G_PLL_CFG_PLL_ENA_FB_DIV2 BIT(22)
-#define HSIO_S1G_PLL_CFG_PLL_ENA_RC_DIV2 BIT(21)
-#define HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA(x) (((x) << 8) & GENMASK(15, 8))
-#define HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA_M GENMASK(15, 8)
-#define HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA_X(x) (((x) & GENMASK(15, 8)) >> 8)
-#define HSIO_S1G_PLL_CFG_PLL_FSM_ENA BIT(7)
-#define HSIO_S1G_PLL_CFG_PLL_FSM_FORCE_SET_ENA BIT(6)
-#define HSIO_S1G_PLL_CFG_PLL_FSM_OOR_RECAL_ENA BIT(5)
-#define HSIO_S1G_PLL_CFG_PLL_RB_DATA_SEL BIT(3)
-
-#define HSIO_S1G_PLL_STATUS_PLL_CAL_NOT_DONE BIT(12)
-#define HSIO_S1G_PLL_STATUS_PLL_CAL_ERR BIT(11)
-#define HSIO_S1G_PLL_STATUS_PLL_OUT_OF_RANGE_ERR BIT(10)
-#define HSIO_S1G_PLL_STATUS_PLL_RB_DATA(x) ((x) & GENMASK(7, 0))
-#define HSIO_S1G_PLL_STATUS_PLL_RB_DATA_M GENMASK(7, 0)
-
-#define HSIO_S1G_DFT_CFG0_LAZYBIT BIT(31)
-#define HSIO_S1G_DFT_CFG0_INV_DIS BIT(23)
-#define HSIO_S1G_DFT_CFG0_PRBS_SEL(x) (((x) << 20) & GENMASK(21, 20))
-#define HSIO_S1G_DFT_CFG0_PRBS_SEL_M GENMASK(21, 20)
-#define HSIO_S1G_DFT_CFG0_PRBS_SEL_X(x) (((x) & GENMASK(21, 20)) >> 20)
-#define HSIO_S1G_DFT_CFG0_TEST_MODE(x) (((x) << 16) & GENMASK(18, 16))
-#define HSIO_S1G_DFT_CFG0_TEST_MODE_M GENMASK(18, 16)
-#define HSIO_S1G_DFT_CFG0_TEST_MODE_X(x) (((x) & GENMASK(18, 16)) >> 16)
-#define HSIO_S1G_DFT_CFG0_RX_PHS_CORR_DIS BIT(4)
-#define HSIO_S1G_DFT_CFG0_RX_PDSENS_ENA BIT(3)
-#define HSIO_S1G_DFT_CFG0_RX_DFT_ENA BIT(2)
-#define HSIO_S1G_DFT_CFG0_TX_DFT_ENA BIT(0)
-
-#define HSIO_S1G_DFT_CFG1_TX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8))
-#define HSIO_S1G_DFT_CFG1_TX_JITTER_AMPL_M GENMASK(17, 8)
-#define HSIO_S1G_DFT_CFG1_TX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8)
-#define HSIO_S1G_DFT_CFG1_TX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4))
-#define HSIO_S1G_DFT_CFG1_TX_STEP_FREQ_M GENMASK(7, 4)
-#define HSIO_S1G_DFT_CFG1_TX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4)
-#define HSIO_S1G_DFT_CFG1_TX_JI_ENA BIT(3)
-#define HSIO_S1G_DFT_CFG1_TX_WAVEFORM_SEL BIT(2)
-#define HSIO_S1G_DFT_CFG1_TX_FREQOFF_DIR BIT(1)
-#define HSIO_S1G_DFT_CFG1_TX_FREQOFF_ENA BIT(0)
-
-#define HSIO_S1G_DFT_CFG2_RX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8))
-#define HSIO_S1G_DFT_CFG2_RX_JITTER_AMPL_M GENMASK(17, 8)
-#define HSIO_S1G_DFT_CFG2_RX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8)
-#define HSIO_S1G_DFT_CFG2_RX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4))
-#define HSIO_S1G_DFT_CFG2_RX_STEP_FREQ_M GENMASK(7, 4)
-#define HSIO_S1G_DFT_CFG2_RX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4)
-#define HSIO_S1G_DFT_CFG2_RX_JI_ENA BIT(3)
-#define HSIO_S1G_DFT_CFG2_RX_WAVEFORM_SEL BIT(2)
-#define HSIO_S1G_DFT_CFG2_RX_FREQOFF_DIR BIT(1)
-#define HSIO_S1G_DFT_CFG2_RX_FREQOFF_ENA BIT(0)
-
-#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_ENA BIT(20)
-#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH(x) (((x) << 16) & GENMASK(17, 16))
-#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_M GENMASK(17, 16)
-#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_X(x) (((x) & GENMASK(17, 16)) >> 16)
-#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_HIGH(x) (((x) << 8) & GENMASK(15, 8))
-#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_M GENMASK(15, 8)
-#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_X(x) (((x) & GENMASK(15, 8)) >> 8)
-#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_LOW(x) ((x) & GENMASK(7, 0))
-#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_LOW_M GENMASK(7, 0)
-
-#define HSIO_S1G_MISC_CFG_DES_100FX_KICK_MODE(x) (((x) << 11) & GENMASK(12, 11))
-#define HSIO_S1G_MISC_CFG_DES_100FX_KICK_MODE_M GENMASK(12, 11)
-#define HSIO_S1G_MISC_CFG_DES_100FX_KICK_MODE_X(x) (((x) & GENMASK(12, 11)) >> 11)
-#define HSIO_S1G_MISC_CFG_DES_100FX_CPMD_SWAP BIT(10)
-#define HSIO_S1G_MISC_CFG_DES_100FX_CPMD_MODE BIT(9)
-#define HSIO_S1G_MISC_CFG_DES_100FX_CPMD_ENA BIT(8)
-#define HSIO_S1G_MISC_CFG_RX_LPI_MODE_ENA BIT(5)
-#define HSIO_S1G_MISC_CFG_TX_LPI_MODE_ENA BIT(4)
-#define HSIO_S1G_MISC_CFG_RX_DATA_INV_ENA BIT(3)
-#define HSIO_S1G_MISC_CFG_TX_DATA_INV_ENA BIT(2)
-#define HSIO_S1G_MISC_CFG_LANE_RST BIT(0)
-
-#define HSIO_S1G_DFT_STATUS_PLL_BIST_NOT_DONE BIT(7)
-#define HSIO_S1G_DFT_STATUS_PLL_BIST_FAILED BIT(6)
-#define HSIO_S1G_DFT_STATUS_PLL_BIST_TIMEOUT_ERR BIT(5)
-#define HSIO_S1G_DFT_STATUS_BIST_ACTIVE BIT(3)
-#define HSIO_S1G_DFT_STATUS_BIST_NOSYNC BIT(2)
-#define HSIO_S1G_DFT_STATUS_BIST_COMPLETE_N BIT(1)
-#define HSIO_S1G_DFT_STATUS_BIST_ERROR BIT(0)
-
-#define HSIO_S1G_MISC_STATUS_DES_100FX_PHASE_SEL BIT(0)
-
-#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_WR_ONE_SHOT BIT(31)
-#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_RD_ONE_SHOT BIT(30)
-#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_ADDR(x) ((x) & GENMASK(8, 0))
-#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_ADDR_M GENMASK(8, 0)
-
-#define HSIO_S6G_DIG_CFG_GP(x) (((x) << 16) & GENMASK(18, 16))
-#define HSIO_S6G_DIG_CFG_GP_M GENMASK(18, 16)
-#define HSIO_S6G_DIG_CFG_GP_X(x) (((x) & GENMASK(18, 16)) >> 16)
-#define HSIO_S6G_DIG_CFG_TX_BIT_DOUBLING_MODE_ENA BIT(7)
-#define HSIO_S6G_DIG_CFG_SIGDET_TESTMODE BIT(6)
-#define HSIO_S6G_DIG_CFG_SIGDET_AST(x) (((x) << 3) & GENMASK(5, 3))
-#define HSIO_S6G_DIG_CFG_SIGDET_AST_M GENMASK(5, 3)
-#define HSIO_S6G_DIG_CFG_SIGDET_AST_X(x) (((x) & GENMASK(5, 3)) >> 3)
-#define HSIO_S6G_DIG_CFG_SIGDET_DST(x) ((x) & GENMASK(2, 0))
-#define HSIO_S6G_DIG_CFG_SIGDET_DST_M GENMASK(2, 0)
-
-#define HSIO_S6G_DFT_CFG0_LAZYBIT BIT(31)
-#define HSIO_S6G_DFT_CFG0_INV_DIS BIT(23)
-#define HSIO_S6G_DFT_CFG0_PRBS_SEL(x) (((x) << 20) & GENMASK(21, 20))
-#define HSIO_S6G_DFT_CFG0_PRBS_SEL_M GENMASK(21, 20)
-#define HSIO_S6G_DFT_CFG0_PRBS_SEL_X(x) (((x) & GENMASK(21, 20)) >> 20)
-#define HSIO_S6G_DFT_CFG0_TEST_MODE(x) (((x) << 16) & GENMASK(18, 16))
-#define HSIO_S6G_DFT_CFG0_TEST_MODE_M GENMASK(18, 16)
-#define HSIO_S6G_DFT_CFG0_TEST_MODE_X(x) (((x) & GENMASK(18, 16)) >> 16)
-#define HSIO_S6G_DFT_CFG0_RX_PHS_CORR_DIS BIT(4)
-#define HSIO_S6G_DFT_CFG0_RX_PDSENS_ENA BIT(3)
-#define HSIO_S6G_DFT_CFG0_RX_DFT_ENA BIT(2)
-#define HSIO_S6G_DFT_CFG0_TX_DFT_ENA BIT(0)
-
-#define HSIO_S6G_DFT_CFG1_TX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8))
-#define HSIO_S6G_DFT_CFG1_TX_JITTER_AMPL_M GENMASK(17, 8)
-#define HSIO_S6G_DFT_CFG1_TX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8)
-#define HSIO_S6G_DFT_CFG1_TX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4))
-#define HSIO_S6G_DFT_CFG1_TX_STEP_FREQ_M GENMASK(7, 4)
-#define HSIO_S6G_DFT_CFG1_TX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4)
-#define HSIO_S6G_DFT_CFG1_TX_JI_ENA BIT(3)
-#define HSIO_S6G_DFT_CFG1_TX_WAVEFORM_SEL BIT(2)
-#define HSIO_S6G_DFT_CFG1_TX_FREQOFF_DIR BIT(1)
-#define HSIO_S6G_DFT_CFG1_TX_FREQOFF_ENA BIT(0)
-
-#define HSIO_S6G_DFT_CFG2_RX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8))
-#define HSIO_S6G_DFT_CFG2_RX_JITTER_AMPL_M GENMASK(17, 8)
-#define HSIO_S6G_DFT_CFG2_RX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8)
-#define HSIO_S6G_DFT_CFG2_RX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4))
-#define HSIO_S6G_DFT_CFG2_RX_STEP_FREQ_M GENMASK(7, 4)
-#define HSIO_S6G_DFT_CFG2_RX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4)
-#define HSIO_S6G_DFT_CFG2_RX_JI_ENA BIT(3)
-#define HSIO_S6G_DFT_CFG2_RX_WAVEFORM_SEL BIT(2)
-#define HSIO_S6G_DFT_CFG2_RX_FREQOFF_DIR BIT(1)
-#define HSIO_S6G_DFT_CFG2_RX_FREQOFF_ENA BIT(0)
-
-#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_ENA BIT(20)
-#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH(x) (((x) << 16) & GENMASK(19, 16))
-#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_M GENMASK(19, 16)
-#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_X(x) (((x) & GENMASK(19, 16)) >> 16)
-#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_HIGH(x) (((x) << 8) & GENMASK(15, 8))
-#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_M GENMASK(15, 8)
-#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_X(x) (((x) & GENMASK(15, 8)) >> 8)
-#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_LOW(x) ((x) & GENMASK(7, 0))
-#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_LOW_M GENMASK(7, 0)
-
-#define HSIO_S6G_MISC_CFG_SEL_RECO_CLK(x) (((x) << 13) & GENMASK(14, 13))
-#define HSIO_S6G_MISC_CFG_SEL_RECO_CLK_M GENMASK(14, 13)
-#define HSIO_S6G_MISC_CFG_SEL_RECO_CLK_X(x) (((x) & GENMASK(14, 13)) >> 13)
-#define HSIO_S6G_MISC_CFG_DES_100FX_KICK_MODE(x) (((x) << 11) & GENMASK(12, 11))
-#define HSIO_S6G_MISC_CFG_DES_100FX_KICK_MODE_M GENMASK(12, 11)
-#define HSIO_S6G_MISC_CFG_DES_100FX_KICK_MODE_X(x) (((x) & GENMASK(12, 11)) >> 11)
-#define HSIO_S6G_MISC_CFG_DES_100FX_CPMD_SWAP BIT(10)
-#define HSIO_S6G_MISC_CFG_DES_100FX_CPMD_MODE BIT(9)
-#define HSIO_S6G_MISC_CFG_DES_100FX_CPMD_ENA BIT(8)
-#define HSIO_S6G_MISC_CFG_RX_BUS_FLIP_ENA BIT(7)
-#define HSIO_S6G_MISC_CFG_TX_BUS_FLIP_ENA BIT(6)
-#define HSIO_S6G_MISC_CFG_RX_LPI_MODE_ENA BIT(5)
-#define HSIO_S6G_MISC_CFG_TX_LPI_MODE_ENA BIT(4)
-#define HSIO_S6G_MISC_CFG_RX_DATA_INV_ENA BIT(3)
-#define HSIO_S6G_MISC_CFG_TX_DATA_INV_ENA BIT(2)
-#define HSIO_S6G_MISC_CFG_LANE_RST BIT(0)
-
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST0(x) (((x) << 23) & GENMASK(28, 23))
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST0_M GENMASK(28, 23)
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST0_X(x) (((x) & GENMASK(28, 23)) >> 23)
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST1(x) (((x) << 18) & GENMASK(22, 18))
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST1_M GENMASK(22, 18)
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST1_X(x) (((x) & GENMASK(22, 18)) >> 18)
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_PREC(x) (((x) << 13) & GENMASK(17, 13))
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_PREC_M GENMASK(17, 13)
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_PREC_X(x) (((x) & GENMASK(17, 13)) >> 13)
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_ENA_CAS(x) (((x) << 6) & GENMASK(8, 6))
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_ENA_CAS_M GENMASK(8, 6)
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_ENA_CAS_X(x) (((x) & GENMASK(8, 6)) >> 6)
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_LEV(x) ((x) & GENMASK(5, 0))
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_LEV_M GENMASK(5, 0)
-
-#define HSIO_S6G_DFT_STATUS_PRBS_SYNC_STAT BIT(8)
-#define HSIO_S6G_DFT_STATUS_PLL_BIST_NOT_DONE BIT(7)
-#define HSIO_S6G_DFT_STATUS_PLL_BIST_FAILED BIT(6)
-#define HSIO_S6G_DFT_STATUS_PLL_BIST_TIMEOUT_ERR BIT(5)
-#define HSIO_S6G_DFT_STATUS_BIST_ACTIVE BIT(3)
-#define HSIO_S6G_DFT_STATUS_BIST_NOSYNC BIT(2)
-#define HSIO_S6G_DFT_STATUS_BIST_COMPLETE_N BIT(1)
-#define HSIO_S6G_DFT_STATUS_BIST_ERROR BIT(0)
-
-#define HSIO_S6G_MISC_STATUS_DES_100FX_PHASE_SEL BIT(0)
-
-#define HSIO_S6G_DES_CFG_DES_PHS_CTRL(x) (((x) << 13) & GENMASK(16, 13))
-#define HSIO_S6G_DES_CFG_DES_PHS_CTRL_M GENMASK(16, 13)
-#define HSIO_S6G_DES_CFG_DES_PHS_CTRL_X(x) (((x) & GENMASK(16, 13)) >> 13)
-#define HSIO_S6G_DES_CFG_DES_MBTR_CTRL(x) (((x) << 10) & GENMASK(12, 10))
-#define HSIO_S6G_DES_CFG_DES_MBTR_CTRL_M GENMASK(12, 10)
-#define HSIO_S6G_DES_CFG_DES_MBTR_CTRL_X(x) (((x) & GENMASK(12, 10)) >> 10)
-#define HSIO_S6G_DES_CFG_DES_CPMD_SEL(x) (((x) << 8) & GENMASK(9, 8))
-#define HSIO_S6G_DES_CFG_DES_CPMD_SEL_M GENMASK(9, 8)
-#define HSIO_S6G_DES_CFG_DES_CPMD_SEL_X(x) (((x) & GENMASK(9, 8)) >> 8)
-#define HSIO_S6G_DES_CFG_DES_BW_HYST(x) (((x) << 5) & GENMASK(7, 5))
-#define HSIO_S6G_DES_CFG_DES_BW_HYST_M GENMASK(7, 5)
-#define HSIO_S6G_DES_CFG_DES_BW_HYST_X(x) (((x) & GENMASK(7, 5)) >> 5)
-#define HSIO_S6G_DES_CFG_DES_SWAP_HYST BIT(4)
-#define HSIO_S6G_DES_CFG_DES_BW_ANA(x) (((x) << 1) & GENMASK(3, 1))
-#define HSIO_S6G_DES_CFG_DES_BW_ANA_M GENMASK(3, 1)
-#define HSIO_S6G_DES_CFG_DES_BW_ANA_X(x) (((x) & GENMASK(3, 1)) >> 1)
-#define HSIO_S6G_DES_CFG_DES_SWAP_ANA BIT(0)
-
-#define HSIO_S6G_IB_CFG_IB_SOFSI(x) (((x) << 29) & GENMASK(30, 29))
-#define HSIO_S6G_IB_CFG_IB_SOFSI_M GENMASK(30, 29)
-#define HSIO_S6G_IB_CFG_IB_SOFSI_X(x) (((x) & GENMASK(30, 29)) >> 29)
-#define HSIO_S6G_IB_CFG_IB_VBULK_SEL BIT(28)
-#define HSIO_S6G_IB_CFG_IB_RTRM_ADJ(x) (((x) << 24) & GENMASK(27, 24))
-#define HSIO_S6G_IB_CFG_IB_RTRM_ADJ_M GENMASK(27, 24)
-#define HSIO_S6G_IB_CFG_IB_RTRM_ADJ_X(x) (((x) & GENMASK(27, 24)) >> 24)
-#define HSIO_S6G_IB_CFG_IB_ICML_ADJ(x) (((x) << 20) & GENMASK(23, 20))
-#define HSIO_S6G_IB_CFG_IB_ICML_ADJ_M GENMASK(23, 20)
-#define HSIO_S6G_IB_CFG_IB_ICML_ADJ_X(x) (((x) & GENMASK(23, 20)) >> 20)
-#define HSIO_S6G_IB_CFG_IB_TERM_MODE_SEL(x) (((x) << 18) & GENMASK(19, 18))
-#define HSIO_S6G_IB_CFG_IB_TERM_MODE_SEL_M GENMASK(19, 18)
-#define HSIO_S6G_IB_CFG_IB_TERM_MODE_SEL_X(x) (((x) & GENMASK(19, 18)) >> 18)
-#define HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL(x) (((x) << 15) & GENMASK(17, 15))
-#define HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL_M GENMASK(17, 15)
-#define HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL_X(x) (((x) & GENMASK(17, 15)) >> 15)
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_HP(x) (((x) << 13) & GENMASK(14, 13))
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_HP_M GENMASK(14, 13)
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_HP_X(x) (((x) & GENMASK(14, 13)) >> 13)
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_MID(x) (((x) << 11) & GENMASK(12, 11))
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_MID_M GENMASK(12, 11)
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_MID_X(x) (((x) & GENMASK(12, 11)) >> 11)
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_LP(x) (((x) << 9) & GENMASK(10, 9))
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_LP_M GENMASK(10, 9)
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_LP_X(x) (((x) & GENMASK(10, 9)) >> 9)
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET(x) (((x) << 7) & GENMASK(8, 7))
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET_M GENMASK(8, 7)
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET_X(x) (((x) & GENMASK(8, 7)) >> 7)
-#define HSIO_S6G_IB_CFG_IB_ANA_TEST_ENA BIT(6)
-#define HSIO_S6G_IB_CFG_IB_SIG_DET_ENA BIT(5)
-#define HSIO_S6G_IB_CFG_IB_CONCUR BIT(4)
-#define HSIO_S6G_IB_CFG_IB_CAL_ENA BIT(3)
-#define HSIO_S6G_IB_CFG_IB_SAM_ENA BIT(2)
-#define HSIO_S6G_IB_CFG_IB_EQZ_ENA BIT(1)
-#define HSIO_S6G_IB_CFG_IB_REG_ENA BIT(0)
-
-#define HSIO_S6G_IB_CFG1_IB_TJTAG(x) (((x) << 17) & GENMASK(21, 17))
-#define HSIO_S6G_IB_CFG1_IB_TJTAG_M GENMASK(21, 17)
-#define HSIO_S6G_IB_CFG1_IB_TJTAG_X(x) (((x) & GENMASK(21, 17)) >> 17)
-#define HSIO_S6G_IB_CFG1_IB_TSDET(x) (((x) << 12) & GENMASK(16, 12))
-#define HSIO_S6G_IB_CFG1_IB_TSDET_M GENMASK(16, 12)
-#define HSIO_S6G_IB_CFG1_IB_TSDET_X(x) (((x) & GENMASK(16, 12)) >> 12)
-#define HSIO_S6G_IB_CFG1_IB_SCALY(x) (((x) << 8) & GENMASK(11, 8))
-#define HSIO_S6G_IB_CFG1_IB_SCALY_M GENMASK(11, 8)
-#define HSIO_S6G_IB_CFG1_IB_SCALY_X(x) (((x) & GENMASK(11, 8)) >> 8)
-#define HSIO_S6G_IB_CFG1_IB_FILT_HP BIT(7)
-#define HSIO_S6G_IB_CFG1_IB_FILT_MID BIT(6)
-#define HSIO_S6G_IB_CFG1_IB_FILT_LP BIT(5)
-#define HSIO_S6G_IB_CFG1_IB_FILT_OFFSET BIT(4)
-#define HSIO_S6G_IB_CFG1_IB_FRC_HP BIT(3)
-#define HSIO_S6G_IB_CFG1_IB_FRC_MID BIT(2)
-#define HSIO_S6G_IB_CFG1_IB_FRC_LP BIT(1)
-#define HSIO_S6G_IB_CFG1_IB_FRC_OFFSET BIT(0)
-
-#define HSIO_S6G_IB_CFG2_IB_TINFV(x) (((x) << 27) & GENMASK(29, 27))
-#define HSIO_S6G_IB_CFG2_IB_TINFV_M GENMASK(29, 27)
-#define HSIO_S6G_IB_CFG2_IB_TINFV_X(x) (((x) & GENMASK(29, 27)) >> 27)
-#define HSIO_S6G_IB_CFG2_IB_OINFI(x) (((x) << 22) & GENMASK(26, 22))
-#define HSIO_S6G_IB_CFG2_IB_OINFI_M GENMASK(26, 22)
-#define HSIO_S6G_IB_CFG2_IB_OINFI_X(x) (((x) & GENMASK(26, 22)) >> 22)
-#define HSIO_S6G_IB_CFG2_IB_TAUX(x) (((x) << 19) & GENMASK(21, 19))
-#define HSIO_S6G_IB_CFG2_IB_TAUX_M GENMASK(21, 19)
-#define HSIO_S6G_IB_CFG2_IB_TAUX_X(x) (((x) & GENMASK(21, 19)) >> 19)
-#define HSIO_S6G_IB_CFG2_IB_OINFS(x) (((x) << 16) & GENMASK(18, 16))
-#define HSIO_S6G_IB_CFG2_IB_OINFS_M GENMASK(18, 16)
-#define HSIO_S6G_IB_CFG2_IB_OINFS_X(x) (((x) & GENMASK(18, 16)) >> 16)
-#define HSIO_S6G_IB_CFG2_IB_OCALS(x) (((x) << 10) & GENMASK(15, 10))
-#define HSIO_S6G_IB_CFG2_IB_OCALS_M GENMASK(15, 10)
-#define HSIO_S6G_IB_CFG2_IB_OCALS_X(x) (((x) & GENMASK(15, 10)) >> 10)
-#define HSIO_S6G_IB_CFG2_IB_TCALV(x) (((x) << 5) & GENMASK(9, 5))
-#define HSIO_S6G_IB_CFG2_IB_TCALV_M GENMASK(9, 5)
-#define HSIO_S6G_IB_CFG2_IB_TCALV_X(x) (((x) & GENMASK(9, 5)) >> 5)
-#define HSIO_S6G_IB_CFG2_IB_UMAX(x) (((x) << 3) & GENMASK(4, 3))
-#define HSIO_S6G_IB_CFG2_IB_UMAX_M GENMASK(4, 3)
-#define HSIO_S6G_IB_CFG2_IB_UMAX_X(x) (((x) & GENMASK(4, 3)) >> 3)
-#define HSIO_S6G_IB_CFG2_IB_UREG(x) ((x) & GENMASK(2, 0))
-#define HSIO_S6G_IB_CFG2_IB_UREG_M GENMASK(2, 0)
-
-#define HSIO_S6G_IB_CFG3_IB_INI_HP(x) (((x) << 18) & GENMASK(23, 18))
-#define HSIO_S6G_IB_CFG3_IB_INI_HP_M GENMASK(23, 18)
-#define HSIO_S6G_IB_CFG3_IB_INI_HP_X(x) (((x) & GENMASK(23, 18)) >> 18)
-#define HSIO_S6G_IB_CFG3_IB_INI_MID(x) (((x) << 12) & GENMASK(17, 12))
-#define HSIO_S6G_IB_CFG3_IB_INI_MID_M GENMASK(17, 12)
-#define HSIO_S6G_IB_CFG3_IB_INI_MID_X(x) (((x) & GENMASK(17, 12)) >> 12)
-#define HSIO_S6G_IB_CFG3_IB_INI_LP(x) (((x) << 6) & GENMASK(11, 6))
-#define HSIO_S6G_IB_CFG3_IB_INI_LP_M GENMASK(11, 6)
-#define HSIO_S6G_IB_CFG3_IB_INI_LP_X(x) (((x) & GENMASK(11, 6)) >> 6)
-#define HSIO_S6G_IB_CFG3_IB_INI_OFFSET(x) ((x) & GENMASK(5, 0))
-#define HSIO_S6G_IB_CFG3_IB_INI_OFFSET_M GENMASK(5, 0)
-
-#define HSIO_S6G_IB_CFG4_IB_MAX_HP(x) (((x) << 18) & GENMASK(23, 18))
-#define HSIO_S6G_IB_CFG4_IB_MAX_HP_M GENMASK(23, 18)
-#define HSIO_S6G_IB_CFG4_IB_MAX_HP_X(x) (((x) & GENMASK(23, 18)) >> 18)
-#define HSIO_S6G_IB_CFG4_IB_MAX_MID(x) (((x) << 12) & GENMASK(17, 12))
-#define HSIO_S6G_IB_CFG4_IB_MAX_MID_M GENMASK(17, 12)
-#define HSIO_S6G_IB_CFG4_IB_MAX_MID_X(x) (((x) & GENMASK(17, 12)) >> 12)
-#define HSIO_S6G_IB_CFG4_IB_MAX_LP(x) (((x) << 6) & GENMASK(11, 6))
-#define HSIO_S6G_IB_CFG4_IB_MAX_LP_M GENMASK(11, 6)
-#define HSIO_S6G_IB_CFG4_IB_MAX_LP_X(x) (((x) & GENMASK(11, 6)) >> 6)
-#define HSIO_S6G_IB_CFG4_IB_MAX_OFFSET(x) ((x) & GENMASK(5, 0))
-#define HSIO_S6G_IB_CFG4_IB_MAX_OFFSET_M GENMASK(5, 0)
-
-#define HSIO_S6G_IB_CFG5_IB_MIN_HP(x) (((x) << 18) & GENMASK(23, 18))
-#define HSIO_S6G_IB_CFG5_IB_MIN_HP_M GENMASK(23, 18)
-#define HSIO_S6G_IB_CFG5_IB_MIN_HP_X(x) (((x) & GENMASK(23, 18)) >> 18)
-#define HSIO_S6G_IB_CFG5_IB_MIN_MID(x) (((x) << 12) & GENMASK(17, 12))
-#define HSIO_S6G_IB_CFG5_IB_MIN_MID_M GENMASK(17, 12)
-#define HSIO_S6G_IB_CFG5_IB_MIN_MID_X(x) (((x) & GENMASK(17, 12)) >> 12)
-#define HSIO_S6G_IB_CFG5_IB_MIN_LP(x) (((x) << 6) & GENMASK(11, 6))
-#define HSIO_S6G_IB_CFG5_IB_MIN_LP_M GENMASK(11, 6)
-#define HSIO_S6G_IB_CFG5_IB_MIN_LP_X(x) (((x) & GENMASK(11, 6)) >> 6)
-#define HSIO_S6G_IB_CFG5_IB_MIN_OFFSET(x) ((x) & GENMASK(5, 0))
-#define HSIO_S6G_IB_CFG5_IB_MIN_OFFSET_M GENMASK(5, 0)
-
-#define HSIO_S6G_OB_CFG_OB_IDLE BIT(31)
-#define HSIO_S6G_OB_CFG_OB_ENA1V_MODE BIT(30)
-#define HSIO_S6G_OB_CFG_OB_POL BIT(29)
-#define HSIO_S6G_OB_CFG_OB_POST0(x) (((x) << 23) & GENMASK(28, 23))
-#define HSIO_S6G_OB_CFG_OB_POST0_M GENMASK(28, 23)
-#define HSIO_S6G_OB_CFG_OB_POST0_X(x) (((x) & GENMASK(28, 23)) >> 23)
-#define HSIO_S6G_OB_CFG_OB_PREC(x) (((x) << 18) & GENMASK(22, 18))
-#define HSIO_S6G_OB_CFG_OB_PREC_M GENMASK(22, 18)
-#define HSIO_S6G_OB_CFG_OB_PREC_X(x) (((x) & GENMASK(22, 18)) >> 18)
-#define HSIO_S6G_OB_CFG_OB_R_ADJ_MUX BIT(17)
-#define HSIO_S6G_OB_CFG_OB_R_ADJ_PDR BIT(16)
-#define HSIO_S6G_OB_CFG_OB_POST1(x) (((x) << 11) & GENMASK(15, 11))
-#define HSIO_S6G_OB_CFG_OB_POST1_M GENMASK(15, 11)
-#define HSIO_S6G_OB_CFG_OB_POST1_X(x) (((x) & GENMASK(15, 11)) >> 11)
-#define HSIO_S6G_OB_CFG_OB_R_COR BIT(10)
-#define HSIO_S6G_OB_CFG_OB_SEL_RCTRL BIT(9)
-#define HSIO_S6G_OB_CFG_OB_SR_H BIT(8)
-#define HSIO_S6G_OB_CFG_OB_SR(x) (((x) << 4) & GENMASK(7, 4))
-#define HSIO_S6G_OB_CFG_OB_SR_M GENMASK(7, 4)
-#define HSIO_S6G_OB_CFG_OB_SR_X(x) (((x) & GENMASK(7, 4)) >> 4)
-#define HSIO_S6G_OB_CFG_OB_RESISTOR_CTRL(x) ((x) & GENMASK(3, 0))
-#define HSIO_S6G_OB_CFG_OB_RESISTOR_CTRL_M GENMASK(3, 0)
-
-#define HSIO_S6G_OB_CFG1_OB_ENA_CAS(x) (((x) << 6) & GENMASK(8, 6))
-#define HSIO_S6G_OB_CFG1_OB_ENA_CAS_M GENMASK(8, 6)
-#define HSIO_S6G_OB_CFG1_OB_ENA_CAS_X(x) (((x) & GENMASK(8, 6)) >> 6)
-#define HSIO_S6G_OB_CFG1_OB_LEV(x) ((x) & GENMASK(5, 0))
-#define HSIO_S6G_OB_CFG1_OB_LEV_M GENMASK(5, 0)
-
-#define HSIO_S6G_SER_CFG_SER_4TAP_ENA BIT(8)
-#define HSIO_S6G_SER_CFG_SER_CPMD_SEL BIT(7)
-#define HSIO_S6G_SER_CFG_SER_SWAP_CPMD BIT(6)
-#define HSIO_S6G_SER_CFG_SER_ALISEL(x) (((x) << 4) & GENMASK(5, 4))
-#define HSIO_S6G_SER_CFG_SER_ALISEL_M GENMASK(5, 4)
-#define HSIO_S6G_SER_CFG_SER_ALISEL_X(x) (((x) & GENMASK(5, 4)) >> 4)
-#define HSIO_S6G_SER_CFG_SER_ENHYS BIT(3)
-#define HSIO_S6G_SER_CFG_SER_BIG_WIN BIT(2)
-#define HSIO_S6G_SER_CFG_SER_EN_WIN BIT(1)
-#define HSIO_S6G_SER_CFG_SER_ENALI BIT(0)
-
-#define HSIO_S6G_COMMON_CFG_SYS_RST BIT(17)
-#define HSIO_S6G_COMMON_CFG_SE_DIV2_ENA BIT(16)
-#define HSIO_S6G_COMMON_CFG_SE_AUTO_SQUELCH_ENA BIT(15)
-#define HSIO_S6G_COMMON_CFG_ENA_LANE BIT(14)
-#define HSIO_S6G_COMMON_CFG_PWD_RX BIT(13)
-#define HSIO_S6G_COMMON_CFG_PWD_TX BIT(12)
-#define HSIO_S6G_COMMON_CFG_LANE_CTRL(x) (((x) << 9) & GENMASK(11, 9))
-#define HSIO_S6G_COMMON_CFG_LANE_CTRL_M GENMASK(11, 9)
-#define HSIO_S6G_COMMON_CFG_LANE_CTRL_X(x) (((x) & GENMASK(11, 9)) >> 9)
-#define HSIO_S6G_COMMON_CFG_ENA_DIRECT BIT(8)
-#define HSIO_S6G_COMMON_CFG_ENA_ELOOP BIT(7)
-#define HSIO_S6G_COMMON_CFG_ENA_FLOOP BIT(6)
-#define HSIO_S6G_COMMON_CFG_ENA_ILOOP BIT(5)
-#define HSIO_S6G_COMMON_CFG_ENA_PLOOP BIT(4)
-#define HSIO_S6G_COMMON_CFG_HRATE BIT(3)
-#define HSIO_S6G_COMMON_CFG_QRATE BIT(2)
-#define HSIO_S6G_COMMON_CFG_IF_MODE(x) ((x) & GENMASK(1, 0))
-#define HSIO_S6G_COMMON_CFG_IF_MODE_M GENMASK(1, 0)
-
-#define HSIO_S6G_PLL_CFG_PLL_ENA_OFFS(x) (((x) << 16) & GENMASK(17, 16))
-#define HSIO_S6G_PLL_CFG_PLL_ENA_OFFS_M GENMASK(17, 16)
-#define HSIO_S6G_PLL_CFG_PLL_ENA_OFFS_X(x) (((x) & GENMASK(17, 16)) >> 16)
-#define HSIO_S6G_PLL_CFG_PLL_DIV4 BIT(15)
-#define HSIO_S6G_PLL_CFG_PLL_ENA_ROT BIT(14)
-#define HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA(x) (((x) << 6) & GENMASK(13, 6))
-#define HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA_M GENMASK(13, 6)
-#define HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA_X(x) (((x) & GENMASK(13, 6)) >> 6)
-#define HSIO_S6G_PLL_CFG_PLL_FSM_ENA BIT(5)
-#define HSIO_S6G_PLL_CFG_PLL_FSM_FORCE_SET_ENA BIT(4)
-#define HSIO_S6G_PLL_CFG_PLL_FSM_OOR_RECAL_ENA BIT(3)
-#define HSIO_S6G_PLL_CFG_PLL_RB_DATA_SEL BIT(2)
-#define HSIO_S6G_PLL_CFG_PLL_ROT_DIR BIT(1)
-#define HSIO_S6G_PLL_CFG_PLL_ROT_FRQ BIT(0)
-
-#define HSIO_S6G_ACJTAG_CFG_ACJTAG_INIT_DATA_N BIT(5)
-#define HSIO_S6G_ACJTAG_CFG_ACJTAG_INIT_DATA_P BIT(4)
-#define HSIO_S6G_ACJTAG_CFG_ACJTAG_INIT_CLK BIT(3)
-#define HSIO_S6G_ACJTAG_CFG_OB_DIRECT BIT(2)
-#define HSIO_S6G_ACJTAG_CFG_ACJTAG_ENA BIT(1)
-#define HSIO_S6G_ACJTAG_CFG_JTAG_CTRL_ENA BIT(0)
-
-#define HSIO_S6G_GP_CFG_GP_MSB(x) (((x) << 16) & GENMASK(31, 16))
-#define HSIO_S6G_GP_CFG_GP_MSB_M GENMASK(31, 16)
-#define HSIO_S6G_GP_CFG_GP_MSB_X(x) (((x) & GENMASK(31, 16)) >> 16)
-#define HSIO_S6G_GP_CFG_GP_LSB(x) ((x) & GENMASK(15, 0))
-#define HSIO_S6G_GP_CFG_GP_LSB_M GENMASK(15, 0)
-
-#define HSIO_S6G_IB_STATUS0_IB_CAL_DONE BIT(8)
-#define HSIO_S6G_IB_STATUS0_IB_HP_GAIN_ACT BIT(7)
-#define HSIO_S6G_IB_STATUS0_IB_MID_GAIN_ACT BIT(6)
-#define HSIO_S6G_IB_STATUS0_IB_LP_GAIN_ACT BIT(5)
-#define HSIO_S6G_IB_STATUS0_IB_OFFSET_ACT BIT(4)
-#define HSIO_S6G_IB_STATUS0_IB_OFFSET_VLD BIT(3)
-#define HSIO_S6G_IB_STATUS0_IB_OFFSET_ERR BIT(2)
-#define HSIO_S6G_IB_STATUS0_IB_OFFSDIR BIT(1)
-#define HSIO_S6G_IB_STATUS0_IB_SIG_DET BIT(0)
-
-#define HSIO_S6G_IB_STATUS1_IB_HP_GAIN_STAT(x) (((x) << 18) & GENMASK(23, 18))
-#define HSIO_S6G_IB_STATUS1_IB_HP_GAIN_STAT_M GENMASK(23, 18)
-#define HSIO_S6G_IB_STATUS1_IB_HP_GAIN_STAT_X(x) (((x) & GENMASK(23, 18)) >> 18)
-#define HSIO_S6G_IB_STATUS1_IB_MID_GAIN_STAT(x) (((x) << 12) & GENMASK(17, 12))
-#define HSIO_S6G_IB_STATUS1_IB_MID_GAIN_STAT_M GENMASK(17, 12)
-#define HSIO_S6G_IB_STATUS1_IB_MID_GAIN_STAT_X(x) (((x) & GENMASK(17, 12)) >> 12)
-#define HSIO_S6G_IB_STATUS1_IB_LP_GAIN_STAT(x) (((x) << 6) & GENMASK(11, 6))
-#define HSIO_S6G_IB_STATUS1_IB_LP_GAIN_STAT_M GENMASK(11, 6)
-#define HSIO_S6G_IB_STATUS1_IB_LP_GAIN_STAT_X(x) (((x) & GENMASK(11, 6)) >> 6)
-#define HSIO_S6G_IB_STATUS1_IB_OFFSET_STAT(x) ((x) & GENMASK(5, 0))
-#define HSIO_S6G_IB_STATUS1_IB_OFFSET_STAT_M GENMASK(5, 0)
-
-#define HSIO_S6G_ACJTAG_STATUS_ACJTAG_CAPT_DATA_N BIT(2)
-#define HSIO_S6G_ACJTAG_STATUS_ACJTAG_CAPT_DATA_P BIT(1)
-#define HSIO_S6G_ACJTAG_STATUS_IB_DIRECT BIT(0)
-
-#define HSIO_S6G_PLL_STATUS_PLL_CAL_NOT_DONE BIT(10)
-#define HSIO_S6G_PLL_STATUS_PLL_CAL_ERR BIT(9)
-#define HSIO_S6G_PLL_STATUS_PLL_OUT_OF_RANGE_ERR BIT(8)
-#define HSIO_S6G_PLL_STATUS_PLL_RB_DATA(x) ((x) & GENMASK(7, 0))
-#define HSIO_S6G_PLL_STATUS_PLL_RB_DATA_M GENMASK(7, 0)
-
-#define HSIO_S6G_REVID_SERDES_REV(x) (((x) << 26) & GENMASK(31, 26))
-#define HSIO_S6G_REVID_SERDES_REV_M GENMASK(31, 26)
-#define HSIO_S6G_REVID_SERDES_REV_X(x) (((x) & GENMASK(31, 26)) >> 26)
-#define HSIO_S6G_REVID_RCPLL_REV(x) (((x) << 21) & GENMASK(25, 21))
-#define HSIO_S6G_REVID_RCPLL_REV_M GENMASK(25, 21)
-#define HSIO_S6G_REVID_RCPLL_REV_X(x) (((x) & GENMASK(25, 21)) >> 21)
-#define HSIO_S6G_REVID_SER_REV(x) (((x) << 16) & GENMASK(20, 16))
-#define HSIO_S6G_REVID_SER_REV_M GENMASK(20, 16)
-#define HSIO_S6G_REVID_SER_REV_X(x) (((x) & GENMASK(20, 16)) >> 16)
-#define HSIO_S6G_REVID_DES_REV(x) (((x) << 10) & GENMASK(15, 10))
-#define HSIO_S6G_REVID_DES_REV_M GENMASK(15, 10)
-#define HSIO_S6G_REVID_DES_REV_X(x) (((x) & GENMASK(15, 10)) >> 10)
-#define HSIO_S6G_REVID_OB_REV(x) (((x) << 5) & GENMASK(9, 5))
-#define HSIO_S6G_REVID_OB_REV_M GENMASK(9, 5)
-#define HSIO_S6G_REVID_OB_REV_X(x) (((x) & GENMASK(9, 5)) >> 5)
-#define HSIO_S6G_REVID_IB_REV(x) ((x) & GENMASK(4, 0))
-#define HSIO_S6G_REVID_IB_REV_M GENMASK(4, 0)
-
-#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_WR_ONE_SHOT BIT(31)
-#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_RD_ONE_SHOT BIT(30)
-#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_ADDR(x) ((x) & GENMASK(24, 0))
-#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_ADDR_M GENMASK(24, 0)
-
-#define HSIO_HW_CFG_DEV2G5_10_MODE BIT(6)
-#define HSIO_HW_CFG_DEV1G_9_MODE BIT(5)
-#define HSIO_HW_CFG_DEV1G_6_MODE BIT(4)
-#define HSIO_HW_CFG_DEV1G_5_MODE BIT(3)
-#define HSIO_HW_CFG_DEV1G_4_MODE BIT(2)
-#define HSIO_HW_CFG_PCIE_ENA BIT(1)
-#define HSIO_HW_CFG_QSGMII_ENA BIT(0)
-
-#define HSIO_HW_QSGMII_CFG_SHYST_DIS BIT(3)
-#define HSIO_HW_QSGMII_CFG_E_DET_ENA BIT(2)
-#define HSIO_HW_QSGMII_CFG_USE_I1_ENA BIT(1)
-#define HSIO_HW_QSGMII_CFG_FLIP_LANES BIT(0)
-
-#define HSIO_HW_QSGMII_STAT_DELAY_VAR_X200PS(x) (((x) << 1) & GENMASK(6, 1))
-#define HSIO_HW_QSGMII_STAT_DELAY_VAR_X200PS_M GENMASK(6, 1)
-#define HSIO_HW_QSGMII_STAT_DELAY_VAR_X200PS_X(x) (((x) & GENMASK(6, 1)) >> 1)
-#define HSIO_HW_QSGMII_STAT_SYNC BIT(0)
-
-#define HSIO_CLK_CFG_CLKDIV_PHY(x) (((x) << 1) & GENMASK(8, 1))
-#define HSIO_CLK_CFG_CLKDIV_PHY_M GENMASK(8, 1)
-#define HSIO_CLK_CFG_CLKDIV_PHY_X(x) (((x) & GENMASK(8, 1)) >> 1)
-#define HSIO_CLK_CFG_CLKDIV_PHY_DIS BIT(0)
-
-#define HSIO_TEMP_SENSOR_CTRL_FORCE_TEMP_RD BIT(5)
-#define HSIO_TEMP_SENSOR_CTRL_FORCE_RUN BIT(4)
-#define HSIO_TEMP_SENSOR_CTRL_FORCE_NO_RST BIT(3)
-#define HSIO_TEMP_SENSOR_CTRL_FORCE_POWER_UP BIT(2)
-#define HSIO_TEMP_SENSOR_CTRL_FORCE_CLK BIT(1)
-#define HSIO_TEMP_SENSOR_CTRL_SAMPLE_ENA BIT(0)
-
-#define HSIO_TEMP_SENSOR_CFG_RUN_WID(x) (((x) << 8) & GENMASK(15, 8))
-#define HSIO_TEMP_SENSOR_CFG_RUN_WID_M GENMASK(15, 8)
-#define HSIO_TEMP_SENSOR_CFG_RUN_WID_X(x) (((x) & GENMASK(15, 8)) >> 8)
-#define HSIO_TEMP_SENSOR_CFG_SAMPLE_PER(x) ((x) & GENMASK(7, 0))
-#define HSIO_TEMP_SENSOR_CFG_SAMPLE_PER_M GENMASK(7, 0)
-
-#define HSIO_TEMP_SENSOR_STAT_TEMP_VALID BIT(8)
-#define HSIO_TEMP_SENSOR_STAT_TEMP(x) ((x) & GENMASK(7, 0))
-#define HSIO_TEMP_SENSOR_STAT_TEMP_M GENMASK(7, 0)
-
-#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_regs.c b/drivers/net/ethernet/mscc/ocelot_regs.c
index e334b406c40c..9271af18b93b 100644
--- a/drivers/net/ethernet/mscc/ocelot_regs.c
+++ b/drivers/net/ethernet/mscc/ocelot_regs.c
@@ -5,6 +5,7 @@
* Copyright (c) 2017 Microsemi Corporation
*/
#include "ocelot.h"
+#include <soc/mscc/ocelot_hsio.h>
static const u32 ocelot_ana_regmap[] = {
REG(ANA_ADVLEARN, 0x009000),
@@ -102,82 +103,6 @@ static const u32 ocelot_qs_regmap[] = {
REG(QS_INH_DBG, 0x000048),
};
-static const u32 ocelot_hsio_regmap[] = {
- REG(HSIO_PLL5G_CFG0, 0x000000),
- REG(HSIO_PLL5G_CFG1, 0x000004),
- REG(HSIO_PLL5G_CFG2, 0x000008),
- REG(HSIO_PLL5G_CFG3, 0x00000c),
- REG(HSIO_PLL5G_CFG4, 0x000010),
- REG(HSIO_PLL5G_CFG5, 0x000014),
- REG(HSIO_PLL5G_CFG6, 0x000018),
- REG(HSIO_PLL5G_STATUS0, 0x00001c),
- REG(HSIO_PLL5G_STATUS1, 0x000020),
- REG(HSIO_PLL5G_BIST_CFG0, 0x000024),
- REG(HSIO_PLL5G_BIST_CFG1, 0x000028),
- REG(HSIO_PLL5G_BIST_CFG2, 0x00002c),
- REG(HSIO_PLL5G_BIST_STAT0, 0x000030),
- REG(HSIO_PLL5G_BIST_STAT1, 0x000034),
- REG(HSIO_RCOMP_CFG0, 0x000038),
- REG(HSIO_RCOMP_STATUS, 0x00003c),
- REG(HSIO_SYNC_ETH_CFG, 0x000040),
- REG(HSIO_SYNC_ETH_PLL_CFG, 0x000048),
- REG(HSIO_S1G_DES_CFG, 0x00004c),
- REG(HSIO_S1G_IB_CFG, 0x000050),
- REG(HSIO_S1G_OB_CFG, 0x000054),
- REG(HSIO_S1G_SER_CFG, 0x000058),
- REG(HSIO_S1G_COMMON_CFG, 0x00005c),
- REG(HSIO_S1G_PLL_CFG, 0x000060),
- REG(HSIO_S1G_PLL_STATUS, 0x000064),
- REG(HSIO_S1G_DFT_CFG0, 0x000068),
- REG(HSIO_S1G_DFT_CFG1, 0x00006c),
- REG(HSIO_S1G_DFT_CFG2, 0x000070),
- REG(HSIO_S1G_TP_CFG, 0x000074),
- REG(HSIO_S1G_RC_PLL_BIST_CFG, 0x000078),
- REG(HSIO_S1G_MISC_CFG, 0x00007c),
- REG(HSIO_S1G_DFT_STATUS, 0x000080),
- REG(HSIO_S1G_MISC_STATUS, 0x000084),
- REG(HSIO_MCB_S1G_ADDR_CFG, 0x000088),
- REG(HSIO_S6G_DIG_CFG, 0x00008c),
- REG(HSIO_S6G_DFT_CFG0, 0x000090),
- REG(HSIO_S6G_DFT_CFG1, 0x000094),
- REG(HSIO_S6G_DFT_CFG2, 0x000098),
- REG(HSIO_S6G_TP_CFG0, 0x00009c),
- REG(HSIO_S6G_TP_CFG1, 0x0000a0),
- REG(HSIO_S6G_RC_PLL_BIST_CFG, 0x0000a4),
- REG(HSIO_S6G_MISC_CFG, 0x0000a8),
- REG(HSIO_S6G_OB_ANEG_CFG, 0x0000ac),
- REG(HSIO_S6G_DFT_STATUS, 0x0000b0),
- REG(HSIO_S6G_ERR_CNT, 0x0000b4),
- REG(HSIO_S6G_MISC_STATUS, 0x0000b8),
- REG(HSIO_S6G_DES_CFG, 0x0000bc),
- REG(HSIO_S6G_IB_CFG, 0x0000c0),
- REG(HSIO_S6G_IB_CFG1, 0x0000c4),
- REG(HSIO_S6G_IB_CFG2, 0x0000c8),
- REG(HSIO_S6G_IB_CFG3, 0x0000cc),
- REG(HSIO_S6G_IB_CFG4, 0x0000d0),
- REG(HSIO_S6G_IB_CFG5, 0x0000d4),
- REG(HSIO_S6G_OB_CFG, 0x0000d8),
- REG(HSIO_S6G_OB_CFG1, 0x0000dc),
- REG(HSIO_S6G_SER_CFG, 0x0000e0),
- REG(HSIO_S6G_COMMON_CFG, 0x0000e4),
- REG(HSIO_S6G_PLL_CFG, 0x0000e8),
- REG(HSIO_S6G_ACJTAG_CFG, 0x0000ec),
- REG(HSIO_S6G_GP_CFG, 0x0000f0),
- REG(HSIO_S6G_IB_STATUS0, 0x0000f4),
- REG(HSIO_S6G_IB_STATUS1, 0x0000f8),
- REG(HSIO_S6G_ACJTAG_STATUS, 0x0000fc),
- REG(HSIO_S6G_PLL_STATUS, 0x000100),
- REG(HSIO_S6G_REVID, 0x000104),
- REG(HSIO_MCB_S6G_ADDR_CFG, 0x000108),
- REG(HSIO_HW_CFG, 0x00010c),
- REG(HSIO_HW_QSGMII_CFG, 0x000110),
- REG(HSIO_HW_QSGMII_STAT, 0x000114),
- REG(HSIO_CLK_CFG, 0x000118),
- REG(HSIO_TEMP_SENSOR_CTRL, 0x00011c),
- REG(HSIO_TEMP_SENSOR_CFG, 0x000120),
- REG(HSIO_TEMP_SENSOR_STAT, 0x000124),
-};
-
static const u32 ocelot_qsys_regmap[] = {
REG(QSYS_PORT_MODE, 0x011200),
REG(QSYS_SWITCH_PORT_MODE, 0x011234),
@@ -302,7 +227,6 @@ static const u32 ocelot_sys_regmap[] = {
static const u32 *ocelot_regmap[] = {
[ANA] = ocelot_ana_regmap,
[QS] = ocelot_qs_regmap,
- [HSIO] = ocelot_hsio_regmap,
[QSYS] = ocelot_qsys_regmap,
[REW] = ocelot_rew_regmap,
[SYS] = ocelot_sys_regmap,
@@ -453,9 +377,11 @@ static void ocelot_pll5_init(struct ocelot *ocelot)
/* Configure PLL5. This will need a proper CCF driver
* The values are coming from the VTSS API for Ocelot
*/
- ocelot_write(ocelot, HSIO_PLL5G_CFG4_IB_CTRL(0x7600) |
- HSIO_PLL5G_CFG4_IB_BIAS_CTRL(0x8), HSIO_PLL5G_CFG4);
- ocelot_write(ocelot, HSIO_PLL5G_CFG0_CORE_CLK_DIV(0x11) |
+ regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG4,
+ HSIO_PLL5G_CFG4_IB_CTRL(0x7600) |
+ HSIO_PLL5G_CFG4_IB_BIAS_CTRL(0x8));
+ regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG0,
+ HSIO_PLL5G_CFG0_CORE_CLK_DIV(0x11) |
HSIO_PLL5G_CFG0_CPU_CLK_DIV(2) |
HSIO_PLL5G_CFG0_ENA_BIAS |
HSIO_PLL5G_CFG0_ENA_VCO_BUF |
@@ -465,13 +391,14 @@ static void ocelot_pll5_init(struct ocelot *ocelot)
HSIO_PLL5G_CFG0_SELBGV820(4) |
HSIO_PLL5G_CFG0_DIV4 |
HSIO_PLL5G_CFG0_ENA_CLKTREE |
- HSIO_PLL5G_CFG0_ENA_LANE, HSIO_PLL5G_CFG0);
- ocelot_write(ocelot, HSIO_PLL5G_CFG2_EN_RESET_FRQ_DET |
+ HSIO_PLL5G_CFG0_ENA_LANE);
+ regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG2,
+ HSIO_PLL5G_CFG2_EN_RESET_FRQ_DET |
HSIO_PLL5G_CFG2_EN_RESET_OVERRUN |
HSIO_PLL5G_CFG2_GAIN_TEST(0x8) |
HSIO_PLL5G_CFG2_ENA_AMPCTRL |
HSIO_PLL5G_CFG2_PWD_AMPCTRL_N |
- HSIO_PLL5G_CFG2_AMPC_SEL(0x10), HSIO_PLL5G_CFG2);
+ HSIO_PLL5G_CFG2_AMPC_SEL(0x10));
}
int ocelot_chip_init(struct ocelot *ocelot)
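
With HSIO handled as an external regmap, writes go through the generic regmap API rather than the driver's ocelot_write() helper, which is why every converted call site above also swaps its last two arguments: ocelot_write() takes value then register, while regmap_write() takes register then value. A minimal sketch follows; the offset and value are illustrative (taken from the removed regmap table and the PLL5 init), not a definitive sequence.

#include <linux/regmap.h>

static int example_hsio_write(struct regmap *hsio)
{
	/* int regmap_write(struct regmap *map, unsigned int reg, unsigned int val); */
	return regmap_write(hsio, 0x000010 /* e.g. HSIO_PLL5G_CFG4 offset */,
			    0x7600 /* e.g. an IB_CTRL field value */);
}
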
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index b2d2ec8c11e2..5f384f73007d 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -70,7 +70,6 @@
#include <net/tcp.h>
#include <asm/byteorder.h>
#include <asm/processor.h>
-#include <net/busy_poll.h>
#include "myri10ge_mcp.h"
#include "myri10ge_mcp_gen_header.h"
diff --git a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
index 5b06f07c78cd..3c661f422688 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
@@ -1,36 +1,5 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-/*
- * Copyright (C) 2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc. */
#include <linux/kernel.h>
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c
index 305ac07dc1e7..c0830c0c2c3f 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/main.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.c
@@ -1,36 +1,5 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-/*
- * Copyright (C) 2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc. */
#include <linux/bitfield.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.h b/drivers/net/ethernet/netronome/nfp/abm/main.h
index 934a70835473..f907b7d98917 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/main.h
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.h
@@ -1,36 +1,5 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
-/*
- * Copyright (C) 2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2018 Netronome Systems, Inc. */
#ifndef __NFP_ABM_H__
#define __NFP_ABM_H__ 1
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
index 2572a4b91c7c..9b6cfa697879 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/bpf.h>
#include <linux/bitops.h>
@@ -89,15 +59,32 @@ nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
return skb;
}
+static unsigned int
+nfp_bpf_cmsg_map_req_size(struct nfp_app_bpf *bpf, unsigned int n)
+{
+ unsigned int size;
+
+ size = sizeof(struct cmsg_req_map_op);
+ size += (bpf->cmsg_key_sz + bpf->cmsg_val_sz) * n;
+
+ return size;
+}
+
static struct sk_buff *
nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n)
{
+ return nfp_bpf_cmsg_alloc(bpf, nfp_bpf_cmsg_map_req_size(bpf, n));
+}
+
+static unsigned int
+nfp_bpf_cmsg_map_reply_size(struct nfp_app_bpf *bpf, unsigned int n)
+{
unsigned int size;
- size = sizeof(struct cmsg_req_map_op);
- size += sizeof(struct cmsg_key_value_pair) * n;
+ size = sizeof(struct cmsg_reply_map_op);
+ size += (bpf->cmsg_key_sz + bpf->cmsg_val_sz) * n;
- return nfp_bpf_cmsg_alloc(bpf, size);
+ return size;
}
static u8 nfp_bpf_cmsg_get_type(struct sk_buff *skb)
@@ -338,6 +325,34 @@ void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map)
dev_consume_skb_any(skb);
}
+static void *
+nfp_bpf_ctrl_req_key(struct nfp_app_bpf *bpf, struct cmsg_req_map_op *req,
+ unsigned int n)
+{
+ return &req->data[bpf->cmsg_key_sz * n + bpf->cmsg_val_sz * n];
+}
+
+static void *
+nfp_bpf_ctrl_req_val(struct nfp_app_bpf *bpf, struct cmsg_req_map_op *req,
+ unsigned int n)
+{
+ return &req->data[bpf->cmsg_key_sz * (n + 1) + bpf->cmsg_val_sz * n];
+}
+
+static void *
+nfp_bpf_ctrl_reply_key(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply,
+ unsigned int n)
+{
+ return &reply->data[bpf->cmsg_key_sz * n + bpf->cmsg_val_sz * n];
+}
+
+static void *
+nfp_bpf_ctrl_reply_val(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply,
+ unsigned int n)
+{
+ return &reply->data[bpf->cmsg_key_sz * (n + 1) + bpf->cmsg_val_sz * n];
+}
+
static int
nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
enum nfp_bpf_cmsg_type op,
@@ -366,12 +381,13 @@ nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
/* Copy inputs */
if (key)
- memcpy(&req->elem[0].key, key, map->key_size);
+ memcpy(nfp_bpf_ctrl_req_key(bpf, req, 0), key, map->key_size);
if (value)
- memcpy(&req->elem[0].value, value, map->value_size);
+ memcpy(nfp_bpf_ctrl_req_val(bpf, req, 0), value,
+ map->value_size);
skb = nfp_bpf_cmsg_communicate(bpf, skb, op,
- sizeof(*reply) + sizeof(*reply->elem));
+ nfp_bpf_cmsg_map_reply_size(bpf, 1));
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -382,9 +398,11 @@ nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
/* Copy outputs */
if (out_key)
- memcpy(out_key, &reply->elem[0].key, map->key_size);
+ memcpy(out_key, nfp_bpf_ctrl_reply_key(bpf, reply, 0),
+ map->key_size);
if (out_value)
- memcpy(out_value, &reply->elem[0].value, map->value_size);
+ memcpy(out_value, nfp_bpf_ctrl_reply_val(bpf, reply, 0),
+ map->value_size);
dev_consume_skb_any(skb);
@@ -428,6 +446,13 @@ int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
key, NULL, 0, next_key, NULL);
}
+unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf)
+{
+ return max3((unsigned int)NFP_NET_DEFAULT_MTU,
+ nfp_bpf_cmsg_map_req_size(bpf, 1),
+ nfp_bpf_cmsg_map_reply_size(bpf, 1));
+}
+
void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
{
struct nfp_app_bpf *bpf = app->priv;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/fw.h b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
index e4f9b7ec8528..721921bcf120 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/fw.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#ifndef NFP_BPF_FW_H
#define NFP_BPF_FW_H 1
@@ -52,6 +22,7 @@ enum bpf_cap_tlv_type {
NFP_BPF_CAP_TYPE_RANDOM = 4,
NFP_BPF_CAP_TYPE_QUEUE_SELECT = 5,
NFP_BPF_CAP_TYPE_ADJUST_TAIL = 6,
+ NFP_BPF_CAP_TYPE_ABI_VERSION = 7,
};
struct nfp_bpf_cap_tlv_func {
@@ -98,6 +69,7 @@ enum nfp_bpf_cmsg_type {
#define CMSG_TYPE_MAP_REPLY_BIT 7
#define __CMSG_REPLY(req) (BIT(CMSG_TYPE_MAP_REPLY_BIT) | (req))
+/* BPF ABIv2 fixed-length control message fields */
#define CMSG_MAP_KEY_LW 16
#define CMSG_MAP_VALUE_LW 16
@@ -147,24 +119,19 @@ struct cmsg_reply_map_free_tbl {
__be32 count;
};
-struct cmsg_key_value_pair {
- __be32 key[CMSG_MAP_KEY_LW];
- __be32 value[CMSG_MAP_VALUE_LW];
-};
-
struct cmsg_req_map_op {
struct cmsg_hdr hdr;
__be32 tid;
__be32 count;
__be32 flags;
- struct cmsg_key_value_pair elem[0];
+ u8 data[0];
};
struct cmsg_reply_map_op {
struct cmsg_reply_map_simple reply_hdr;
__be32 count;
__be32 resv;
- struct cmsg_key_value_pair elem[0];
+ u8 data[0];
};
struct cmsg_bpf_event {
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index eff57f7d056a..97d33bb4d84d 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2016-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2016-2018 Netronome Systems, Inc. */
#define pr_fmt(fmt) "NFP net bpf: " fmt
@@ -267,6 +237,38 @@ emit_br_bset(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr, u8 defer)
}
static void
+__emit_br_alu(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
+ u8 defer, bool dst_lmextn, bool src_lmextn)
+{
+ u64 insn;
+
+ insn = OP_BR_ALU_BASE |
+ FIELD_PREP(OP_BR_ALU_A_SRC, areg) |
+ FIELD_PREP(OP_BR_ALU_B_SRC, breg) |
+ FIELD_PREP(OP_BR_ALU_DEFBR, defer) |
+ FIELD_PREP(OP_BR_ALU_IMM_HI, imm_hi) |
+ FIELD_PREP(OP_BR_ALU_SRC_LMEXTN, src_lmextn) |
+ FIELD_PREP(OP_BR_ALU_DST_LMEXTN, dst_lmextn);
+
+ nfp_prog_push(nfp_prog, insn);
+}
+
+static void emit_rtn(struct nfp_prog *nfp_prog, swreg base, u8 defer)
+{
+ struct nfp_insn_ur_regs reg;
+ int err;
+
+ err = swreg_to_unrestricted(reg_none(), base, reg_imm(0), &reg);
+ if (err) {
+ nfp_prog->error = err;
+ return;
+ }
+
+ __emit_br_alu(nfp_prog, reg.areg, reg.breg, 0, defer, reg.dst_lmextn,
+ reg.src_lmextn);
+}
+
+static void
__emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
enum immed_width width, bool invert,
enum immed_shift shift, bool wr_both,
@@ -1137,7 +1139,7 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
unsigned int size, unsigned int ptr_off, u8 gpr, u8 ptr_gpr,
bool clr_gpr, lmem_step step)
{
- s32 off = nfp_prog->stack_depth + meta->insn.off + ptr_off;
+ s32 off = nfp_prog->stack_frame_depth + meta->insn.off + ptr_off;
bool first = true, last;
bool needs_inc = false;
swreg stack_off_reg;
@@ -1146,7 +1148,8 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
bool lm3 = true;
int ret;
- if (meta->ptr_not_const) {
+ if (meta->ptr_not_const ||
+ meta->flags & FLAG_INSN_PTR_CALLER_STACK_FRAME) {
/* Use of the last encountered ptr_off is OK, they all have
* the same alignment. Depend on low bits of value being
* discarded when written to LMaddr register.
@@ -1695,7 +1698,7 @@ map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
s64 lm_off;
/* We only have to reload LM0 if the key is not at start of stack */
- lm_off = nfp_prog->stack_depth;
+ lm_off = nfp_prog->stack_frame_depth;
lm_off += meta->arg2.reg.var_off.value + meta->arg2.reg.off;
load_lm_ptr = meta->arg2.var_off || lm_off;
@@ -1808,10 +1811,10 @@ static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
swreg stack_depth_reg;
stack_depth_reg = ur_load_imm_any(nfp_prog,
- nfp_prog->stack_depth,
+ nfp_prog->stack_frame_depth,
stack_imm(nfp_prog));
- emit_alu(nfp_prog, reg_both(dst),
- stack_reg(nfp_prog), ALU_OP_ADD, stack_depth_reg);
+ emit_alu(nfp_prog, reg_both(dst), stack_reg(nfp_prog),
+ ALU_OP_ADD, stack_depth_reg);
wrp_immed(nfp_prog, reg_both(dst + 1), 0);
} else {
wrp_reg_mov(nfp_prog, dst, src);
@@ -3081,7 +3084,93 @@ static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE);
}
-static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+static int
+bpf_to_bpf_call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ u32 ret_tgt, stack_depth, offset_br;
+ swreg tmp_reg;
+
+ stack_depth = round_up(nfp_prog->stack_frame_depth, STACK_FRAME_ALIGN);
+ /* Space for saving the return address is accounted for by the callee,
+ * so stack_depth can be zero for the main function.
+ */
+ if (stack_depth) {
+ tmp_reg = ur_load_imm_any(nfp_prog, stack_depth,
+ stack_imm(nfp_prog));
+ emit_alu(nfp_prog, stack_reg(nfp_prog),
+ stack_reg(nfp_prog), ALU_OP_ADD, tmp_reg);
+ emit_csr_wr(nfp_prog, stack_reg(nfp_prog),
+ NFP_CSR_ACT_LM_ADDR0);
+ }
+
+ /* Two cases for jumping to the callee:
+ *
+ * - If callee uses and needs to save R6~R9 then:
+ * 1. Put the start offset of the callee into imm_b(). This will
+ * require a fixup step, as we do not necessarily know this
+ * address yet.
+ * 2. Put the return address from the callee to the caller into
+ * register ret_reg().
+ * 3. (After defer slots are consumed) Jump to the subroutine that
+ * pushes the registers to the stack.
+ * The subroutine acts as a trampoline, and returns to the address in
+ * imm_b(), i.e. jumps to the callee.
+ *
+ * - If callee does not need to save R6~R9 then just load return
+ * address to the caller in ret_reg(), and jump to the callee
+ * directly.
+ *
+ * Using ret_reg() to pass the return address to the callee is set here
+ * as a convention. The callee can then push this address onto its
+ * stack frame in its prologue. The advantages of passing the return
+ * address through ret_reg(), instead of pushing it to the stack right
+ * here, are the following:
+ * - It looks cleaner.
+ * - If the called function is called multiple times, we get a lower
+ * program size.
+ * - We save the two no-op instructions that would otherwise have to be
+ * added just before the emit_br() when stack depth is not null.
+ * - If we ever find a register to hold the return address during the
+ * whole execution of the callee, we will not have to push the return
+ * address to the stack for leaf functions.
+ */
+ if (!meta->jmp_dst) {
+ pr_err("BUG: BPF-to-BPF call has no destination recorded\n");
+ return -ELOOP;
+ }
+ if (nfp_prog->subprog[meta->jmp_dst->subprog_idx].needs_reg_push) {
+ ret_tgt = nfp_prog_current_offset(nfp_prog) + 3;
+ emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2,
+ RELO_BR_GO_CALL_PUSH_REGS);
+ offset_br = nfp_prog_current_offset(nfp_prog);
+ wrp_immed_relo(nfp_prog, imm_b(nfp_prog), 0, RELO_IMMED_REL);
+ } else {
+ ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;
+ emit_br(nfp_prog, BR_UNC, meta->n + 1 + meta->insn.imm, 1);
+ offset_br = nfp_prog_current_offset(nfp_prog);
+ }
+ wrp_immed_relo(nfp_prog, ret_reg(nfp_prog), ret_tgt, RELO_IMMED_REL);
+
+ if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
+ return -EINVAL;
+
+ if (stack_depth) {
+ tmp_reg = ur_load_imm_any(nfp_prog, stack_depth,
+ stack_imm(nfp_prog));
+ emit_alu(nfp_prog, stack_reg(nfp_prog),
+ stack_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
+ emit_csr_wr(nfp_prog, stack_reg(nfp_prog),
+ NFP_CSR_ACT_LM_ADDR0);
+ wrp_nops(nfp_prog, 3);
+ }
+
+ meta->num_insns_after_br = nfp_prog_current_offset(nfp_prog);
+ meta->num_insns_after_br -= offset_br;
+
+ return 0;
+}
+
+static int helper_call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
switch (meta->insn.imm) {
case BPF_FUNC_xdp_adjust_head:
@@ -3102,6 +3191,19 @@ static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
}
}
+static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ if (is_mbpf_pseudo_call(meta))
+ return bpf_to_bpf_call(nfp_prog, meta);
+ else
+ return helper_call(nfp_prog, meta);
+}
+
+static bool nfp_is_main_function(struct nfp_insn_meta *meta)
+{
+ return meta->subprog_idx == 0;
+}
+
static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 0, RELO_BR_GO_OUT);
@@ -3109,6 +3211,39 @@ static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return 0;
}
+static int
+nfp_subprog_epilogue(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ if (nfp_prog->subprog[meta->subprog_idx].needs_reg_push) {
+ /* Pop R6~R9 from the stack via the related subroutine.
+ * We loaded the return address to the caller into ret_reg().
+ * This means that the subroutine does not come back here, we
+ * make it jump back to the subprogram caller directly!
+ */
+ emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 1,
+ RELO_BR_GO_CALL_POP_REGS);
+ /* Pop return address from the stack. */
+ wrp_mov(nfp_prog, ret_reg(nfp_prog), reg_lm(0, 0));
+ } else {
+ /* Pop return address from the stack. */
+ wrp_mov(nfp_prog, ret_reg(nfp_prog), reg_lm(0, 0));
+ /* Jump back to caller if no callee-saved registers were used
+ * by the subprogram.
+ */
+ emit_rtn(nfp_prog, ret_reg(nfp_prog), 0);
+ }
+
+ return 0;
+}
+
+static int jmp_exit(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ if (nfp_is_main_function(meta))
+ return goto_out(nfp_prog, meta);
+ else
+ return nfp_subprog_epilogue(nfp_prog, meta);
+}
+
static const instr_cb_t instr_cb[256] = {
[BPF_ALU64 | BPF_MOV | BPF_X] = mov_reg64,
[BPF_ALU64 | BPF_MOV | BPF_K] = mov_imm64,
@@ -3197,36 +3332,66 @@ static const instr_cb_t instr_cb[256] = {
[BPF_JMP | BPF_JSET | BPF_X] = jset_reg,
[BPF_JMP | BPF_JNE | BPF_X] = jne_reg,
[BPF_JMP | BPF_CALL] = call,
- [BPF_JMP | BPF_EXIT] = goto_out,
+ [BPF_JMP | BPF_EXIT] = jmp_exit,
};
/* --- Assembler logic --- */
+static int
+nfp_fixup_immed_relo(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ struct nfp_insn_meta *jmp_dst, u32 br_idx)
+{
+ if (immed_get_value(nfp_prog->prog[br_idx + 1])) {
+ pr_err("BUG: failed to fix up callee register saving\n");
+ return -EINVAL;
+ }
+
+ immed_set_value(&nfp_prog->prog[br_idx + 1], jmp_dst->off);
+
+ return 0;
+}
+
static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
{
struct nfp_insn_meta *meta, *jmp_dst;
u32 idx, br_idx;
+ int err;
list_for_each_entry(meta, &nfp_prog->insns, l) {
if (meta->skip)
continue;
- if (meta->insn.code == (BPF_JMP | BPF_CALL))
- continue;
if (BPF_CLASS(meta->insn.code) != BPF_JMP)
continue;
+ if (meta->insn.code == (BPF_JMP | BPF_EXIT) &&
+ !nfp_is_main_function(meta))
+ continue;
+ if (is_mbpf_helper_call(meta))
+ continue;
if (list_is_last(&meta->l, &nfp_prog->insns))
br_idx = nfp_prog->last_bpf_off;
else
br_idx = list_next_entry(meta, l)->off - 1;
+ /* For a BPF-to-BPF function call, a stack adjustment sequence is
+ * generated after the return instruction. Therefore, we must
+ * subtract the length of this sequence so that br_idx points
+ * to where the "branch" NFP instruction is expected to be.
+ */
+ if (is_mbpf_pseudo_call(meta))
+ br_idx -= meta->num_insns_after_br;
+
if (!nfp_is_br(nfp_prog->prog[br_idx])) {
pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n",
br_idx, meta->insn.code, nfp_prog->prog[br_idx]);
return -ELOOP;
}
+
+ if (meta->insn.code == (BPF_JMP | BPF_EXIT))
+ continue;
+
/* Leave special branches for later */
if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) !=
- RELO_BR_REL)
+ RELO_BR_REL && !is_mbpf_pseudo_call(meta))
continue;
if (!meta->jmp_dst) {
@@ -3241,6 +3406,18 @@ static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
return -ELOOP;
}
+ if (is_mbpf_pseudo_call(meta) &&
+ nfp_prog->subprog[jmp_dst->subprog_idx].needs_reg_push) {
+ err = nfp_fixup_immed_relo(nfp_prog, meta,
+ jmp_dst, br_idx);
+ if (err)
+ return err;
+ }
+
+ if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) !=
+ RELO_BR_REL)
+ continue;
+
for (idx = meta->off; idx <= br_idx; idx++) {
if (!nfp_is_br(nfp_prog->prog[idx]))
continue;
@@ -3258,6 +3435,27 @@ static void nfp_intro(struct nfp_prog *nfp_prog)
plen_reg(nfp_prog), ALU_OP_AND, pv_len(nfp_prog));
}
+static void
+nfp_subprog_prologue(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ /* Save return address into the stack. */
+ wrp_mov(nfp_prog, reg_lm(0, 0), ret_reg(nfp_prog));
+}
+
+static void
+nfp_start_subprog(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ unsigned int depth = nfp_prog->subprog[meta->subprog_idx].stack_depth;
+
+ nfp_prog->stack_frame_depth = round_up(depth, 4);
+ nfp_subprog_prologue(nfp_prog, meta);
+}
+
+bool nfp_is_subprog_start(struct nfp_insn_meta *meta)
+{
+ return meta->flags & FLAG_INSN_IS_SUBPROG_START;
+}
+
static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
{
/* TC direct-action mode:
@@ -3348,6 +3546,67 @@ static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
}
+static bool nfp_prog_needs_callee_reg_save(struct nfp_prog *nfp_prog)
+{
+ unsigned int idx;
+
+ for (idx = 1; idx < nfp_prog->subprog_cnt; idx++)
+ if (nfp_prog->subprog[idx].needs_reg_push)
+ return true;
+
+ return false;
+}
+
+static void nfp_push_callee_registers(struct nfp_prog *nfp_prog)
+{
+ u8 reg;
+
+ /* Subroutine: Save all callee saved registers (R6 ~ R9).
+ * imm_b() holds the return address.
+ */
+ nfp_prog->tgt_call_push_regs = nfp_prog_current_offset(nfp_prog);
+ for (reg = BPF_REG_6; reg <= BPF_REG_9; reg++) {
+ u8 adj = (reg - BPF_REG_0) * 2;
+ u8 idx = (reg - BPF_REG_6) * 2;
+
+ /* The first slot in the stack frame is used to push the return
+ * address in bpf_to_bpf_call(), start just after.
+ */
+ wrp_mov(nfp_prog, reg_lm(0, 1 + idx), reg_b(adj));
+
+ if (reg == BPF_REG_8)
+ /* Prepare to jump back, last 3 insns use defer slots */
+ emit_rtn(nfp_prog, imm_b(nfp_prog), 3);
+
+ wrp_mov(nfp_prog, reg_lm(0, 1 + idx + 1), reg_b(adj + 1));
+ }
+}
+
+static void nfp_pop_callee_registers(struct nfp_prog *nfp_prog)
+{
+ u8 reg;
+
+ /* Subroutine: Restore all callee saved registers (R6 ~ R9).
+ * ret_reg() holds the return address.
+ */
+ nfp_prog->tgt_call_pop_regs = nfp_prog_current_offset(nfp_prog);
+ for (reg = BPF_REG_6; reg <= BPF_REG_9; reg++) {
+ u8 adj = (reg - BPF_REG_0) * 2;
+ u8 idx = (reg - BPF_REG_6) * 2;
+
+ /* The first slot in the stack frame holds the return address,
+ * start popping just after that.
+ */
+ wrp_mov(nfp_prog, reg_both(adj), reg_lm(0, 1 + idx));
+
+ if (reg == BPF_REG_8)
+ /* Prepare to jump back, last 3 insns use defer slots */
+ emit_rtn(nfp_prog, ret_reg(nfp_prog), 3);
+
+ wrp_mov(nfp_prog, reg_both(adj + 1), reg_lm(0, 1 + idx + 1));
+ }
+}
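The push and pop subroutines above rely on one shared local-memory frame layout: slot 0 holds the return address saved by the subprogram prologue, and each 64-bit BPF register R6~R9 takes two consecutive 32-bit slots starting at slot 1. A small standalone sketch of that slot mapping (illustration only):

/* Illustration: LM slot indices used by the push/pop subroutines.
 * Slot 0 = return address; R6..R9 occupy slots 1..8 as (low, high) pairs.
 */
static unsigned int lm_slot_lo(unsigned int bpf_reg)	/* bpf_reg in 6..9 */
{
	return 1 + (bpf_reg - 6) * 2;
}

static unsigned int lm_slot_hi(unsigned int bpf_reg)
{
	return lm_slot_lo(bpf_reg) + 1;		/* e.g. R9 -> slots 7 and 8 */
}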
+
static void nfp_outro(struct nfp_prog *nfp_prog)
{
switch (nfp_prog->type) {
@@ -3360,13 +3619,23 @@ static void nfp_outro(struct nfp_prog *nfp_prog)
default:
WARN_ON(1);
}
+
+ if (!nfp_prog_needs_callee_reg_save(nfp_prog))
+ return;
+
+ nfp_push_callee_registers(nfp_prog);
+ nfp_pop_callee_registers(nfp_prog);
}
static int nfp_translate(struct nfp_prog *nfp_prog)
{
struct nfp_insn_meta *meta;
+ unsigned int depth;
int err;
+ depth = nfp_prog->subprog[0].stack_depth;
+ nfp_prog->stack_frame_depth = round_up(depth, 4);
+
nfp_intro(nfp_prog);
if (nfp_prog->error)
return nfp_prog->error;
@@ -3376,6 +3645,12 @@ static int nfp_translate(struct nfp_prog *nfp_prog)
meta->off = nfp_prog_current_offset(nfp_prog);
+ if (nfp_is_subprog_start(meta)) {
+ nfp_start_subprog(nfp_prog, meta);
+ if (nfp_prog->error)
+ return nfp_prog->error;
+ }
+
if (meta->skip) {
nfp_prog->n_translated++;
continue;
@@ -4018,20 +4293,35 @@ void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt)
/* Another pass to record jump information. */
list_for_each_entry(meta, &nfp_prog->insns, l) {
+ struct nfp_insn_meta *dst_meta;
u64 code = meta->insn.code;
+ unsigned int dst_idx;
+ bool pseudo_call;
+
+ if (BPF_CLASS(code) != BPF_JMP)
+ continue;
+ if (BPF_OP(code) == BPF_EXIT)
+ continue;
+ if (is_mbpf_helper_call(meta))
+ continue;
- if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_EXIT &&
- BPF_OP(code) != BPF_CALL) {
- struct nfp_insn_meta *dst_meta;
- unsigned short dst_indx;
+ /* If opcode is BPF_CALL at this point, this can only be a
+ * BPF-to-BPF call (a.k.a. pseudo call).
+ */
+ pseudo_call = BPF_OP(code) == BPF_CALL;
- dst_indx = meta->n + 1 + meta->insn.off;
- dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_indx,
- cnt);
+ if (pseudo_call)
+ dst_idx = meta->n + 1 + meta->insn.imm;
+ else
+ dst_idx = meta->n + 1 + meta->insn.off;
- meta->jmp_dst = dst_meta;
- dst_meta->flags |= FLAG_INSN_IS_JUMP_DST;
- }
+ dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_idx, cnt);
+
+ if (pseudo_call)
+ dst_meta->flags |= FLAG_INSN_IS_SUBPROG_START;
+
+ dst_meta->flags |= FLAG_INSN_IS_JUMP_DST;
+ meta->jmp_dst = dst_meta;
}
}
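The rewritten pass computes the destination index the same way for both kinds of instruction; only the displacement field differs: pseudo calls carry it in insn.imm, all other jumps in insn.off, and in both cases the target is relative to the following instruction. A one-line sketch of that computation (illustration only):

/* Illustration: destination index of a BPF jump or pseudo call at index n */
static unsigned int bpf_jump_dst(unsigned int n, int off_or_imm)
{
	return n + 1 + off_or_imm;	/* displacement is relative to n + 1 */
}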
@@ -4054,6 +4344,7 @@ void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
for (i = 0; i < nfp_prog->prog_len; i++) {
enum nfp_relo_type special;
u32 val;
+ u16 off;
special = FIELD_GET(OP_RELO_TYPE, prog[i]);
switch (special) {
@@ -4070,6 +4361,24 @@ void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
br_set_offset(&prog[i],
nfp_prog->tgt_abort + bv->start_off);
break;
+ case RELO_BR_GO_CALL_PUSH_REGS:
+ if (!nfp_prog->tgt_call_push_regs) {
+ pr_err("BUG: failed to detect subprogram registers needs\n");
+ err = -EINVAL;
+ goto err_free_prog;
+ }
+ off = nfp_prog->tgt_call_push_regs + bv->start_off;
+ br_set_offset(&prog[i], off);
+ break;
+ case RELO_BR_GO_CALL_POP_REGS:
+ if (!nfp_prog->tgt_call_pop_regs) {
+ pr_err("BUG: failed to detect subprogram registers needs\n");
+ err = -EINVAL;
+ goto err_free_prog;
+ }
+ off = nfp_prog->tgt_call_pop_regs + bv->start_off;
+ br_set_offset(&prog[i], off);
+ break;
case RELO_BR_NEXT_PKT:
br_set_offset(&prog[i], bv->tgt_done);
break;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index 970af07f4656..6243af0ab025 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <net/pkt_cls.h>
@@ -54,11 +24,14 @@ const struct rhashtable_params nfp_bpf_maps_neutral_params = {
static bool nfp_net_ebpf_capable(struct nfp_net *nn)
{
#ifdef __LITTLE_ENDIAN
- if (nn->cap & NFP_NET_CFG_CTRL_BPF &&
- nn_readb(nn, NFP_NET_CFG_BPF_ABI) == NFP_NET_BPF_ABI)
- return true;
-#endif
+ struct nfp_app_bpf *bpf = nn->app->priv;
+
+ return nn->cap & NFP_NET_CFG_CTRL_BPF &&
+ bpf->abi_version &&
+ nn_readb(nn, NFP_NET_CFG_BPF_ABI) == bpf->abi_version;
+#else
return false;
+#endif
}
static int
@@ -342,6 +315,26 @@ nfp_bpf_parse_cap_adjust_tail(struct nfp_app_bpf *bpf, void __iomem *value,
return 0;
}
+static int
+nfp_bpf_parse_cap_abi_version(struct nfp_app_bpf *bpf, void __iomem *value,
+ u32 length)
+{
+ if (length < 4) {
+ nfp_err(bpf->app->cpp, "truncated ABI version TLV: %d\n",
+ length);
+ return -EINVAL;
+ }
+
+ bpf->abi_version = readl(value);
+ if (bpf->abi_version < 2 || bpf->abi_version > 3) {
+ nfp_warn(bpf->app->cpp, "unsupported BPF ABI version: %d\n",
+ bpf->abi_version);
+ bpf->abi_version = 0;
+ }
+
+ return 0;
+}
+
static int nfp_bpf_parse_capabilities(struct nfp_app *app)
{
struct nfp_cpp *cpp = app->pf->cpp;
@@ -393,6 +386,11 @@ static int nfp_bpf_parse_capabilities(struct nfp_app *app)
length))
goto err_release_free;
break;
+ case NFP_BPF_CAP_TYPE_ABI_VERSION:
+ if (nfp_bpf_parse_cap_abi_version(app->priv, value,
+ length))
+ goto err_release_free;
+ break;
default:
nfp_dbg(cpp, "unknown BPF capability: %d\n", type);
break;
@@ -414,6 +412,11 @@ err_release_free:
return -EINVAL;
}
+static void nfp_bpf_init_capabilities(struct nfp_app_bpf *bpf)
+{
+ bpf->abi_version = 2; /* Original BPF ABI version */
+}
+
static int nfp_bpf_ndo_init(struct nfp_app *app, struct net_device *netdev)
{
struct nfp_app_bpf *bpf = app->priv;
@@ -447,10 +450,21 @@ static int nfp_bpf_init(struct nfp_app *app)
if (err)
goto err_free_bpf;
+ nfp_bpf_init_capabilities(bpf);
+
err = nfp_bpf_parse_capabilities(app);
if (err)
goto err_free_neutral_maps;
+ if (bpf->abi_version < 3) {
+ bpf->cmsg_key_sz = CMSG_MAP_KEY_LW * 4;
+ bpf->cmsg_val_sz = CMSG_MAP_VALUE_LW * 4;
+ } else {
+ bpf->cmsg_key_sz = bpf->maps.max_key_sz;
+ bpf->cmsg_val_sz = bpf->maps.max_val_sz;
+ app->ctrl_mtu = nfp_bpf_ctrl_cmsg_mtu(bpf);
+ }
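The branch above fixes the control-message element geometry per ABI version: before ABI 3 the key and value are each padded to CMSG_MAP_KEY_LW/CMSG_MAP_VALUE_LW 32-bit words (64 bytes), while from ABI 3 on they track the firmware-advertised maxima and the control vNIC MTU is bumped so a one-element request and reply still fit. A short sketch of the resulting sizes (illustration only; the maxima are hypothetical inputs):

/* Illustration: cmsg key/value element sizes in bytes, by ABI version. */
static void cmsg_elem_sizes(unsigned int abi_version,
			    unsigned int max_key_sz, unsigned int max_val_sz,
			    unsigned int *key_sz, unsigned int *val_sz)
{
	if (abi_version < 3) {
		*key_sz = 16 * 4;	/* CMSG_MAP_KEY_LW   * 4 == 64 */
		*val_sz = 16 * 4;	/* CMSG_MAP_VALUE_LW * 4 == 64 */
	} else {
		*key_sz = max_key_sz;	/* firmware-advertised maximum */
		*val_sz = max_val_sz;	/* firmware-advertised maximum */
	}
}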
+
bpf->bpf_dev = bpf_offload_dev_create();
err = PTR_ERR_OR_ZERO(bpf->bpf_dev);
if (err)
@@ -465,11 +479,6 @@ err_free_bpf:
return err;
}
-static void nfp_check_rhashtable_empty(void *ptr, void *arg)
-{
- WARN_ON_ONCE(1);
-}
-
static void nfp_bpf_clean(struct nfp_app *app)
{
struct nfp_app_bpf *bpf = app->priv;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index dbd00982fd2b..7f591d71ab28 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2016-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2016-2018 Netronome Systems, Inc. */
#ifndef __NFP_BPF_H__
#define __NFP_BPF_H__ 1
@@ -61,6 +31,8 @@ enum nfp_relo_type {
/* internal jumps to parts of the outro */
RELO_BR_GO_OUT,
RELO_BR_GO_ABORT,
+ RELO_BR_GO_CALL_PUSH_REGS,
+ RELO_BR_GO_CALL_POP_REGS,
/* external jumps to fixed addresses */
RELO_BR_NEXT_PKT,
RELO_BR_HELPER,
@@ -104,6 +76,7 @@ enum pkt_vec {
#define imma_a(np) reg_a(STATIC_REG_IMMA)
#define imma_b(np) reg_b(STATIC_REG_IMMA)
#define imm_both(np) reg_both(STATIC_REG_IMM)
+#define ret_reg(np) imm_a(np)
#define NFP_BPF_ABI_FLAGS reg_imm(0)
#define NFP_BPF_ABI_FLAG_MARK 1
@@ -121,12 +94,17 @@ enum pkt_vec {
* @cmsg_replies: received cmsg replies waiting to be consumed
* @cmsg_wq: work queue for waiting for cmsg replies
*
+ * @cmsg_key_sz: size of key in cmsg element array
+ * @cmsg_val_sz: size of value in cmsg element array
+ *
* @map_list: list of offloaded maps
* @maps_in_use: number of currently offloaded maps
* @map_elems_in_use: number of elements allocated to offloaded maps
*
* @maps_neutral: hash table of offload-neutral maps (on pointer)
*
+ * @abi_version: global BPF ABI version
+ *
* @adjust_head: adjust head capability
* @adjust_head.flags: extra flags for adjust head
* @adjust_head.off_min: minimal packet offset within buffer required
@@ -164,12 +142,17 @@ struct nfp_app_bpf {
struct sk_buff_head cmsg_replies;
struct wait_queue_head cmsg_wq;
+ unsigned int cmsg_key_sz;
+ unsigned int cmsg_val_sz;
+
struct list_head map_list;
unsigned int maps_in_use;
unsigned int map_elems_in_use;
struct rhashtable maps_neutral;
+ u32 abi_version;
+
struct nfp_bpf_cap_adjust_head {
u32 flags;
int off_min;
@@ -206,6 +189,11 @@ enum nfp_bpf_map_use {
NFP_MAP_USE_ATOMIC_CNT,
};
+struct nfp_bpf_map_word {
+ unsigned char type :4;
+ unsigned char non_zero_update :1;
+};
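Each 32-bit word of an offloaded map value is now described by a small record rather than a bare enum: a 4-bit use type plus a flag noting that the word was ever updated to a value that is not byte-swap neutral. One record covers each value word, so lookups index the array with off / 4. A standalone mirror of that bookkeeping (illustration only, not the driver's definitions):

/* Illustration: per-word bookkeeping in the spirit of nfp_bpf_map_word */
struct word_info {
	unsigned char type :4;		   /* recorded use of this word */
	unsigned char non_zero_update :1;  /* ever written non-neutral data */
};

/* index of the record describing map-value byte offset `off` */
static unsigned int word_index(unsigned int off)
{
	return off / 4;
}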
+
/**
* struct nfp_bpf_map - private per-map data attached to BPF maps for offload
* @offmap: pointer to the offloaded BPF map
@@ -219,7 +207,7 @@ struct nfp_bpf_map {
struct nfp_app_bpf *bpf;
u32 tid;
struct list_head l;
- enum nfp_bpf_map_use use_map[];
+ struct nfp_bpf_map_word use_map[];
};
struct nfp_bpf_neutral_map {
@@ -252,7 +240,9 @@ struct nfp_bpf_reg_state {
bool var_off;
};
-#define FLAG_INSN_IS_JUMP_DST BIT(0)
+#define FLAG_INSN_IS_JUMP_DST BIT(0)
+#define FLAG_INSN_IS_SUBPROG_START BIT(1)
+#define FLAG_INSN_PTR_CALLER_STACK_FRAME BIT(2)
/**
* struct nfp_insn_meta - BPF instruction wrapper
@@ -269,6 +259,7 @@ struct nfp_bpf_reg_state {
* @xadd_maybe_16bit: 16bit immediate is possible
* @jmp_dst: destination info for jump instructions
* @jump_neg_op: jump instruction has inverted immediate, use ADD instead of SUB
+ * @num_insns_after_br: number of insns following a branch jump, used for fixup
* @func_id: function id for call instructions
* @arg1: arg1 for call instructions
* @arg2: arg2 for call instructions
@@ -279,6 +270,7 @@ struct nfp_bpf_reg_state {
* @off: index of first generated machine instruction (in nfp_prog.prog)
* @n: eBPF instruction number
* @flags: eBPF instruction extra optimization flags
+ * @subprog_idx: index of subprogram to which the instruction belongs
* @skip: skip this instruction (optimized out)
* @double_cb: callback for second part of the instruction
* @l: link on nfp_prog->insns list
@@ -304,6 +296,7 @@ struct nfp_insn_meta {
struct {
struct nfp_insn_meta *jmp_dst;
bool jump_neg_op;
+ u32 num_insns_after_br; /* only for BPF-to-BPF calls */
};
/* function calls */
struct {
@@ -325,6 +318,7 @@ struct nfp_insn_meta {
unsigned int off;
unsigned short n;
unsigned short flags;
+ unsigned short subprog_idx;
bool skip;
instr_cb_t double_cb;
@@ -413,23 +407,56 @@ static inline bool is_mbpf_div(const struct nfp_insn_meta *meta)
return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_DIV;
}
+static inline bool is_mbpf_helper_call(const struct nfp_insn_meta *meta)
+{
+ struct bpf_insn insn = meta->insn;
+
+ return insn.code == (BPF_JMP | BPF_CALL) &&
+ insn.src_reg != BPF_PSEUDO_CALL;
+}
+
+static inline bool is_mbpf_pseudo_call(const struct nfp_insn_meta *meta)
+{
+ struct bpf_insn insn = meta->insn;
+
+ return insn.code == (BPF_JMP | BPF_CALL) &&
+ insn.src_reg == BPF_PSEUDO_CALL;
+}
+
+#define STACK_FRAME_ALIGN 64
+
+/**
+ * struct nfp_bpf_subprog_info - nfp BPF sub-program (a.k.a. function) info
+ * @stack_depth: maximum stack depth used by this sub-program
+ * @needs_reg_push: whether sub-program uses callee-saved registers
+ */
+struct nfp_bpf_subprog_info {
+ u16 stack_depth;
+ u8 needs_reg_push : 1;
+};
+
/**
* struct nfp_prog - nfp BPF program
* @bpf: backpointer to the bpf app priv structure
* @prog: machine code
* @prog_len: number of valid instructions in @prog array
* @__prog_alloc_len: alloc size of @prog array
+ * @stack_size: total amount of stack used
* @verifier_meta: temporary storage for verifier's insn meta
* @type: BPF program type
* @last_bpf_off: address of the last instruction translated from BPF
* @tgt_out: jump target for normal exit
* @tgt_abort: jump target for abort (e.g. access outside of packet buffer)
+ * @tgt_call_push_regs: jump target for subroutine for saving R6~R9 to stack
+ * @tgt_call_pop_regs: jump target for subroutine used for restoring R6~R9
* @n_translated: number of successfully translated instructions (for errors)
* @error: error code if something went wrong
- * @stack_depth: max stack depth from the verifier
+ * @stack_frame_depth: max stack depth for current frame
* @adjust_head_location: if program has single adjust head call - the insn no.
* @map_records_cnt: the number of map pointers recorded for this prog
+ * @subprog_cnt: number of sub-programs, including main function
* @map_records: the map record pointers from bpf->maps_neutral
+ * @subprog: pointer to an array of objects holding info about sub-programs
* @insns: list of BPF instruction wrappers (struct nfp_insn_meta)
*/
struct nfp_prog {
@@ -439,6 +466,8 @@ struct nfp_prog {
unsigned int prog_len;
unsigned int __prog_alloc_len;
+ unsigned int stack_size;
+
struct nfp_insn_meta *verifier_meta;
enum bpf_prog_type type;
@@ -446,15 +475,19 @@ struct nfp_prog {
unsigned int last_bpf_off;
unsigned int tgt_out;
unsigned int tgt_abort;
+ unsigned int tgt_call_push_regs;
+ unsigned int tgt_call_pop_regs;
unsigned int n_translated;
int error;
- unsigned int stack_depth;
+ unsigned int stack_frame_depth;
unsigned int adjust_head_location;
unsigned int map_records_cnt;
+ unsigned int subprog_cnt;
struct nfp_bpf_neutral_map **map_records;
+ struct nfp_bpf_subprog_info *subprog;
struct list_head insns;
};
@@ -471,6 +504,7 @@ struct nfp_bpf_vnic {
unsigned int tgt_done;
};
+bool nfp_is_subprog_start(struct nfp_insn_meta *meta);
void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt);
int nfp_bpf_jit(struct nfp_prog *prog);
bool nfp_bpf_supported_opcode(u8 code);
@@ -492,6 +526,7 @@ nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv);
+unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf);
long long int
nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map);
void
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index 1ccd6371a15b..ba8ceedcf6a2 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2016-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2016-2018 Netronome Systems, Inc. */
/*
* nfp_net_offload.c
@@ -208,6 +178,8 @@ static void nfp_prog_free(struct nfp_prog *nfp_prog)
{
struct nfp_insn_meta *meta, *tmp;
+ kfree(nfp_prog->subprog);
+
list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
list_del(&meta->l);
kfree(meta);
@@ -250,18 +222,9 @@ err_free:
static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog)
{
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
- unsigned int stack_size;
unsigned int max_instr;
int err;
- stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
- if (prog->aux->stack_depth > stack_size) {
- nn_info(nn, "stack too large: program %dB > FW stack %dB\n",
- prog->aux->stack_depth, stack_size);
- return -EOPNOTSUPP;
- }
- nfp_prog->stack_depth = round_up(prog->aux->stack_depth, 4);
-
max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);
@@ -299,10 +262,25 @@ static void nfp_map_bpf_byte_swap(struct nfp_bpf_map *nfp_map, void *value)
unsigned int i;
for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++)
- if (nfp_map->use_map[i] == NFP_MAP_USE_ATOMIC_CNT)
+ if (nfp_map->use_map[i].type == NFP_MAP_USE_ATOMIC_CNT)
word[i] = (__force u32)cpu_to_be32(word[i]);
}
+/* Mark the value as unsafely initialized in case it becomes atomic later
+ * and we didn't byte swap something that is not byte-swap neutral.
+ */
+static void
+nfp_map_bpf_byte_swap_record(struct nfp_bpf_map *nfp_map, void *value)
+{
+ u32 *word = value;
+ unsigned int i;
+
+ for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++)
+ if (nfp_map->use_map[i].type == NFP_MAP_UNUSED &&
+ word[i] != (__force u32)cpu_to_be32(word[i]))
+ nfp_map->use_map[i].non_zero_update = 1;
+}
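On a little-endian host the test above flags any word whose byte pattern is not symmetric, because a 32-bit value equals its big-endian form only when byte 0 matches byte 3 and byte 1 matches byte 2 (0x00000000 and 0xAABBBBAA pass, 0x00000001 does not). A tiny standalone version of the same check (illustration only):

#include <stdbool.h>
#include <stdint.h>

/* Illustration: a 32-bit word is "byte swap neutral" when it reads the same
 * in either endianness, i.e. it equals its own byte swap.
 */
static bool byte_swap_neutral(uint32_t w)
{
	return w == __builtin_bswap32(w);	/* GCC/Clang builtin */
}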
+
static int
nfp_bpf_map_lookup_entry(struct bpf_offloaded_map *offmap,
void *key, void *value)
@@ -322,6 +300,7 @@ nfp_bpf_map_update_entry(struct bpf_offloaded_map *offmap,
void *key, void *value, u64 flags)
{
nfp_map_bpf_byte_swap(offmap->dev_priv, value);
+ nfp_map_bpf_byte_swap_record(offmap->dev_priv, value);
return nfp_bpf_ctrl_update_entry(offmap, key, value, flags);
}
@@ -510,7 +489,7 @@ nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
- unsigned int max_mtu;
+ unsigned int max_mtu, max_stack, max_prog_len;
dma_addr_t dma_addr;
void *img;
int err;
@@ -521,6 +500,18 @@ nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
return -EOPNOTSUPP;
}
+ max_stack = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
+ if (nfp_prog->stack_size > max_stack) {
+ NL_SET_ERR_MSG_MOD(extack, "stack too large");
+ return -EOPNOTSUPP;
+ }
+
+ max_prog_len = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
+ if (nfp_prog->prog_len > max_prog_len) {
+ NL_SET_ERR_MSG_MOD(extack, "program too long");
+ return -EOPNOTSUPP;
+ }
+
img = nfp_bpf_relo_for_vnic(nfp_prog, nn->app_priv);
if (IS_ERR(img))
return PTR_ERR(img);
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index a6e9248669e1..99f977bfd8cc 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -1,43 +1,15 @@
-/*
- * Copyright (C) 2016-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2016-2018 Netronome Systems, Inc. */
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/kernel.h>
+#include <linux/netdevice.h>
#include <linux/pkt_cls.h>
#include "../nfp_app.h"
#include "../nfp_main.h"
+#include "../nfp_net.h"
#include "fw.h"
#include "main.h"
@@ -108,6 +80,46 @@ exit_set_location:
nfp_prog->adjust_head_location = location;
}
+static bool nfp_bpf_map_update_value_ok(struct bpf_verifier_env *env)
+{
+ const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1;
+ const struct bpf_reg_state *reg3 = cur_regs(env) + BPF_REG_3;
+ struct bpf_offloaded_map *offmap;
+ struct bpf_func_state *state;
+ struct nfp_bpf_map *nfp_map;
+ int off, i;
+
+ state = env->cur_state->frame[reg3->frameno];
+
+ /* We need to record each time update happens with non-zero words,
+ * in case such a word is used in atomic operations.
+ * We implicitly depend on nfp_bpf_stack_arg_ok(reg3) having run before.
+ */
+
+ offmap = map_to_offmap(reg1->map_ptr);
+ nfp_map = offmap->dev_priv;
+ off = reg3->off + reg3->var_off.value;
+
+ for (i = 0; i < offmap->map.value_size; i++) {
+ struct bpf_stack_state *stack_entry;
+ unsigned int soff;
+
+ soff = -(off + i) - 1;
+ stack_entry = &state->stack[soff / BPF_REG_SIZE];
+ if (stack_entry->slot_type[soff % BPF_REG_SIZE] == STACK_ZERO)
+ continue;
+
+ if (nfp_map->use_map[i / 4].type == NFP_MAP_USE_ATOMIC_CNT) {
+ pr_vlog(env, "value at offset %d/%d may be non-zero, bpf_map_update_elem() is required to initialize atomic counters to zero to avoid offload endian issues\n",
+ i, soff);
+ return false;
+ }
+ nfp_map->use_map[i / 4].non_zero_update = 1;
+ }
+
+ return true;
+}
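reg3 points into the BPF stack, so off is negative; byte i of the value sits at soff = -(off + i) - 1, which the verifier state addresses as slot soff / BPF_REG_SIZE, byte soff % BPF_REG_SIZE. A worked example of that arithmetic (illustration only; off = -16 is a hypothetical location):

#include <stdio.h>

#define BPF_REG_SIZE 8	/* verifier stack slot size */

int main(void)
{
	int off = -16;			/* hypothetical: value stored at fp - 16 */

	for (int i = 0; i < 4; i++) {	/* first 32-bit word of the value */
		unsigned int soff = -(off + i) - 1;

		printf("byte %d -> slot %u, byte %u\n",
		       i, soff / BPF_REG_SIZE, soff % BPF_REG_SIZE);
	}
	return 0;	/* prints slots 1/7, 1/6, 1/5, 1/4 */
}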
+
static int
nfp_bpf_stack_arg_ok(const char *fname, struct bpf_verifier_env *env,
const struct bpf_reg_state *reg,
@@ -155,8 +167,9 @@ nfp_bpf_map_call_ok(const char *fname, struct bpf_verifier_env *env,
}
static int
-nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env,
- struct nfp_insn_meta *meta)
+nfp_bpf_check_helper_call(struct nfp_prog *nfp_prog,
+ struct bpf_verifier_env *env,
+ struct nfp_insn_meta *meta)
{
const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1;
const struct bpf_reg_state *reg2 = cur_regs(env) + BPF_REG_2;
@@ -198,7 +211,8 @@ nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env,
bpf->helpers.map_update, reg1) ||
!nfp_bpf_stack_arg_ok("map_update", env, reg2,
meta->func_id ? &meta->arg2 : NULL) ||
- !nfp_bpf_stack_arg_ok("map_update", env, reg3, NULL))
+ !nfp_bpf_stack_arg_ok("map_update", env, reg3, NULL) ||
+ !nfp_bpf_map_update_value_ok(env))
return -EOPNOTSUPP;
break;
@@ -333,6 +347,9 @@ nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
{
s32 old_off, new_off;
+ if (reg->frameno != env->cur_state->curframe)
+ meta->flags |= FLAG_INSN_PTR_CALLER_STACK_FRAME;
+
if (!tnum_is_const(reg->var_off)) {
pr_vlog(env, "variable ptr stack access\n");
return -EINVAL;
@@ -376,15 +393,22 @@ nfp_bpf_map_mark_used_one(struct bpf_verifier_env *env,
struct nfp_bpf_map *nfp_map,
unsigned int off, enum nfp_bpf_map_use use)
{
- if (nfp_map->use_map[off / 4] != NFP_MAP_UNUSED &&
- nfp_map->use_map[off / 4] != use) {
+ if (nfp_map->use_map[off / 4].type != NFP_MAP_UNUSED &&
+ nfp_map->use_map[off / 4].type != use) {
pr_vlog(env, "map value use type conflict %s vs %s off: %u\n",
- nfp_bpf_map_use_name(nfp_map->use_map[off / 4]),
+ nfp_bpf_map_use_name(nfp_map->use_map[off / 4].type),
nfp_bpf_map_use_name(use), off);
return -EOPNOTSUPP;
}
- nfp_map->use_map[off / 4] = use;
+ if (nfp_map->use_map[off / 4].non_zero_update &&
+ use == NFP_MAP_USE_ATOMIC_CNT) {
+ pr_vlog(env, "atomic counter in map value may already be initialized to non-zero value off: %u\n",
+ off);
+ return -EOPNOTSUPP;
+ }
+
+ nfp_map->use_map[off / 4].type = use;
return 0;
}
@@ -620,8 +644,8 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
return -EINVAL;
}
- if (meta->insn.code == (BPF_JMP | BPF_CALL))
- return nfp_bpf_check_call(nfp_prog, env, meta);
+ if (is_mbpf_helper_call(meta))
+ return nfp_bpf_check_helper_call(nfp_prog, env, meta);
if (meta->insn.code == (BPF_JMP | BPF_EXIT))
return nfp_bpf_check_exit(nfp_prog, env);
@@ -640,6 +664,132 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
return 0;
}
+static int
+nfp_assign_subprog_idx_and_regs(struct bpf_verifier_env *env,
+ struct nfp_prog *nfp_prog)
+{
+ struct nfp_insn_meta *meta;
+ int index = 0;
+
+ list_for_each_entry(meta, &nfp_prog->insns, l) {
+ if (nfp_is_subprog_start(meta))
+ index++;
+ meta->subprog_idx = index;
+
+ if (meta->insn.dst_reg >= BPF_REG_6 &&
+ meta->insn.dst_reg <= BPF_REG_9)
+ nfp_prog->subprog[index].needs_reg_push = 1;
+ }
+
+ if (index + 1 != nfp_prog->subprog_cnt) {
+ pr_vlog(env, "BUG: number of processed BPF functions is not consistent (processed %d, expected %d)\n",
+ index + 1, nfp_prog->subprog_cnt);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static unsigned int
+nfp_bpf_get_stack_usage(struct nfp_prog *nfp_prog, unsigned int cnt)
+{
+ struct nfp_insn_meta *meta = nfp_prog_first_meta(nfp_prog);
+ unsigned int max_depth = 0, depth = 0, frame = 0;
+ struct nfp_insn_meta *ret_insn[MAX_CALL_FRAMES];
+ unsigned short frame_depths[MAX_CALL_FRAMES];
+ unsigned short ret_prog[MAX_CALL_FRAMES];
+ unsigned short idx = meta->subprog_idx;
+
+ /* Inspired by check_max_stack_depth() in the kernel verifier.
+ * Starting from the main subprogram, walk all instructions and recursively
+ * walk all callees that a given subprogram can call. Since recursion is
+ * prevented by the kernel verifier, this algorithm only needs a local
+ * stack of MAX_CALL_FRAMES to remember callsites.
+ */
+process_subprog:
+ frame_depths[frame] = nfp_prog->subprog[idx].stack_depth;
+ frame_depths[frame] = round_up(frame_depths[frame], STACK_FRAME_ALIGN);
+ depth += frame_depths[frame];
+ max_depth = max(max_depth, depth);
+
+continue_subprog:
+ for (; meta != nfp_prog_last_meta(nfp_prog) && meta->subprog_idx == idx;
+ meta = nfp_meta_next(meta)) {
+ if (!is_mbpf_pseudo_call(meta))
+ continue;
+
+ /* We found a call to a subprogram. Remember instruction to
+ * return to and subprog id.
+ */
+ ret_insn[frame] = nfp_meta_next(meta);
+ ret_prog[frame] = idx;
+
+ /* Find the callee and start processing it. */
+ meta = nfp_bpf_goto_meta(nfp_prog, meta,
+ meta->n + 1 + meta->insn.imm, cnt);
+ idx = meta->subprog_idx;
+ frame++;
+ goto process_subprog;
+ }
+ /* End of for() loop means the last instruction of the subprog was
+ * reached. If we popped all stack frames, return; otherwise, go on
+ * processing remaining instructions from the caller.
+ */
+ if (frame == 0)
+ return max_depth;
+
+ depth -= frame_depths[frame];
+ frame--;
+ meta = ret_insn[frame];
+ idx = ret_prog[frame];
+ goto continue_subprog;
+}
+
+static int nfp_bpf_finalize(struct bpf_verifier_env *env)
+{
+ struct bpf_subprog_info *info;
+ struct nfp_prog *nfp_prog;
+ unsigned int max_stack;
+ struct nfp_net *nn;
+ int i;
+
+ nfp_prog = env->prog->aux->offload->dev_priv;
+ nfp_prog->subprog_cnt = env->subprog_cnt;
+ nfp_prog->subprog = kcalloc(nfp_prog->subprog_cnt,
+ sizeof(nfp_prog->subprog[0]), GFP_KERNEL);
+ if (!nfp_prog->subprog)
+ return -ENOMEM;
+
+ nfp_assign_subprog_idx_and_regs(env, nfp_prog);
+
+ info = env->subprog_info;
+ for (i = 0; i < nfp_prog->subprog_cnt; i++) {
+ nfp_prog->subprog[i].stack_depth = info[i].stack_depth;
+
+ if (i == 0)
+ continue;
+
+ /* Account for size of return address. */
+ nfp_prog->subprog[i].stack_depth += REG_WIDTH;
+ /* Account for size of saved registers, if necessary. */
+ if (nfp_prog->subprog[i].needs_reg_push)
+ nfp_prog->subprog[i].stack_depth += BPF_REG_SIZE * 4;
+ }
+
+ nn = netdev_priv(env->prog->aux->offload->netdev);
+ max_stack = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
+ nfp_prog->stack_size = nfp_bpf_get_stack_usage(nfp_prog,
+ env->prog->len);
+ if (nfp_prog->stack_size > max_stack) {
+ pr_vlog(env, "stack too large: program %dB > FW stack %dB\n",
+ nfp_prog->stack_size, max_stack);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
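Taken together, nfp_bpf_finalize() and nfp_bpf_get_stack_usage() bound the on-chip stack: every non-main subprogram's verifier depth grows by a return-address slot (plus four saved 64-bit registers when it touches R6~R9), each frame is rounded up to STACK_FRAME_ALIGN, and the maximum is taken over all call chains. A worked numeric example under those rules (illustration only; the depths and the 4-byte return-address slot are assumptions):

#include <stdio.h>

#define STACK_FRAME_ALIGN 64
#define ROUND_UP(x, a) ((((x) + (a) - 1) / (a)) * (a))

int main(void)
{
	/* hypothetical: main uses 40 B, its callee 70 B and saves R6~R9 */
	unsigned int ret_addr_sz = 4;				/* assumed */
	unsigned int callee = ROUND_UP(70 + ret_addr_sz + 4 * 8,
				       STACK_FRAME_ALIGN);	/* 128 */
	unsigned int main_frame = ROUND_UP(40, STACK_FRAME_ALIGN); /* 64 */

	printf("worst-case stack: %u B\n", main_frame + callee);   /* 192 */
	return 0;
}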
+
const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops = {
- .insn_hook = nfp_verify_insn,
+ .insn_hook = nfp_verify_insn,
+ .finalize = nfp_bpf_finalize,
};
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 46ba0cf257c6..244dc261006e 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/bitfield.h>
#include <net/geneve.h>
@@ -429,12 +399,14 @@ nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
switch (off) {
case offsetof(struct iphdr, daddr):
- set_ip_addr->ipv4_dst_mask = mask;
- set_ip_addr->ipv4_dst = exact;
+ set_ip_addr->ipv4_dst_mask |= mask;
+ set_ip_addr->ipv4_dst &= ~mask;
+ set_ip_addr->ipv4_dst |= exact & mask;
break;
case offsetof(struct iphdr, saddr):
- set_ip_addr->ipv4_src_mask = mask;
- set_ip_addr->ipv4_src = exact;
+ set_ip_addr->ipv4_src_mask |= mask;
+ set_ip_addr->ipv4_src &= ~mask;
+ set_ip_addr->ipv4_src |= exact & mask;
break;
default:
return -EOPNOTSUPP;
@@ -448,11 +420,12 @@ nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
}
static void
-nfp_fl_set_ip6_helper(int opcode_tag, int idx, __be32 exact, __be32 mask,
+nfp_fl_set_ip6_helper(int opcode_tag, u8 word, __be32 exact, __be32 mask,
struct nfp_fl_set_ipv6_addr *ip6)
{
- ip6->ipv6[idx % 4].mask = mask;
- ip6->ipv6[idx % 4].exact = exact;
+ ip6->ipv6[word].mask |= mask;
+ ip6->ipv6[word].exact &= ~mask;
+ ip6->ipv6[word].exact |= exact & mask;
ip6->reserved = cpu_to_be16(0);
ip6->head.jump_id = opcode_tag;
@@ -465,6 +438,7 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
struct nfp_fl_set_ipv6_addr *ip_src)
{
__be32 exact, mask;
+ u8 word;
/* We are expecting tcf_pedit to return a big endian value */
mask = (__force __be32)~tcf_pedit_mask(action, idx);
@@ -473,17 +447,20 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
if (exact & ~mask)
return -EOPNOTSUPP;
- if (off < offsetof(struct ipv6hdr, saddr))
+ if (off < offsetof(struct ipv6hdr, saddr)) {
return -EOPNOTSUPP;
- else if (off < offsetof(struct ipv6hdr, daddr))
- nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, idx,
+ } else if (off < offsetof(struct ipv6hdr, daddr)) {
+ word = (off - offsetof(struct ipv6hdr, saddr)) / sizeof(exact);
+ nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, word,
exact, mask, ip_src);
- else if (off < offsetof(struct ipv6hdr, daddr) +
- sizeof(struct in6_addr))
- nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, idx,
+ } else if (off < offsetof(struct ipv6hdr, daddr) +
+ sizeof(struct in6_addr)) {
+ word = (off - offsetof(struct ipv6hdr, daddr)) / sizeof(exact);
+ nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, word,
exact, mask, ip_dst);
- else
+ } else {
return -EOPNOTSUPP;
+ }
return 0;
}
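With the fix, the 32-bit word being rewritten is derived from the pedit offset itself, so the source and destination addresses each map onto words 0..3 of their own action instead of reusing the loop index. A worked sketch of the offset-to-word mapping (illustration only; 8 and 24 are the saddr/daddr offsets within the IPv6 header):

#include <stdio.h>

#define IP6_SADDR_OFF 8		/* offsetof(struct ipv6hdr, saddr) */
#define IP6_DADDR_OFF 24	/* offsetof(struct ipv6hdr, daddr) */

int main(void)
{
	unsigned int offs[] = { 8, 12, 24, 36 };	/* hypothetical pedit offsets */

	for (int i = 0; i < 4; i++) {
		unsigned int off = offs[i];

		if (off >= IP6_DADDR_OFF)
			printf("off %2u -> daddr word %u\n",
			       off, (off - IP6_DADDR_OFF) / 4);
		else
			printf("off %2u -> saddr word %u\n",
			       off, (off - IP6_SADDR_OFF) / 4);
	}
	return 0;	/* saddr words 0 and 1, daddr words 0 and 3 */
}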
@@ -541,7 +518,7 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
struct nfp_fl_set_eth set_eth;
enum pedit_header_type htype;
int idx, nkeys, err;
- size_t act_size;
+ size_t act_size = 0;
u32 offset, cmd;
u8 ip_proto = 0;
@@ -599,7 +576,9 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
act_size = sizeof(set_eth);
memcpy(nfp_action, &set_eth, act_size);
*a_len += act_size;
- } else if (set_ip_addr.head.len_lw) {
+ }
+ if (set_ip_addr.head.len_lw) {
+ nfp_action += act_size;
act_size = sizeof(set_ip_addr);
memcpy(nfp_action, &set_ip_addr, act_size);
*a_len += act_size;
@@ -607,10 +586,12 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
nfp_fl_csum_l4_to_flag(ip_proto);
- } else if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
+ }
+ if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
/* TC compiles set src and dst IPv6 address as a single action,
* the hardware requires this to be 2 separate actions.
*/
+ nfp_action += act_size;
act_size = sizeof(set_ip6_src);
memcpy(nfp_action, &set_ip6_src, act_size);
*a_len += act_size;
@@ -623,6 +604,7 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
/* Hardware will automatically fix TCP/UDP checksum. */
*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
} else if (set_ip6_dst.head.len_lw) {
+ nfp_action += act_size;
act_size = sizeof(set_ip6_dst);
memcpy(nfp_action, &set_ip6_dst, act_size);
*a_len += act_size;
@@ -630,13 +612,16 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
/* Hardware will automatically fix TCP/UDP checksum. */
*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
} else if (set_ip6_src.head.len_lw) {
+ nfp_action += act_size;
act_size = sizeof(set_ip6_src);
memcpy(nfp_action, &set_ip6_src, act_size);
*a_len += act_size;
/* Hardware will automatically fix TCP/UDP checksum. */
*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
- } else if (set_tport.head.len_lw) {
+ }
+ if (set_tport.head.len_lw) {
+ nfp_action += act_size;
act_size = sizeof(set_tport);
memcpy(nfp_action, &set_tport, act_size);
*a_len += act_size;
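Turning the else-if chain into independent ifs lets one pedit emit several hardware actions back to back; each branch first advances the write cursor by the size of whatever was written before it. A generic sketch of that packing pattern (illustration only, not the driver's structures):

#include <string.h>

/* Illustration: append two optional fixed-size blobs consecutively,
 * advancing past the previously written one, as the rewritten branches do.
 */
static size_t pack_two(char *buf, const void *a, size_t a_len,
		       const void *b, size_t b_len)
{
	size_t used = 0;

	if (a_len) {
		memcpy(buf, a, a_len);
		used = a_len;
	}
	if (b_len) {
		memcpy(buf + used, b, b_len);	/* cursor already past blob a */
		used += b_len;
	}
	return used;
}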
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
index cb8565222621..4c5eaf36d5bb 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
#include <linux/bitfield.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 325954b829c8..29d673aa5277 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#ifndef NFP_FLOWER_CMSG_H
#define NFP_FLOWER_CMSG_H
diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
index bf10598f66ae..81dcf5b318ba 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc. */
#include "main.h"
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index e57d23746585..3a54728d2ea6 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/etherdevice.h>
#include <linux/lockdep.h>
@@ -518,8 +488,8 @@ err_clear_nn:
static int nfp_flower_init(struct nfp_app *app)
{
const struct nfp_pf *pf = app->pf;
+ u64 version, features, ctx_count;
struct nfp_flower_priv *app_priv;
- u64 version, features;
int err;
if (!pf->eth_tbl) {
@@ -543,6 +513,16 @@ static int nfp_flower_init(struct nfp_app *app)
return err;
}
+ ctx_count = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_COUNT",
+ &err);
+ if (err) {
+ nfp_warn(app->cpp,
+ "FlowerNIC: unsupported host context count: %d\n",
+ err);
+ err = 0;
+ ctx_count = BIT(17);
+ }
+
/* We need to ensure hardware has enough flower capabilities. */
if (version != NFP_FLOWER_ALLOWED_VER) {
nfp_warn(app->cpp, "FlowerNIC: unsupported firmware version\n");
@@ -553,6 +533,7 @@ static int nfp_flower_init(struct nfp_app *app)
if (!app_priv)
return -ENOMEM;
+ app_priv->stats_ring_size = roundup_pow_of_two(ctx_count);
app->priv = app_priv;
app_priv->app = app;
skb_queue_head_init(&app_priv->cmsg_skbs_high);
@@ -563,7 +544,7 @@ static int nfp_flower_init(struct nfp_app *app)
init_waitqueue_head(&app_priv->mtu_conf.wait_q);
spin_lock_init(&app_priv->mtu_conf.lock);
- err = nfp_flower_metadata_init(app);
+ err = nfp_flower_metadata_init(app, ctx_count);
if (err)
goto err_free_app_priv;
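
The init path above reads the firmware's host context count from the CONFIG_FC_HOST_CTX_COUNT symbol, falls back to BIT(17) when the symbol is missing, and sizes the stats ring to the next power of two. A minimal userspace sketch of that sizing decision, with the kernel helpers replaced by plain-C equivalents:

#include <stdio.h>
#include <stdint.h>

/* Plain-C stand-in for the kernel's roundup_pow_of_two(). */
static uint64_t roundup_pow_of_two_sketch(uint64_t n)
{
	uint64_t p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	int err = -19;		/* pretend the rtsym lookup failed (ENODEV) */
	uint64_t ctx_count;

	if (err) {
		/* Fall back to the historical fixed table size, 2^17. */
		ctx_count = 1ULL << 17;
		err = 0;
	}
	printf("stats_ring_size = %llu\n",
	       (unsigned long long)roundup_pow_of_two_sketch(ctx_count));
	return 0;
}
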
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index 81d941ab895c..90045bab95bf 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#ifndef __NFP_FLOWER_H__
#define __NFP_FLOWER_H__ 1
@@ -38,6 +8,7 @@
#include <linux/circ_buf.h>
#include <linux/hashtable.h>
+#include <linux/rhashtable.h>
#include <linux/time64.h>
#include <linux/types.h>
#include <net/pkt_cls.h>
@@ -50,10 +21,8 @@ struct net_device;
struct nfp_app;
#define NFP_FL_STATS_CTX_DONT_CARE cpu_to_be32(0xffffffff)
-#define NFP_FL_STATS_ENTRY_RS BIT(20)
-#define NFP_FL_STATS_ELEM_RS 4
-#define NFP_FL_REPEATED_HASH_MAX BIT(17)
-#define NFP_FLOWER_HASH_BITS 19
+#define NFP_FL_STATS_ELEM_RS FIELD_SIZEOF(struct nfp_fl_stats_id, \
+ init_unalloc)
#define NFP_FLOWER_MASK_ENTRY_RS 256
#define NFP_FLOWER_MASK_ELEMENT_RS 1
#define NFP_FLOWER_MASK_HASH_BITS 10
@@ -138,7 +107,10 @@ struct nfp_fl_lag {
* @stats_ids: List of free stats ids
* @mask_ids: List of free mask ids
* @mask_table: Hash table used to store masks
+ * @stats_ring_size: Maximum number of allowed stats ids
* @flow_table: Hash table used to store flower rules
+ * @stats: Stored stats updates for flower rules
+ * @stats_lock: Lock for flower rule stats updates
* @cmsg_work: Workqueue for control messages processing
* @cmsg_skbs_high: List of higher priority skbs for control message
* processing
@@ -171,7 +143,10 @@ struct nfp_flower_priv {
struct nfp_fl_stats_id stats_ids;
struct nfp_fl_mask_id mask_ids;
DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS);
- DECLARE_HASHTABLE(flow_table, NFP_FLOWER_HASH_BITS);
+ u32 stats_ring_size;
+ struct rhashtable flow_table;
+ struct nfp_fl_stats *stats;
+ spinlock_t stats_lock; /* lock stats */
struct work_struct cmsg_work;
struct sk_buff_head cmsg_skbs_high;
struct sk_buff_head cmsg_skbs_low;
@@ -227,10 +202,8 @@ struct nfp_fl_stats {
struct nfp_fl_payload {
struct nfp_fl_rule_metadata meta;
unsigned long tc_flower_cookie;
- struct hlist_node link;
+ struct rhash_head fl_node;
struct rcu_head rcu;
- spinlock_t lock; /* lock stats */
- struct nfp_fl_stats stats;
__be32 nfp_tun_ipv4_addr;
struct net_device *ingress_dev;
char *unmasked_data;
@@ -239,6 +212,8 @@ struct nfp_fl_payload {
bool ingress_offload;
};
+extern const struct rhashtable_params nfp_flower_table_params;
+
struct nfp_fl_stats_frame {
__be32 stats_con_id;
__be32 pkt_count;
@@ -246,7 +221,7 @@ struct nfp_fl_stats_frame {
__be64 stats_cookie;
};
-int nfp_flower_metadata_init(struct nfp_app *app);
+int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count);
void nfp_flower_metadata_cleanup(struct nfp_app *app);
int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 17acb8cc6044..e54fb6034326 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/bitfield.h>
#include <net/pkt_cls.h>
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index c098730544b7..48729bf171e0 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/hash.h>
#include <linux/hashtable.h>
@@ -48,6 +18,12 @@ struct nfp_mask_id_table {
u8 mask_id;
};
+struct nfp_fl_flow_table_cmp_arg {
+ struct net_device *netdev;
+ unsigned long cookie;
+ __be32 host_ctx;
+};
+
static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
{
struct nfp_flower_priv *priv = app->priv;
@@ -55,14 +31,14 @@ static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
ring = &priv->stats_ids.free_list;
/* Check if buffer is full. */
- if (!CIRC_SPACE(ring->head, ring->tail, NFP_FL_STATS_ENTRY_RS *
- NFP_FL_STATS_ELEM_RS -
+ if (!CIRC_SPACE(ring->head, ring->tail,
+ priv->stats_ring_size * NFP_FL_STATS_ELEM_RS -
NFP_FL_STATS_ELEM_RS + 1))
return -ENOBUFS;
memcpy(&ring->buf[ring->head], &stats_context_id, NFP_FL_STATS_ELEM_RS);
ring->head = (ring->head + NFP_FL_STATS_ELEM_RS) %
- (NFP_FL_STATS_ENTRY_RS * NFP_FL_STATS_ELEM_RS);
+ (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS);
return 0;
}
@@ -74,7 +50,7 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
struct circ_buf *ring;
ring = &priv->stats_ids.free_list;
- freed_stats_id = NFP_FL_STATS_ENTRY_RS;
+ freed_stats_id = priv->stats_ring_size;
/* Check for unallocated entries first. */
if (priv->stats_ids.init_unalloc > 0) {
*stats_context_id = priv->stats_ids.init_unalloc - 1;
@@ -92,7 +68,7 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
*stats_context_id = temp_stats_id;
memcpy(&ring->buf[ring->tail], &freed_stats_id, NFP_FL_STATS_ELEM_RS);
ring->tail = (ring->tail + NFP_FL_STATS_ELEM_RS) %
- (NFP_FL_STATS_ENTRY_RS * NFP_FL_STATS_ELEM_RS);
+ (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS);
return 0;
}
@@ -102,56 +78,37 @@ struct nfp_fl_payload *
nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
struct net_device *netdev, __be32 host_ctx)
{
+ struct nfp_fl_flow_table_cmp_arg flower_cmp_arg;
struct nfp_flower_priv *priv = app->priv;
- struct nfp_fl_payload *flower_entry;
- hash_for_each_possible_rcu(priv->flow_table, flower_entry, link,
- tc_flower_cookie)
- if (flower_entry->tc_flower_cookie == tc_flower_cookie &&
- (!netdev || flower_entry->ingress_dev == netdev) &&
- (host_ctx == NFP_FL_STATS_CTX_DONT_CARE ||
- flower_entry->meta.host_ctx_id == host_ctx))
- return flower_entry;
+ flower_cmp_arg.netdev = netdev;
+ flower_cmp_arg.cookie = tc_flower_cookie;
+ flower_cmp_arg.host_ctx = host_ctx;
- return NULL;
-}
-
-static void
-nfp_flower_update_stats(struct nfp_app *app, struct nfp_fl_stats_frame *stats)
-{
- struct nfp_fl_payload *nfp_flow;
- unsigned long flower_cookie;
-
- flower_cookie = be64_to_cpu(stats->stats_cookie);
-
- rcu_read_lock();
- nfp_flow = nfp_flower_search_fl_table(app, flower_cookie, NULL,
- stats->stats_con_id);
- if (!nfp_flow)
- goto exit_rcu_unlock;
-
- spin_lock(&nfp_flow->lock);
- nfp_flow->stats.pkts += be32_to_cpu(stats->pkt_count);
- nfp_flow->stats.bytes += be64_to_cpu(stats->byte_count);
- nfp_flow->stats.used = jiffies;
- spin_unlock(&nfp_flow->lock);
-
-exit_rcu_unlock:
- rcu_read_unlock();
+ return rhashtable_lookup_fast(&priv->flow_table, &flower_cmp_arg,
+ nfp_flower_table_params);
}
void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb)
{
unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
- struct nfp_fl_stats_frame *stats_frame;
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_fl_stats_frame *stats;
unsigned char *msg;
+ u32 ctx_id;
int i;
msg = nfp_flower_cmsg_get_data(skb);
- stats_frame = (struct nfp_fl_stats_frame *)msg;
- for (i = 0; i < msg_len / sizeof(*stats_frame); i++)
- nfp_flower_update_stats(app, stats_frame + i);
+ spin_lock(&priv->stats_lock);
+ for (i = 0; i < msg_len / sizeof(*stats); i++) {
+ stats = (struct nfp_fl_stats_frame *)msg + i;
+ ctx_id = be32_to_cpu(stats->stats_con_id);
+ priv->stats[ctx_id].pkts += be32_to_cpu(stats->pkt_count);
+ priv->stats[ctx_id].bytes += be64_to_cpu(stats->byte_count);
+ priv->stats[ctx_id].used = jiffies;
+ }
+ spin_unlock(&priv->stats_lock);
}
static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
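
With stats keyed by the firmware-assigned host context id, the rx-stats handler above indexes a flat per-context array under a single spinlock instead of looking up each flow. An illustrative userspace version of the accumulation loop; the frame layout and byteswap helpers here are assumptions for the sketch, not the driver's exact definitions:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* ntohl()/htonl() standing in for be32_to_cpu() */

struct stats_frame {		/* assumed wire layout, for illustration */
	uint32_t stats_con_id;	/* big-endian context id */
	uint32_t pkt_count;	/* big-endian packet delta */
	uint64_t byte_count;	/* big-endian byte delta, ignored here */
};

struct stats_entry { uint64_t pkts, bytes; };

int main(void)
{
	struct stats_frame msg[2] = {
		{ htonl(3), htonl(10), 0 },
		{ htonl(3), htonl(5),  0 },
	};
	struct stats_entry stats[8] = { 0 };
	size_t i;

	/* In the driver this loop runs under priv->stats_lock. */
	for (i = 0; i < sizeof(msg) / sizeof(msg[0]); i++) {
		uint32_t ctx_id = ntohl(msg[i].stats_con_id);

		stats[ctx_id].pkts += ntohl(msg[i].pkt_count);
	}
	printf("ctx 3 pkts = %llu\n", (unsigned long long)stats[3].pkts);
	return 0;
}
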
@@ -345,9 +302,9 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
/* Update flow payload with mask ids. */
nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
- nfp_flow->stats.pkts = 0;
- nfp_flow->stats.bytes = 0;
- nfp_flow->stats.used = jiffies;
+ priv->stats[stats_cxt].pkts = 0;
+ priv->stats[stats_cxt].bytes = 0;
+ priv->stats[stats_cxt].used = jiffies;
check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev,
NFP_FL_STATS_CTX_DONT_CARE);
@@ -389,12 +346,56 @@ int nfp_modify_flow_metadata(struct nfp_app *app,
return nfp_release_stats_entry(app, temp_ctx_id);
}
-int nfp_flower_metadata_init(struct nfp_app *app)
+static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg,
+ const void *obj)
+{
+ const struct nfp_fl_flow_table_cmp_arg *cmp_arg = arg->key;
+ const struct nfp_fl_payload *flow_entry = obj;
+
+ if ((!cmp_arg->netdev || flow_entry->ingress_dev == cmp_arg->netdev) &&
+ (cmp_arg->host_ctx == NFP_FL_STATS_CTX_DONT_CARE ||
+ flow_entry->meta.host_ctx_id == cmp_arg->host_ctx))
+ return flow_entry->tc_flower_cookie != cmp_arg->cookie;
+
+ return 1;
+}
+
+static u32 nfp_fl_obj_hashfn(const void *data, u32 len, u32 seed)
+{
+ const struct nfp_fl_payload *flower_entry = data;
+
+ return jhash2((u32 *)&flower_entry->tc_flower_cookie,
+ sizeof(flower_entry->tc_flower_cookie) / sizeof(u32),
+ seed);
+}
+
+static u32 nfp_fl_key_hashfn(const void *data, u32 len, u32 seed)
+{
+ const struct nfp_fl_flow_table_cmp_arg *cmp_arg = data;
+
+ return jhash2((u32 *)&cmp_arg->cookie,
+ sizeof(cmp_arg->cookie) / sizeof(u32), seed);
+}
+
+const struct rhashtable_params nfp_flower_table_params = {
+ .head_offset = offsetof(struct nfp_fl_payload, fl_node),
+ .hashfn = nfp_fl_key_hashfn,
+ .obj_cmpfn = nfp_fl_obj_cmpfn,
+ .obj_hashfn = nfp_fl_obj_hashfn,
+ .automatic_shrinking = true,
+};
+
+int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count)
{
struct nfp_flower_priv *priv = app->priv;
+ int err;
hash_init(priv->mask_table);
- hash_init(priv->flow_table);
+
+ err = rhashtable_init(&priv->flow_table, &nfp_flower_table_params);
+ if (err)
+ return err;
+
get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));
/* Init ring buffer and unallocated mask_ids. */
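
The flow table above uses a composite key (cookie, ingress netdev, host context), so lookups go through an obj_cmpfn that returns 0 on a match, with jhash2 over the cookie used for both key and object hashing. A condensed sketch of the match predicate, with the types reduced to just the fields the comparison uses:

#include <stdio.h>
#include <stdint.h>

#define CTX_DONT_CARE 0xffffffffu

struct cmp_arg {		/* reduced lookup key, for illustration */
	const void *netdev;
	unsigned long cookie;
	uint32_t host_ctx;
};

struct flow_entry {		/* reduced table object */
	const void *ingress_dev;
	unsigned long tc_flower_cookie;
	uint32_t host_ctx_id;
};

/* rhashtable convention: return 0 when the object matches the key. */
static int flow_cmp(const struct cmp_arg *key, const struct flow_entry *obj)
{
	if ((!key->netdev || obj->ingress_dev == key->netdev) &&
	    (key->host_ctx == CTX_DONT_CARE ||
	     obj->host_ctx_id == key->host_ctx))
		return obj->tc_flower_cookie != key->cookie;

	return 1;
}

int main(void)
{
	struct flow_entry e = { (void *)0x1, 0xdead, 7 };
	struct cmp_arg k = { NULL, 0xdead, CTX_DONT_CARE };

	printf("match: %s\n", flow_cmp(&k, &e) == 0 ? "yes" : "no");
	return 0;
}
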
@@ -402,7 +403,7 @@ int nfp_flower_metadata_init(struct nfp_app *app)
kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
if (!priv->mask_ids.mask_id_free_list.buf)
- return -ENOMEM;
+ goto err_free_flow_table;
priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;
@@ -416,18 +417,29 @@ int nfp_flower_metadata_init(struct nfp_app *app)
/* Init ring buffer and unallocated stats_ids. */
priv->stats_ids.free_list.buf =
vmalloc(array_size(NFP_FL_STATS_ELEM_RS,
- NFP_FL_STATS_ENTRY_RS));
+ priv->stats_ring_size));
if (!priv->stats_ids.free_list.buf)
goto err_free_last_used;
- priv->stats_ids.init_unalloc = NFP_FL_REPEATED_HASH_MAX;
+ priv->stats_ids.init_unalloc = host_ctx_count;
+
+ priv->stats = kvmalloc_array(priv->stats_ring_size,
+ sizeof(struct nfp_fl_stats), GFP_KERNEL);
+ if (!priv->stats)
+ goto err_free_ring_buf;
+
+ spin_lock_init(&priv->stats_lock);
return 0;
+err_free_ring_buf:
+ vfree(priv->stats_ids.free_list.buf);
err_free_last_used:
kfree(priv->mask_ids.last_used);
err_free_mask_id:
kfree(priv->mask_ids.mask_id_free_list.buf);
+err_free_flow_table:
+ rhashtable_destroy(&priv->flow_table);
return -ENOMEM;
}
@@ -438,6 +450,9 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app)
if (!priv)
return;
+ rhashtable_free_and_destroy(&priv->flow_table,
+ nfp_check_rhashtable_empty, NULL);
+ kvfree(priv->stats);
kfree(priv->mask_ids.mask_id_free_list.buf);
kfree(priv->mask_ids.last_used);
vfree(priv->stats_ids.free_list.buf);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index bd19624f10cf..29c95423ab64 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/skbuff.h>
#include <net/devlink.h>
@@ -428,8 +398,6 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress)
flow_pay->nfp_tun_ipv4_addr = 0;
flow_pay->meta.flags = 0;
- spin_lock_init(&flow_pay->lock);
-
flow_pay->ingress_offload = !egress;
return flow_pay;
@@ -513,9 +481,12 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (err)
goto err_destroy_flow;
- INIT_HLIST_NODE(&flow_pay->link);
flow_pay->tc_flower_cookie = flow->cookie;
- hash_add_rcu(priv->flow_table, &flow_pay->link, flow->cookie);
+ err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
+ nfp_flower_table_params);
+ if (err)
+ goto err_destroy_flow;
+
port->tc_offload_cnt++;
/* Deallocate flow payload when flower rule has been destroyed. */
@@ -550,6 +521,7 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
struct tc_cls_flower_offload *flow, bool egress)
{
struct nfp_port *port = nfp_port_from_netdev(netdev);
+ struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *nfp_flow;
struct net_device *ingr_dev;
int err;
@@ -573,11 +545,13 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
goto err_free_flow;
err_free_flow:
- hash_del_rcu(&nfp_flow->link);
port->tc_offload_cnt--;
kfree(nfp_flow->action_data);
kfree(nfp_flow->mask_data);
kfree(nfp_flow->unmasked_data);
+ WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
+ &nfp_flow->fl_node,
+ nfp_flower_table_params));
kfree_rcu(nfp_flow, rcu);
return err;
}
@@ -598,8 +572,10 @@ static int
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
struct tc_cls_flower_offload *flow, bool egress)
{
+ struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *nfp_flow;
struct net_device *ingr_dev;
+ u32 ctx_id;
ingr_dev = egress ? NULL : netdev;
nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
@@ -610,13 +586,16 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
if (nfp_flow->ingress_offload && egress)
return 0;
- spin_lock_bh(&nfp_flow->lock);
- tcf_exts_stats_update(flow->exts, nfp_flow->stats.bytes,
- nfp_flow->stats.pkts, nfp_flow->stats.used);
+ ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
+
+ spin_lock_bh(&priv->stats_lock);
+ tcf_exts_stats_update(flow->exts, priv->stats[ctx_id].bytes,
+ priv->stats[ctx_id].pkts,
+ priv->stats[ctx_id].used);
- nfp_flow->stats.pkts = 0;
- nfp_flow->stats.bytes = 0;
- spin_unlock_bh(&nfp_flow->lock);
+ priv->stats[ctx_id].pkts = 0;
+ priv->stats[ctx_id].bytes = 0;
+ spin_unlock_bh(&priv->stats_lock);
return 0;
}
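
The get-stats path above converts the flow's host context id and reads, reports, and clears the shared counters under priv->stats_lock rather than a per-flow lock. A minimal report-and-clear sketch of that pattern (locking omitted, values invented):

#include <stdio.h>
#include <stdint.h>

struct stats_entry { uint64_t pkts, bytes, used; };

/* Report-and-clear, as done when pushing stats up to TC; in the driver this
 * runs under spin_lock_bh(&priv->stats_lock). */
static void report_and_clear(struct stats_entry *s, uint32_t ctx_id)
{
	printf("ctx %u: %llu pkts, %llu bytes\n", ctx_id,
	       (unsigned long long)s[ctx_id].pkts,
	       (unsigned long long)s[ctx_id].bytes);
	s[ctx_id].pkts = 0;
	s[ctx_id].bytes = 0;
}

int main(void)
{
	struct stats_entry stats[4] = { [2] = { 42, 4096, 0 } };

	report_and_clear(stats, 2);
	report_and_clear(stats, 2);	/* second read shows the counters reset */
	return 0;
}
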
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index 382bb93cb090..8e5bec04d1f9 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -1,39 +1,10 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <net/netevent.h>
+#include <net/vxlan.h>
#include <linux/idr.h>
#include <net/dst_metadata.h>
#include <net/arp.h>
@@ -217,7 +188,7 @@ static bool nfp_tun_is_netdev_to_offload(struct net_device *netdev)
return false;
if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch"))
return true;
- if (!strcmp(netdev->rtnl_link_ops->kind, "vxlan"))
+ if (netif_is_vxlan(netdev))
return true;
return false;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_abi.h b/drivers/net/ethernet/netronome/nfp/nfp_abi.h
index 8b56c27931bf..dd359a44adfb 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_abi.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_abi.h
@@ -1,36 +1,5 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
-/*
- * Copyright (C) 2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2018 Netronome Systems, Inc. */
#ifndef __NFP_ABI__
#define __NFP_ABI__ 1
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c
index 8607d09ab732..68a0991aac22 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/bug.h>
#include <linux/lockdep.h>
@@ -60,6 +30,11 @@ static const struct nfp_app_type *apps[] = {
#endif
};
+void nfp_check_rhashtable_empty(void *ptr, void *arg)
+{
+ WARN_ON_ONCE(1);
+}
+
struct nfp_app *nfp_app_from_netdev(struct net_device *netdev)
{
if (nfp_netdev_is_nfp_net(netdev)) {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index 4e1eb3395648..4d6ecf99b1cc 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#ifndef _NFP_APP_H
#define _NFP_APP_H 1
@@ -40,6 +10,8 @@
#include "nfp_net_repr.h"
+#define NFP_APP_CTRL_MTU_MAX U32_MAX
+
struct bpf_prog;
struct net_device;
struct netdev_bpf;
@@ -178,6 +150,7 @@ struct nfp_app_type {
* @ctrl: pointer to ctrl vNIC struct
* @reprs: array of pointers to representors
* @type: pointer to const application ops and info
+ * @ctrl_mtu: MTU to set on the control vNIC (set in .init())
* @priv: app-specific priv data
*/
struct nfp_app {
@@ -189,9 +162,11 @@ struct nfp_app {
struct nfp_reprs __rcu *reprs[NFP_REPR_TYPE_MAX + 1];
const struct nfp_app_type *type;
+ unsigned int ctrl_mtu;
void *priv;
};
+void nfp_check_rhashtable_empty(void *ptr, void *arg);
bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c b/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c
index e2dfe4f168bb..f119277fd66c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_nsp.h"
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.c b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
index cc6ace2be8a9..b04b83687fe2 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2016-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2016-2018 Netronome Systems, Inc. */
#include <linux/bitops.h>
#include <linux/errno.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
index fad0e62a910c..648c2810e5ba 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2016-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2016-2018 Netronome Systems, Inc. */
#ifndef __NFP_ASM_H__
#define __NFP_ASM_H__ 1
@@ -82,6 +52,15 @@
#define OP_BR_BIT_ADDR_LO OP_BR_ADDR_LO
#define OP_BR_BIT_ADDR_HI OP_BR_ADDR_HI
+#define OP_BR_ALU_BASE 0x0e800000000ULL
+#define OP_BR_ALU_BASE_MASK 0x0ff80000000ULL
+#define OP_BR_ALU_A_SRC 0x000000003ffULL
+#define OP_BR_ALU_B_SRC 0x000000ffc00ULL
+#define OP_BR_ALU_DEFBR 0x00000300000ULL
+#define OP_BR_ALU_IMM_HI 0x0007fc00000ULL
+#define OP_BR_ALU_SRC_LMEXTN 0x40000000000ULL
+#define OP_BR_ALU_DST_LMEXTN 0x80000000000ULL
+
static inline bool nfp_is_br(u64 insn)
{
return (insn & OP_BR_BASE_MASK) == OP_BR_BASE ||
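
The new OP_BR_ALU_* constants describe bitfields of the branch-to-ALU instruction word; in kernel code such fields are normally extracted with FIELD_GET() from <linux/bitfield.h>. A standalone sketch of the same extraction using plain shifts, with a hypothetical instruction word chosen purely for demonstration:

#include <stdio.h>
#include <stdint.h>

#define OP_BR_ALU_BASE		0x0e800000000ULL
#define OP_BR_ALU_BASE_MASK	0x0ff80000000ULL
#define OP_BR_ALU_A_SRC		0x000000003ffULL
#define OP_BR_ALU_B_SRC		0x000000ffc00ULL

/* "Extract field under mask", the moral equivalent of FIELD_GET(). */
static uint64_t field_get(uint64_t mask, uint64_t word)
{
	return (word & mask) / (mask & -mask);
}

int main(void)
{
	/* Hypothetical instruction word, for demonstration only. */
	uint64_t insn = OP_BR_ALU_BASE | 0x123 | (0x2aULL << 10);

	if ((insn & OP_BR_ALU_BASE_MASK) == OP_BR_ALU_BASE)
		printf("A src %llu, B src %llu\n",
		       (unsigned long long)field_get(OP_BR_ALU_A_SRC, insn),
		       (unsigned long long)field_get(OP_BR_ALU_B_SRC, insn));
	return 0;
}
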
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
index db463e20a876..808647ec3573 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/rtnetlink.h>
#include <net/devlink.h>
@@ -96,6 +66,7 @@ nfp_devlink_port_split(struct devlink *devlink, unsigned int port_index,
{
struct nfp_pf *pf = devlink_priv(devlink);
struct nfp_eth_table_port eth_port;
+ unsigned int lanes;
int ret;
if (count < 2)
@@ -114,8 +85,12 @@ nfp_devlink_port_split(struct devlink *devlink, unsigned int port_index,
goto out;
}
- ret = nfp_devlink_set_lanes(pf, eth_port.index,
- eth_port.port_lanes / count);
+ /* Special case the 100G CXP -> 2x40G split */
+ lanes = eth_port.port_lanes / count;
+ if (eth_port.lanes == 10 && count == 2)
+ lanes = 8 / count;
+
+ ret = nfp_devlink_set_lanes(pf, eth_port.index, lanes);
out:
mutex_unlock(&pf->lock);
@@ -128,6 +103,7 @@ nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index,
{
struct nfp_pf *pf = devlink_priv(devlink);
struct nfp_eth_table_port eth_port;
+ unsigned int lanes;
int ret;
mutex_lock(&pf->lock);
@@ -143,7 +119,12 @@ nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index,
goto out;
}
- ret = nfp_devlink_set_lanes(pf, eth_port.index, eth_port.port_lanes);
+ /* Special case the 100G CXP -> 2x40G unsplit */
+ lanes = eth_port.port_lanes;
+ if (eth_port.port_lanes == 8)
+ lanes = 10;
+
+ ret = nfp_devlink_set_lanes(pf, eth_port.index, lanes);
out:
mutex_unlock(&pf->lock);
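
The split and unsplit hunks above special-case the 100G CXP port: its 10 lanes cannot be halved evenly, so a 2-way split is mapped to 2x40G (4 lanes each) and an 8-lane subport is mapped back to 10 lanes on unsplit. A small sketch of that selection, simplified to bare lane counts rather than the full eth_port structure:

#include <stdio.h>

/* Per-subport lane count for an N-way split; the 10-lane CXP case is
 * special-cased to 2x40G, mirroring the logic above. */
static unsigned int split_lanes(unsigned int port_lanes,
				unsigned int cur_lanes, unsigned int count)
{
	unsigned int lanes = port_lanes / count;

	if (cur_lanes == 10 && count == 2)
		lanes = 8 / count;
	return lanes;
}

static unsigned int unsplit_lanes(unsigned int port_lanes)
{
	/* 8-lane subports originally came from a 10-lane CXP port. */
	return port_lanes == 8 ? 10 : port_lanes;
}

int main(void)
{
	printf("100G CXP split 2-way -> %u lanes per subport\n",
	       split_lanes(10, 10, 2));
	printf("unsplit of an 8-lane port -> %u lanes\n", unsplit_lanes(8));
	return 0;
}
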
@@ -177,7 +158,8 @@ static int nfp_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
return nfp_app_eswitch_mode_get(pf->app, mode);
}
-static int nfp_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
+static int nfp_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack)
{
struct nfp_pf *pf = devlink_priv(devlink);
int ret;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c b/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c
index f0dcf45aeec1..5cabb1aa9c0c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017 Netronome Systems, Inc. */
#include <linux/kernel.h>
#include <linux/bitops.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 9474a4eed8ce..6c10e8d119e4 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_main.c
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h
index 595b3dc280e3..a3613a2e0aa5 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_main.h
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 439e6ffe2f05..6f0c37d09256 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_net.h
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index d05e37fcc1b2..6bddfcfdec34 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_net_common.c
@@ -2077,14 +2047,17 @@ nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
 	return true;
 }
 
-static void nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
+static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
 {
 	struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
 	struct nfp_net *nn = r_vec->nfp_net;
 	struct nfp_net_dp *dp = &nn->dp;
+	unsigned int budget = 512;
 
-	while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring))
+	while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--)
 		continue;
+
+	return budget;
 }
 
 static void nfp_ctrl_poll(unsigned long arg)
@@ -2096,9 +2069,13 @@ static void nfp_ctrl_poll(unsigned long arg)
 	__nfp_ctrl_tx_queued(r_vec);
 	spin_unlock(&r_vec->lock);
 
-	nfp_ctrl_rx(r_vec);
-
-	nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+	if (nfp_ctrl_rx(r_vec)) {
+		nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+	} else {
+		tasklet_schedule(&r_vec->tasklet);
+		nn_dp_warn(&r_vec->nfp_net->dp,
+			   "control message budget exceeded!\n");
+	}
 }
 
 /* Setup and Configuration
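Note on the hunk above (an aside, not part of the patch): the control-message poller now bounds how many messages one tasklet run may consume and, when the budget runs out, re-schedules itself instead of unmasking the interrupt. A minimal stand-alone sketch of that pattern, with hypothetical names (process_one, drain_with_budget) and a plain counter standing in for the RX ring:

/* Illustrative sketch only -- not driver code. */
#include <stdbool.h>
#include <stdio.h>

#define BUDGET 512

static unsigned int pending = 1000;	/* pretend 1000 messages are queued */

/* Returns true while there is still a message to consume. */
static bool process_one(void)
{
	if (!pending)
		return false;
	pending--;
	return true;
}

/* Returns non-zero if the queue emptied before the budget ran out. */
static unsigned int drain_with_budget(void)
{
	unsigned int budget = BUDGET;

	while (budget && process_one())
		budget--;

	return budget;
}

int main(void)
{
	while (!drain_with_budget())
		printf("budget exhausted, re-schedule the poller\n");
	printf("queue drained, re-enable the interrupt\n");
	return 0;
}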
@@ -3877,10 +3854,20 @@ int nfp_net_init(struct nfp_net *nn)
 		return err;
 
 	/* Set default MTU and Freelist buffer size */
-	if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
+	if (!nfp_net_is_data_vnic(nn) && nn->app->ctrl_mtu) {
+		if (nn->app->ctrl_mtu <= nn->max_mtu) {
+			nn->dp.mtu = nn->app->ctrl_mtu;
+		} else {
+			if (nn->app->ctrl_mtu != NFP_APP_CTRL_MTU_MAX)
+				nn_warn(nn, "app requested MTU above max supported %u > %u\n",
+					nn->app->ctrl_mtu, nn->max_mtu);
+			nn->dp.mtu = nn->max_mtu;
+		}
+	} else if (nn->max_mtu < NFP_NET_DEFAULT_MTU) {
 		nn->dp.mtu = nn->max_mtu;
-	else
+	} else {
 		nn->dp.mtu = NFP_NET_DEFAULT_MTU;
+	}
 	nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp);
 
 	if (nfp_app_ctrl_uses_data_vnics(nn->app))
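Aside on the MTU hunk above: the control vNIC now prefers the app-requested MTU, clamps it to the device maximum, and only warns when a specific (non-"maximum") request cannot be honoured; data vNICs keep the old default/max behaviour. A hedged, self-contained sketch of that selection logic, using hypothetical names and constants (pick_mtu, CTRL_MTU_MAX, DEFAULT_MTU), not the driver's:

/* Illustrative sketch only -- not driver code. */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define CTRL_MTU_MAX	UINT_MAX	/* sentinel meaning "as large as possible" */
#define DEFAULT_MTU	1500U

static unsigned int pick_mtu(bool is_data_vnic, unsigned int ctrl_mtu,
			     unsigned int max_mtu)
{
	if (!is_data_vnic && ctrl_mtu) {
		if (ctrl_mtu <= max_mtu)
			return ctrl_mtu;		/* request fits, honour it */
		if (ctrl_mtu != CTRL_MTU_MAX)
			fprintf(stderr, "requested MTU above max supported %u > %u\n",
				ctrl_mtu, max_mtu);
		return max_mtu;				/* clamp to hardware limit */
	}
	return max_mtu < DEFAULT_MTU ? max_mtu : DEFAULT_MTU;
}

int main(void)
{
	printf("%u\n", pick_mtu(false, 9216, 9420));		/* honoured: 9216 */
	printf("%u\n", pick_mtu(false, CTRL_MTU_MAX, 9420));	/* clamped quietly: 9420 */
	printf("%u\n", pick_mtu(true, 0, 1400));		/* data vNIC: 1400 */
	return 0;
}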
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
index 2190836eaa1d..f2aaef976c7d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc. */
#include <linux/bitfield.h>
#include <linux/device.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index a51490747689..d7c8518ac952 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_net_ctrl.h
@@ -264,7 +234,6 @@
* %NFP_NET_CFG_BPF_ADDR: DMA address of the buffer with JITed BPF code
*/
#define NFP_NET_CFG_BPF_ABI 0x0080
-#define NFP_NET_BPF_ABI 2
#define NFP_NET_CFG_BPF_CAP 0x0081
#define NFP_NET_BPF_CAP_RELO (1 << 0) /* seamless reload */
#define NFP_NET_CFG_BPF_MAX_LEN 0x0082
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
index b6b897840ac5..769ceef09756 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
index 099b63d67451..69b1c9b62e3d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 6a79c8e4a7a4..cb9c512abc76 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_net_ethtool.c
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index 0b1ac9c234d1..1e7d20468a34 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_net_main.c
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 18a09cdcd9c6..c09b893c30dd 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/etherdevice.h>
#include <linux/io-64-nonatomic-hi-lo.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
index 1bf2b18109ab..c412b94bfb97 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#ifndef NFP_NET_REPR_H
#define NFP_NET_REPR_H
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
index 8b1b962cf1d1..b6ec46ed0540 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017 Netronome Systems, Inc. */
#include <linux/bitfield.h>
#include <linux/errno.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
index e9df9d1eab8e..c9f09c5bb5ee 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2017 Netronome Systems, Inc. */
#ifndef _NFP_NET_SRIOV_H_
#define _NFP_NET_SRIOV_H_
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index 68928c86b698..d2c1e9ea5668 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_netvf_main.c
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.c b/drivers/net/ethernet/netronome/nfp/nfp_port.c
index 9c1298114c70..86bc149ca231 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/lockdep.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h
index 51f10ae2d53e..b2479a2a49e5 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#ifndef _NFP_PORT_H_
#define _NFP_PORT_H_
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c b/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c
index 0ecd83705368..814360ed3a20 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c
@@ -1,36 +1,5 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-/*
- * Copyright (C) 2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc. */
#include <linux/kernel.h>
#include <net/devlink.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h b/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h
index 6cee6382deb4..afab6f0fc564 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2017 Netronome Systems, Inc. */
#ifndef NFP_CRC32_H
#define NFP_CRC32_H
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
index f44d0a857314..db94b0bddc92 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp.h
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h
index 0e497a6154db..4a12133850f5 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2017 Netronome Systems, Inc. */
#ifndef NFP6000_NFP6000_H
#define NFP6000_NFP6000_H
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h
index 40fb19939505..9a86ec11c5ba 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2017 Netronome Systems, Inc. */
/*
* nfp_xpb.h
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
index fd63d83bdea5..85d46f206b3c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp6000_pcie.c
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h
index 245d8aaaa97d..6d1bffa6eac6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2017 Netronome Systems, Inc. */
/*
* nfp6000_pcie.h
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h
index 31fe92247f51..3d172e255693 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2017 Netronome Systems, Inc. */
/*
* nfp_arm.h
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
index 123e29cba6d1..2dd0f5842873 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_cpp.h
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
index f7e1d79e735f..94994a939277 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_cppcore.c
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
index 03fcde5fa137..3cfecf105bde 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_cpplib.c
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
index 063a9a6243d6..f05dd34ab89f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2017 Netronome Systems, Inc. */
/* Parse the hwinfo table that the ARM firmware builds in the ARM scratch SRAM
* after chip reset.
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c
index 5f193fe2d69e..79e17943519e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2017 Netronome Systems, Inc. */
/*
* nfp_mip.c
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
index c88bf673cb76..7bc17b94ac60 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
index a164fbc85cd3..d4e02542e2e9 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_nffw.c
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h
index 8d2cbdf4d517..49a4d3f56b56 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_nffw.h
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
index bf593a6b26a1..ce1577bbbd2a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_nsp.c
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
index bd6c9071c8e9..ff33ac54097a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
#ifndef NSP_NSP_H
#define NSP_NSP_H 1
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c
index 5d362f87af08..0997d127144f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017 Netronome Systems, Inc. */
#include <linux/kernel.h>
#include <linux/slab.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
index 7ca589660e4d..802c9224bb32 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2017 Netronome Systems, Inc. */
/* Authors: David Brunecz <david.brunecz@netronome.com>
* Jakub Kicinski <jakub.kicinski@netronome.com>
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
index d32af598da90..ce7492a6a98f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_resource.c
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
index 1ad0a015572e..75f012444796 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_rtsym.c
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c
index f691c6587c76..79470f198a62 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_target.c
diff --git a/drivers/net/ethernet/netronome/nfp/nic/main.c b/drivers/net/ethernet/netronome/nfp/nic/main.c
index d5b587fccaa3..aea8579206ee 100644
--- a/drivers/net/ethernet/netronome/nfp/nic/main.c
+++ b/drivers/net/ethernet/netronome/nfp/nic/main.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017 Netronome Systems, Inc. */
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 8b23d2848457..25382f8fbb70 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -19,34 +19,18 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/crc32.h>
-#include <linux/platform_device.h>
-#include <linux/spinlock.h>
-#include <linux/ethtool.h>
-#include <linux/mii.h>
#include <linux/clk.h>
-#include <linux/workqueue.h>
-#include <linux/netdevice.h>
+#include <linux/crc32.h>
#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/phy.h>
-#include <linux/dma-mapping.h>
-#include <linux/of.h>
+#include <linux/module.h>
#include <linux/of_net.h>
-#include <linux/types.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
-#include <linux/io.h>
#include <mach/board.h>
-#include <mach/platform.h>
#include <mach/hardware.h>
+#include <mach/platform.h>
#define MODNAME "lpc-eth"
#define DRV_VERSION "1.00"
@@ -1257,18 +1241,19 @@ static const struct net_device_ops lpc_netdev_ops = {
static int lpc_eth_drv_probe(struct platform_device *pdev)
{
- struct resource *res;
- struct net_device *ndev;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
struct netdata_local *pldat;
- struct phy_device *phydev;
+ struct net_device *ndev;
dma_addr_t dma_handle;
+ struct resource *res;
int irq, ret;
u32 tmp;
/* Setup network interface for RMII or MII mode */
tmp = __raw_readl(LPC32XX_CLKPWR_MACCLK_CTRL);
tmp &= ~LPC32XX_CLKPWR_MACCTRL_PINS_MSK;
- if (lpc_phy_interface_mode(&pdev->dev) == PHY_INTERFACE_MODE_MII)
+ if (lpc_phy_interface_mode(dev) == PHY_INTERFACE_MODE_MII)
tmp |= LPC32XX_CLKPWR_MACCTRL_USE_MII_PINS;
else
tmp |= LPC32XX_CLKPWR_MACCTRL_USE_RMII_PINS;
@@ -1278,7 +1263,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
if (!res || irq < 0) {
- dev_err(&pdev->dev, "error getting resources.\n");
+ dev_err(dev, "error getting resources.\n");
ret = -ENXIO;
goto err_exit;
}
@@ -1286,12 +1271,12 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
/* Allocate net driver data structure */
ndev = alloc_etherdev(sizeof(struct netdata_local));
if (!ndev) {
- dev_err(&pdev->dev, "could not allocate device.\n");
+ dev_err(dev, "could not allocate device.\n");
ret = -ENOMEM;
goto err_exit;
}
- SET_NETDEV_DEV(ndev, &pdev->dev);
+ SET_NETDEV_DEV(ndev, dev);
pldat = netdev_priv(ndev);
pldat->pdev = pdev;
@@ -1303,9 +1288,9 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
ndev->irq = irq;
/* Get clock for the device */
- pldat->clk = clk_get(&pdev->dev, NULL);
+ pldat->clk = clk_get(dev, NULL);
if (IS_ERR(pldat->clk)) {
- dev_err(&pdev->dev, "error getting clock.\n");
+ dev_err(dev, "error getting clock.\n");
ret = PTR_ERR(pldat->clk);
goto err_out_free_dev;
}
@@ -1318,14 +1303,14 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
/* Map IO space */
pldat->net_base = ioremap(res->start, resource_size(res));
if (!pldat->net_base) {
- dev_err(&pdev->dev, "failed to map registers\n");
+ dev_err(dev, "failed to map registers\n");
ret = -ENOMEM;
goto err_out_disable_clocks;
}
ret = request_irq(ndev->irq, __lpc_eth_interrupt, 0,
ndev->name, ndev);
if (ret) {
- dev_err(&pdev->dev, "error requesting interrupt.\n");
+ dev_err(dev, "error requesting interrupt.\n");
goto err_out_iounmap;
}
@@ -1339,7 +1324,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
sizeof(struct txrx_desc_t) + sizeof(struct rx_status_t));
pldat->dma_buff_base_v = 0;
- if (use_iram_for_net(&pldat->pdev->dev)) {
+ if (use_iram_for_net(dev)) {
dma_handle = LPC32XX_IRAM_BASE;
if (pldat->dma_buff_size <= lpc32xx_return_iram_size())
pldat->dma_buff_base_v =
@@ -1350,7 +1335,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
}
if (pldat->dma_buff_base_v == 0) {
- ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret)
goto err_out_free_irq;
@@ -1359,7 +1344,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
/* Allocate a chunk of memory for the DMA ethernet buffers
and descriptors */
pldat->dma_buff_base_v =
- dma_alloc_coherent(&pldat->pdev->dev,
+ dma_alloc_coherent(dev,
pldat->dma_buff_size, &dma_handle,
GFP_KERNEL);
if (pldat->dma_buff_base_v == NULL) {
@@ -1384,7 +1369,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
__lpc_get_mac(pldat, ndev->dev_addr);
if (!is_valid_ether_addr(ndev->dev_addr)) {
- const char *macaddr = of_get_mac_address(pdev->dev.of_node);
+ const char *macaddr = of_get_mac_address(np);
if (macaddr)
memcpy(ndev->dev_addr, macaddr, ETH_ALEN);
}
@@ -1414,7 +1399,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
ret = register_netdev(ndev);
if (ret) {
- dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
+ dev_err(dev, "Cannot register net device, aborting.\n");
goto err_out_dma_unmap;
}
platform_set_drvdata(pdev, ndev);
@@ -1426,19 +1411,17 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
netdev_info(ndev, "LPC mac at 0x%08x irq %d\n",
res->start, ndev->irq);
- phydev = ndev->phydev;
-
- device_init_wakeup(&pdev->dev, 1);
- device_set_wakeup_enable(&pdev->dev, 0);
+ device_init_wakeup(dev, 1);
+ device_set_wakeup_enable(dev, 0);
return 0;
err_out_unregister_netdev:
unregister_netdev(ndev);
err_out_dma_unmap:
- if (!use_iram_for_net(&pldat->pdev->dev) ||
+ if (!use_iram_for_net(dev) ||
pldat->dma_buff_size > lpc32xx_return_iram_size())
- dma_free_coherent(&pldat->pdev->dev, pldat->dma_buff_size,
+ dma_free_coherent(dev, pldat->dma_buff_size,
pldat->dma_buff_base_v,
pldat->dma_buff_base_p);
err_out_free_irq:
@@ -1533,13 +1516,11 @@ static int lpc_eth_drv_resume(struct platform_device *pdev)
}
#endif
-#ifdef CONFIG_OF
static const struct of_device_id lpc_eth_match[] = {
{ .compatible = "nxp,lpc-eth" },
{ }
};
MODULE_DEVICE_TABLE(of, lpc_eth_match);
-#endif
static struct platform_driver lpc_eth_driver = {
.probe = lpc_eth_drv_probe,
@@ -1550,7 +1531,7 @@ static struct platform_driver lpc_eth_driver = {
#endif
.driver = {
.name = MODNAME,
- .of_match_table = of_match_ptr(lpc_eth_match),
+ .of_match_table = lpc_eth_match,
},
};
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 69aa7fc392c5..7d9819d80e44 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -72,9 +72,6 @@ static void netxen_schedule_work(struct netxen_adapter *adapter,
work_func_t func, int delay);
static void netxen_cancel_fw_work(struct netxen_adapter *adapter);
static int netxen_nic_poll(struct napi_struct *napi, int budget);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void netxen_nic_poll_controller(struct net_device *netdev);
-#endif
static void netxen_create_sysfs_entries(struct netxen_adapter *adapter);
static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter);
@@ -581,9 +578,6 @@ static const struct net_device_ops netxen_netdev_ops = {
.ndo_tx_timeout = netxen_tx_timeout,
.ndo_fix_features = netxen_fix_features,
.ndo_set_features = netxen_set_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = netxen_nic_poll_controller,
-#endif
};
static inline bool netxen_function_zero(struct pci_dev *pdev)
@@ -1790,11 +1784,6 @@ static pci_ers_result_t netxen_io_slot_reset(struct pci_dev *pdev)
return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
-static void netxen_io_resume(struct pci_dev *pdev)
-{
- pci_cleanup_aer_uncorrect_error_status(pdev);
-}
-
static void netxen_nic_shutdown(struct pci_dev *pdev)
{
struct netxen_adapter *adapter = pci_get_drvdata(pdev);
@@ -2402,23 +2391,6 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
return work_done;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void netxen_nic_poll_controller(struct net_device *netdev)
-{
- int ring;
- struct nx_host_sds_ring *sds_ring;
- struct netxen_adapter *adapter = netdev_priv(netdev);
- struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
-
- disable_irq(adapter->irq);
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
- sds_ring = &recv_ctx->sds_rings[ring];
- netxen_intr(adapter->irq, sds_ring);
- }
- enable_irq(adapter->irq);
-}
-#endif
-
static int
nx_incr_dev_ref_cnt(struct netxen_adapter *adapter)
{
@@ -3488,7 +3460,6 @@ netxen_free_ip_list(struct netxen_adapter *adapter, bool master)
static const struct pci_error_handlers netxen_err_handler = {
.error_detected = netxen_io_error_detected,
.slot_reset = netxen_io_slot_reset,
- .resume = netxen_io_resume,
};
static struct pci_driver netxen_driver = {
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 5f0962d353ce..d9a03aba0e02 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -915,7 +915,7 @@ u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc);
/* Prototypes */
int qed_fill_dev_info(struct qed_dev *cdev,
struct qed_dev_info *dev_info);
-void qed_link_update(struct qed_hwfn *hwfn);
+void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt);
u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
u32 input_len, u8 *input_buf,
u32 max_size, u8 *unzip_buf);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 56578f888b70..5c221ebaa7b3 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -12102,6 +12102,7 @@ struct public_global {
u32 running_bundle_id;
s32 external_temperature;
u32 mdump_reason;
+ u64 reserved;
u32 data_ptr;
u32 data_size;
};
@@ -12206,11 +12207,56 @@ struct public_port {
u32 transceiver_data;
#define ETH_TRANSCEIVER_STATE_MASK 0x000000FF
#define ETH_TRANSCEIVER_STATE_SHIFT 0x00000000
+#define ETH_TRANSCEIVER_STATE_OFFSET 0x00000000
#define ETH_TRANSCEIVER_STATE_UNPLUGGED 0x00000000
#define ETH_TRANSCEIVER_STATE_PRESENT 0x00000001
#define ETH_TRANSCEIVER_STATE_VALID 0x00000003
#define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008
-
+#define ETH_TRANSCEIVER_TYPE_MASK 0x0000FF00
+#define ETH_TRANSCEIVER_TYPE_OFFSET 0x8
+#define ETH_TRANSCEIVER_TYPE_NONE 0x00
+#define ETH_TRANSCEIVER_TYPE_UNKNOWN 0xFF
+#define ETH_TRANSCEIVER_TYPE_1G_PCC 0x01
+#define ETH_TRANSCEIVER_TYPE_1G_ACC 0x02
+#define ETH_TRANSCEIVER_TYPE_1G_LX 0x03
+#define ETH_TRANSCEIVER_TYPE_1G_SX 0x04
+#define ETH_TRANSCEIVER_TYPE_10G_SR 0x05
+#define ETH_TRANSCEIVER_TYPE_10G_LR 0x06
+#define ETH_TRANSCEIVER_TYPE_10G_LRM 0x07
+#define ETH_TRANSCEIVER_TYPE_10G_ER 0x08
+#define ETH_TRANSCEIVER_TYPE_10G_PCC 0x09
+#define ETH_TRANSCEIVER_TYPE_10G_ACC 0x0a
+#define ETH_TRANSCEIVER_TYPE_XLPPI 0x0b
+#define ETH_TRANSCEIVER_TYPE_40G_LR4 0x0c
+#define ETH_TRANSCEIVER_TYPE_40G_SR4 0x0d
+#define ETH_TRANSCEIVER_TYPE_40G_CR4 0x0e
+#define ETH_TRANSCEIVER_TYPE_100G_AOC 0x0f
+#define ETH_TRANSCEIVER_TYPE_100G_SR4 0x10
+#define ETH_TRANSCEIVER_TYPE_100G_LR4 0x11
+#define ETH_TRANSCEIVER_TYPE_100G_ER4 0x12
+#define ETH_TRANSCEIVER_TYPE_100G_ACC 0x13
+#define ETH_TRANSCEIVER_TYPE_100G_CR4 0x14
+#define ETH_TRANSCEIVER_TYPE_4x10G_SR 0x15
+#define ETH_TRANSCEIVER_TYPE_25G_CA_N 0x16
+#define ETH_TRANSCEIVER_TYPE_25G_ACC_S 0x17
+#define ETH_TRANSCEIVER_TYPE_25G_CA_S 0x18
+#define ETH_TRANSCEIVER_TYPE_25G_ACC_M 0x19
+#define ETH_TRANSCEIVER_TYPE_25G_CA_L 0x1a
+#define ETH_TRANSCEIVER_TYPE_25G_ACC_L 0x1b
+#define ETH_TRANSCEIVER_TYPE_25G_SR 0x1c
+#define ETH_TRANSCEIVER_TYPE_25G_LR 0x1d
+#define ETH_TRANSCEIVER_TYPE_25G_AOC 0x1e
+#define ETH_TRANSCEIVER_TYPE_4x10G 0x1f
+#define ETH_TRANSCEIVER_TYPE_4x25G_CR 0x20
+#define ETH_TRANSCEIVER_TYPE_1000BASET 0x21
+#define ETH_TRANSCEIVER_TYPE_10G_BASET 0x22
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR 0x30
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR 0x31
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR 0x32
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR 0x33
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR 0x34
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR 0x35
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC 0x36
u32 wol_info;
u32 wol_pkt_len;
u32 wol_pkt_details;
@@ -12275,7 +12321,7 @@ struct public_func {
#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000
u32 status;
-#define FUNC_STATUS_VLINK_DOWN 0x00000001
+#define FUNC_STATUS_VIRTUAL_LINK_UP 0x00000001
u32 mac_upper;
#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
@@ -12697,6 +12743,7 @@ struct public_drv_mb {
#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_MASK 0x0000FFFF
#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET 0
#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002
+#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000
u32 fw_mb_header;
#define FW_MSG_CODE_MASK 0xffff0000
@@ -12749,6 +12796,7 @@ struct public_drv_mb {
/* get MFW feature support response */
#define FW_MB_PARAM_FEATURE_SUPPORT_EEE 0x00000002
+#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK 0x00010000
#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR (1 << 0)
@@ -13196,6 +13244,13 @@ struct nvm_cfg1_port {
u32 transceiver_00;
u32 device_ids;
u32 board_cfg;
+#define NVM_CFG1_PORT_PORT_TYPE_MASK 0x000000FF
+#define NVM_CFG1_PORT_PORT_TYPE_OFFSET 0
+#define NVM_CFG1_PORT_PORT_TYPE_UNDEFINED 0x0
+#define NVM_CFG1_PORT_PORT_TYPE_MODULE 0x1
+#define NVM_CFG1_PORT_PORT_TYPE_BACKPLANE 0x2
+#define NVM_CFG1_PORT_PORT_TYPE_EXT_PHY 0x3
+#define NVM_CFG1_PORT_PORT_TYPE_MODULE_SLAVE 0x4
u32 mnm_10g_cap;
u32 mnm_10g_ctrl;
u32 mnm_10g_misc;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index af3a28ec04eb..0f0aba793352 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -228,7 +228,7 @@ static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn)
attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)),
GET_FIELD(tmp2, QED_GRC_ATTENTION_PF),
(GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) ==
- QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Ireelevant)",
+ QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant)",
GET_FIELD(tmp2, QED_GRC_ATTENTION_VF));
out:
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index f99797a149a4..beb8e5d6401a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -1709,7 +1709,7 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
cm_info->local_ip[0] = ntohl(iph->daddr);
cm_info->remote_ip[0] = ntohl(iph->saddr);
- cm_info->ip_version = TCP_IPV4;
+ cm_info->ip_version = QED_TCP_IPV4;
ip_hlen = (iph->ihl) * sizeof(u32);
*payload_len = ntohs(iph->tot_len) - ip_hlen;
@@ -1729,7 +1729,7 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
cm_info->remote_ip[i] =
ntohl(ip6h->saddr.in6_u.u6_addr32[i]);
}
- cm_info->ip_version = TCP_IPV6;
+ cm_info->ip_version = QED_TCP_IPV6;
ip_hlen = sizeof(*ip6h);
*payload_len = ntohs(ip6h->payload_len);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index da13117a604a..aa633381aa47 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -796,7 +796,18 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
tx_pkt.vlan = p_buffer->vlan;
tx_pkt.bd_flags = bd_flags;
tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w;
- tx_pkt.tx_dest = p_ll2_conn->tx_dest;
+ switch (p_ll2_conn->tx_dest) {
+ case CORE_TX_DEST_NW:
+ tx_pkt.tx_dest = QED_LL2_TX_DEST_NW;
+ break;
+ case CORE_TX_DEST_LB:
+ tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
+ break;
+ case CORE_TX_DEST_DROP:
+ default:
+ tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP;
+ break;
+ }
tx_pkt.first_frag = first_frag;
tx_pkt.first_frag_len = p_buffer->packet_length;
tx_pkt.cookie = p_buffer;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 75d217aaf8ce..35fd0db6a677 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -58,6 +58,7 @@
#include "qed_iscsi.h"
#include "qed_mcp.h"
+#include "qed_reg_addr.h"
#include "qed_hw.h"
#include "qed_selftest.h"
#include "qed_debug.h"
@@ -1304,6 +1305,7 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
struct qed_hwfn *hwfn;
struct qed_mcp_link_params *link_params;
struct qed_ptt *ptt;
+ u32 sup_caps;
int rc;
if (!cdev)
@@ -1330,26 +1332,50 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
link_params->speed.autoneg = params->autoneg;
if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
link_params->speed.advertised_speeds = 0;
- if ((params->adv_speeds & QED_LM_1000baseT_Half_BIT) ||
- (params->adv_speeds & QED_LM_1000baseT_Full_BIT))
+ sup_caps = QED_LM_1000baseT_Full_BIT |
+ QED_LM_1000baseKX_Full_BIT |
+ QED_LM_1000baseX_Full_BIT;
+ if (params->adv_speeds & sup_caps)
link_params->speed.advertised_speeds |=
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
- if (params->adv_speeds & QED_LM_10000baseKR_Full_BIT)
+ sup_caps = QED_LM_10000baseT_Full_BIT |
+ QED_LM_10000baseKR_Full_BIT |
+ QED_LM_10000baseKX4_Full_BIT |
+ QED_LM_10000baseR_FEC_BIT |
+ QED_LM_10000baseCR_Full_BIT |
+ QED_LM_10000baseSR_Full_BIT |
+ QED_LM_10000baseLR_Full_BIT |
+ QED_LM_10000baseLRM_Full_BIT;
+ if (params->adv_speeds & sup_caps)
link_params->speed.advertised_speeds |=
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
if (params->adv_speeds & QED_LM_20000baseKR2_Full_BIT)
link_params->speed.advertised_speeds |=
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G;
- if (params->adv_speeds & QED_LM_25000baseKR_Full_BIT)
+ sup_caps = QED_LM_25000baseKR_Full_BIT |
+ QED_LM_25000baseCR_Full_BIT |
+ QED_LM_25000baseSR_Full_BIT;
+ if (params->adv_speeds & sup_caps)
link_params->speed.advertised_speeds |=
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
- if (params->adv_speeds & QED_LM_40000baseLR4_Full_BIT)
+ sup_caps = QED_LM_40000baseLR4_Full_BIT |
+ QED_LM_40000baseKR4_Full_BIT |
+ QED_LM_40000baseCR4_Full_BIT |
+ QED_LM_40000baseSR4_Full_BIT;
+ if (params->adv_speeds & sup_caps)
link_params->speed.advertised_speeds |=
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
- if (params->adv_speeds & QED_LM_50000baseKR2_Full_BIT)
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
+ sup_caps = QED_LM_50000baseKR2_Full_BIT |
+ QED_LM_50000baseCR2_Full_BIT |
+ QED_LM_50000baseSR2_Full_BIT;
+ if (params->adv_speeds & sup_caps)
link_params->speed.advertised_speeds |=
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
- if (params->adv_speeds & QED_LM_100000baseKR4_Full_BIT)
+ sup_caps = QED_LM_100000baseKR4_Full_BIT |
+ QED_LM_100000baseSR4_Full_BIT |
+ QED_LM_100000baseCR4_Full_BIT |
+ QED_LM_100000baseLR4_ER4_Full_BIT;
+ if (params->adv_speeds & sup_caps)
link_params->speed.advertised_speeds |=
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
}
@@ -1462,12 +1488,149 @@ static int qed_get_link_data(struct qed_hwfn *hwfn,
return 0;
}
+static void qed_fill_link_capability(struct qed_hwfn *hwfn,
+ struct qed_ptt *ptt, u32 capability,
+ u32 *if_capability)
+{
+ u32 media_type, tcvr_state, tcvr_type;
+ u32 speed_mask, board_cfg;
+
+ if (qed_mcp_get_media_type(hwfn, ptt, &media_type))
+ media_type = MEDIA_UNSPECIFIED;
+
+ if (qed_mcp_get_transceiver_data(hwfn, ptt, &tcvr_state, &tcvr_type))
+ tcvr_type = ETH_TRANSCEIVER_STATE_UNPLUGGED;
+
+ if (qed_mcp_trans_speed_mask(hwfn, ptt, &speed_mask))
+ speed_mask = 0xFFFFFFFF;
+
+ if (qed_mcp_get_board_config(hwfn, ptt, &board_cfg))
+ board_cfg = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
+
+ DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
+ "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n",
+ media_type, tcvr_state, tcvr_type, speed_mask, board_cfg);
+
+ switch (media_type) {
+ case MEDIA_DA_TWINAX:
+ if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
+ *if_capability |= QED_LM_20000baseKR2_Full_BIT;
+ /* For DAC media, multiple speed capabilities are supported */
+ capability = capability & speed_mask;
+ if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
+ *if_capability |= QED_LM_1000baseKX_Full_BIT;
+ if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
+ *if_capability |= QED_LM_10000baseCR_Full_BIT;
+ if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
+ *if_capability |= QED_LM_40000baseCR4_Full_BIT;
+ if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
+ *if_capability |= QED_LM_25000baseCR_Full_BIT;
+ if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+ *if_capability |= QED_LM_50000baseCR2_Full_BIT;
+ if (capability &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
+ *if_capability |= QED_LM_100000baseCR4_Full_BIT;
+ break;
+ case MEDIA_BASE_T:
+ if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) {
+ if (capability &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
+ *if_capability |= QED_LM_1000baseT_Full_BIT;
+ }
+ if (capability &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) {
+ *if_capability |= QED_LM_10000baseT_Full_BIT;
+ }
+ }
+ if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) {
+ if (tcvr_type == ETH_TRANSCEIVER_TYPE_1000BASET)
+ *if_capability |= QED_LM_1000baseT_Full_BIT;
+ if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_BASET)
+ *if_capability |= QED_LM_10000baseT_Full_BIT;
+ }
+ break;
+ case MEDIA_SFP_1G_FIBER:
+ case MEDIA_SFPP_10G_FIBER:
+ case MEDIA_XFP_FIBER:
+ case MEDIA_MODULE_FIBER:
+ if (capability &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
+ if ((tcvr_type == ETH_TRANSCEIVER_TYPE_1G_LX) ||
+ (tcvr_type == ETH_TRANSCEIVER_TYPE_1G_SX))
+ *if_capability |= QED_LM_1000baseKX_Full_BIT;
+ }
+ if (capability &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) {
+ if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_SR)
+ *if_capability |= QED_LM_10000baseSR_Full_BIT;
+ if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LR)
+ *if_capability |= QED_LM_10000baseLR_Full_BIT;
+ if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LRM)
+ *if_capability |= QED_LM_10000baseLRM_Full_BIT;
+ if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_ER)
+ *if_capability |= QED_LM_10000baseR_FEC_BIT;
+ }
+ if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
+ *if_capability |= QED_LM_20000baseKR2_Full_BIT;
+ if (capability &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) {
+ if (tcvr_type == ETH_TRANSCEIVER_TYPE_25G_SR)
+ *if_capability |= QED_LM_25000baseSR_Full_BIT;
+ }
+ if (capability &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) {
+ if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_LR4)
+ *if_capability |= QED_LM_40000baseLR4_Full_BIT;
+ if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_SR4)
+ *if_capability |= QED_LM_40000baseSR4_Full_BIT;
+ }
+ if (capability &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+ *if_capability |= QED_LM_50000baseKR2_Full_BIT;
+ if (capability &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) {
+ if (tcvr_type == ETH_TRANSCEIVER_TYPE_100G_SR4)
+ *if_capability |= QED_LM_100000baseSR4_Full_BIT;
+ }
+
+ break;
+ case MEDIA_KR:
+ if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
+ *if_capability |= QED_LM_20000baseKR2_Full_BIT;
+ if (capability &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
+ *if_capability |= QED_LM_1000baseKX_Full_BIT;
+ if (capability &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
+ *if_capability |= QED_LM_10000baseKR_Full_BIT;
+ if (capability &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
+ *if_capability |= QED_LM_25000baseKR_Full_BIT;
+ if (capability &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
+ *if_capability |= QED_LM_40000baseKR4_Full_BIT;
+ if (capability &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+ *if_capability |= QED_LM_50000baseKR2_Full_BIT;
+ if (capability &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
+ *if_capability |= QED_LM_100000baseKR4_Full_BIT;
+ break;
+ case MEDIA_UNSPECIFIED:
+ case MEDIA_NOT_PRESENT:
+ DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG,
+ "Unknown media and transceiver type;\n");
+ break;
+ }
+}
+
static void qed_fill_link(struct qed_hwfn *hwfn,
+ struct qed_ptt *ptt,
struct qed_link_output *if_link)
{
+ struct qed_mcp_link_capabilities link_caps;
struct qed_mcp_link_params params;
struct qed_mcp_link_state link;
- struct qed_mcp_link_capabilities link_caps;
u32 media_type;
memset(if_link, 0, sizeof(*if_link));
@@ -1498,58 +1661,20 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
if_link->advertised_caps |= QED_LM_Autoneg_BIT;
else
if_link->advertised_caps &= ~QED_LM_Autoneg_BIT;
- if (params.speed.advertised_speeds &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
- if_link->advertised_caps |= QED_LM_1000baseT_Half_BIT |
- QED_LM_1000baseT_Full_BIT;
- if (params.speed.advertised_speeds &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
- if_link->advertised_caps |= QED_LM_10000baseKR_Full_BIT;
- if (params.speed.advertised_speeds &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
- if_link->advertised_caps |= QED_LM_20000baseKR2_Full_BIT;
- if (params.speed.advertised_speeds &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
- if_link->advertised_caps |= QED_LM_25000baseKR_Full_BIT;
- if (params.speed.advertised_speeds &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
- if_link->advertised_caps |= QED_LM_40000baseLR4_Full_BIT;
- if (params.speed.advertised_speeds &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
- if_link->advertised_caps |= QED_LM_50000baseKR2_Full_BIT;
- if (params.speed.advertised_speeds &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
- if_link->advertised_caps |= QED_LM_100000baseKR4_Full_BIT;
-
- if (link_caps.speed_capabilities &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
- if_link->supported_caps |= QED_LM_1000baseT_Half_BIT |
- QED_LM_1000baseT_Full_BIT;
- if (link_caps.speed_capabilities &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
- if_link->supported_caps |= QED_LM_10000baseKR_Full_BIT;
- if (link_caps.speed_capabilities &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
- if_link->supported_caps |= QED_LM_20000baseKR2_Full_BIT;
- if (link_caps.speed_capabilities &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
- if_link->supported_caps |= QED_LM_25000baseKR_Full_BIT;
- if (link_caps.speed_capabilities &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
- if_link->supported_caps |= QED_LM_40000baseLR4_Full_BIT;
- if (link_caps.speed_capabilities &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
- if_link->supported_caps |= QED_LM_50000baseKR2_Full_BIT;
- if (link_caps.speed_capabilities &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
- if_link->supported_caps |= QED_LM_100000baseKR4_Full_BIT;
+
+ /* Fill link advertised capability */
+ qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds,
+ &if_link->advertised_caps);
+ /* Fill link supported capability */
+ qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities,
+ &if_link->supported_caps);
if (link.link_up)
if_link->speed = link.speed;
/* TODO - fill duplex properly */
if_link->duplex = DUPLEX_FULL;
- qed_mcp_get_media_type(hwfn->cdev, &media_type);
+ qed_mcp_get_media_type(hwfn, ptt, &media_type);
if_link->port = qed_get_port_type(media_type);
if_link->autoneg = params.speed.autoneg;
@@ -1562,9 +1687,8 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;
/* Link partner capabilities */
- if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_HD)
- if_link->lp_caps |= QED_LM_1000baseT_Half_BIT;
- if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_FD)
+ if (link.partner_adv_speed &
+ QED_LINK_PARTNER_SPEED_1G_FD)
if_link->lp_caps |= QED_LM_1000baseT_Full_BIT;
if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G)
if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT;
@@ -1607,21 +1731,34 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
static void qed_get_current_link(struct qed_dev *cdev,
struct qed_link_output *if_link)
{
+ struct qed_hwfn *hwfn;
+ struct qed_ptt *ptt;
int i;
- qed_fill_link(&cdev->hwfns[0], if_link);
+ hwfn = &cdev->hwfns[0];
+ if (IS_PF(cdev)) {
+ ptt = qed_ptt_acquire(hwfn);
+ if (ptt) {
+ qed_fill_link(hwfn, ptt, if_link);
+ qed_ptt_release(hwfn, ptt);
+ } else {
+ DP_NOTICE(hwfn, "Failed to fill link; No PTT\n");
+ }
+ } else {
+ qed_fill_link(hwfn, NULL, if_link);
+ }
for_each_hwfn(cdev, i)
qed_inform_vf_link_state(&cdev->hwfns[i]);
}
-void qed_link_update(struct qed_hwfn *hwfn)
+void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
{
void *cookie = hwfn->cdev->ops_cookie;
struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
struct qed_link_output if_link;
- qed_fill_link(hwfn, &if_link);
+ qed_fill_link(hwfn, ptt, &if_link);
qed_inform_vf_link_state(hwfn);
if (IS_LEAD_HWFN(hwfn) && cookie)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 58c7eb9d8e1b..f40f654398a0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -1247,6 +1247,52 @@ static void qed_mcp_read_eee_config(struct qed_hwfn *p_hwfn,
p_link->eee_lp_adv_caps |= QED_EEE_10G_ADV;
}
+static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct public_func *p_data, int pfid)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_FUNC);
+ u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
+ u32 func_addr;
+ u32 i, size;
+
+ func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
+ memset(p_data, 0, sizeof(*p_data));
+
+ size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
+ for (i = 0; i < size / sizeof(u32); i++)
+ ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
+ func_addr + (i << 2));
+ return size;
+}
+
+static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
+ struct public_func *p_shmem_info)
+{
+ struct qed_mcp_function_info *p_info;
+
+ p_info = &p_hwfn->mcp_info->func_info;
+
+ p_info->bandwidth_min = QED_MFW_GET_FIELD(p_shmem_info->config,
+ FUNC_MF_CFG_MIN_BW);
+ if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
+ DP_INFO(p_hwfn,
+ "bandwidth minimum out of bounds [%02x]. Set to 1\n",
+ p_info->bandwidth_min);
+ p_info->bandwidth_min = 1;
+ }
+
+ p_info->bandwidth_max = QED_MFW_GET_FIELD(p_shmem_info->config,
+ FUNC_MF_CFG_MAX_BW);
+ if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
+ DP_INFO(p_hwfn,
+ "bandwidth maximum out of bounds [%02x]. Set to 100\n",
+ p_info->bandwidth_max);
+ p_info->bandwidth_max = 100;
+ }
+}
+
static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool b_reset)
{
@@ -1274,10 +1320,29 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
goto out;
}
- if (p_hwfn->b_drv_link_init)
- p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
- else
+ if (p_hwfn->b_drv_link_init) {
+ /* Link indication with modern MFW arrives as per-PF
+ * indication.
+ */
+ if (p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
+ struct public_func shmem_info;
+
+ qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+ MCP_PF_ID(p_hwfn));
+ p_link->link_up = !!(shmem_info.status &
+ FUNC_STATUS_VIRTUAL_LINK_UP);
+ qed_read_pf_bandwidth(p_hwfn, &shmem_info);
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Virtual link_up = %d\n", p_link->link_up);
+ } else {
+ p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Physical link_up = %d\n", p_link->link_up);
+ }
+ } else {
p_link->link_up = false;
+ }
p_link->full_duplex = true;
switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
@@ -1382,7 +1447,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
qed_mcp_read_eee_config(p_hwfn, p_ptt, p_link);
- qed_link_update(p_hwfn);
+ qed_link_update(p_hwfn, p_ptt);
out:
spin_unlock_bh(&p_hwfn->mcp_info->link_lock);
}
@@ -1504,53 +1569,6 @@ static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}
-static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
- struct public_func *p_shmem_info)
-{
- struct qed_mcp_function_info *p_info;
-
- p_info = &p_hwfn->mcp_info->func_info;
-
- p_info->bandwidth_min = (p_shmem_info->config &
- FUNC_MF_CFG_MIN_BW_MASK) >>
- FUNC_MF_CFG_MIN_BW_SHIFT;
- if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
- DP_INFO(p_hwfn,
- "bandwidth minimum out of bounds [%02x]. Set to 1\n",
- p_info->bandwidth_min);
- p_info->bandwidth_min = 1;
- }
-
- p_info->bandwidth_max = (p_shmem_info->config &
- FUNC_MF_CFG_MAX_BW_MASK) >>
- FUNC_MF_CFG_MAX_BW_SHIFT;
- if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
- DP_INFO(p_hwfn,
- "bandwidth maximum out of bounds [%02x]. Set to 100\n",
- p_info->bandwidth_max);
- p_info->bandwidth_max = 100;
- }
-}
-
-static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct public_func *p_data, int pfid)
-{
- u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
- PUBLIC_FUNC);
- u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
- u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
- u32 i, size;
-
- memset(p_data, 0, sizeof(*p_data));
-
- size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
- for (i = 0; i < size / sizeof(u32); i++)
- ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
- func_addr + (i << 2));
- return size;
-}
-
static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_mcp_function_info *p_info;
@@ -1849,12 +1867,12 @@ int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
return 0;
}
-int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type)
+int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *p_media_type)
{
- struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
- struct qed_ptt *p_ptt;
+ *p_media_type = MEDIA_UNSPECIFIED;
- if (IS_VF(cdev))
+ if (IS_VF(p_hwfn->cdev))
return -EINVAL;
if (!qed_mcp_is_init(p_hwfn)) {
@@ -1862,16 +1880,195 @@ int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type)
return -EBUSY;
}
- *p_media_type = MEDIA_UNSPECIFIED;
+ if (!p_ptt) {
+ *p_media_type = MEDIA_UNSPECIFIED;
+ return -EINVAL;
+ }
- p_ptt = qed_ptt_acquire(p_hwfn);
- if (!p_ptt)
+ *p_media_type = qed_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port,
+ media_type));
+
+ return 0;
+}
+
+int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_transceiver_state,
+ u32 *p_transceiver_type)
+{
+ u32 transceiver_info;
+
+ *p_transceiver_type = ETH_TRANSCEIVER_TYPE_NONE;
+ *p_transceiver_state = ETH_TRANSCEIVER_STATE_UPDATING;
+
+ if (IS_VF(p_hwfn->cdev))
+ return -EINVAL;
+
+ if (!qed_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
return -EBUSY;
+ }
- *p_media_type = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
- offsetof(struct public_port, media_type));
+ transceiver_info = qed_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port,
+ transceiver_data));
- qed_ptt_release(p_hwfn, p_ptt);
+ *p_transceiver_state = (transceiver_info &
+ ETH_TRANSCEIVER_STATE_MASK) >>
+ ETH_TRANSCEIVER_STATE_OFFSET;
+
+ if (*p_transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
+ *p_transceiver_type = (transceiver_info &
+ ETH_TRANSCEIVER_TYPE_MASK) >>
+ ETH_TRANSCEIVER_TYPE_OFFSET;
+ else
+ *p_transceiver_type = ETH_TRANSCEIVER_TYPE_UNKNOWN;
+
+ return 0;
+}
+static bool qed_is_transceiver_ready(u32 transceiver_state,
+ u32 transceiver_type)
+{
+ if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) &&
+ ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) &&
+ (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE))
+ return true;
+
+ return false;
+}
+
+int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *p_speed_mask)
+{
+ u32 transceiver_type, transceiver_state;
+
+ qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
+ &transceiver_type);
+
+ if (qed_is_transceiver_ready(transceiver_state, transceiver_type) ==
+ false)
+ return -EINVAL;
+
+ switch (transceiver_type) {
+ case ETH_TRANSCEIVER_TYPE_1G_LX:
+ case ETH_TRANSCEIVER_TYPE_1G_SX:
+ case ETH_TRANSCEIVER_TYPE_1G_PCC:
+ case ETH_TRANSCEIVER_TYPE_1G_ACC:
+ case ETH_TRANSCEIVER_TYPE_1000BASET:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_10G_SR:
+ case ETH_TRANSCEIVER_TYPE_10G_LR:
+ case ETH_TRANSCEIVER_TYPE_10G_LRM:
+ case ETH_TRANSCEIVER_TYPE_10G_ER:
+ case ETH_TRANSCEIVER_TYPE_10G_PCC:
+ case ETH_TRANSCEIVER_TYPE_10G_ACC:
+ case ETH_TRANSCEIVER_TYPE_4x10G:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_40G_LR4:
+ case ETH_TRANSCEIVER_TYPE_40G_SR4:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_100G_AOC:
+ case ETH_TRANSCEIVER_TYPE_100G_SR4:
+ case ETH_TRANSCEIVER_TYPE_100G_LR4:
+ case ETH_TRANSCEIVER_TYPE_100G_ER4:
+ case ETH_TRANSCEIVER_TYPE_100G_ACC:
+ *p_speed_mask =
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_25G_SR:
+ case ETH_TRANSCEIVER_TYPE_25G_LR:
+ case ETH_TRANSCEIVER_TYPE_25G_AOC:
+ case ETH_TRANSCEIVER_TYPE_25G_ACC_S:
+ case ETH_TRANSCEIVER_TYPE_25G_ACC_M:
+ case ETH_TRANSCEIVER_TYPE_25G_ACC_L:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_25G_CA_N:
+ case ETH_TRANSCEIVER_TYPE_25G_CA_S:
+ case ETH_TRANSCEIVER_TYPE_25G_CA_L:
+ case ETH_TRANSCEIVER_TYPE_4x25G_CR:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_40G_CR4:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_100G_CR4:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
+ *p_speed_mask =
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC:
+ *p_speed_mask =
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_XLPPI:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_10G_BASET:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
+ default:
+ DP_INFO(p_hwfn, "Unknown transceiver type 0x%x\n",
+ transceiver_type);
+ *p_speed_mask = 0xff;
+ break;
+ }
+
+ return 0;
+}
+
+int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *p_board_config)
+{
+ u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr;
+
+ if (IS_VF(p_hwfn->cdev))
+ return -EINVAL;
+
+ if (!qed_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
+ return -EBUSY;
+ }
+ if (!p_ptt) {
+ *p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
+ return -EINVAL;
+ }
+
+ nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
+ nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
+ port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
+ *p_board_config = qed_rd(p_hwfn, p_ptt,
+ port_cfg_addr +
+ offsetof(struct nvm_cfg1_port,
+ board_cfg));
return 0;
}
@@ -3351,7 +3548,8 @@ int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 mcp_resp, mcp_param, features;
- features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE;
+ features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
+ DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK;
return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
features, &mcp_resp, &mcp_param);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 85e6b3989e7a..1adfe52b3905 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -322,14 +322,61 @@ int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
* @brief Get media type value of the port.
*
* @param cdev - qed dev pointer
+ * @param p_ptt
* @param mfw_ver - media type value
*
* @return int -
* 0 - Operation was successful.
* -EBUSY - Operation failed
*/
-int qed_mcp_get_media_type(struct qed_dev *cdev,
- u32 *media_type);
+int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *media_type);
+
+/**
+ * @brief Get transceiver data of the port.
+ *
+ * @param p_hwfn - hw function pointer
+ * @param p_ptt
+ * @param p_transceiver_state - transceiver state.
+ * @param p_transceiver_type - transceiver type value
+ *
+ * @return int -
+ * 0 - Operation was successful.
+ * -EBUSY - Operation failed
+ */
+int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_transceiver_state,
+ u32 *p_transceiver_type);
+
+/**
+ * @brief Get transceiver supported speed mask.
+ *
+ * @param p_hwfn - hw function pointer
+ * @param p_ptt
+ * @param p_speed_mask - Bit mask of all supported speeds.
+ *
+ * @return int -
+ * 0 - Operation was successful.
+ * -EBUSY - Operation failed
+ */
+
+int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *p_speed_mask);
+
+/**
+ * @brief Get board configuration.
+ *
+ * @param p_hwfn - hw function pointer
+ * @param p_ptt
+ * @param p_board_config - Board config.
+ *
+ * @return int -
+ * 0 - Operation was successful.
+ * -EBUSY - Operation failed
+ */
+int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *p_board_config);
/**
* @brief General function for sending commands to the MCP
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index be941cfaa2d4..c71391b9c757 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -228,7 +228,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
num_cons, "Toggle");
if (rc) {
DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "Failed to allocate toogle bits, rc = %d\n", rc);
+ "Failed to allocate toggle bits, rc = %d\n", rc);
goto free_cq_map;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index 7d7a64c55ff1..f9167d1354bb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -140,23 +140,16 @@ static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
{
- enum roce_flavor flavor;
-
switch (roce_mode) {
case ROCE_V1:
- flavor = PLAIN_ROCE;
- break;
+ return PLAIN_ROCE;
case ROCE_V2_IPV4:
- flavor = RROCE_IPV4;
- break;
+ return RROCE_IPV4;
case ROCE_V2_IPV6:
- flavor = ROCE_V2_IPV6;
- break;
+ return RROCE_IPV6;
default:
- flavor = MAX_ROCE_MODE;
- break;
+ return MAX_ROCE_FLAVOR;
}
- return flavor;
}
static void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 8de644b4721e..77b6248ad3b9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -154,7 +154,7 @@ qed_set_pf_update_tunn_mode(struct qed_tunnel_info *p_tun,
static void qed_set_tunn_cls_info(struct qed_tunnel_info *p_tun,
struct qed_tunnel_info *p_src)
{
- enum tunnel_clss type;
+ int type;
p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
p_tun->b_update_tx_cls = p_src->b_update_tx_cls;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 3d4269659820..b6cccf44bf40 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -413,7 +413,6 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
}
if (!p_iov->b_pre_fp_hsi &&
- ETH_HSI_VER_MINOR &&
(resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
DP_INFO(p_hwfn,
"PF is using older fastpath HSI; %02x.%02x is configured\n",
@@ -572,7 +571,7 @@ free_p_iov:
static void
__qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
struct qed_tunn_update_type *p_src,
- enum qed_tunn_clss mask, u8 *p_cls)
+ enum qed_tunn_mode mask, u8 *p_cls)
{
if (p_src->b_update_mode) {
p_req->tun_mode_update_mask |= BIT(mask);
@@ -587,7 +586,7 @@ __qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
static void
qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
struct qed_tunn_update_type *p_src,
- enum qed_tunn_clss mask,
+ enum qed_tunn_mode mask,
u8 *p_cls, struct qed_tunn_update_udp_port *p_port,
u8 *p_update_port, u16 *p_udp_port)
{
@@ -1689,7 +1688,7 @@ static void qed_handle_bulletin_change(struct qed_hwfn *hwfn)
ops->ports_update(cookie, vxlan_port, geneve_port);
/* Always update link configuration according to bulletin */
- qed_link_update(hwfn);
+ qed_link_update(hwfn, NULL);
}
void qed_iov_vf_task(struct work_struct *work)
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 7ff50b4488f6..8cbbd628fd73 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -413,19 +413,42 @@ struct qede_link_mode_mapping {
};
static const struct qede_link_mode_mapping qed_lm_map[] = {
- {QED_LM_FIBRE_BIT, ETHTOOL_LINK_MODE_FIBRE_BIT},
{QED_LM_Autoneg_BIT, ETHTOOL_LINK_MODE_Autoneg_BIT},
{QED_LM_Asym_Pause_BIT, ETHTOOL_LINK_MODE_Asym_Pause_BIT},
{QED_LM_Pause_BIT, ETHTOOL_LINK_MODE_Pause_BIT},
- {QED_LM_1000baseT_Half_BIT, ETHTOOL_LINK_MODE_1000baseT_Half_BIT},
{QED_LM_1000baseT_Full_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT},
+ {QED_LM_10000baseT_Full_BIT, ETHTOOL_LINK_MODE_10000baseT_Full_BIT},
+ {QED_LM_2500baseX_Full_BIT, ETHTOOL_LINK_MODE_2500baseX_Full_BIT},
+ {QED_LM_Backplane_BIT, ETHTOOL_LINK_MODE_Backplane_BIT},
+ {QED_LM_1000baseKX_Full_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT},
+ {QED_LM_10000baseKX4_Full_BIT, ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT},
{QED_LM_10000baseKR_Full_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT},
+ {QED_LM_10000baseKR_Full_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT},
+ {QED_LM_10000baseR_FEC_BIT, ETHTOOL_LINK_MODE_10000baseR_FEC_BIT},
{QED_LM_20000baseKR2_Full_BIT, ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT},
- {QED_LM_25000baseKR_Full_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT},
+ {QED_LM_40000baseKR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT},
+ {QED_LM_40000baseCR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT},
+ {QED_LM_40000baseSR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT},
{QED_LM_40000baseLR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT},
+ {QED_LM_25000baseCR_Full_BIT, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT},
+ {QED_LM_25000baseKR_Full_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT},
+ {QED_LM_25000baseSR_Full_BIT, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT},
+ {QED_LM_50000baseCR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT},
{QED_LM_50000baseKR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT},
{QED_LM_100000baseKR4_Full_BIT,
- ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT},
+ ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT},
+ {QED_LM_100000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT},
+ {QED_LM_100000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT},
+ {QED_LM_100000baseLR4_ER4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT},
+ {QED_LM_50000baseSR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT},
+ {QED_LM_1000baseX_Full_BIT, ETHTOOL_LINK_MODE_1000baseX_Full_BIT},
+ {QED_LM_10000baseCR_Full_BIT, ETHTOOL_LINK_MODE_10000baseCR_Full_BIT},
+ {QED_LM_10000baseSR_Full_BIT, ETHTOOL_LINK_MODE_10000baseSR_Full_BIT},
+ {QED_LM_10000baseLR_Full_BIT, ETHTOOL_LINK_MODE_10000baseLR_Full_BIT},
+ {QED_LM_10000baseLRM_Full_BIT, ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT},
};
#define QEDE_DRV_TO_ETHTOOL_CAPS(caps, lk_ksettings, name) \
@@ -495,6 +518,7 @@ static int qede_set_link_ksettings(struct net_device *dev,
struct qede_dev *edev = netdev_priv(dev);
struct qed_link_output current_link;
struct qed_link_params params;
+ u32 sup_caps;
if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
DP_INFO(edev, "Link settings are not allowed to be changed\n");
@@ -521,60 +545,85 @@ static int qede_set_link_ksettings(struct net_device *dev,
params.forced_speed = base->speed;
switch (base->speed) {
case SPEED_1000:
- if (!(current_link.supported_caps &
- QED_LM_1000baseT_Full_BIT)) {
+ sup_caps = QED_LM_1000baseT_Full_BIT |
+ QED_LM_1000baseKX_Full_BIT |
+ QED_LM_1000baseX_Full_BIT;
+ if (!(current_link.supported_caps & sup_caps)) {
DP_INFO(edev, "1G speed not supported\n");
return -EINVAL;
}
- params.adv_speeds = QED_LM_1000baseT_Full_BIT;
+ params.adv_speeds = current_link.supported_caps &
+ sup_caps;
break;
case SPEED_10000:
- if (!(current_link.supported_caps &
- QED_LM_10000baseKR_Full_BIT)) {
+ sup_caps = QED_LM_10000baseT_Full_BIT |
+ QED_LM_10000baseKR_Full_BIT |
+ QED_LM_10000baseKX4_Full_BIT |
+ QED_LM_10000baseR_FEC_BIT |
+ QED_LM_10000baseCR_Full_BIT |
+ QED_LM_10000baseSR_Full_BIT |
+ QED_LM_10000baseLR_Full_BIT |
+ QED_LM_10000baseLRM_Full_BIT;
+ if (!(current_link.supported_caps & sup_caps)) {
DP_INFO(edev, "10G speed not supported\n");
return -EINVAL;
}
- params.adv_speeds = QED_LM_10000baseKR_Full_BIT;
+ params.adv_speeds = current_link.supported_caps &
+ sup_caps;
break;
case SPEED_20000:
if (!(current_link.supported_caps &
- QED_LM_20000baseKR2_Full_BIT)) {
+ QED_LM_20000baseKR2_Full_BIT)) {
DP_INFO(edev, "20G speed not supported\n");
return -EINVAL;
}
params.adv_speeds = QED_LM_20000baseKR2_Full_BIT;
break;
case SPEED_25000:
- if (!(current_link.supported_caps &
- QED_LM_25000baseKR_Full_BIT)) {
+ sup_caps = QED_LM_25000baseKR_Full_BIT |
+ QED_LM_25000baseCR_Full_BIT |
+ QED_LM_25000baseSR_Full_BIT;
+ if (!(current_link.supported_caps & sup_caps)) {
DP_INFO(edev, "25G speed not supported\n");
return -EINVAL;
}
- params.adv_speeds = QED_LM_25000baseKR_Full_BIT;
+ params.adv_speeds = current_link.supported_caps &
+ sup_caps;
break;
case SPEED_40000:
- if (!(current_link.supported_caps &
- QED_LM_40000baseLR4_Full_BIT)) {
+ sup_caps = QED_LM_40000baseLR4_Full_BIT |
+ QED_LM_40000baseKR4_Full_BIT |
+ QED_LM_40000baseCR4_Full_BIT |
+ QED_LM_40000baseSR4_Full_BIT;
+ if (!(current_link.supported_caps & sup_caps)) {
DP_INFO(edev, "40G speed not supported\n");
return -EINVAL;
}
- params.adv_speeds = QED_LM_40000baseLR4_Full_BIT;
+ params.adv_speeds = current_link.supported_caps &
+ sup_caps;
break;
case SPEED_50000:
- if (!(current_link.supported_caps &
- QED_LM_50000baseKR2_Full_BIT)) {
+ sup_caps = QED_LM_50000baseKR2_Full_BIT |
+ QED_LM_50000baseCR2_Full_BIT |
+ QED_LM_50000baseSR2_Full_BIT;
+ if (!(current_link.supported_caps & sup_caps)) {
DP_INFO(edev, "50G speed not supported\n");
return -EINVAL;
}
- params.adv_speeds = QED_LM_50000baseKR2_Full_BIT;
+ params.adv_speeds = current_link.supported_caps &
+ sup_caps;
break;
case SPEED_100000:
- if (!(current_link.supported_caps &
- QED_LM_100000baseKR4_Full_BIT)) {
+ sup_caps = QED_LM_100000baseKR4_Full_BIT |
+ QED_LM_100000baseSR4_Full_BIT |
+ QED_LM_100000baseCR4_Full_BIT |
+ QED_LM_100000baseLR4_ER4_Full_BIT;
+ if (!(current_link.supported_caps & sup_caps)) {
DP_INFO(edev, "100G speed not supported\n");
return -EINVAL;
}
- params.adv_speeds = QED_LM_100000baseKR4_Full_BIT;
+ params.adv_speeds = current_link.supported_caps &
+ sup_caps;
break;
default:
DP_INFO(edev, "Unsupported speed %u\n", base->speed);
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index b48f76182049..10b075bc5959 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -380,8 +380,6 @@ static void fm93c56a_select(struct ql3_adapter *qdev)
qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
- ql_write_nvram_reg(qdev, spir,
- ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
}
/*
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 81312924df14..0c443ea98479 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1800,7 +1800,8 @@ struct qlcnic_hardware_ops {
int (*config_loopback) (struct qlcnic_adapter *, u8);
int (*clear_loopback) (struct qlcnic_adapter *, u8);
int (*config_promisc_mode) (struct qlcnic_adapter *, u32);
- void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, u16);
+ void (*change_l2_filter)(struct qlcnic_adapter *adapter, u64 *addr,
+ u16 vlan, struct qlcnic_host_tx_ring *tx_ring);
int (*get_board_info) (struct qlcnic_adapter *);
void (*set_mac_filter_count) (struct qlcnic_adapter *);
void (*free_mac_list) (struct qlcnic_adapter *);
@@ -2064,9 +2065,10 @@ static inline int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter,
}
static inline void qlcnic_change_filter(struct qlcnic_adapter *adapter,
- u64 *addr, u16 id)
+ u64 *addr, u16 vlan,
+ struct qlcnic_host_tx_ring *tx_ring)
{
- adapter->ahw->hw_ops->change_l2_filter(adapter, addr, id);
+ adapter->ahw->hw_ops->change_l2_filter(adapter, addr, vlan, tx_ring);
}
static inline int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 569d54ededec..2a533280b124 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -2135,7 +2135,8 @@ out:
}
void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
- u16 vlan_id)
+ u16 vlan_id,
+ struct qlcnic_host_tx_ring *tx_ring)
{
u8 mac[ETH_ALEN];
memcpy(&mac, addr, ETH_ALEN);
@@ -4232,7 +4233,6 @@ static void qlcnic_83xx_io_resume(struct pci_dev *pdev)
{
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
- pci_cleanup_aer_uncorrect_error_status(pdev);
if (test_and_clear_bit(__QLCNIC_AER, &adapter->state))
qlcnic_83xx_aer_start_poll_work(adapter);
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index b75a81246856..73fe2f64491d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -550,7 +550,8 @@ int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32);
int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32);
int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int);
int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int);
-void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, u16);
+void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
+ u16 vlan, struct qlcnic_host_tx_ring *ring);
int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *);
int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *, int);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
index 4b76c69fe86d..834208e55f7b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -883,7 +883,7 @@ static u8 qlcnic_dcb_get_capability(struct net_device *netdev, int capid,
struct qlcnic_adapter *adapter = netdev_priv(netdev);
if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
- return 0;
+ return 1;
switch (capid) {
case DCB_CAP_ATTR_PG:
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 4bb33af8e2b3..56a3bd9e37dc 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -173,7 +173,8 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
struct net_device *netdev);
void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *);
void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter,
- u64 *uaddr, u16 vlan_id);
+ u64 *uaddr, u16 vlan_id,
+ struct qlcnic_host_tx_ring *tx_ring);
int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *,
struct ethtool_coalesce *);
int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 84dd83031a1b..9647578cbe6a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -268,13 +268,12 @@ static void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter,
}
void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
- u16 vlan_id)
+ u16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
{
struct cmd_desc_type0 *hwdesc;
struct qlcnic_nic_req *req;
struct qlcnic_mac_req *mac_req;
struct qlcnic_vlan_req *vlan_req;
- struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
u32 producer;
u64 word;
@@ -301,7 +300,8 @@ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
struct cmd_desc_type0 *first_desc,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ struct qlcnic_host_tx_ring *tx_ring)
{
struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
struct ethhdr *phdr = (struct ethhdr *)(skb->data);
@@ -335,7 +335,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
tmp_fil->vlan_id == vlan_id) {
if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
qlcnic_change_filter(adapter, &src_addr,
- vlan_id);
+ vlan_id, tx_ring);
tmp_fil->ftime = jiffies;
return;
}
@@ -350,7 +350,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
if (!fil)
return;
- qlcnic_change_filter(adapter, &src_addr, vlan_id);
+ qlcnic_change_filter(adapter, &src_addr, vlan_id, tx_ring);
fil->ftime = jiffies;
fil->vlan_id = vlan_id;
memcpy(fil->faddr, &src_addr, ETH_ALEN);
@@ -766,7 +766,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
}
if (adapter->drv_mac_learn)
- qlcnic_send_filter(adapter, first_desc, skb);
+ qlcnic_send_filter(adapter, first_desc, skb, tx_ring);
tx_ring->tx_stats.tx_bytes += skb->len;
tx_ring->tx_stats.xmit_called++;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 2d38d1ac2aae..d42ba2293d8c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -59,9 +59,6 @@ static int qlcnic_close(struct net_device *netdev);
static void qlcnic_tx_timeout(struct net_device *netdev);
static void qlcnic_attach_work(struct work_struct *work);
static void qlcnic_fwinit_work(struct work_struct *work);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void qlcnic_poll_controller(struct net_device *netdev);
-#endif
static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
@@ -545,9 +542,6 @@ static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_udp_tunnel_add = qlcnic_add_vxlan_port,
.ndo_udp_tunnel_del = qlcnic_del_vxlan_port,
.ndo_features_check = qlcnic_features_check,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = qlcnic_poll_controller,
-#endif
#ifdef CONFIG_QLCNIC_SRIOV
.ndo_set_vf_mac = qlcnic_sriov_set_vf_mac,
.ndo_set_vf_rate = qlcnic_sriov_set_vf_tx_rate,
@@ -3200,45 +3194,6 @@ static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data)
return IRQ_HANDLED;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void qlcnic_poll_controller(struct net_device *netdev)
-{
- struct qlcnic_adapter *adapter = netdev_priv(netdev);
- struct qlcnic_host_sds_ring *sds_ring;
- struct qlcnic_recv_context *recv_ctx;
- struct qlcnic_host_tx_ring *tx_ring;
- int ring;
-
- if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
- return;
-
- recv_ctx = adapter->recv_ctx;
-
- for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
- sds_ring = &recv_ctx->sds_rings[ring];
- qlcnic_disable_sds_intr(adapter, sds_ring);
- napi_schedule(&sds_ring->napi);
- }
-
- if (adapter->flags & QLCNIC_MSIX_ENABLED) {
- /* Only Multi-Tx queue capable devices need to
- * schedule NAPI for TX rings
- */
- if ((qlcnic_83xx_check(adapter) &&
- (adapter->flags & QLCNIC_TX_INTR_SHARED)) ||
- (qlcnic_82xx_check(adapter) &&
- !qlcnic_check_multi_tx(adapter)))
- return;
-
- for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
- tx_ring = &adapter->tx_ring[ring];
- qlcnic_disable_tx_intr(adapter, tx_ring);
- napi_schedule(&tx_ring->napi);
- }
- }
-}
-#endif
-
static void
qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
{
@@ -3975,7 +3930,6 @@ static void qlcnic_82xx_io_resume(struct pci_dev *pdev)
u32 state;
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
- pci_cleanup_aer_uncorrect_error_status(pdev);
state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
if (state == QLCNIC_DEV_READY && test_and_clear_bit(__QLCNIC_AER,
&adapter->state))
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index 7fd86d40a337..11167abe5934 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -113,7 +113,7 @@ rmnet_map_ingress_handler(struct sk_buff *skb,
struct sk_buff *skbn;
if (skb->dev->type == ARPHRD_ETHER) {
- if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_KERNEL)) {
+ if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) {
kfree_skb(skb);
return;
}
@@ -147,7 +147,7 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
}
if (skb_headroom(skb) < required_headroom) {
- if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL))
+ if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
return -ENOMEM;
}
@@ -189,6 +189,9 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
if (!skb)
goto done;
+ if (skb->pkt_type == PACKET_LOOPBACK)
+ return RX_HANDLER_PASS;
+
dev = skb->dev;
port = rmnet_get_port(dev);
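Editor's note, a minimal sketch with a hypothetical helper (not from the patch): rmnet's handlers run in the softirq receive path where sleeping is not allowed, which is why the two allocations above must use GFP_ATOMIC rather than GFP_KERNEL:

	/* Hypothetical helper showing the constraint behind the change above. */
	static int grow_headroom_in_rx_path(struct sk_buff *skb, unsigned int need)
	{
		if (skb_headroom(skb) >= need)
			return 0;

		/* GFP_KERNEL may sleep; RX handlers run in atomic context. */
		return pskb_expand_head(skb, need, 0, GFP_ATOMIC);
	}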
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index ed8ffd498c88..1fd01688d37b 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -631,7 +631,6 @@ struct rtl8169_tc_offsets {
enum rtl_flag {
RTL_FLAG_TASK_ENABLED = 0,
- RTL_FLAG_TASK_SLOW_PENDING,
RTL_FLAG_TASK_RESET_PENDING,
RTL_FLAG_MAX
};
@@ -4059,13 +4058,12 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
genphy_soft_reset(dev->phydev);
- /* It was reported that chip version 33 ends up with 10MBit/Half on a
+ /* It was reported that several chips end up with 10MBit/Half on a
* 1GBit link after resuming from S3. For whatever reason the PHY on
- * this chip doesn't properly start a renegotiation when soft-reset.
+ * these chips doesn't properly start a renegotiation when soft-reset.
* Explicitly requesting a renegotiation fixes this.
*/
- if (tp->mac_version == RTL_GIGA_MAC_VER_33 &&
- dev->phydev->autoneg == AUTONEG_ENABLE)
+ if (dev->phydev->autoneg == AUTONEG_ENABLE)
phy_restart_aneg(dev->phydev);
}
@@ -4163,10 +4161,15 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
{
- if (!netif_running(tp->dev) || !__rtl8169_get_wol(tp))
+ struct phy_device *phydev;
+
+ if (!__rtl8169_get_wol(tp))
return false;
- phy_speed_down(tp->dev->phydev, false);
+ /* phydev may not be attached to netdevice */
+ phydev = mdiobus_get_phy(tp->mii_bus, 0);
+
+ phy_speed_down(phydev, false);
rtl_wol_suspend_quirk(tp);
return true;
@@ -4270,8 +4273,8 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
break;
case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
- case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_35:
+ case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36:
+ case RTL_GIGA_MAC_VER_38:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
break;
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
@@ -4523,9 +4526,14 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
static void rtl_set_tx_config_registers(struct rtl8169_private *tp)
{
- /* Set DMA burst size and Interframe Gap Time */
- RTL_W32(tp, TxConfig, (TX_DMA_BURST << TxDMAShift) |
- (InterFrameGap << TxInterFrameGapShift));
+ u32 val = TX_DMA_BURST << TxDMAShift |
+ InterFrameGap << TxInterFrameGapShift;
+
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
+ tp->mac_version != RTL_GIGA_MAC_VER_39)
+ val |= TXCFG_AUTO_FIFO;
+
+ RTL_W32(tp, TxConfig, val);
}
static void rtl_set_rx_max_size(struct rtl8169_private *tp)
@@ -4549,27 +4557,19 @@ static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp)
static void rtl8169_set_magic_reg(struct rtl8169_private *tp, unsigned mac_version)
{
- static const struct rtl_cfg2_info {
- u32 mac_version;
- u32 clk;
- u32 val;
- } cfg2_info [] = {
- { RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd
- { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
- { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
- { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
- };
- const struct rtl_cfg2_info *p = cfg2_info;
- unsigned int i;
- u32 clk;
+ u32 val;
- clk = RTL_R8(tp, Config2) & PCI_Clock_66MHz;
- for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) {
- if ((p->mac_version == mac_version) && (p->clk == clk)) {
- RTL_W32(tp, 0x7c, p->val);
- break;
- }
- }
+ if (tp->mac_version == RTL_GIGA_MAC_VER_05)
+ val = 0x000fff00;
+ else if (tp->mac_version == RTL_GIGA_MAC_VER_06)
+ val = 0x00ffff00;
+ else
+ return;
+
+ if (RTL_R8(tp, Config2) & PCI_Clock_66MHz)
+ val |= 0xff;
+
+ RTL_W32(tp, 0x7c, val);
}
static void rtl_set_rx_mode(struct net_device *dev)
@@ -5020,7 +5020,6 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
rtl_disable_clock_request(tp);
- RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
/* Adjust EEE LED frequency */
@@ -5054,7 +5053,6 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
rtl_disable_clock_request(tp);
- RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
@@ -5099,8 +5097,6 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
static void rtl_hw_start_8168g(struct rtl8169_private *tp)
{
- RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
-
rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
@@ -5198,8 +5194,6 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1));
- RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
-
rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
@@ -5282,8 +5276,6 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
{
rtl8168ep_stop_cmac(tp);
- RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
-
rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x2f, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x5f, ERIAR_EXGMAC);
@@ -5605,7 +5597,6 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
/* Force LAN exit from ASPM if Rx/Tx are not idle */
RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
- RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
@@ -5865,6 +5856,7 @@ static void rtl8169_tx_clear(struct rtl8169_private *tp)
{
rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
tp->cur_tx = tp->dirty_tx = 0;
+ netdev_reset_queue(tp->dev);
}
static void rtl_reset_work(struct rtl8169_private *tp)
@@ -6167,6 +6159,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
txd->opts2 = cpu_to_le32(opts[1]);
+ netdev_sent_queue(dev, skb->len);
+
skb_tx_timestamp(skb);
/* Force memory writes to complete before releasing descriptor */
@@ -6265,7 +6259,7 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
{
- unsigned int dirty_tx, tx_left;
+ unsigned int dirty_tx, tx_left, bytes_compl = 0, pkts_compl = 0;
dirty_tx = tp->dirty_tx;
smp_rmb();
@@ -6289,10 +6283,8 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
rtl8169_unmap_tx_skb(tp_to_dev(tp), tx_skb,
tp->TxDescArray + entry);
if (status & LastFrag) {
- u64_stats_update_begin(&tp->tx_stats.syncp);
- tp->tx_stats.packets++;
- tp->tx_stats.bytes += tx_skb->skb->len;
- u64_stats_update_end(&tp->tx_stats.syncp);
+ pkts_compl++;
+ bytes_compl += tx_skb->skb->len;
dev_consume_skb_any(tx_skb->skb);
tx_skb->skb = NULL;
}
@@ -6301,6 +6293,13 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
}
if (tp->dirty_tx != dirty_tx) {
+ netdev_completed_queue(dev, pkts_compl, bytes_compl);
+
+ u64_stats_update_begin(&tp->tx_stats.syncp);
+ tp->tx_stats.packets += pkts_compl;
+ tp->tx_stats.bytes += bytes_compl;
+ u64_stats_update_end(&tp->tx_stats.syncp);
+
tp->dirty_tx = dirty_tx;
/* Sync with rtl8169_start_xmit:
* - publish dirty_tx ring index (write barrier)
@@ -6465,42 +6464,29 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
if (status == 0xffff || !(status & (RTL_EVENT_NAPI | tp->event_slow)))
return IRQ_NONE;
- rtl_irq_disable(tp);
- napi_schedule_irqoff(&tp->napi);
-
- return IRQ_HANDLED;
-}
-
-/*
- * Workqueue context.
- */
-static void rtl_slow_event_work(struct rtl8169_private *tp)
-{
- struct net_device *dev = tp->dev;
- u16 status;
+ if (unlikely(status & SYSErr)) {
+ rtl8169_pcierr_interrupt(tp->dev);
+ goto out;
+ }
- status = rtl_get_events(tp) & tp->event_slow;
- rtl_ack_events(tp, status);
+ if (status & LinkChg)
+ phy_mac_interrupt(tp->dev->phydev);
- if (unlikely(status & RxFIFOOver)) {
- switch (tp->mac_version) {
- /* Work around for rx fifo overflow */
- case RTL_GIGA_MAC_VER_11:
- netif_stop_queue(dev);
- /* XXX - Hack alert. See rtl_task(). */
- set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
- default:
- break;
- }
+ if (unlikely(status & RxFIFOOver &&
+ tp->mac_version == RTL_GIGA_MAC_VER_11)) {
+ netif_stop_queue(tp->dev);
+ /* XXX - Hack alert. See rtl_task(). */
+ set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
}
- if (unlikely(status & SYSErr))
- rtl8169_pcierr_interrupt(dev);
-
- if (status & LinkChg)
- phy_mac_interrupt(dev->phydev);
+ if (status & RTL_EVENT_NAPI) {
+ rtl_irq_disable(tp);
+ napi_schedule_irqoff(&tp->napi);
+ }
+out:
+ rtl_ack_events(tp, status);
- rtl_irq_enable_all(tp);
+ return IRQ_HANDLED;
}
static void rtl_task(struct work_struct *work)
@@ -6509,8 +6495,6 @@ static void rtl_task(struct work_struct *work)
int bitnr;
void (*action)(struct rtl8169_private *);
} rtl_work[] = {
- /* XXX - keep rtl_slow_event_work() as first element. */
- { RTL_FLAG_TASK_SLOW_PENDING, rtl_slow_event_work },
{ RTL_FLAG_TASK_RESET_PENDING, rtl_reset_work },
};
struct rtl8169_private *tp =
@@ -6540,29 +6524,16 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
{
struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
struct net_device *dev = tp->dev;
- u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
- int work_done= 0;
- u16 status;
+ int work_done;
- status = rtl_get_events(tp);
- rtl_ack_events(tp, status & ~tp->event_slow);
+ work_done = rtl_rx(dev, tp, (u32) budget);
- if (status & RTL_EVENT_NAPI_RX)
- work_done = rtl_rx(dev, tp, (u32) budget);
-
- if (status & RTL_EVENT_NAPI_TX)
- rtl_tx(dev, tp);
-
- if (status & tp->event_slow) {
- enable_mask &= ~tp->event_slow;
-
- rtl_schedule_task(tp, RTL_FLAG_TASK_SLOW_PENDING);
- }
+ rtl_tx(dev, tp);
if (work_done < budget) {
napi_complete_done(napi, work_done);
- rtl_irq_enable(tp, enable_mask);
+ rtl_irq_enable_all(tp);
mmiowb();
}
@@ -6838,7 +6809,6 @@ static void rtl8169_net_suspend(struct net_device *dev)
phy_stop(dev->phydev);
netif_device_detach(dev);
- netif_stop_queue(dev);
rtl_lock_work(tp);
napi_disable(&tp->napi);
@@ -6856,8 +6826,10 @@ static int rtl8169_suspend(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
rtl8169_net_suspend(dev);
+ clk_disable_unprepare(tp->clk);
return 0;
}
@@ -6885,6 +6857,9 @@ static int rtl8169_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ clk_prepare_enable(tp->clk);
if (netif_running(dev))
__rtl8169_resume(dev);
@@ -7080,20 +7055,12 @@ static int rtl_alloc_irq(struct rtl8169_private *tp)
{
unsigned int flags;
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
+ if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
RTL_W8(tp, Cfg9346, Cfg9346_Lock);
flags = PCI_IRQ_LEGACY;
- break;
- case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_40:
- /* This version was reported to have issues with resume
- * from suspend when using MSI-X
- */
- flags = PCI_IRQ_LEGACY | PCI_IRQ_MSI;
- break;
- default:
+ } else {
flags = PCI_IRQ_ALL_TYPES;
}
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index aeafdb9ac015..beb06628f22d 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -371,7 +371,7 @@ static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info,
static struct rocker_desc_info *
rocker_desc_head_get(const struct rocker_dma_ring_info *info)
{
- static struct rocker_desc_info *desc_info;
+ struct rocker_desc_info *desc_info;
u32 head = __pos_inc(info->head, info->size);
desc_info = &info->desc_info[info->head];
@@ -402,7 +402,7 @@ static void rocker_desc_head_set(const struct rocker *rocker,
static struct rocker_desc_info *
rocker_desc_tail_get(struct rocker_dma_ring_info *info)
{
- static struct rocker_desc_info *desc_info;
+ struct rocker_desc_info *desc_info;
if (info->tail == info->head)
return NULL; /* nothing to be done between head and tail */
@@ -2728,6 +2728,7 @@ rocker_fdb_offload_notify(struct rocker_port *rocker_port,
info.addr = recv_info->addr;
info.vid = recv_info->vid;
+ info.offloaded = true;
call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
rocker_port->dev, &info.info);
}
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 330233286e78..98fe7e762e17 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -2208,29 +2208,6 @@ static void efx_fini_napi(struct efx_nic *efx)
/**************************************************************************
*
- * Kernel netpoll interface
- *
- *************************************************************************/
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-
-/* Although in the common case interrupts will be disabled, this is not
- * guaranteed. However, all our work happens inside the NAPI callback,
- * so no locking is required.
- */
-static void efx_netpoll(struct net_device *net_dev)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_channel *channel;
-
- efx_for_each_channel(channel, efx)
- efx_schedule_channel(channel);
-}
-
-#endif
-
-/**************************************************************************
- *
* Kernel net device interface
*
*************************************************************************/
@@ -2509,9 +2486,6 @@ static const struct net_device_ops efx_netdev_ops = {
#endif
.ndo_get_phys_port_id = efx_get_phys_port_id,
.ndo_get_phys_port_name = efx_get_phys_port_name,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = efx_netpoll,
-#endif
.ndo_setup_tc = efx_setup_tc,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = efx_filter_rfs,
@@ -3847,7 +3821,6 @@ static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
{
struct efx_nic *efx = pci_get_drvdata(pdev);
pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
- int rc;
if (pci_enable_device(pdev)) {
netif_err(efx, hw, efx->net_dev,
@@ -3855,13 +3828,6 @@ static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
status = PCI_ERS_RESULT_DISCONNECT;
}
- rc = pci_cleanup_aer_uncorrect_error_status(pdev);
- if (rc) {
- netif_err(efx, hw, efx->net_dev,
- "pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc);
- /* Non-fatal error. Continue. */
- }
-
return status;
}
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index dd5530a4f8c8..8b1f94d7a6c5 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -2054,29 +2054,6 @@ static void ef4_fini_napi(struct ef4_nic *efx)
/**************************************************************************
*
- * Kernel netpoll interface
- *
- *************************************************************************/
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-
-/* Although in the common case interrupts will be disabled, this is not
- * guaranteed. However, all our work happens inside the NAPI callback,
- * so no locking is required.
- */
-static void ef4_netpoll(struct net_device *net_dev)
-{
- struct ef4_nic *efx = netdev_priv(net_dev);
- struct ef4_channel *channel;
-
- ef4_for_each_channel(channel, efx)
- ef4_schedule_channel(channel);
-}
-
-#endif
-
-/**************************************************************************
- *
* Kernel net device interface
*
*************************************************************************/
@@ -2250,9 +2227,6 @@ static const struct net_device_ops ef4_netdev_ops = {
.ndo_set_mac_address = ef4_set_mac_address,
.ndo_set_rx_mode = ef4_set_rx_mode,
.ndo_set_features = ef4_set_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ef4_netpoll,
-#endif
.ndo_setup_tc = ef4_setup_tc,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = ef4_filter_rfs,
@@ -3186,7 +3160,6 @@ static pci_ers_result_t ef4_io_slot_reset(struct pci_dev *pdev)
{
struct ef4_nic *efx = pci_get_drvdata(pdev);
pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
- int rc;
if (pci_enable_device(pdev)) {
netif_err(efx, hw, efx->net_dev,
@@ -3194,13 +3167,6 @@ static pci_ers_result_t ef4_io_slot_reset(struct pci_dev *pdev)
status = PCI_ERS_RESULT_DISCONNECT;
}
- rc = pci_cleanup_aer_uncorrect_error_status(pdev);
- if (rc) {
- netif_err(efx, hw, efx->net_dev,
- "pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc);
- /* Non-fatal error. Continue. */
- }
-
return status;
}
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 8d6cff8bd162..4823b6a51134 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2447,8 +2447,7 @@ static int smc_drv_remove(struct platform_device *pdev)
static int smc_drv_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
if (ndev) {
if (netif_running(ndev)) {
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 7aa5ebb6766c..d9d0d03e4ce7 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -274,6 +274,7 @@ struct netsec_priv {
struct clk *clk;
u32 msg_enable;
u32 freq;
+ u32 phy_addr;
bool rx_cksum_offload_flag;
};
@@ -431,9 +432,12 @@ static int netsec_mac_update_to_phy_state(struct netsec_priv *priv)
return 0;
}
+static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr);
+
static int netsec_phy_write(struct mii_bus *bus,
int phy_addr, int reg, u16 val)
{
+ int status;
struct netsec_priv *priv = bus->priv;
if (netsec_mac_write(priv, GMAC_REG_GDR, val))
@@ -446,8 +450,19 @@ static int netsec_phy_write(struct mii_bus *bus,
GMAC_REG_SHIFT_CR_GAR)))
return -ETIMEDOUT;
- return netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
- NETSEC_GMAC_GAR_REG_GB);
+ status = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
+ NETSEC_GMAC_GAR_REG_GB);
+
+ /* The Developerbox board uses an RTL8211E PHY, which has a
+ * compatibility problem with F_GMAC4: the RTL8211E expects the
+ * MDC clock to keep toggling for several clock cycles with MDIO
+ * high before entering the IDLE state.
+ * To meet this requirement, the netsec driver issues a dummy read
+ * (e.g. of the PHYID1 register, offset 0x2) right after each write.
+ */
+ netsec_phy_read(bus, phy_addr, MII_PHYSID1);
+
+ return status;
}
static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr)
@@ -735,8 +750,11 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
u16 idx = dring->tail;
struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
- if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD))
+ if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) {
+ /* reading the register clears the irq */
+ netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT);
break;
+ }
/* This barrier is needed to keep us from reading
* any other fields out of the netsec_de until we have
@@ -937,6 +955,9 @@ static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
dring->head = 0;
dring->tail = 0;
dring->pkt_cnt = 0;
+
+ if (id == NETSEC_RING_TX)
+ netdev_reset_queue(priv->ndev);
}
static void netsec_free_dring(struct netsec_priv *priv, int id)
@@ -1340,11 +1361,11 @@ static int netsec_netdev_stop(struct net_device *ndev)
netsec_uninit_pkt_dring(priv, NETSEC_RING_TX);
netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
- ret = netsec_reset_hardware(priv, false);
-
phy_stop(ndev->phydev);
phy_disconnect(ndev->phydev);
+ ret = netsec_reset_hardware(priv, false);
+
pm_runtime_put_sync(priv->dev);
return ret;
@@ -1354,6 +1375,7 @@ static int netsec_netdev_init(struct net_device *ndev)
{
struct netsec_priv *priv = netdev_priv(ndev);
int ret;
+ u16 data;
ret = netsec_alloc_dring(priv, NETSEC_RING_TX);
if (ret)
@@ -1363,6 +1385,11 @@ static int netsec_netdev_init(struct net_device *ndev)
if (ret)
goto err1;
+ /* set phy power down */
+ data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR) |
+ BMCR_PDOWN;
+ netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data);
+
ret = netsec_reset_hardware(priv, true);
if (ret)
goto err2;
@@ -1412,7 +1439,7 @@ static const struct net_device_ops netsec_netdev_ops = {
};
static int netsec_of_probe(struct platform_device *pdev,
- struct netsec_priv *priv)
+ struct netsec_priv *priv, u32 *phy_addr)
{
priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
if (!priv->phy_np) {
@@ -1420,6 +1447,8 @@ static int netsec_of_probe(struct platform_device *pdev,
return -EINVAL;
}
+ *phy_addr = of_mdio_parse_addr(&pdev->dev, priv->phy_np);
+
priv->clk = devm_clk_get(&pdev->dev, NULL); /* get by 'phy_ref_clk' */
if (IS_ERR(priv->clk)) {
dev_err(&pdev->dev, "phy_ref_clk not found\n");
@@ -1620,12 +1649,14 @@ static int netsec_probe(struct platform_device *pdev)
}
if (dev_of_node(&pdev->dev))
- ret = netsec_of_probe(pdev, priv);
+ ret = netsec_of_probe(pdev, priv, &phy_addr);
else
ret = netsec_acpi_probe(pdev, priv, &phy_addr);
if (ret)
goto free_ndev;
+ priv->phy_addr = phy_addr;
+
if (!priv->freq) {
dev_err(&pdev->dev, "missing PHY reference clock frequency\n");
ret = -ENODEV;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index f9a61f90cfbc..0f660af01a4b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -714,8 +714,9 @@ static int get_ephy_nodes(struct stmmac_priv *priv)
return -ENODEV;
}
- mdio_internal = of_find_compatible_node(mdio_mux, NULL,
+ mdio_internal = of_get_compatible_child(mdio_mux,
"allwinner,sun8i-h3-mdio-internal");
+ of_node_put(mdio_mux);
if (!mdio_internal) {
dev_err(priv->device, "Cannot get internal_mdio node\n");
return -ENODEV;
@@ -729,13 +730,20 @@ static int get_ephy_nodes(struct stmmac_priv *priv)
gmac->rst_ephy = of_reset_control_get_exclusive(iphynode, NULL);
if (IS_ERR(gmac->rst_ephy)) {
ret = PTR_ERR(gmac->rst_ephy);
- if (ret == -EPROBE_DEFER)
+ if (ret == -EPROBE_DEFER) {
+ of_node_put(iphynode);
+ of_node_put(mdio_internal);
return ret;
+ }
continue;
}
dev_info(priv->device, "Found internal PHY node\n");
+ of_node_put(iphynode);
+ of_node_put(mdio_internal);
return 0;
}
+
+ of_node_put(mdio_internal);
return -ENODEV;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index a7ffc73fffe8..abc3f85270cd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -59,7 +59,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum,
- STMMAC_RING_MODE, 0, false, skb->len);
+ STMMAC_RING_MODE, 1, false, skb->len);
tx_q->tx_skbuff[entry] = NULL;
entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
@@ -91,7 +91,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
tx_q->tx_skbuff_dma[entry].is_jumbo = true;
desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum,
- STMMAC_RING_MODE, 0, true, skb->len);
+ STMMAC_RING_MODE, 1, true, skb->len);
}
tx_q->cur_tx = entry;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index b72ef171477e..bdd351597b55 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -243,7 +243,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
*/
int stmmac_mdio_reset(struct mii_bus *bus)
{
-#if defined(CONFIG_STMMAC_PLATFORM)
+#if IS_ENABLED(CONFIG_STMMAC_PLATFORM)
struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned int mii_address = priv->hw->mii.addr;
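For context (editor's sketch, not from the patch): with CONFIG_STMMAC_PLATFORM=m the preprocessor only defines CONFIG_STMMAC_PLATFORM_MODULE, so the old defined() test compiled the reset code out for modular builds, while IS_ENABLED() is true for both =y and =m. A minimal illustration with a hypothetical CONFIG_FOO symbol:

	#include <linux/kconfig.h>

	/* With CONFIG_FOO=m only CONFIG_FOO_MODULE is defined, so the defined()
	 * test below is false while IS_ENABLED(CONFIG_FOO) still evaluates to 1.
	 */
	#if defined(CONFIG_FOO)
	#define FOO_IF_BUILTIN			1	/* true for =y only */
	#else
	#define FOO_IF_BUILTIN			0
	#endif

	#define FOO_IF_BUILTIN_OR_MODULE	IS_ENABLED(CONFIG_FOO)	/* =y or =m */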
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 16dcbf36f8cc..500f7ed8c58c 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -570,16 +570,14 @@ static inline int cpsw_get_slave_port(u32 slave_num)
return slave_num + 1;
}
-static void cpsw_add_mcast(struct cpsw_priv *priv, u8 *addr)
+static void cpsw_add_mcast(struct cpsw_priv *priv, const u8 *addr)
{
struct cpsw_common *cpsw = priv->cpsw;
if (cpsw->data.dual_emac) {
struct cpsw_slave *slave = cpsw->slaves + priv->emac_port;
- int slave_port = cpsw_get_slave_port(slave->slave_num);
- cpsw_ale_add_mcast(cpsw->ale, addr,
- 1 << slave_port | ALE_PORT_HOST,
+ cpsw_ale_add_mcast(cpsw->ale, addr, ALE_PORT_HOST,
ALE_VLAN, slave->port_vlan, 0);
return;
}
@@ -642,6 +640,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
/* Clear all mcast from ALE */
cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1);
+ __dev_mc_unsync(ndev, NULL);
/* Flood All Unicast Packets to Host port */
cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
@@ -662,16 +661,35 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
}
}
-static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
+static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ cpsw_add_mcast(priv, addr);
+ return 0;
+}
+
+static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr)
{
struct cpsw_priv *priv = netdev_priv(ndev);
struct cpsw_common *cpsw = priv->cpsw;
- int vid;
+ int vid, flags;
- if (cpsw->data.dual_emac)
+ if (cpsw->data.dual_emac) {
vid = cpsw->slaves[priv->emac_port].port_vlan;
- else
- vid = cpsw->data.default_vlan;
+ flags = ALE_VLAN;
+ } else {
+ vid = 0;
+ flags = 0;
+ }
+
+ cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);
+ return 0;
+}
+
+static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
if (ndev->flags & IFF_PROMISC) {
/* Enable promiscuous mode */
@@ -684,19 +702,9 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
}
/* Restore allmulti on vlans if necessary */
- cpsw_ale_set_allmulti(cpsw->ale, priv->ndev->flags & IFF_ALLMULTI);
-
- /* Clear all mcast from ALE */
- cpsw_ale_flush_multicast(cpsw->ale, ALE_ALL_PORTS, vid);
+ cpsw_ale_set_allmulti(cpsw->ale, ndev->flags & IFF_ALLMULTI);
- if (!netdev_mc_empty(ndev)) {
- struct netdev_hw_addr *ha;
-
- /* program multicast address list into ALE register */
- netdev_for_each_mc_addr(ha, ndev) {
- cpsw_add_mcast(priv, ha->addr);
- }
- }
+ __dev_mc_sync(ndev, cpsw_add_mc_addr, cpsw_del_mc_addr);
}
static void cpsw_intr_enable(struct cpsw_common *cpsw)
@@ -1410,7 +1418,7 @@ static inline void cpsw_add_dual_emac_def_ale_entries(
cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask,
port_mask, port_mask, 0);
cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
- port_mask, ALE_VLAN, slave->port_vlan, 0);
+ ALE_PORT_HOST, ALE_VLAN, slave->port_vlan, 0);
cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
HOST_PORT_NUM, ALE_VLAN |
ALE_SECURE, slave->port_vlan);
@@ -1956,6 +1964,7 @@ static int cpsw_ndo_stop(struct net_device *ndev)
struct cpsw_common *cpsw = priv->cpsw;
cpsw_info(priv, ifdown, "shutting down cpsw device\n");
+ __dev_mc_unsync(priv->ndev, cpsw_del_mc_addr);
netif_tx_stop_all_queues(priv->ndev);
netif_carrier_off(priv->ndev);
@@ -2293,16 +2302,19 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
{
int ret;
int unreg_mcast_mask = 0;
+ int mcast_mask;
u32 port_mask;
struct cpsw_common *cpsw = priv->cpsw;
if (cpsw->data.dual_emac) {
port_mask = (1 << (priv->emac_port + 1)) | ALE_PORT_HOST;
+ mcast_mask = ALE_PORT_HOST;
if (priv->ndev->flags & IFF_ALLMULTI)
- unreg_mcast_mask = port_mask;
+ unreg_mcast_mask = mcast_mask;
} else {
port_mask = ALE_ALL_PORTS;
+ mcast_mask = port_mask;
if (priv->ndev->flags & IFF_ALLMULTI)
unreg_mcast_mask = ALE_ALL_PORTS;
@@ -2321,7 +2333,7 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
goto clean_vid;
ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
- port_mask, ALE_VLAN, vid, 0);
+ mcast_mask, ALE_VLAN, vid, 0);
if (ret != 0)
goto clean_vlan_ucast;
return 0;
@@ -3658,8 +3670,7 @@ static int cpsw_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int cpsw_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
if (cpsw->data.dual_emac) {
@@ -3682,8 +3693,7 @@ static int cpsw_suspend(struct device *dev)
static int cpsw_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
/* Select default pin state */
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 5766225a4ce1..798c989d5d93 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -136,7 +136,7 @@ static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr)
addr[i] = cpsw_ale_get_field(ale_entry, 40 - 8*i, 8);
}
-static inline void cpsw_ale_set_addr(u32 *ale_entry, u8 *addr)
+static inline void cpsw_ale_set_addr(u32 *ale_entry, const u8 *addr)
{
int i;
@@ -175,7 +175,7 @@ static int cpsw_ale_write(struct cpsw_ale *ale, int idx, u32 *ale_entry)
return idx;
}
-static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr, u16 vid)
+static int cpsw_ale_match_addr(struct cpsw_ale *ale, const u8 *addr, u16 vid)
{
u32 ale_entry[ALE_ENTRY_WORDS];
int type, idx;
@@ -309,7 +309,7 @@ static inline void cpsw_ale_set_vlan_entry_type(u32 *ale_entry,
}
}
-int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+int cpsw_ale_add_ucast(struct cpsw_ale *ale, const u8 *addr, int port,
int flags, u16 vid)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
@@ -336,7 +336,7 @@ int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
}
EXPORT_SYMBOL_GPL(cpsw_ale_add_ucast);
-int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+int cpsw_ale_del_ucast(struct cpsw_ale *ale, const u8 *addr, int port,
int flags, u16 vid)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
@@ -352,7 +352,7 @@ int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
}
EXPORT_SYMBOL_GPL(cpsw_ale_del_ucast);
-int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
+int cpsw_ale_add_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
int flags, u16 vid, int mcast_state)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
@@ -386,7 +386,7 @@ int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
}
EXPORT_SYMBOL_GPL(cpsw_ale_add_mcast);
-int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
+int cpsw_ale_del_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
int flags, u16 vid)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index d4fe9016429b..cd07a3e96d57 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -105,13 +105,13 @@ void cpsw_ale_start(struct cpsw_ale *ale);
void cpsw_ale_stop(struct cpsw_ale *ale);
int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid);
-int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+int cpsw_ale_add_ucast(struct cpsw_ale *ale, const u8 *addr, int port,
int flags, u16 vid);
-int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+int cpsw_ale_del_ucast(struct cpsw_ale *ale, const u8 *addr, int port,
int flags, u16 vid);
-int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
+int cpsw_ale_add_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
int flags, u16 vid, int mcast_state);
-int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
+int cpsw_ale_del_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
int flags, u16 vid);
int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
int reg_mcast, int unreg_mcast);
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index f270beebb428..9153db120352 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -2002,8 +2002,7 @@ static int davinci_emac_remove(struct platform_device *pdev)
static int davinci_emac_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
if (netif_running(ndev))
emac_dev_stop(ndev);
@@ -2013,8 +2012,7 @@ static int davinci_emac_suspend(struct device *dev)
static int davinci_emac_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
if (netif_running(ndev))
emac_dev_open(ndev);
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 80fdbff67d82..f9da5d6172e3 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -661,8 +661,7 @@ static int w5300_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int w5300_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
struct w5300_priv *priv = netdev_priv(ndev);
if (netif_running(ndev)) {
@@ -676,8 +675,7 @@ static int w5300_suspend(struct device *dev)
static int w5300_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
struct w5300_priv *priv = netdev_priv(ndev);
if (!netif_running(ndev)) {
diff --git a/drivers/net/fddi/Kconfig b/drivers/net/fddi/Kconfig
index 3a424c864f4d..d62e8c6205f7 100644
--- a/drivers/net/fddi/Kconfig
+++ b/drivers/net/fddi/Kconfig
@@ -15,6 +15,17 @@ config FDDI
if FDDI
+config DEFZA
+ tristate "DEC FDDIcontroller 700/700-C (DEFZA-xx) support"
+ depends on FDDI && TC
+ help
+ This is support for the DEC FDDIcontroller 700 (DEFZA-AA, fiber)
+ and 700-C (DEFZA-CA, copper) TURBOchannel network cards which
+ can connect you to a local FDDI network.
+
+ To compile this driver as a module, choose M here: the module
+ will be called defza. If unsure, say N.
+
config DEFXX
tristate "Digital DEFTA/DEFEA/DEFPA adapter support"
depends on FDDI && (PCI || EISA || TC)
diff --git a/drivers/net/fddi/Makefile b/drivers/net/fddi/Makefile
index 36da19c9a8aa..194b52cc20b0 100644
--- a/drivers/net/fddi/Makefile
+++ b/drivers/net/fddi/Makefile
@@ -3,4 +3,5 @@
#
obj-$(CONFIG_DEFXX) += defxx.o
+obj-$(CONFIG_DEFZA) += defza.o
obj-$(CONFIG_SKFP) += skfp/
diff --git a/drivers/net/fddi/defza.c b/drivers/net/fddi/defza.c
new file mode 100644
index 000000000000..3b7f10a5f06a
--- /dev/null
+++ b/drivers/net/fddi/defza.c
@@ -0,0 +1,1564 @@
+// SPDX-License-Identifier: GPL-2.0
+/* FDDI network adapter driver for DEC FDDIcontroller 700/700-C devices.
+ *
+ * Copyright (c) 2018 Maciej W. Rozycki
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * References:
+ *
+ * Dave Sawyer & Phil Weeks & Frank Itkowsky,
+ * "DEC FDDIcontroller 700 Port Specification",
+ * Revision 1.1, Digital Equipment Corporation
+ */
+
+/* ------------------------------------------------------------------------- */
+/* FZA configurable parameters. */
+
+/* Selects the number of transmit ring descriptors: 0 for 512, 1 for 1024. */
+#define FZA_RING_TX_MODE 0
+
+/* The number of receive ring descriptors; from 2 up to 256. */
+#define FZA_RING_RX_SIZE 256
+
+/* End of FZA configurable parameters. No need to change anything below. */
+/* ------------------------------------------------------------------------- */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/fddidevice.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/stat.h>
+#include <linux/tc.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include <asm/barrier.h>
+
+#include "defza.h"
+
+#define DRV_NAME "defza"
+#define DRV_VERSION "v.1.1.4"
+#define DRV_RELDATE "Oct 6 2018"
+
+static char version[] =
+ DRV_NAME ": " DRV_VERSION " " DRV_RELDATE " Maciej W. Rozycki\n";
+
+MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>");
+MODULE_DESCRIPTION("DEC FDDIcontroller 700 (DEFZA-xx) driver");
+MODULE_LICENSE("GPL");
+
+static int loopback;
+module_param(loopback, int, 0644);
+
+/* Ring Purger Multicast */
+static u8 hw_addr_purger[8] = { 0x09, 0x00, 0x2b, 0x02, 0x01, 0x05 };
+/* Directed Beacon Multicast */
+static u8 hw_addr_beacon[8] = { 0x01, 0x80, 0xc2, 0x00, 0x01, 0x00 };
+
+/* Shorthands for MMIO accesses that we require to be strongly ordered
+ * WRT preceding MMIO accesses.
+ */
+#define readw_o readw_relaxed
+#define readl_o readl_relaxed
+
+#define writew_o writew_relaxed
+#define writel_o writel_relaxed
+
+/* Shorthands for MMIO accesses that we are happy with being weakly ordered
+ * WRT preceding MMIO accesses.
+ */
+#define readw_u readw_relaxed
+#define readl_u readl_relaxed
+#define readq_u readq_relaxed
+
+#define writew_u writew_relaxed
+#define writel_u writel_relaxed
+#define writeq_u writeq_relaxed
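The ordered accessors matter wherever a register write must reach the board before the driver proceeds; throughout the file such a write is followed by a read-back of the same register. A minimal sketch of that idiom (the helper name is illustrative, not part of the driver):

static inline void fza_sync_writew(struct fza_regs __iomem *regs, u16 val)
{
	writew_o(val, &regs->int_mask);	/* ordered write to the board */
	readw_o(&regs->int_mask);	/* read back to synchronize */
}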
+
+static inline struct sk_buff *fza_alloc_skb_irq(struct net_device *dev,
+ unsigned int length)
+{
+ return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
+}
+
+static inline struct sk_buff *fza_alloc_skb(struct net_device *dev,
+ unsigned int length)
+{
+ return __netdev_alloc_skb(dev, length, GFP_KERNEL);
+}
+
+static inline void fza_skb_align(struct sk_buff *skb, unsigned int v)
+{
+ unsigned long x, y;
+
+ x = (unsigned long)skb->data;
+ y = ALIGN(x, v);
+
+ skb_reserve(skb, y - x);
+}
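As used in fza_open() and fza_rx(), the receive path over-allocates each skb by 511 bytes and then aligns the data pointer; a short usage sketch with a hypothetical starting address:

	skb = fza_alloc_skb(dev, FZA_RX_BUFFER_SIZE + 511);
	if (skb)
		fza_skb_align(skb, 512);
	/* Hypothetical numbers: skb->data = 0x100206,
	 * ALIGN(0x100206, 512) = 0x100400, so skb_reserve()
	 * advances the data pointer by 0x1fa bytes.
	 */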
+
+static inline void fza_reads(const void __iomem *from, void *to,
+ unsigned long size)
+{
+ if (sizeof(unsigned long) == 8) {
+ const u64 __iomem *src = from;
+ const u32 __iomem *src_trail;
+ u64 *dst = to;
+ u32 *dst_trail;
+
+ for (size = (size + 3) / 4; size > 1; size -= 2)
+ *dst++ = readq_u(src++);
+ if (size) {
+ src_trail = (u32 __iomem *)src;
+ dst_trail = (u32 *)dst;
+ *dst_trail = readl_u(src_trail);
+ }
+ } else {
+ const u32 __iomem *src = from;
+ u32 *dst = to;
+
+ for (size = (size + 3) / 4; size; size--)
+ *dst++ = readl_u(src++);
+ }
+}
+
+static inline void fza_writes(const void *from, void __iomem *to,
+ unsigned long size)
+{
+ if (sizeof(unsigned long) == 8) {
+ const u64 *src = from;
+ const u32 *src_trail;
+ u64 __iomem *dst = to;
+ u32 __iomem *dst_trail;
+
+ for (size = (size + 3) / 4; size > 1; size -= 2)
+ writeq_u(*src++, dst++);
+ if (size) {
+ src_trail = (u32 *)src;
+ dst_trail = (u32 __iomem *)dst;
+ writel_u(*src_trail, dst_trail);
+ }
+ } else {
+ const u32 *src = from;
+ u32 __iomem *dst = to;
+
+ for (size = (size + 3) / 4; size; size--)
+ writel_u(*src++, dst++);
+ }
+}
+
+static inline void fza_moves(const void __iomem *from, void __iomem *to,
+ unsigned long size)
+{
+ if (sizeof(unsigned long) == 8) {
+ const u64 __iomem *src = from;
+ const u32 __iomem *src_trail;
+ u64 __iomem *dst = to;
+ u32 __iomem *dst_trail;
+
+ for (size = (size + 3) / 4; size > 1; size -= 2)
+ writeq_u(readq_u(src++), dst++);
+ if (size) {
+ src_trail = (u32 __iomem *)src;
+ dst_trail = (u32 __iomem *)dst;
+ writel_u(readl_u(src_trail), dst_trail);
+ }
+ } else {
+ const u32 __iomem *src = from;
+ u32 __iomem *dst = to;
+
+ for (size = (size + 3) / 4; size; size--)
+ writel_u(readl_u(src++), dst++);
+ }
+}
+
+static inline void fza_zeros(void __iomem *to, unsigned long size)
+{
+ if (sizeof(unsigned long) == 8) {
+ u64 __iomem *dst = to;
+ u32 __iomem *dst_trail;
+
+ for (size = (size + 3) / 4; size > 1; size -= 2)
+ writeq_u(0, dst++);
+ if (size) {
+ dst_trail = (u32 __iomem *)dst;
+ writel_u(0, dst_trail);
+ }
+ } else {
+ u32 __iomem *dst = to;
+
+ for (size = (size + 3) / 4; size; size--)
+ writel_u(0, dst++);
+ }
+}
+
+static inline void fza_regs_dump(struct fza_private *fp)
+{
+ pr_debug("%s: iomem registers:\n", fp->name);
+ pr_debug(" reset: 0x%04x\n", readw_o(&fp->regs->reset));
+ pr_debug(" interrupt event: 0x%04x\n", readw_u(&fp->regs->int_event));
+ pr_debug(" status: 0x%04x\n", readw_u(&fp->regs->status));
+ pr_debug(" interrupt mask: 0x%04x\n", readw_u(&fp->regs->int_mask));
+ pr_debug(" control A: 0x%04x\n", readw_u(&fp->regs->control_a));
+ pr_debug(" control B: 0x%04x\n", readw_u(&fp->regs->control_b));
+}
+
+static inline void fza_do_reset(struct fza_private *fp)
+{
+ /* Reset the board. */
+ writew_o(FZA_RESET_INIT, &fp->regs->reset);
+ readw_o(&fp->regs->reset); /* Synchronize. */
+ readw_o(&fp->regs->reset); /* Read it back for a small delay. */
+ writew_o(FZA_RESET_CLR, &fp->regs->reset);
+
+ /* Enable all interrupt events we handle. */
+ writew_o(fp->int_mask, &fp->regs->int_mask);
+ readw_o(&fp->regs->int_mask); /* Synchronize. */
+}
+
+static inline void fza_do_shutdown(struct fza_private *fp)
+{
+ /* Disable the driver mode. */
+ writew_o(FZA_CONTROL_B_IDLE, &fp->regs->control_b);
+
+ /* And reset the board. */
+ writew_o(FZA_RESET_INIT, &fp->regs->reset);
+ readw_o(&fp->regs->reset); /* Synchronize. */
+ writew_o(FZA_RESET_CLR, &fp->regs->reset);
+ readw_o(&fp->regs->reset); /* Synchronize. */
+}
+
+static int fza_reset(struct fza_private *fp)
+{
+ unsigned long flags;
+ uint status, state;
+ long t;
+
+ pr_info("%s: resetting the board...\n", fp->name);
+
+ spin_lock_irqsave(&fp->lock, flags);
+ fp->state_chg_flag = 0;
+ fza_do_reset(fp);
+ spin_unlock_irqrestore(&fp->lock, flags);
+
+ /* DEC says RESET needs up to 30 seconds to complete. My DEFZA-AA
+ * rev. C03 happily finishes in 9.7 seconds. :-) But we need to
+ * be on the safe side...
+ */
+ t = wait_event_timeout(fp->state_chg_wait, fp->state_chg_flag,
+ 45 * HZ);
+ status = readw_u(&fp->regs->status);
+ state = FZA_STATUS_GET_STATE(status);
+ if (fp->state_chg_flag == 0) {
+ pr_err("%s: RESET timed out!, state %x\n", fp->name, state);
+ return -EIO;
+ }
+ if (state != FZA_STATE_UNINITIALIZED) {
+ pr_err("%s: RESET failed!, state %x, failure ID %x\n",
+ fp->name, state, FZA_STATUS_GET_TEST(status));
+ return -EIO;
+ }
+ pr_info("%s: OK\n", fp->name);
+ pr_debug("%s: RESET: %lums elapsed\n", fp->name,
+ (45 * HZ - t) * 1000 / HZ);
+
+ return 0;
+}
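For reference, the elapsed-time debug line converts the jiffies left over from wait_event_timeout() back to milliseconds; with hypothetical numbers:

/* HZ = 100 (hypothetical), timeout = 45 * HZ = 4500 jiffies;
 * wait_event_timeout() returns t = 3530 jiffies remaining, so
 * (45 * HZ - t) * 1000 / HZ = 970 * 1000 / 100 = 9700 ms (~9.7 s).
 */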
+
+static struct fza_ring_cmd __iomem *fza_cmd_send(struct net_device *dev,
+ int command)
+{
+ struct fza_private *fp = netdev_priv(dev);
+ struct fza_ring_cmd __iomem *ring = fp->ring_cmd + fp->ring_cmd_index;
+ unsigned int old_mask, new_mask;
+ union fza_cmd_buf __iomem *buf;
+ struct netdev_hw_addr *ha;
+ int i;
+
+ old_mask = fp->int_mask;
+ new_mask = old_mask & ~FZA_MASK_STATE_CHG;
+ writew_u(new_mask, &fp->regs->int_mask);
+ readw_o(&fp->regs->int_mask); /* Synchronize. */
+ fp->int_mask = new_mask;
+
+ buf = fp->mmio + readl_u(&ring->buffer);
+
+ if ((readl_u(&ring->cmd_own) & FZA_RING_OWN_MASK) !=
+ FZA_RING_OWN_HOST) {
+ pr_warn("%s: command buffer full, command: %u!\n", fp->name,
+ command);
+ return NULL;
+ }
+
+ switch (command) {
+ case FZA_RING_CMD_INIT:
+ writel_u(FZA_RING_TX_MODE, &buf->init.tx_mode);
+ writel_u(FZA_RING_RX_SIZE, &buf->init.hst_rx_size);
+ fza_zeros(&buf->init.counters, sizeof(buf->init.counters));
+ break;
+
+ case FZA_RING_CMD_MODCAM:
+ i = 0;
+ fza_writes(&hw_addr_purger, &buf->cam.hw_addr[i++],
+ sizeof(*buf->cam.hw_addr));
+ fza_writes(&hw_addr_beacon, &buf->cam.hw_addr[i++],
+ sizeof(*buf->cam.hw_addr));
+ netdev_for_each_mc_addr(ha, dev) {
+ if (i >= FZA_CMD_CAM_SIZE)
+ break;
+ fza_writes(ha->addr, &buf->cam.hw_addr[i++],
+ sizeof(*buf->cam.hw_addr));
+ }
+ while (i < FZA_CMD_CAM_SIZE)
+ fza_zeros(&buf->cam.hw_addr[i++],
+ sizeof(*buf->cam.hw_addr));
+ break;
+
+ case FZA_RING_CMD_PARAM:
+ writel_u(loopback, &buf->param.loop_mode);
+ writel_u(fp->t_max, &buf->param.t_max);
+ writel_u(fp->t_req, &buf->param.t_req);
+ writel_u(fp->tvx, &buf->param.tvx);
+ writel_u(fp->lem_threshold, &buf->param.lem_threshold);
+ fza_writes(&fp->station_id, &buf->param.station_id,
+ sizeof(buf->param.station_id));
+ /* Convert to milliseconds due to buggy firmware. */
+ writel_u(fp->rtoken_timeout / 12500,
+ &buf->param.rtoken_timeout);
+ writel_u(fp->ring_purger, &buf->param.ring_purger);
+ break;
+
+ case FZA_RING_CMD_MODPROM:
+ if (dev->flags & IFF_PROMISC) {
+ writel_u(1, &buf->modprom.llc_prom);
+ writel_u(1, &buf->modprom.smt_prom);
+ } else {
+ writel_u(0, &buf->modprom.llc_prom);
+ writel_u(0, &buf->modprom.smt_prom);
+ }
+ if (dev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(dev) > FZA_CMD_CAM_SIZE - 2)
+ writel_u(1, &buf->modprom.llc_multi);
+ else
+ writel_u(0, &buf->modprom.llc_multi);
+ writel_u(1, &buf->modprom.llc_bcast);
+ break;
+ }
+
+ /* Trigger the command. */
+ writel_u(FZA_RING_OWN_FZA | command, &ring->cmd_own);
+ writew_o(FZA_CONTROL_A_CMD_POLL, &fp->regs->control_a);
+
+ fp->ring_cmd_index = (fp->ring_cmd_index + 1) % FZA_RING_CMD_SIZE;
+
+ fp->int_mask = old_mask;
+ writew_u(fp->int_mask, &fp->regs->int_mask);
+
+ return ring;
+}
+
+static int fza_init_send(struct net_device *dev,
+ struct fza_cmd_init *__iomem *init)
+{
+ struct fza_private *fp = netdev_priv(dev);
+ struct fza_ring_cmd __iomem *ring;
+ unsigned long flags;
+ u32 stat;
+ long t;
+
+ spin_lock_irqsave(&fp->lock, flags);
+ fp->cmd_done_flag = 0;
+ ring = fza_cmd_send(dev, FZA_RING_CMD_INIT);
+ spin_unlock_irqrestore(&fp->lock, flags);
+ if (!ring)
+ /* This should never happen in the uninitialized state,
+ * so do not try to recover and just consider it fatal.
+ */
+ return -ENOBUFS;
+
+ /* INIT may take quite a long time (160ms for my C03). */
+ t = wait_event_timeout(fp->cmd_done_wait, fp->cmd_done_flag, 3 * HZ);
+ if (fp->cmd_done_flag == 0) {
+ pr_err("%s: INIT command timed out!, state %x\n", fp->name,
+ FZA_STATUS_GET_STATE(readw_u(&fp->regs->status)));
+ return -EIO;
+ }
+ stat = readl_u(&ring->stat);
+ if (stat != FZA_RING_STAT_SUCCESS) {
+ pr_err("%s: INIT command failed!, status %02x, state %x\n",
+ fp->name, stat,
+ FZA_STATUS_GET_STATE(readw_u(&fp->regs->status)));
+ return -EIO;
+ }
+ pr_debug("%s: INIT: %lums elapsed\n", fp->name,
+ (3 * HZ - t) * 1000 / HZ);
+
+ if (init)
+ *init = fp->mmio + readl_u(&ring->buffer);
+ return 0;
+}
+
+static void fza_rx_init(struct fza_private *fp)
+{
+ int i;
+
+ /* Fill the host receive descriptor ring. */
+ for (i = 0; i < FZA_RING_RX_SIZE; i++) {
+ writel_o(0, &fp->ring_hst_rx[i].rmc);
+ writel_o((fp->rx_dma[i] + 0x1000) >> 9,
+ &fp->ring_hst_rx[i].buffer1);
+ writel_o(fp->rx_dma[i] >> 9 | FZA_RING_OWN_FZA,
+ &fp->ring_hst_rx[i].buf0_own);
+ }
+}
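Each host RX descriptor encodes two 512-byte-aligned buffer halves as right-shifted-by-9 addresses (see struct fza_ring_hst_rx in defza.h); with a hypothetical DMA address the fill above works out as:

/* dma = 0x00123400 (hypothetical, 512-byte aligned):
 *   buffer1  = (dma + 0x1000) >> 9          = 0x00000922
 *   buf0_own = dma >> 9 | FZA_RING_OWN_FZA  = 0x0000091a
 * i.e. the descriptor is handed back to the FZA with both halves set.
 */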
+
+static void fza_set_rx_mode(struct net_device *dev)
+{
+ fza_cmd_send(dev, FZA_RING_CMD_MODCAM);
+ fza_cmd_send(dev, FZA_RING_CMD_MODPROM);
+}
+
+union fza_buffer_txp {
+ struct fza_buffer_tx *data_ptr;
+ struct fza_buffer_tx __iomem *mmio_ptr;
+};
+
+static int fza_do_xmit(union fza_buffer_txp ub, int len,
+ struct net_device *dev, int smt)
+{
+ struct fza_private *fp = netdev_priv(dev);
+ struct fza_buffer_tx __iomem *rmc_tx_ptr;
+ int i, first, frag_len, left_len;
+ u32 own, rmc;
+
+ if (((((fp->ring_rmc_txd_index - 1 + fp->ring_rmc_tx_size) -
+ fp->ring_rmc_tx_index) % fp->ring_rmc_tx_size) *
+ FZA_TX_BUFFER_SIZE) < len)
+ return 1;
+
+ first = fp->ring_rmc_tx_index;
+
+ left_len = len;
+ frag_len = FZA_TX_BUFFER_SIZE;
+ /* First descriptor is relinquished last. */
+ own = FZA_RING_TX_OWN_HOST;
+ /* First descriptor carries frame length; we don't use cut-through. */
+ rmc = FZA_RING_TX_SOP | FZA_RING_TX_VBC | len;
+ do {
+ i = fp->ring_rmc_tx_index;
+ rmc_tx_ptr = &fp->buffer_tx[i];
+
+ if (left_len < FZA_TX_BUFFER_SIZE)
+ frag_len = left_len;
+ left_len -= frag_len;
+
+ /* Length must be a multiple of 4 as only word writes are
+ * permitted!
+ */
+ frag_len = (frag_len + 3) & ~3;
+ if (smt)
+ fza_moves(ub.mmio_ptr, rmc_tx_ptr, frag_len);
+ else
+ fza_writes(ub.data_ptr, rmc_tx_ptr, frag_len);
+
+ if (left_len == 0)
+ rmc |= FZA_RING_TX_EOP; /* Mark last frag. */
+
+ writel_o(rmc, &fp->ring_rmc_tx[i].rmc);
+ writel_o(own, &fp->ring_rmc_tx[i].own);
+
+ ub.data_ptr++;
+ fp->ring_rmc_tx_index = (fp->ring_rmc_tx_index + 1) %
+ fp->ring_rmc_tx_size;
+
+ /* Settings for intermediate frags. */
+ own = FZA_RING_TX_OWN_RMC;
+ rmc = 0;
+ } while (left_len > 0);
+
+ if (((((fp->ring_rmc_txd_index - 1 + fp->ring_rmc_tx_size) -
+ fp->ring_rmc_tx_index) % fp->ring_rmc_tx_size) *
+ FZA_TX_BUFFER_SIZE) < dev->mtu + dev->hard_header_len) {
+ netif_stop_queue(dev);
+ pr_debug("%s: queue stopped\n", fp->name);
+ }
+
+ writel_o(FZA_RING_TX_OWN_RMC, &fp->ring_rmc_tx[first].own);
+
+ /* Go, go, go! */
+ writew_o(FZA_CONTROL_A_TX_POLL, &fp->regs->control_a);
+
+ return 0;
+}
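The admission check treats the RMC transmit ring as a byte-granular FIFO: free descriptors are counted from the producer index back to one before the reclaim index, and each descriptor fronts a 512-byte buffer. A worked example with hypothetical indices:

/* ring_rmc_tx_size = 512, ring_rmc_tx_index = 100 (producer),
 * ring_rmc_txd_index = 10 (reclaim):
 *   free descriptors = ((10 - 1 + 512) - 100) % 512 = 421
 *   free bytes       = 421 * FZA_TX_BUFFER_SIZE = 215552
 * so a frame with len <= 215552 is accepted, otherwise 1 is returned.
 */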
+
+static int fza_do_recv_smt(struct fza_buffer_tx *data_ptr, int len,
+ u32 rmc, struct net_device *dev)
+{
+ struct fza_private *fp = netdev_priv(dev);
+ struct fza_buffer_tx __iomem *smt_rx_ptr;
+ u32 own;
+ int i;
+
+ i = fp->ring_smt_rx_index;
+ own = readl_o(&fp->ring_smt_rx[i].own);
+ if ((own & FZA_RING_OWN_MASK) == FZA_RING_OWN_FZA)
+ return 1;
+
+ smt_rx_ptr = fp->mmio + readl_u(&fp->ring_smt_rx[i].buffer);
+
+ /* Length must be a multiple of 4 as only word writes are permitted! */
+ fza_writes(data_ptr, smt_rx_ptr, (len + 3) & ~3);
+
+ writel_o(rmc, &fp->ring_smt_rx[i].rmc);
+ writel_o(FZA_RING_OWN_FZA, &fp->ring_smt_rx[i].own);
+
+ fp->ring_smt_rx_index =
+ (fp->ring_smt_rx_index + 1) % fp->ring_smt_rx_size;
+
+ /* Grab it! */
+ writew_o(FZA_CONTROL_A_SMT_RX_POLL, &fp->regs->control_a);
+
+ return 0;
+}
+
+static void fza_tx(struct net_device *dev)
+{
+ struct fza_private *fp = netdev_priv(dev);
+ u32 own, rmc;
+ int i;
+
+ while (1) {
+ i = fp->ring_rmc_txd_index;
+ if (i == fp->ring_rmc_tx_index)
+ break;
+ own = readl_o(&fp->ring_rmc_tx[i].own);
+ if ((own & FZA_RING_OWN_MASK) == FZA_RING_TX_OWN_RMC)
+ break;
+
+ rmc = readl_u(&fp->ring_rmc_tx[i].rmc);
+ /* Only process the first descriptor. */
+ if ((rmc & FZA_RING_TX_SOP) != 0) {
+ if ((rmc & FZA_RING_TX_DCC_MASK) ==
+ FZA_RING_TX_DCC_SUCCESS) {
+ int pkt_len = (rmc & FZA_RING_PBC_MASK) - 3;
+ /* Omit PRH. */
+
+ fp->stats.tx_packets++;
+ fp->stats.tx_bytes += pkt_len;
+ } else {
+ fp->stats.tx_errors++;
+ switch (rmc & FZA_RING_TX_DCC_MASK) {
+ case FZA_RING_TX_DCC_DTP_SOP:
+ case FZA_RING_TX_DCC_DTP:
+ case FZA_RING_TX_DCC_ABORT:
+ fp->stats.tx_aborted_errors++;
+ break;
+ case FZA_RING_TX_DCC_UNDRRUN:
+ fp->stats.tx_fifo_errors++;
+ break;
+ case FZA_RING_TX_DCC_PARITY:
+ default:
+ break;
+ }
+ }
+ }
+
+ fp->ring_rmc_txd_index = (fp->ring_rmc_txd_index + 1) %
+ fp->ring_rmc_tx_size;
+ }
+
+ if (((((fp->ring_rmc_txd_index - 1 + fp->ring_rmc_tx_size) -
+ fp->ring_rmc_tx_index) % fp->ring_rmc_tx_size) *
+ FZA_TX_BUFFER_SIZE) >= dev->mtu + dev->hard_header_len) {
+ if (fp->queue_active) {
+ netif_wake_queue(dev);
+ pr_debug("%s: queue woken\n", fp->name);
+ }
+ }
+}
+
+static inline int fza_rx_err(struct fza_private *fp,
+ const u32 rmc, const u8 fc)
+{
+ int len, min_len, max_len;
+
+ len = rmc & FZA_RING_PBC_MASK;
+
+ if (unlikely((rmc & FZA_RING_RX_BAD) != 0)) {
+ fp->stats.rx_errors++;
+
+ /* Check special status codes. */
+ if ((rmc & (FZA_RING_RX_CRC | FZA_RING_RX_RRR_MASK |
+ FZA_RING_RX_DA_MASK | FZA_RING_RX_SA_MASK)) ==
+ (FZA_RING_RX_CRC | FZA_RING_RX_RRR_DADDR |
+ FZA_RING_RX_DA_CAM | FZA_RING_RX_SA_ALIAS)) {
+ if (len >= 8190)
+ fp->stats.rx_length_errors++;
+ return 1;
+ }
+ if ((rmc & (FZA_RING_RX_CRC | FZA_RING_RX_RRR_MASK |
+ FZA_RING_RX_DA_MASK | FZA_RING_RX_SA_MASK)) ==
+ (FZA_RING_RX_CRC | FZA_RING_RX_RRR_DADDR |
+ FZA_RING_RX_DA_CAM | FZA_RING_RX_SA_CAM)) {
+ /* Halt the interface to trigger a reset. */
+ writew_o(FZA_CONTROL_A_HALT, &fp->regs->control_a);
+ readw_o(&fp->regs->control_a); /* Synchronize. */
+ return 1;
+ }
+
+ /* Check the MAC status. */
+ switch (rmc & FZA_RING_RX_RRR_MASK) {
+ case FZA_RING_RX_RRR_OK:
+ if ((rmc & FZA_RING_RX_CRC) != 0)
+ fp->stats.rx_crc_errors++;
+ else if ((rmc & FZA_RING_RX_FSC_MASK) == 0 ||
+ (rmc & FZA_RING_RX_FSB_ERR) != 0)
+ fp->stats.rx_frame_errors++;
+ return 1;
+ case FZA_RING_RX_RRR_SADDR:
+ case FZA_RING_RX_RRR_DADDR:
+ case FZA_RING_RX_RRR_ABORT:
+ /* Halt the interface to trigger a reset. */
+ writew_o(FZA_CONTROL_A_HALT, &fp->regs->control_a);
+ readw_o(&fp->regs->control_a); /* Synchronize. */
+ return 1;
+ case FZA_RING_RX_RRR_LENGTH:
+ fp->stats.rx_frame_errors++;
+ return 1;
+ default:
+ return 1;
+ }
+ }
+
+ /* Packet received successfully; validate the length. */
+ switch (fc & FDDI_FC_K_FORMAT_MASK) {
+ case FDDI_FC_K_FORMAT_MANAGEMENT:
+ if ((fc & FDDI_FC_K_CLASS_MASK) == FDDI_FC_K_CLASS_ASYNC)
+ min_len = 37;
+ else
+ min_len = 17;
+ break;
+ case FDDI_FC_K_FORMAT_LLC:
+ min_len = 20;
+ break;
+ default:
+ min_len = 17;
+ break;
+ }
+ max_len = 4495;
+ if (len < min_len || len > max_len) {
+ fp->stats.rx_errors++;
+ fp->stats.rx_length_errors++;
+ return 1;
+ }
+
+ return 0;
+}
+
+static void fza_rx(struct net_device *dev)
+{
+ struct fza_private *fp = netdev_priv(dev);
+ struct sk_buff *skb, *newskb;
+ struct fza_fddihdr *frame;
+ dma_addr_t dma, newdma;
+ u32 own, rmc, buf;
+ int i, len;
+ u8 fc;
+
+ while (1) {
+ i = fp->ring_hst_rx_index;
+ own = readl_o(&fp->ring_hst_rx[i].buf0_own);
+ if ((own & FZA_RING_OWN_MASK) == FZA_RING_OWN_FZA)
+ break;
+
+ rmc = readl_u(&fp->ring_hst_rx[i].rmc);
+ skb = fp->rx_skbuff[i];
+ dma = fp->rx_dma[i];
+
+ /* The RMC doesn't count the preamble and the starting
+ * delimiter. We fix it up here for a total of 3 octets.
+ */
+ dma_rmb();
+ len = (rmc & FZA_RING_PBC_MASK) + 3;
+ frame = (struct fza_fddihdr *)skb->data;
+
+ /* We need to get at real FC. */
+ dma_sync_single_for_cpu(fp->bdev,
+ dma +
+ ((u8 *)&frame->hdr.fc - (u8 *)frame),
+ sizeof(frame->hdr.fc),
+ DMA_FROM_DEVICE);
+ fc = frame->hdr.fc;
+
+ if (fza_rx_err(fp, rmc, fc))
+ goto err_rx;
+
+ /* We have to 512-byte-align RX buffers... */
+ newskb = fza_alloc_skb_irq(dev, FZA_RX_BUFFER_SIZE + 511);
+ if (newskb) {
+ fza_skb_align(newskb, 512);
+ newdma = dma_map_single(fp->bdev, newskb->data,
+ FZA_RX_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(fp->bdev, newdma)) {
+ dev_kfree_skb_irq(newskb);
+ newskb = NULL;
+ }
+ }
+ if (newskb) {
+ int pkt_len = len - 7; /* Omit P, SD and FCS. */
+ int is_multi;
+ int rx_stat;
+
+ dma_unmap_single(fp->bdev, dma, FZA_RX_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+
+ /* Queue SMT frames to the SMT receive ring. */
+ if ((fc & (FDDI_FC_K_CLASS_MASK |
+ FDDI_FC_K_FORMAT_MASK)) ==
+ (FDDI_FC_K_CLASS_ASYNC |
+ FDDI_FC_K_FORMAT_MANAGEMENT) &&
+ (rmc & FZA_RING_RX_DA_MASK) !=
+ FZA_RING_RX_DA_PROM) {
+ if (fza_do_recv_smt((struct fza_buffer_tx *)
+ skb->data, len, rmc,
+ dev)) {
+ writel_o(FZA_CONTROL_A_SMT_RX_OVFL,
+ &fp->regs->control_a);
+ }
+ }
+
+ is_multi = ((frame->hdr.daddr[0] & 0x01) != 0);
+
+ skb_reserve(skb, 3); /* Skip over P and SD. */
+ skb_put(skb, pkt_len); /* And cut off FCS. */
+ skb->protocol = fddi_type_trans(skb, dev);
+
+ rx_stat = netif_rx(skb);
+ if (rx_stat != NET_RX_DROP) {
+ fp->stats.rx_packets++;
+ fp->stats.rx_bytes += pkt_len;
+ if (is_multi)
+ fp->stats.multicast++;
+ } else {
+ fp->stats.rx_dropped++;
+ }
+
+ skb = newskb;
+ dma = newdma;
+ fp->rx_skbuff[i] = skb;
+ fp->rx_dma[i] = dma;
+ } else {
+ fp->stats.rx_dropped++;
+ pr_notice("%s: memory squeeze, dropping packet\n",
+ fp->name);
+ }
+
+err_rx:
+ writel_o(0, &fp->ring_hst_rx[i].rmc);
+ buf = (dma + 0x1000) >> 9;
+ writel_o(buf, &fp->ring_hst_rx[i].buffer1);
+ buf = dma >> 9 | FZA_RING_OWN_FZA;
+ writel_o(buf, &fp->ring_hst_rx[i].buf0_own);
+ fp->ring_hst_rx_index =
+ (fp->ring_hst_rx_index + 1) % fp->ring_hst_rx_size;
+ }
+}
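The length bookkeeping in the loop above is easiest to follow with a concrete (hypothetical) frame:

/* RMC reports PBC = 100 octets:
 *   len     = (rmc & FZA_RING_PBC_MASK) + 3 = 103   add P + SD back in
 *   pkt_len = len - 7                       =  96   drop P + SD (3) and FCS (4)
 *   skb_reserve(skb, 3);                            skip P + SD in the buffer
 *   skb_put(skb, pkt_len);                          pass the rest up the stack
 */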
+
+static void fza_tx_smt(struct net_device *dev)
+{
+ struct fza_private *fp = netdev_priv(dev);
+ struct fza_buffer_tx __iomem *smt_tx_ptr, *skb_data_ptr;
+ int i, len;
+ u32 own;
+
+ while (1) {
+ i = fp->ring_smt_tx_index;
+ own = readl_o(&fp->ring_smt_tx[i].own);
+ if ((own & FZA_RING_OWN_MASK) == FZA_RING_OWN_FZA)
+ break;
+
+ smt_tx_ptr = fp->mmio + readl_u(&fp->ring_smt_tx[i].buffer);
+ len = readl_u(&fp->ring_smt_tx[i].rmc) & FZA_RING_PBC_MASK;
+
+ if (!netif_queue_stopped(dev)) {
+ if (dev_nit_active(dev)) {
+ struct sk_buff *skb;
+
+ /* Length must be a multiple of 4 as only word
+ * reads are permitted!
+ */
+ skb = fza_alloc_skb_irq(dev, (len + 3) & ~3);
+ if (!skb)
+ goto err_no_skb; /* Drop. */
+
+ skb_data_ptr = (struct fza_buffer_tx *)
+ skb->data;
+
+ fza_reads(smt_tx_ptr, skb_data_ptr,
+ (len + 3) & ~3);
+ skb->dev = dev;
+ skb_reserve(skb, 3); /* Skip over PRH. */
+ skb_put(skb, len - 3);
+ skb_reset_network_header(skb);
+
+ dev_queue_xmit_nit(skb, dev);
+
+ dev_kfree_skb_irq(skb);
+
+err_no_skb:
+ ;
+ }
+
+ /* Queue the frame to the RMC transmit ring. */
+ fza_do_xmit((union fza_buffer_txp)
+ { .mmio_ptr = smt_tx_ptr },
+ len, dev, 1);
+ }
+
+ writel_o(FZA_RING_OWN_FZA, &fp->ring_smt_tx[i].own);
+ fp->ring_smt_tx_index =
+ (fp->ring_smt_tx_index + 1) % fp->ring_smt_tx_size;
+ }
+}
+
+static void fza_uns(struct net_device *dev)
+{
+ struct fza_private *fp = netdev_priv(dev);
+ u32 own;
+ int i;
+
+ while (1) {
+ i = fp->ring_uns_index;
+ own = readl_o(&fp->ring_uns[i].own);
+ if ((own & FZA_RING_OWN_MASK) == FZA_RING_OWN_FZA)
+ break;
+
+ if (readl_u(&fp->ring_uns[i].id) == FZA_RING_UNS_RX_OVER) {
+ fp->stats.rx_errors++;
+ fp->stats.rx_over_errors++;
+ }
+
+ writel_o(FZA_RING_OWN_FZA, &fp->ring_uns[i].own);
+ fp->ring_uns_index =
+ (fp->ring_uns_index + 1) % FZA_RING_UNS_SIZE;
+ }
+}
+
+static void fza_tx_flush(struct net_device *dev)
+{
+ struct fza_private *fp = netdev_priv(dev);
+ u32 own;
+ int i;
+
+ /* Clean up the SMT TX ring. */
+ i = fp->ring_smt_tx_index;
+ do {
+ writel_o(FZA_RING_OWN_FZA, &fp->ring_smt_tx[i].own);
+ fp->ring_smt_tx_index =
+ (fp->ring_smt_tx_index + 1) % fp->ring_smt_tx_size;
+
+ } while (i != fp->ring_smt_tx_index);
+
+ /* Clean up the RMC TX ring. */
+ i = fp->ring_rmc_tx_index;
+ do {
+ own = readl_o(&fp->ring_rmc_tx[i].own);
+ if ((own & FZA_RING_OWN_MASK) == FZA_RING_TX_OWN_RMC) {
+ u32 rmc = readl_u(&fp->ring_rmc_tx[i].rmc);
+
+ writel_u(rmc | FZA_RING_TX_DTP,
+ &fp->ring_rmc_tx[i].rmc);
+ }
+ fp->ring_rmc_tx_index =
+ (fp->ring_rmc_tx_index + 1) % fp->ring_rmc_tx_size;
+
+ } while (i != fp->ring_rmc_tx_index);
+
+ /* Done. */
+ writew_o(FZA_CONTROL_A_FLUSH_DONE, &fp->regs->control_a);
+}
+
+static irqreturn_t fza_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct fza_private *fp = netdev_priv(dev);
+ uint int_event;
+
+ /* Get interrupt events. */
+ int_event = readw_o(&fp->regs->int_event) & fp->int_mask;
+ if (int_event == 0)
+ return IRQ_NONE;
+
+ /* Clear the events. */
+ writew_u(int_event, &fp->regs->int_event);
+
+ /* Now handle the events. The order matters. */
+
+ /* Command finished interrupt. */
+ if ((int_event & FZA_EVENT_CMD_DONE) != 0) {
+ fp->irq_count_cmd_done++;
+
+ spin_lock(&fp->lock);
+ fp->cmd_done_flag = 1;
+ wake_up(&fp->cmd_done_wait);
+ spin_unlock(&fp->lock);
+ }
+
+ /* Transmit finished interrupt. */
+ if ((int_event & FZA_EVENT_TX_DONE) != 0) {
+ fp->irq_count_tx_done++;
+ fza_tx(dev);
+ }
+
+ /* Host receive interrupt. */
+ if ((int_event & FZA_EVENT_RX_POLL) != 0) {
+ fp->irq_count_rx_poll++;
+ fza_rx(dev);
+ }
+
+ /* SMT transmit interrupt. */
+ if ((int_event & FZA_EVENT_SMT_TX_POLL) != 0) {
+ fp->irq_count_smt_tx_poll++;
+ fza_tx_smt(dev);
+ }
+
+ /* Transmit ring flush request. */
+ if ((int_event & FZA_EVENT_FLUSH_TX) != 0) {
+ fp->irq_count_flush_tx++;
+ fza_tx_flush(dev);
+ }
+
+ /* Link status change interrupt. */
+ if ((int_event & FZA_EVENT_LINK_ST_CHG) != 0) {
+ uint status;
+
+ fp->irq_count_link_st_chg++;
+ status = readw_u(&fp->regs->status);
+ if (FZA_STATUS_GET_LINK(status) == FZA_LINK_ON) {
+ netif_carrier_on(dev);
+ pr_info("%s: link available\n", fp->name);
+ } else {
+ netif_carrier_off(dev);
+ pr_info("%s: link unavailable\n", fp->name);
+ }
+ }
+
+ /* Unsolicited event interrupt. */
+ if ((int_event & FZA_EVENT_UNS_POLL) != 0) {
+ fp->irq_count_uns_poll++;
+ fza_uns(dev);
+ }
+
+ /* State change interrupt. */
+ if ((int_event & FZA_EVENT_STATE_CHG) != 0) {
+ uint status, state;
+
+ fp->irq_count_state_chg++;
+
+ status = readw_u(&fp->regs->status);
+ state = FZA_STATUS_GET_STATE(status);
+ pr_debug("%s: state change: %x\n", fp->name, state);
+ switch (state) {
+ case FZA_STATE_RESET:
+ break;
+
+ case FZA_STATE_UNINITIALIZED:
+ netif_carrier_off(dev);
+ del_timer_sync(&fp->reset_timer);
+ fp->ring_cmd_index = 0;
+ fp->ring_uns_index = 0;
+ fp->ring_rmc_tx_index = 0;
+ fp->ring_rmc_txd_index = 0;
+ fp->ring_hst_rx_index = 0;
+ fp->ring_smt_tx_index = 0;
+ fp->ring_smt_rx_index = 0;
+ if (fp->state > state) {
+ pr_info("%s: OK\n", fp->name);
+ fza_cmd_send(dev, FZA_RING_CMD_INIT);
+ }
+ break;
+
+ case FZA_STATE_INITIALIZED:
+ if (fp->state > state) {
+ fza_set_rx_mode(dev);
+ fza_cmd_send(dev, FZA_RING_CMD_PARAM);
+ }
+ break;
+
+ case FZA_STATE_RUNNING:
+ case FZA_STATE_MAINTENANCE:
+ fp->state = state;
+ fza_rx_init(fp);
+ fp->queue_active = 1;
+ netif_wake_queue(dev);
+ pr_debug("%s: queue woken\n", fp->name);
+ break;
+
+ case FZA_STATE_HALTED:
+ fp->queue_active = 0;
+ netif_stop_queue(dev);
+ pr_debug("%s: queue stopped\n", fp->name);
+ del_timer_sync(&fp->reset_timer);
+ pr_warn("%s: halted, reason: %x\n", fp->name,
+ FZA_STATUS_GET_HALT(status));
+ fza_regs_dump(fp);
+ pr_info("%s: resetting the board...\n", fp->name);
+ fza_do_reset(fp);
+ fp->timer_state = 0;
+ fp->reset_timer.expires = jiffies + 45 * HZ;
+ add_timer(&fp->reset_timer);
+ break;
+
+ default:
+ pr_warn("%s: undefined state: %x\n", fp->name, state);
+ break;
+ }
+
+ spin_lock(&fp->lock);
+ fp->state_chg_flag = 1;
+ wake_up(&fp->state_chg_wait);
+ spin_unlock(&fp->lock);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void fza_reset_timer(struct timer_list *t)
+{
+ struct fza_private *fp = from_timer(fp, t, reset_timer);
+
+ if (!fp->timer_state) {
+ pr_err("%s: RESET timed out!\n", fp->name);
+ pr_info("%s: trying harder...\n", fp->name);
+
+ /* Assert the board reset. */
+ writew_o(FZA_RESET_INIT, &fp->regs->reset);
+ readw_o(&fp->regs->reset); /* Synchronize. */
+
+ fp->timer_state = 1;
+ fp->reset_timer.expires = jiffies + HZ;
+ } else {
+ /* Clear the board reset. */
+ writew_u(FZA_RESET_CLR, &fp->regs->reset);
+
+ /* Enable all interrupt events we handle. */
+ writew_o(fp->int_mask, &fp->regs->int_mask);
+ readw_o(&fp->regs->int_mask); /* Synchronize. */
+
+ fp->timer_state = 0;
+ fp->reset_timer.expires = jiffies + 45 * HZ;
+ }
+ add_timer(&fp->reset_timer);
+}
+
+static int fza_set_mac_address(struct net_device *dev, void *addr)
+{
+ return -EOPNOTSUPP;
+}
+
+static netdev_tx_t fza_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct fza_private *fp = netdev_priv(dev);
+ unsigned int old_mask, new_mask;
+ int ret;
+ u8 fc;
+
+ skb_push(skb, 3); /* Make room for PRH. */
+
+ /* Decode FC to set PRH. */
+ fc = skb->data[3];
+ skb->data[0] = 0;
+ skb->data[1] = 0;
+ skb->data[2] = FZA_PRH2_NORMAL;
+ if ((fc & FDDI_FC_K_CLASS_MASK) == FDDI_FC_K_CLASS_SYNC)
+ skb->data[0] |= FZA_PRH0_FRAME_SYNC;
+ switch (fc & FDDI_FC_K_FORMAT_MASK) {
+ case FDDI_FC_K_FORMAT_MANAGEMENT:
+ if ((fc & FDDI_FC_K_CONTROL_MASK) == 0) {
+ /* Token. */
+ skb->data[0] |= FZA_PRH0_TKN_TYPE_IMM;
+ skb->data[1] |= FZA_PRH1_TKN_SEND_NONE;
+ } else {
+ /* SMT or MAC. */
+ skb->data[0] |= FZA_PRH0_TKN_TYPE_UNR;
+ skb->data[1] |= FZA_PRH1_TKN_SEND_UNR;
+ }
+ skb->data[1] |= FZA_PRH1_CRC_NORMAL;
+ break;
+ case FDDI_FC_K_FORMAT_LLC:
+ case FDDI_FC_K_FORMAT_FUTURE:
+ skb->data[0] |= FZA_PRH0_TKN_TYPE_UNR;
+ skb->data[1] |= FZA_PRH1_CRC_NORMAL | FZA_PRH1_TKN_SEND_UNR;
+ break;
+ case FDDI_FC_K_FORMAT_IMPLEMENTOR:
+ skb->data[0] |= FZA_PRH0_TKN_TYPE_UNR;
+ skb->data[1] |= FZA_PRH1_TKN_SEND_ORIG;
+ break;
+ }
+
+ /* SMT transmit interrupts may sneak frames into the RMC
+ * transmit ring. We disable them while queueing a frame
+ * to maintain consistency.
+ */
+ old_mask = fp->int_mask;
+ new_mask = old_mask & ~FZA_MASK_SMT_TX_POLL;
+ writew_u(new_mask, &fp->regs->int_mask);
+ readw_o(&fp->regs->int_mask); /* Synchronize. */
+ fp->int_mask = new_mask;
+ ret = fza_do_xmit((union fza_buffer_txp)
+ { .data_ptr = (struct fza_buffer_tx *)skb->data },
+ skb->len, dev, 0);
+ fp->int_mask = old_mask;
+ writew_u(fp->int_mask, &fp->regs->int_mask);
+
+ if (ret) {
+ /* Probably an SMT packet filled the remaining space,
+ * so just stop the queue, but don't report it as an error.
+ */
+ netif_stop_queue(dev);
+ pr_debug("%s: queue stopped\n", fp->name);
+ fp->stats.tx_dropped++;
+ }
+
+ dev_kfree_skb(skb);
+
+ return ret;
+}
+
+static int fza_open(struct net_device *dev)
+{
+ struct fza_private *fp = netdev_priv(dev);
+ struct fza_ring_cmd __iomem *ring;
+ struct sk_buff *skb;
+ unsigned long flags;
+ dma_addr_t dma;
+ int ret, i;
+ u32 stat;
+ long t;
+
+ for (i = 0; i < FZA_RING_RX_SIZE; i++) {
+ /* We have to 512-byte-align RX buffers... */
+ skb = fza_alloc_skb(dev, FZA_RX_BUFFER_SIZE + 511);
+ if (skb) {
+ fza_skb_align(skb, 512);
+ dma = dma_map_single(fp->bdev, skb->data,
+ FZA_RX_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(fp->bdev, dma)) {
+ dev_kfree_skb(skb);
+ skb = NULL;
+ }
+ }
+ if (!skb) {
+ for (--i; i >= 0; i--) {
+ dma_unmap_single(fp->bdev, fp->rx_dma[i],
+ FZA_RX_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb(fp->rx_skbuff[i]);
+ fp->rx_dma[i] = 0;
+ fp->rx_skbuff[i] = NULL;
+ }
+ return -ENOMEM;
+ }
+ fp->rx_skbuff[i] = skb;
+ fp->rx_dma[i] = dma;
+ }
+
+ ret = fza_init_send(dev, NULL);
+ if (ret != 0)
+ return ret;
+
+ /* Purger and Beacon multicasts need to be supplied before PARAM. */
+ fza_set_rx_mode(dev);
+
+ spin_lock_irqsave(&fp->lock, flags);
+ fp->cmd_done_flag = 0;
+ ring = fza_cmd_send(dev, FZA_RING_CMD_PARAM);
+ spin_unlock_irqrestore(&fp->lock, flags);
+ if (!ring)
+ return -ENOBUFS;
+
+ t = wait_event_timeout(fp->cmd_done_wait, fp->cmd_done_flag, 3 * HZ);
+ if (fp->cmd_done_flag == 0) {
+ pr_err("%s: PARAM command timed out!, state %x\n", fp->name,
+ FZA_STATUS_GET_STATE(readw_u(&fp->regs->status)));
+ return -EIO;
+ }
+ stat = readl_u(&ring->stat);
+ if (stat != FZA_RING_STAT_SUCCESS) {
+ pr_err("%s: PARAM command failed!, status %02x, state %x\n",
+ fp->name, stat,
+ FZA_STATUS_GET_STATE(readw_u(&fp->regs->status)));
+ return -EIO;
+ }
+ pr_debug("%s: PARAM: %lums elapsed\n", fp->name,
+ (3 * HZ - t) * 1000 / HZ);
+
+ return 0;
+}
+
+static int fza_close(struct net_device *dev)
+{
+ struct fza_private *fp = netdev_priv(dev);
+ unsigned long flags;
+ uint state;
+ long t;
+ int i;
+
+ netif_stop_queue(dev);
+ pr_debug("%s: queue stopped\n", fp->name);
+
+ del_timer_sync(&fp->reset_timer);
+ spin_lock_irqsave(&fp->lock, flags);
+ fp->state = FZA_STATE_UNINITIALIZED;
+ fp->state_chg_flag = 0;
+ /* Shut the interface down. */
+ writew_o(FZA_CONTROL_A_SHUT, &fp->regs->control_a);
+ readw_o(&fp->regs->control_a); /* Synchronize. */
+ spin_unlock_irqrestore(&fp->lock, flags);
+
+ /* DEC says SHUT needs up to 10 seconds to complete. */
+ t = wait_event_timeout(fp->state_chg_wait, fp->state_chg_flag,
+ 15 * HZ);
+ state = FZA_STATUS_GET_STATE(readw_o(&fp->regs->status));
+ if (fp->state_chg_flag == 0) {
+ pr_err("%s: SHUT timed out!, state %x\n", fp->name, state);
+ return -EIO;
+ }
+ if (state != FZA_STATE_UNINITIALIZED) {
+ pr_err("%s: SHUT failed!, state %x\n", fp->name, state);
+ return -EIO;
+ }
+ pr_debug("%s: SHUT: %lums elapsed\n", fp->name,
+ (15 * HZ - t) * 1000 / HZ);
+
+ for (i = 0; i < FZA_RING_RX_SIZE; i++)
+ if (fp->rx_skbuff[i]) {
+ dma_unmap_single(fp->bdev, fp->rx_dma[i],
+ FZA_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
+ dev_kfree_skb(fp->rx_skbuff[i]);
+ fp->rx_dma[i] = 0;
+ fp->rx_skbuff[i] = NULL;
+ }
+
+ return 0;
+}
+
+static struct net_device_stats *fza_get_stats(struct net_device *dev)
+{
+ struct fza_private *fp = netdev_priv(dev);
+
+ return &fp->stats;
+}
+
+static int fza_probe(struct device *bdev)
+{
+ static const struct net_device_ops netdev_ops = {
+ .ndo_open = fza_open,
+ .ndo_stop = fza_close,
+ .ndo_start_xmit = fza_start_xmit,
+ .ndo_set_rx_mode = fza_set_rx_mode,
+ .ndo_set_mac_address = fza_set_mac_address,
+ .ndo_get_stats = fza_get_stats,
+ };
+ static int version_printed;
+ char rom_rev[4], fw_rev[4], rmc_rev[4];
+ struct tc_dev *tdev = to_tc_dev(bdev);
+ struct fza_cmd_init __iomem *init;
+ resource_size_t start, len;
+ struct net_device *dev;
+ struct fza_private *fp;
+ uint smt_ver, pmd_type;
+ void __iomem *mmio;
+ uint hw_addr[2];
+ int ret, i;
+
+ if (!version_printed) {
+ pr_info("%s", version);
+ version_printed = 1;
+ }
+
+ dev = alloc_fddidev(sizeof(*fp));
+ if (!dev)
+ return -ENOMEM;
+ SET_NETDEV_DEV(dev, bdev);
+
+ fp = netdev_priv(dev);
+ dev_set_drvdata(bdev, dev);
+
+ fp->bdev = bdev;
+ fp->name = dev_name(bdev);
+
+ /* Request the I/O MEM resource. */
+ start = tdev->resource.start;
+ len = tdev->resource.end - start + 1;
+ if (!request_mem_region(start, len, dev_name(bdev))) {
+ pr_err("%s: cannot reserve MMIO region\n", fp->name);
+ ret = -EBUSY;
+ goto err_out_kfree;
+ }
+
+ /* MMIO mapping setup. */
+ mmio = ioremap_nocache(start, len);
+ if (!mmio) {
+ pr_err("%s: cannot map MMIO\n", fp->name);
+ ret = -ENOMEM;
+ goto err_out_resource;
+ }
+
+ /* Initialize the new device structure. */
+ switch (loopback) {
+ case FZA_LOOP_NORMAL:
+ case FZA_LOOP_INTERN:
+ case FZA_LOOP_EXTERN:
+ break;
+ default:
+ loopback = FZA_LOOP_NORMAL;
+ }
+
+ fp->mmio = mmio;
+ dev->irq = tdev->interrupt;
+
+ pr_info("%s: DEC FDDIcontroller 700 or 700-C at 0x%08llx, irq %d\n",
+ fp->name, (long long)tdev->resource.start, dev->irq);
+ pr_debug("%s: mapped at: 0x%p\n", fp->name, mmio);
+
+ fp->regs = mmio + FZA_REG_BASE;
+ fp->ring_cmd = mmio + FZA_RING_CMD;
+ fp->ring_uns = mmio + FZA_RING_UNS;
+
+ init_waitqueue_head(&fp->state_chg_wait);
+ init_waitqueue_head(&fp->cmd_done_wait);
+ spin_lock_init(&fp->lock);
+ fp->int_mask = FZA_MASK_NORMAL;
+
+ timer_setup(&fp->reset_timer, fza_reset_timer, 0);
+
+ /* Sanitize the board. */
+ fza_regs_dump(fp);
+ fza_do_shutdown(fp);
+
+ ret = request_irq(dev->irq, fza_interrupt, IRQF_SHARED, fp->name, dev);
+ if (ret != 0) {
+ pr_err("%s: unable to get IRQ %d!\n", fp->name, dev->irq);
+ goto err_out_map;
+ }
+
+ /* Enable the driver mode. */
+ writew_o(FZA_CONTROL_B_DRIVER, &fp->regs->control_b);
+
+ /* For some reason transmit done interrupts can trigger during
+ * reset. This avoids a division error in the handler.
+ */
+ fp->ring_rmc_tx_size = FZA_RING_TX_SIZE;
+
+ ret = fza_reset(fp);
+ if (ret != 0)
+ goto err_out_irq;
+
+ ret = fza_init_send(dev, &init);
+ if (ret != 0)
+ goto err_out_irq;
+
+ fza_reads(&init->hw_addr, &hw_addr, sizeof(hw_addr));
+ memcpy(dev->dev_addr, &hw_addr, FDDI_K_ALEN);
+
+ fza_reads(&init->rom_rev, &rom_rev, sizeof(rom_rev));
+ fza_reads(&init->fw_rev, &fw_rev, sizeof(fw_rev));
+ fza_reads(&init->rmc_rev, &rmc_rev, sizeof(rmc_rev));
+ for (i = 3; i >= 0 && rom_rev[i] == ' '; i--)
+ rom_rev[i] = 0;
+ for (i = 3; i >= 0 && fw_rev[i] == ' '; i--)
+ fw_rev[i] = 0;
+ for (i = 3; i >= 0 && rmc_rev[i] == ' '; i--)
+ rmc_rev[i] = 0;
+
+ fp->ring_rmc_tx = mmio + readl_u(&init->rmc_tx);
+ fp->ring_rmc_tx_size = readl_u(&init->rmc_tx_size);
+ fp->ring_hst_rx = mmio + readl_u(&init->hst_rx);
+ fp->ring_hst_rx_size = readl_u(&init->hst_rx_size);
+ fp->ring_smt_tx = mmio + readl_u(&init->smt_tx);
+ fp->ring_smt_tx_size = readl_u(&init->smt_tx_size);
+ fp->ring_smt_rx = mmio + readl_u(&init->smt_rx);
+ fp->ring_smt_rx_size = readl_u(&init->smt_rx_size);
+
+ fp->buffer_tx = mmio + FZA_TX_BUFFER_ADDR(readl_u(&init->rmc_tx));
+
+ fp->t_max = readl_u(&init->def_t_max);
+ fp->t_req = readl_u(&init->def_t_req);
+ fp->tvx = readl_u(&init->def_tvx);
+ fp->lem_threshold = readl_u(&init->lem_threshold);
+ fza_reads(&init->def_station_id, &fp->station_id,
+ sizeof(fp->station_id));
+ fp->rtoken_timeout = readl_u(&init->rtoken_timeout);
+ fp->ring_purger = readl_u(&init->ring_purger);
+
+ smt_ver = readl_u(&init->smt_ver);
+ pmd_type = readl_u(&init->pmd_type);
+
+ pr_debug("%s: INIT parameters:\n", fp->name);
+ pr_debug(" tx_mode: %u\n", readl_u(&init->tx_mode));
+ pr_debug(" hst_rx_size: %u\n", readl_u(&init->hst_rx_size));
+ pr_debug(" rmc_rev: %.4s\n", rmc_rev);
+ pr_debug(" rom_rev: %.4s\n", rom_rev);
+ pr_debug(" fw_rev: %.4s\n", fw_rev);
+ pr_debug(" mop_type: %u\n", readl_u(&init->mop_type));
+ pr_debug(" hst_rx: 0x%08x\n", readl_u(&init->hst_rx));
+ pr_debug(" rmc_tx: 0x%08x\n", readl_u(&init->rmc_tx));
+ pr_debug(" rmc_tx_size: %u\n", readl_u(&init->rmc_tx_size));
+ pr_debug(" smt_tx: 0x%08x\n", readl_u(&init->smt_tx));
+ pr_debug(" smt_tx_size: %u\n", readl_u(&init->smt_tx_size));
+ pr_debug(" smt_rx: 0x%08x\n", readl_u(&init->smt_rx));
+ pr_debug(" smt_rx_size: %u\n", readl_u(&init->smt_rx_size));
+ /* TC systems are always LE, so don't bother swapping. */
+ pr_debug(" hw_addr: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ (readl_u(&init->hw_addr[0]) >> 0) & 0xff,
+ (readl_u(&init->hw_addr[0]) >> 8) & 0xff,
+ (readl_u(&init->hw_addr[0]) >> 16) & 0xff,
+ (readl_u(&init->hw_addr[0]) >> 24) & 0xff,
+ (readl_u(&init->hw_addr[1]) >> 0) & 0xff,
+ (readl_u(&init->hw_addr[1]) >> 8) & 0xff,
+ (readl_u(&init->hw_addr[1]) >> 16) & 0xff,
+ (readl_u(&init->hw_addr[1]) >> 24) & 0xff);
+ pr_debug(" def_t_req: %u\n", readl_u(&init->def_t_req));
+ pr_debug(" def_tvx: %u\n", readl_u(&init->def_tvx));
+ pr_debug(" def_t_max: %u\n", readl_u(&init->def_t_max));
+ pr_debug(" lem_threshold: %u\n", readl_u(&init->lem_threshold));
+ /* Don't bother swapping, see above. */
+ pr_debug(" def_station_id: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ (readl_u(&init->def_station_id[0]) >> 0) & 0xff,
+ (readl_u(&init->def_station_id[0]) >> 8) & 0xff,
+ (readl_u(&init->def_station_id[0]) >> 16) & 0xff,
+ (readl_u(&init->def_station_id[0]) >> 24) & 0xff,
+ (readl_u(&init->def_station_id[1]) >> 0) & 0xff,
+ (readl_u(&init->def_station_id[1]) >> 8) & 0xff,
+ (readl_u(&init->def_station_id[1]) >> 16) & 0xff,
+ (readl_u(&init->def_station_id[1]) >> 24) & 0xff);
+ pr_debug(" pmd_type_alt: %u\n", readl_u(&init->pmd_type_alt));
+ pr_debug(" smt_ver: %u\n", readl_u(&init->smt_ver));
+ pr_debug(" rtoken_timeout: %u\n", readl_u(&init->rtoken_timeout));
+ pr_debug(" ring_purger: %u\n", readl_u(&init->ring_purger));
+ pr_debug(" smt_ver_max: %u\n", readl_u(&init->smt_ver_max));
+ pr_debug(" smt_ver_min: %u\n", readl_u(&init->smt_ver_min));
+ pr_debug(" pmd_type: %u\n", readl_u(&init->pmd_type));
+
+ pr_info("%s: model %s, address %pMF\n",
+ fp->name,
+ pmd_type == FZA_PMD_TYPE_TW ?
+ "700-C (DEFZA-CA), ThinWire PMD selected" :
+ pmd_type == FZA_PMD_TYPE_STP ?
+ "700-C (DEFZA-CA), STP PMD selected" :
+ "700 (DEFZA-AA), MMF PMD",
+ dev->dev_addr);
+ pr_info("%s: ROM rev. %.4s, firmware rev. %.4s, RMC rev. %.4s, "
+ "SMT ver. %u\n", fp->name, rom_rev, fw_rev, rmc_rev, smt_ver);
+
+ /* Now that we have fetched the initial parameters, shut the interface
+ * down until it is opened.
+ */
+ ret = fza_close(dev);
+ if (ret != 0)
+ goto err_out_irq;
+
+ /* The FZA-specific entries in the device structure. */
+ dev->netdev_ops = &netdev_ops;
+
+ ret = register_netdev(dev);
+ if (ret != 0)
+ goto err_out_irq;
+
+ pr_info("%s: registered as %s\n", fp->name, dev->name);
+ fp->name = (const char *)dev->name;
+
+ get_device(bdev);
+ return 0;
+
+err_out_irq:
+ del_timer_sync(&fp->reset_timer);
+ fza_do_shutdown(fp);
+ free_irq(dev->irq, dev);
+
+err_out_map:
+ iounmap(mmio);
+
+err_out_resource:
+ release_mem_region(start, len);
+
+err_out_kfree:
+ free_netdev(dev);
+
+ pr_err("%s: initialization failure, aborting!\n", fp->name);
+ return ret;
+}
+
+static int fza_remove(struct device *bdev)
+{
+ struct net_device *dev = dev_get_drvdata(bdev);
+ struct fza_private *fp = netdev_priv(dev);
+ struct tc_dev *tdev = to_tc_dev(bdev);
+ resource_size_t start, len;
+
+ put_device(bdev);
+
+ unregister_netdev(dev);
+
+ del_timer_sync(&fp->reset_timer);
+ fza_do_shutdown(fp);
+ free_irq(dev->irq, dev);
+
+ iounmap(fp->mmio);
+
+ start = tdev->resource.start;
+ len = tdev->resource.end - start + 1;
+ release_mem_region(start, len);
+
+ free_netdev(dev);
+
+ return 0;
+}
+
+static struct tc_device_id const fza_tc_table[] = {
+ { "DEC ", "PMAF-AA " },
+ { }
+};
+MODULE_DEVICE_TABLE(tc, fza_tc_table);
+
+static struct tc_driver fza_driver = {
+ .id_table = fza_tc_table,
+ .driver = {
+ .name = "defza",
+ .bus = &tc_bus_type,
+ .probe = fza_probe,
+ .remove = fza_remove,
+ },
+};
+
+static int fza_init(void)
+{
+ return tc_register_driver(&fza_driver);
+}
+
+static void fza_exit(void)
+{
+ tc_unregister_driver(&fza_driver);
+}
+
+module_init(fza_init);
+module_exit(fza_exit);
diff --git a/drivers/net/fddi/defza.h b/drivers/net/fddi/defza.h
new file mode 100644
index 000000000000..b06acf32738e
--- /dev/null
+++ b/drivers/net/fddi/defza.h
@@ -0,0 +1,791 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* FDDI network adapter driver for DEC FDDIcontroller 700/700-C devices.
+ *
+ * Copyright (c) 2018 Maciej W. Rozycki
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * References:
+ *
+ * Dave Sawyer & Phil Weeks & Frank Itkowsky,
+ * "DEC FDDIcontroller 700 Port Specification",
+ * Revision 1.1, Digital Equipment Corporation
+ */
+
+#include <linux/compiler.h>
+#include <linux/if_fddi.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+
+/* IOmem register offsets. */
+#define FZA_REG_BASE 0x100000 /* register base address */
+#define FZA_REG_RESET 0x100200 /* reset, r/w */
+#define FZA_REG_INT_EVENT 0x100400 /* interrupt event, r/w1c */
+#define FZA_REG_STATUS 0x100402 /* status, r/o */
+#define FZA_REG_INT_MASK 0x100404 /* interrupt mask, r/w */
+#define FZA_REG_CONTROL_A 0x100500 /* control A, r/w1s */
+#define FZA_REG_CONTROL_B 0x100502 /* control B, r/w */
+
+/* Reset register constants. Bits 1:0 are r/w, others are fixed at 0. */
+#define FZA_RESET_DLU 0x0002 /* OR with INIT to blast flash memory */
+#define FZA_RESET_INIT 0x0001 /* switch into the reset state */
+#define FZA_RESET_CLR 0x0000 /* run self-test and return to work */
+
+/* Interrupt event register constants. All bits are r/w1c. */
+#define FZA_EVENT_DLU_DONE 0x0800 /* flash memory write complete */
+#define FZA_EVENT_FLUSH_TX 0x0400 /* transmit ring flush request */
+#define FZA_EVENT_PM_PARITY_ERR 0x0200 /* onboard packet memory parity err */
+#define FZA_EVENT_HB_PARITY_ERR 0x0100 /* host bus parity error */
+#define FZA_EVENT_NXM_ERR 0x0080 /* non-existent memory access error;
+ * also raised for unaligned and
+ * unsupported partial-word accesses
+ */
+#define FZA_EVENT_LINK_ST_CHG 0x0040 /* link status change */
+#define FZA_EVENT_STATE_CHG 0x0020 /* adapter state change */
+#define FZA_EVENT_UNS_POLL 0x0010 /* unsolicited event service request */
+#define FZA_EVENT_CMD_DONE 0x0008 /* command done ack */
+#define FZA_EVENT_SMT_TX_POLL 0x0004 /* SMT frame transmit request */
+#define FZA_EVENT_RX_POLL 0x0002 /* receive request (packet avail.) */
+#define FZA_EVENT_TX_DONE 0x0001 /* RMC transmit done ack */
+
+/* Status register constants. All bits are r/o. */
+#define FZA_STATUS_DLU_SHIFT 0xc /* down line upgrade status bits */
+#define FZA_STATUS_DLU_MASK 0x03
+#define FZA_STATUS_LINK_SHIFT 0xb /* link status bits */
+#define FZA_STATUS_LINK_MASK 0x01
+#define FZA_STATUS_STATE_SHIFT 0x8 /* adapter state bits */
+#define FZA_STATUS_STATE_MASK 0x07
+#define FZA_STATUS_HALT_SHIFT 0x0 /* halt reason bits */
+#define FZA_STATUS_HALT_MASK 0xff
+#define FZA_STATUS_TEST_SHIFT 0x0 /* test failure bits */
+#define FZA_STATUS_TEST_MASK 0xff
+
+#define FZA_STATUS_GET_DLU(x) (((x) >> FZA_STATUS_DLU_SHIFT) & \
+ FZA_STATUS_DLU_MASK)
+#define FZA_STATUS_GET_LINK(x) (((x) >> FZA_STATUS_LINK_SHIFT) & \
+ FZA_STATUS_LINK_MASK)
+#define FZA_STATUS_GET_STATE(x) (((x) >> FZA_STATUS_STATE_SHIFT) & \
+ FZA_STATUS_STATE_MASK)
+#define FZA_STATUS_GET_HALT(x) (((x) >> FZA_STATUS_HALT_SHIFT) & \
+ FZA_STATUS_HALT_MASK)
+#define FZA_STATUS_GET_TEST(x) (((x) >> FZA_STATUS_TEST_SHIFT) & \
+ FZA_STATUS_TEST_MASK)
+
+#define FZA_DLU_FAILURE 0x0 /* DLU catastrophic error; brain dead */
+#define FZA_DLU_ERROR 0x1 /* DLU error; old firmware intact */
+#define FZA_DLU_SUCCESS 0x2 /* DLU OK; new firmware loaded */
+
+#define FZA_LINK_OFF 0x0 /* link unavailable */
+#define FZA_LINK_ON 0x1 /* link available */
+
+#define FZA_STATE_RESET 0x0 /* resetting */
+#define FZA_STATE_UNINITIALIZED 0x1 /* after a reset */
+#define FZA_STATE_INITIALIZED 0x2 /* initialized */
+#define FZA_STATE_RUNNING 0x3 /* running (link active) */
+#define FZA_STATE_MAINTENANCE 0x4 /* running (link looped back) */
+#define FZA_STATE_HALTED 0x5 /* halted (error condition) */
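Decoding a status word with the accessors above, for a hypothetical value:

/* status = 0x0b00 (hypothetical):
 *   FZA_STATUS_GET_LINK(0x0b00)  = (0x0b00 >> 0xb) & 0x01 = 1 = FZA_LINK_ON
 *   FZA_STATUS_GET_STATE(0x0b00) = (0x0b00 >> 0x8) & 0x07 = 3 = FZA_STATE_RUNNING
 */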
+
+#define FZA_HALT_UNKNOWN 0x00 /* unknown reason */
+#define FZA_HALT_HOST 0x01 /* host-directed HALT */
+#define FZA_HALT_HB_PARITY 0x02 /* host bus parity error */
+#define FZA_HALT_NXM 0x03 /* adapter non-existent memory ref. */
+#define FZA_HALT_SW 0x04 /* adapter software fault */
+#define FZA_HALT_HW 0x05 /* adapter hardware fault */
+#define FZA_HALT_PC_TRACE 0x06 /* PC Trace path test */
+#define FZA_HALT_DLSW 0x07 /* data link software fault */
+#define FZA_HALT_DLHW 0x08 /* data link hardware fault */
+
+#define FZA_TEST_FATAL 0x00 /* self-test catastrophic failure */
+#define FZA_TEST_68K 0x01 /* 68000 CPU */
+#define FZA_TEST_SRAM_BWADDR 0x02 /* SRAM byte/word address */
+#define FZA_TEST_SRAM_DBUS 0x03 /* SRAM data bus */
+#define FZA_TEST_SRAM_STUCK1 0x04 /* SRAM stuck-at range 1 */
+#define FZA_TEST_SRAM_STUCK2 0x05 /* SRAM stuck-at range 2 */
+#define FZA_TEST_SRAM_COUPL1 0x06 /* SRAM coupling range 1 */
+#define FZA_TEST_SRAM_COUPL2 0x07 /* SRAM coupling range 2 */
+#define FZA_TEST_FLASH_CRC 0x08 /* Flash CRC */
+#define FZA_TEST_ROM 0x09 /* option ROM */
+#define FZA_TEST_PHY_CSR 0x0a /* PHY CSR */
+#define FZA_TEST_MAC_BIST 0x0b /* MAC BiST */
+#define FZA_TEST_MAC_CSR 0x0c /* MAC CSR */
+#define FZA_TEST_MAC_ADDR_UNIQ 0x0d /* MAC unique address */
+#define FZA_TEST_ELM_BIST 0x0e /* ELM BiST */
+#define FZA_TEST_ELM_CSR 0x0f /* ELM CSR */
+#define FZA_TEST_ELM_ADDR_UNIQ 0x10 /* ELM unique address */
+#define FZA_TEST_CAM 0x11 /* CAM */
+#define FZA_TEST_NIROM 0x12 /* NI ROM checksum */
+#define FZA_TEST_SC_LOOP 0x13 /* SC loopback packet */
+#define FZA_TEST_LM_LOOP 0x14 /* LM loopback packet */
+#define FZA_TEST_EB_LOOP 0x15 /* EB loopback packet */
+#define FZA_TEST_SC_LOOP_BYPS 0x16 /* SC bypass loopback packet */
+#define FZA_TEST_LM_LOOP_LOCAL 0x17 /* LM local loopback packet */
+#define FZA_TEST_EB_LOOP_LOCAL 0x18 /* EB local loopback packet */
+#define FZA_TEST_CDC_LOOP 0x19 /* CDC loopback packet */
+#define FZA_TEST_FIBER_LOOP 0x1A /* FIBER loopback packet */
+#define FZA_TEST_CAM_MATCH_LOOP 0x1B /* CAM match packet loopback */
+#define FZA_TEST_68K_IRQ_STUCK 0x1C /* 68000 interrupt line stuck-at */
+#define FZA_TEST_IRQ_PRESENT 0x1D /* interrupt present register */
+#define FZA_TEST_RMC_BIST 0x1E /* RMC BiST */
+#define FZA_TEST_RMC_CSR 0x1F /* RMC CSR */
+#define FZA_TEST_RMC_ADDR_UNIQ 0x20 /* RMC unique address */
+#define FZA_TEST_PM_DPATH 0x21 /* packet memory data path */
+#define FZA_TEST_PM_ADDR 0x22 /* packet memory address */
+#define FZA_TEST_RES_23 0x23 /* reserved */
+#define FZA_TEST_PM_DESC 0x24 /* packet memory descriptor */
+#define FZA_TEST_PM_OWN 0x25 /* packet memory own bit */
+#define FZA_TEST_PM_PARITY 0x26 /* packet memory parity */
+#define FZA_TEST_PM_BSWAP 0x27 /* packet memory byte swap */
+#define FZA_TEST_PM_WSWAP 0x28 /* packet memory word swap */
+#define FZA_TEST_PM_REF 0x29 /* packet memory refresh */
+#define FZA_TEST_PM_CSR 0x2A /* PM CSR */
+#define FZA_TEST_PORT_STATUS 0x2B /* port status register */
+#define FZA_TEST_HOST_IRQMASK 0x2C /* host interrupt mask */
+#define FZA_TEST_TIMER_IRQ1 0x2D /* RTOS timer */
+#define FZA_TEST_FORCE_IRQ1 0x2E /* force RTOS IRQ1 */
+#define FZA_TEST_TIMER_IRQ5 0x2F /* IRQ5 backoff timer */
+#define FZA_TEST_FORCE_IRQ5 0x30 /* force IRQ5 */
+#define FZA_TEST_RES_31 0x31 /* reserved */
+#define FZA_TEST_IC_PRIO 0x32 /* interrupt controller priority */
+#define FZA_TEST_PM_FULL 0x33 /* full packet memory */
+#define FZA_TEST_PMI_DMA 0x34 /* PMI DMA */
+
+/* Interrupt mask register constants. All bits are r/w. */
+#define FZA_MASK_RESERVED 0xf000 /* unused */
+#define FZA_MASK_DLU_DONE 0x0800 /* flash memory write complete */
+#define FZA_MASK_FLUSH_TX 0x0400 /* transmit ring flush request */
+#define FZA_MASK_PM_PARITY_ERR 0x0200 /* onboard packet memory parity error
+ */
+#define FZA_MASK_HB_PARITY_ERR 0x0100 /* host bus parity error */
+#define FZA_MASK_NXM_ERR 0x0080 /* adapter non-existent memory
+ * reference
+ */
+#define FZA_MASK_LINK_ST_CHG 0x0040 /* link status change */
+#define FZA_MASK_STATE_CHG 0x0020 /* adapter state change */
+#define FZA_MASK_UNS_POLL 0x0010 /* unsolicited event service request */
+#define FZA_MASK_CMD_DONE 0x0008 /* command ring entry processed */
+#define FZA_MASK_SMT_TX_POLL 0x0004 /* SMT frame transmit request */
+#define FZA_MASK_RCV_POLL 0x0002 /* receive request (packet available)
+ */
+#define FZA_MASK_TX_DONE 0x0001 /* RMC transmit done acknowledge */
+
+/* Which interrupts to receive: 0/1 is mask/unmask. */
+#define FZA_MASK_NONE 0x0000
+#define FZA_MASK_NORMAL \
+ ((~(FZA_MASK_RESERVED | FZA_MASK_DLU_DONE | \
+ FZA_MASK_PM_PARITY_ERR | FZA_MASK_HB_PARITY_ERR | \
+ FZA_MASK_NXM_ERR)) & 0xffff)
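Spelling out the arithmetic behind FZA_MASK_NORMAL:

/* 0xf000 | 0x0800 | 0x0200 | 0x0100 | 0x0080 = 0xfb80 (events left masked)
 * FZA_MASK_NORMAL = ~0xfb80 & 0xffff = 0x047f
 * i.e. FLUSH_TX, LINK_ST_CHG, STATE_CHG, UNS_POLL, CMD_DONE, SMT_TX_POLL,
 * RCV_POLL and TX_DONE are enabled; DLU completion and the fatal
 * parity/NXM errors stay masked.
 */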
+
+/* Control A register constants. */
+#define FZA_CONTROL_A_HB_PARITY_ERR 0x8000 /* host bus parity error */
+#define FZA_CONTROL_A_NXM_ERR 0x4000 /* adapter non-existent memory
+ * reference
+ */
+#define FZA_CONTROL_A_SMT_RX_OVFL 0x0040 /* SMT receive overflow */
+#define FZA_CONTROL_A_FLUSH_DONE 0x0020 /* flush tx request complete */
+#define FZA_CONTROL_A_SHUT 0x0010 /* turn the interface off */
+#define FZA_CONTROL_A_HALT 0x0008 /* halt the controller */
+#define FZA_CONTROL_A_CMD_POLL 0x0004 /* command ring poll */
+#define FZA_CONTROL_A_SMT_RX_POLL 0x0002 /* SMT receive ring poll */
+#define FZA_CONTROL_A_TX_POLL 0x0001 /* transmit poll */
+
+/* Control B register constants. All bits are r/w.
+ *
+ * Possible values:
+ * 0x0000 after booting into REX,
+ * 0x0003 after issuing `boot #/mop'.
+ */
+#define FZA_CONTROL_B_CONSOLE 0x0002 /* OR with DRIVER for console
+ * (TC firmware) mode
+ */
+#define FZA_CONTROL_B_DRIVER 0x0001 /* driver mode */
+#define FZA_CONTROL_B_IDLE 0x0000 /* no driver installed */
+
+#define FZA_RESET_PAD \
+ (FZA_REG_RESET - FZA_REG_BASE)
+#define FZA_INT_EVENT_PAD \
+ (FZA_REG_INT_EVENT - FZA_REG_RESET - sizeof(u16))
+#define FZA_CONTROL_A_PAD \
+ (FZA_REG_CONTROL_A - FZA_REG_INT_MASK - sizeof(u16))
+
+/* Layout of registers. */
+struct fza_regs {
+ u8 pad0[FZA_RESET_PAD];
+ u16 reset; /* reset register */
+ u8 pad1[FZA_INT_EVENT_PAD];
+ u16 int_event; /* interrupt event register */
+ u16 status; /* status register */
+ u16 int_mask; /* interrupt mask register */
+ u8 pad2[FZA_CONTROL_A_PAD];
+ u16 control_a; /* control A register */
+ u16 control_b; /* control B register */
+};
+
+/* Command descriptor ring entry. */
+struct fza_ring_cmd {
+ u32 cmd_own; /* bit 31: ownership, bits [30:0]: command */
+ u32 stat; /* command status */
+ u32 buffer; /* address of the buffer in the FZA space */
+ u32 pad0;
+};
+
+#define FZA_RING_CMD 0x200400 /* command ring address */
+#define FZA_RING_CMD_SIZE 0x40 /* command descriptor ring
+ * size
+ */
+/* Command constants. */
+#define FZA_RING_CMD_MASK 0x7fffffff
+#define FZA_RING_CMD_NOP 0x00000000 /* nop */
+#define FZA_RING_CMD_INIT 0x00000001 /* initialize */
+#define FZA_RING_CMD_MODCAM 0x00000002 /* modify CAM */
+#define FZA_RING_CMD_PARAM 0x00000003 /* set system parameters */
+#define FZA_RING_CMD_MODPROM 0x00000004 /* modify promiscuous mode */
+#define FZA_RING_CMD_SETCHAR 0x00000005 /* set link characteristics */
+#define FZA_RING_CMD_RDCNTR 0x00000006 /* read counters */
+#define FZA_RING_CMD_STATUS 0x00000007 /* get link status */
+#define FZA_RING_CMD_RDCAM 0x00000008 /* read CAM */
+
+/* Command status constants. */
+#define FZA_RING_STAT_SUCCESS 0x00000000
+
+/* Unsolicited event descriptor ring entry. */
+struct fza_ring_uns {
+ u32 own; /* bit 31: ownership, bits [30:0]: reserved */
+ u32 id; /* event ID */
+ u32 buffer; /* address of the buffer in the FZA space */
+ u32 pad0; /* reserved */
+};
+
+#define FZA_RING_UNS 0x200800 /* unsolicited ring address */
+#define FZA_RING_UNS_SIZE 0x40 /* unsolicited descriptor ring
+ * size
+ */
+/* Unsolicited event constants. */
+#define FZA_RING_UNS_UND 0x00000000 /* undefined event ID */
+#define FZA_RING_UNS_INIT_IN 0x00000001 /* ring init initiated */
+#define FZA_RING_UNS_INIT_RX 0x00000002 /* ring init received */
+#define FZA_RING_UNS_BEAC_IN 0x00000003 /* ring beaconing initiated */
+#define FZA_RING_UNS_DUP_ADDR 0x00000004 /* duplicate address detected */
+#define FZA_RING_UNS_DUP_TOK 0x00000005 /* duplicate token detected */
+#define FZA_RING_UNS_PURG_ERR 0x00000006 /* ring purger error */
+#define FZA_RING_UNS_STRIP_ERR 0x00000007 /* bridge strip error */
+#define FZA_RING_UNS_OP_OSC 0x00000008 /* ring op oscillation */
+#define FZA_RING_UNS_BEAC_RX 0x00000009 /* directed beacon received */
+#define FZA_RING_UNS_PCT_IN 0x0000000a /* PC trace initiated */
+#define FZA_RING_UNS_PCT_RX 0x0000000b /* PC trace received */
+#define FZA_RING_UNS_TX_UNDER 0x0000000c /* transmit underrun */
+#define FZA_RING_UNS_TX_FAIL 0x0000000d /* transmit failure */
+#define FZA_RING_UNS_RX_OVER 0x0000000e /* receive overrun */
+
+/* RMC (Ring Memory Control) transmit descriptor ring entry. */
+struct fza_ring_rmc_tx {
+ u32 rmc; /* RMC information */
+ u32 avl; /* available for host (unused by RMC) */
+ u32 own; /* bit 31: ownership, bits [30:0]: reserved */
+ u32 pad0; /* reserved */
+};
+
+#define FZA_TX_BUFFER_ADDR(x) (0x200000 | (((x) & 0xffff) << 5))
+#define FZA_TX_BUFFER_SIZE 512
+struct fza_buffer_tx {
+ u32 data[FZA_TX_BUFFER_SIZE / sizeof(u32)];
+};
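The packet-memory address macro above is what fza_probe() uses to locate the transmit buffers; a worked example with a hypothetical INIT response value:

/* readl_u(&init->rmc_tx) = 0x0100 (hypothetical):
 *   FZA_TX_BUFFER_ADDR(0x0100) = 0x200000 | ((0x0100 & 0xffff) << 5)
 *                              = 0x200000 | 0x2000 = 0x202000
 * fp->buffer_tx = mmio + 0x202000 in that case.
 */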
+
+/* Transmit ring RMC constants. */
+#define FZA_RING_TX_SOP 0x80000000 /* start of packet */
+#define FZA_RING_TX_EOP 0x40000000 /* end of packet */
+#define FZA_RING_TX_DTP 0x20000000 /* discard this packet */
+#define FZA_RING_TX_VBC 0x10000000 /* valid buffer byte count */
+#define FZA_RING_TX_DCC_MASK 0x0f000000 /* DMA completion code */
+#define FZA_RING_TX_DCC_SUCCESS 0x01000000 /* transmit succeeded */
+#define FZA_RING_TX_DCC_DTP_SOP 0x02000000 /* DTP set at SOP */
+#define FZA_RING_TX_DCC_DTP 0x04000000 /* DTP set within packet */
+#define FZA_RING_TX_DCC_ABORT 0x05000000 /* MAC-requested abort */
+#define FZA_RING_TX_DCC_PARITY 0x06000000 /* xmit data parity error */
+#define FZA_RING_TX_DCC_UNDRRUN 0x07000000 /* transmit underrun */
+#define FZA_RING_TX_XPO_MASK 0x003fe000 /* transmit packet offset */
+
+/* Host receive descriptor ring entry. */
+struct fza_ring_hst_rx {
+ u32 buf0_own; /* bit 31: ownership, bits [30:23]: unused,
+ * bits [22:0]: right-shifted address of the
+ * buffer in system memory (low buffer)
+ */
+ u32 buffer1; /* bits [31:23]: unused,
+ * bits [22:0]: right-shifted address of the
+ * buffer in system memory (high buffer)
+ */
+ u32 rmc; /* RMC information */
+ u32 pad0;
+};
+
+#define FZA_RX_BUFFER_SIZE (4096 + 512) /* buffer length */
+
+/* Receive ring RMC constants. */
+#define FZA_RING_RX_SOP 0x80000000 /* start of packet */
+#define FZA_RING_RX_EOP 0x40000000 /* end of packet */
+#define FZA_RING_RX_FSC_MASK 0x38000000 /* # of frame status bits */
+#define FZA_RING_RX_FSB_MASK 0x07c00000 /* frame status bits */
+#define FZA_RING_RX_FSB_ERR 0x04000000 /* error detected */
+#define FZA_RING_RX_FSB_ADDR 0x02000000 /* address recognized */
+#define FZA_RING_RX_FSB_COP 0x01000000 /* frame copied */
+#define FZA_RING_RX_FSB_F0 0x00800000 /* first additional flag */
+#define FZA_RING_RX_FSB_F1 0x00400000 /* second additional flag */
+#define FZA_RING_RX_BAD 0x00200000 /* bad packet */
+#define FZA_RING_RX_CRC 0x00100000 /* CRC error */
+#define FZA_RING_RX_RRR_MASK 0x000e0000 /* MAC receive status bits */
+#define FZA_RING_RX_RRR_OK 0x00000000 /* receive OK */
+#define FZA_RING_RX_RRR_SADDR 0x00020000 /* source address matched */
+#define FZA_RING_RX_RRR_DADDR 0x00040000 /* dest address not matched */
+#define FZA_RING_RX_RRR_ABORT 0x00060000 /* RMC abort */
+#define FZA_RING_RX_RRR_LENGTH 0x00080000 /* invalid length */
+#define FZA_RING_RX_RRR_FRAG 0x000a0000 /* fragment */
+#define FZA_RING_RX_RRR_FORMAT 0x000c0000 /* format error */
+#define FZA_RING_RX_RRR_RESET 0x000e0000 /* MAC reset */
+#define FZA_RING_RX_DA_MASK 0x00018000 /* daddr match status bits */
+#define FZA_RING_RX_DA_NONE 0x00000000 /* no match */
+#define FZA_RING_RX_DA_PROM 0x00008000 /* promiscuous match */
+#define FZA_RING_RX_DA_CAM 0x00010000 /* CAM entry match */
+#define FZA_RING_RX_DA_LOCAL 0x00018000 /* link addr or LLC bcast */
+#define FZA_RING_RX_SA_MASK 0x00006000 /* saddr match status bits */
+#define FZA_RING_RX_SA_NONE 0x00000000 /* no match */
+#define FZA_RING_RX_SA_ALIAS 0x00002000 /* alias address match */
+#define FZA_RING_RX_SA_CAM 0x00004000 /* CAM entry match */
+#define FZA_RING_RX_SA_LOCAL 0x00006000 /* link address match */
+
+/* SMT (Station Management) transmit/receive descriptor ring entry. */
+struct fza_ring_smt {
+ u32 own; /* bit 31: ownership, bits [30:0]: unused */
+ u32 rmc; /* RMC information */
+ u32 buffer; /* address of the buffer */
+ u32 pad0; /* reserved */
+};
+
+/* Ownership constants.
+ *
+ * Only the current owner is permitted to process a given ring entry.
+ * Note that the bit meanings are reversed for the RMC transmit ring;
+ * a host-side ownership check is sketched below.
+ */
+#define FZA_RING_OWN_MASK 0x80000000
+#define FZA_RING_OWN_FZA 0x00000000 /* permit FZA, forbid host */
+#define FZA_RING_OWN_HOST 0x80000000 /* permit host, forbid FZA */
+#define FZA_RING_TX_OWN_RMC 0x80000000 /* permit RMC, forbid host */
+#define FZA_RING_TX_OWN_HOST 0x00000000 /* permit host, forbid RMC */
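+
+/* Host-side ownership checks, sketched here for illustration only (an
+ * assumption about usage, not something this header mandates): the host
+ * may touch a receive entry only while it holds ownership, and the RMC
+ * transmit ring uses the opposite encoding of the ownership bit.
+ */
+static inline int fza_host_owns_rx(u32 buf0_own)
+{
+ return (buf0_own & FZA_RING_OWN_MASK) == FZA_RING_OWN_HOST;
+}
+
+static inline int fza_host_owns_tx(u32 own)
+{
+ return (own & FZA_RING_OWN_MASK) == FZA_RING_TX_OWN_HOST;
+}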
+
+/* RMC constants. */
+#define FZA_RING_PBC_MASK 0x00001fff /* frame length */
+
+/* Layout of counter buffers. */
+
+struct fza_counter {
+ u32 msw;
+ u32 lsw;
+};
+
+struct fza_counters {
+ struct fza_counter sys_buf; /* system buffer unavailable */
+ struct fza_counter tx_under; /* transmit underruns */
+ struct fza_counter tx_fail; /* transmit failures */
+ struct fza_counter rx_over; /* receive data overruns */
+ struct fza_counter frame_cnt; /* frame count */
+ struct fza_counter error_cnt; /* error count */
+ struct fza_counter lost_cnt; /* lost count */
+ struct fza_counter rinit_in; /* ring initialization initiated */
+ struct fza_counter rinit_rx; /* ring initialization received */
+ struct fza_counter beac_in; /* ring beacon initiated */
+ struct fza_counter dup_addr; /* duplicate address test failures */
+ struct fza_counter dup_tok; /* duplicate token detected */
+ struct fza_counter purg_err; /* ring purge errors */
+ struct fza_counter strip_err; /* bridge strip errors */
+ struct fza_counter pct_in; /* traces initiated */
+ struct fza_counter pct_rx; /* traces received */
+ struct fza_counter lem_rej; /* LEM rejects */
+ struct fza_counter tne_rej; /* TNE expiry rejects */
+ struct fza_counter lem_event; /* LEM events */
+ struct fza_counter lct_rej; /* LCT rejects */
+ struct fza_counter conn_cmpl; /* connections completed */
+ struct fza_counter el_buf; /* elasticity buffer errors */
+};
+
+/* Layout of command buffers. */
+
+/* INIT command buffer.
+ *
+ * Values of default link parameters given are as obtained from a
+ * DEFZA-AA rev. C03 board. The board counts time in units of 80ns.
+ */
+struct fza_cmd_init {
+ u32 tx_mode; /* transmit mode */
+ u32 hst_rx_size; /* host receive ring entries */
+
+ struct fza_counters counters; /* counters */
+
+ u8 rmc_rev[4]; /* RMC revision */
+ u8 rom_rev[4]; /* ROM revision */
+ u8 fw_rev[4]; /* firmware revision */
+
+ u32 mop_type; /* MOP device type */
+
+ u32 hst_rx; /* base of host rx descriptor ring */
+ u32 rmc_tx; /* base of RMC tx descriptor ring */
+ u32 rmc_tx_size; /* size of RMC tx descriptor ring */
+ u32 smt_tx; /* base of SMT tx descriptor ring */
+ u32 smt_tx_size; /* size of SMT tx descriptor ring */
+ u32 smt_rx; /* base of SMT rx descriptor ring */
+ u32 smt_rx_size; /* size of SMT rx descriptor ring */
+
+ u32 hw_addr[2]; /* link address */
+
+ u32 def_t_req; /* default Requested TTRT (T_REQ) --
+ * C03: 100000 [80ns]
+ */
+ u32 def_tvx; /* default Valid Transmission Time
+ * (TVX) -- C03: 32768 [80ns]
+ */
+ u32 def_t_max; /* default Maximum TTRT (T_MAX) --
+ * C03: 2162688 [80ns]
+ */
+ u32 lem_threshold; /* default LEM threshold -- C03: 8 */
+ u32 def_station_id[2]; /* default station ID */
+
+ u32 pmd_type_alt; /* alternative PMD type code */
+
+ u32 smt_ver; /* SMT version */
+
+ u32 rtoken_timeout; /* default restricted token timeout
+ * -- C03: 12500000 [80ns]
+ */
+ u32 ring_purger; /* default ring purger enable --
+ * C03: 1
+ */
+
+ u32 smt_ver_max; /* max SMT version ID */
+ u32 smt_ver_min; /* min SMT version ID */
+ u32 pmd_type; /* PMD type code */
+};
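+
+/* For orientation only (derived from the 80ns tick noted above, not
+ * quoted from board documentation): the C03 defaults work out to about
+ * T_REQ = 100000 * 80ns = 8ms, TVX = 32768 * 80ns ~= 2.6ms,
+ * T_MAX = 2162688 * 80ns ~= 173ms and a restricted token timeout of
+ * 12500000 * 80ns = 1s.
+ */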
+
+/* INIT command PMD type codes. */
+#define FZA_PMD_TYPE_MMF 0 /* Multimode fiber */
+#define FZA_PMD_TYPE_TW 101 /* ThinWire */
+#define FZA_PMD_TYPE_STP 102 /* STP */
+
+/* MODCAM/RDCAM command buffer. */
+#define FZA_CMD_CAM_SIZE 64 /* CAM address entry count */
+struct fza_cmd_cam {
+ u32 hw_addr[FZA_CMD_CAM_SIZE][2]; /* CAM address entries */
+};
+
+/* PARAM command buffer.
+ *
+ * Permitted ranges given are as defined by the spec and obtained from a
+ * DEFZA-AA rev. C03 board, respectively. The rtoken_timeout field is
+ * erroneously interpreted by the firmware in units of ms.
+ */
+struct fza_cmd_param {
+ u32 loop_mode; /* loopback mode */
+ u32 t_max; /* Maximum TTRT (T_MAX)
+ * def: ??? [80ns]
+ * C03: [t_req+1,4294967295] [80ns]
+ */
+ u32 t_req; /* Requested TTRT (T_REQ)
+ * def: [50000,2097151] [80ns]
+ * C03: [50001,t_max-1] [80ns]
+ */
+ u32 tvx; /* Valid Transmission Time (TVX)
+ * def: [29375,65280] [80ns]
+ * C03: [29376,65279] [80ns]
+ */
+ u32 lem_threshold; /* LEM threshold */
+ u32 station_id[2]; /* station ID */
+ u32 rtoken_timeout; /* restricted token timeout
+ * def: [0,125000000] [80ns]
+ * C03: [0,9999] [ms]
+ */
+ u32 ring_purger; /* ring purger enable: 0|1 */
+};
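+
+/* Illustrative conversion (an assumption following from the comment
+ * above): C03 firmware takes rtoken_timeout in ms, so a value kept in
+ * 80ns ticks would need converting before a PARAM command is issued.
+ */
+static inline u32 fza_rtoken_timeout_ms(u32 ticks_80ns)
+{
+ return ticks_80ns / 12500; /* 1ms = 12500 ticks of 80ns */
+}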
+
+/* Loopback modes for the PARAM command. */
+#define FZA_LOOP_NORMAL 0
+#define FZA_LOOP_INTERN 1
+#define FZA_LOOP_EXTERN 2
+
+/* MODPROM command buffer. */
+struct fza_cmd_modprom {
+ u32 llc_prom; /* LLC promiscuous enable */
+ u32 smt_prom; /* SMT promiscuous enable */
+ u32 llc_multi; /* LLC multicast promiscuous enable */
+ u32 llc_bcast; /* LLC broadcast promiscuous enable */
+};
+
+/* SETCHAR command buffer.
+ *
+ * Permitted ranges are as for the PARAM command.
+ */
+struct fza_cmd_setchar {
+ u32 t_max; /* Maximum TTRT (T_MAX) */
+ u32 t_req; /* Requested TTRT (T_REQ) */
+ u32 tvx; /* Valid Transmission Time (TVX) */
+ u32 lem_threshold; /* LEM threshold */
+ u32 rtoken_timeout; /* restricted token timeout */
+ u32 ring_purger; /* ring purger enable */
+};
+
+/* RDCNTR command buffer. */
+struct fza_cmd_rdcntr {
+ struct fza_counters counters; /* counters */
+};
+
+/* STATUS command buffer. */
+struct fza_cmd_status {
+ u32 led_state; /* LED state */
+ u32 rmt_state; /* ring management state */
+ u32 link_state; /* link state */
+ u32 dup_addr; /* duplicate address flag */
+ u32 ring_purger; /* ring purger state */
+ u32 t_neg; /* negotiated TTRT [80ns] */
+ u32 una[2]; /* upstream neighbour address */
+ u32 una_timeout; /* UNA timed out */
+ u32 strip_mode; /* frame strip mode */
+ u32 yield_mode; /* claim token yield mode */
+ u32 phy_state; /* PHY state */
+ u32 neigh_phy; /* neighbour PHY type */
+ u32 reject; /* reject reason */
+ u32 phy_lee; /* PHY link error estimate [-log10] */
+ u32 una_old[2]; /* old upstream neighbour address */
+ u32 rmt_mac; /* remote MAC indicated */
+ u32 ring_err; /* ring error reason */
+ u32 beac_rx[2]; /* sender of last directed beacon */
+ u32 un_dup_addr; /* upstream neighbour dup address flag */
+ u32 dna[2]; /* downstream neighbour address */
+ u32 dna_old[2]; /* old downstream neighbour address */
+};
+
+/* Common command buffer. */
+union fza_cmd_buf {
+ struct fza_cmd_init init;
+ struct fza_cmd_cam cam;
+ struct fza_cmd_param param;
+ struct fza_cmd_modprom modprom;
+ struct fza_cmd_setchar setchar;
+ struct fza_cmd_rdcntr rdcntr;
+ struct fza_cmd_status status;
+};
+
+/* MAC (Media Access Controller) chip packet request header constants. */
+
+/* Packet request header byte #0. */
+#define FZA_PRH0_FMT_TYPE_MASK 0xc0 /* type of packet, always zero */
+#define FZA_PRH0_TOK_TYPE_MASK 0x30 /* type of token required
+ * to send this frame
+ */
+#define FZA_PRH0_TKN_TYPE_ANY 0x30 /* use either token type */
+#define FZA_PRH0_TKN_TYPE_UNR 0x20 /* use an unrestricted token */
+#define FZA_PRH0_TKN_TYPE_RST 0x10 /* use a restricted token */
+#define FZA_PRH0_TKN_TYPE_IMM 0x00 /* send immediately, no token required
+ */
+#define FZA_PRH0_FRAME_MASK 0x08 /* type of frame to send */
+#define FZA_PRH0_FRAME_SYNC 0x08 /* send a synchronous frame */
+#define FZA_PRH0_FRAME_ASYNC 0x00 /* send an asynchronous frame */
+#define FZA_PRH0_MODE_MASK 0x04 /* send mode */
+#define FZA_PRH0_MODE_IMMED 0x04 /* an immediate mode, send regardless
+ * of the ring operational state
+ */
+#define FZA_PRH0_MODE_NORMAL 0x00 /* a normal mode, send only if ring
+ * operational
+ */
+#define FZA_PRH0_SF_MASK 0x02 /* send frame first */
+#define FZA_PRH0_SF_FIRST 0x02 /* send this frame first
+ * with this token capture
+ */
+#define FZA_PRH0_SF_NORMAL 0x00 /* treat this frame normally */
+#define FZA_PRH0_BCN_MASK 0x01 /* beacon frame */
+#define FZA_PRH0_BCN_BEACON 0x01 /* send the frame only
+ * if in the beacon state
+ */
+#define FZA_PRH0_BCN_DATA 0x00 /* send the frame only
+ * if in the data state
+ */
+/* Packet request header byte #1. */
+ /* bit 7 always zero */
+#define FZA_PRH1_SL_MASK 0x40 /* send frame last */
+#define FZA_PRH1_SL_LAST 0x40 /* send this frame last, releasing
+ * the token afterwards
+ */
+#define FZA_PRH1_SL_NORMAL 0x00 /* treat this frame normally */
+#define FZA_PRH1_CRC_MASK 0x20 /* CRC append */
+#define FZA_PRH1_CRC_NORMAL 0x20 /* calculate the CRC and append it
+ * as the FCS field to the frame
+ */
+#define FZA_PRH1_CRC_SKIP 0x00 /* leave the frame as is */
+#define FZA_PRH1_TKN_SEND_MASK 0x18 /* type of token to send after the
+ * frame if this is the last frame
+ */
+#define FZA_PRH1_TKN_SEND_ORIG 0x18 /* send a token of the same type as the
+ * originally captured one
+ */
+#define FZA_PRH1_TKN_SEND_RST 0x10 /* send a restricted token */
+#define FZA_PRH1_TKN_SEND_UNR 0x08 /* send an unrestricted token */
+#define FZA_PRH1_TKN_SEND_NONE 0x00 /* send no token */
+#define FZA_PRH1_EXTRA_FS_MASK 0x07 /* send extra frame status indicators
+ */
+#define FZA_PRH1_EXTRA_FS_ST 0x07 /* TR RR ST II */
+#define FZA_PRH1_EXTRA_FS_SS 0x06 /* TR RR SS II */
+#define FZA_PRH1_EXTRA_FS_SR 0x05 /* TR RR SR II */
+#define FZA_PRH1_EXTRA_FS_NONE1 0x04 /* TR RR II II */
+#define FZA_PRH1_EXTRA_FS_RT 0x03 /* TR RR RT II */
+#define FZA_PRH1_EXTRA_FS_RS 0x02 /* TR RR RS II */
+#define FZA_PRH1_EXTRA_FS_RR 0x01 /* TR RR RR II */
+#define FZA_PRH1_EXTRA_FS_NONE 0x00 /* TR RR II II */
+/* Packet request header byte #2. */
+#define FZA_PRH2_NORMAL 0x00 /* always zero */
+
+/* PRH used for LLC frames. */
+#define FZA_PRH0_LLC (FZA_PRH0_TKN_TYPE_UNR)
+#define FZA_PRH1_LLC (FZA_PRH1_CRC_NORMAL | FZA_PRH1_TKN_SEND_UNR)
+#define FZA_PRH2_LLC (FZA_PRH2_NORMAL)
+
+/* PRH used for SMT frames. */
+#define FZA_PRH0_SMT (FZA_PRH0_TKN_TYPE_UNR)
+#define FZA_PRH1_SMT (FZA_PRH1_CRC_NORMAL | FZA_PRH1_TKN_SEND_UNR)
+#define FZA_PRH2_SMT (FZA_PRH2_NORMAL)
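+
+/* Illustrative helper (an assumption about usage, not part of the
+ * original header): fill the three packet request header bytes placed
+ * in front of an LLC frame handed to the RMC.
+ */
+static inline void fza_prh_llc(u8 prh[3])
+{
+ prh[0] = FZA_PRH0_LLC;
+ prh[1] = FZA_PRH1_LLC;
+ prh[2] = FZA_PRH2_LLC;
+}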
+
+#if ((FZA_RING_RX_SIZE) < 2) || ((FZA_RING_RX_SIZE) > 256)
+# error FZA_RING_RX_SIZE has to be from 2 up to 256
+#endif
+#if ((FZA_RING_TX_MODE) != 0) && ((FZA_RING_TX_MODE) != 1)
+# error FZA_RING_TX_MODE has to be either 0 or 1
+#endif
+
+#define FZA_RING_TX_SIZE (512 << (FZA_RING_TX_MODE))
+
+struct fza_private {
+ struct device *bdev; /* pointer to the bus device */
+ const char *name; /* printable device name */
+ void __iomem *mmio; /* MMIO ioremap cookie */
+ struct fza_regs __iomem *regs; /* pointer to FZA registers */
+
+ struct sk_buff *rx_skbuff[FZA_RING_RX_SIZE];
+ /* all skbs assigned to the host
+ * receive descriptors
+ */
+ dma_addr_t rx_dma[FZA_RING_RX_SIZE];
+ /* their corresponding DMA addresses */
+
+ struct fza_ring_cmd __iomem *ring_cmd;
+ /* pointer to the command descriptor
+ * ring
+ */
+ int ring_cmd_index; /* index to the command descriptor ring
+ * for the next command
+ */
+ struct fza_ring_uns __iomem *ring_uns;
+ /* pointer to the unsolicited
+ * descriptor ring
+ */
+ int ring_uns_index; /* index to the unsolicited descriptor
+ * ring for the next event
+ */
+
+ struct fza_ring_rmc_tx __iomem *ring_rmc_tx;
+ /* pointer to the RMC transmit
+ * descriptor ring (obtained from the
+ * INIT command)
+ */
+ int ring_rmc_tx_size; /* number of entries in the RMC
+ * transmit descriptor ring (obtained
+ * from the INIT command)
+ */
+ int ring_rmc_tx_index; /* index to the RMC transmit descriptor
+ * ring for the next transmission
+ */
+ int ring_rmc_txd_index; /* index to the RMC transmit descriptor
+ * ring for the next transmit done
+ * acknowledge
+ */
+
+ struct fza_ring_hst_rx __iomem *ring_hst_rx;
+ /* pointer to the host receive
+ * descriptor ring (obtained from the
+ * INIT command)
+ */
+ int ring_hst_rx_size; /* number of entries in the host
+ * receive descriptor ring (set by the
+ * INIT command)
+ */
+ int ring_hst_rx_index; /* index to the host receive descriptor
+ * ring for the next packet to receive
+ */
+
+ struct fza_ring_smt __iomem *ring_smt_tx;
+ /* pointer to the SMT transmit
+ * descriptor ring (obtained from the
+ * INIT command)
+ */
+ int ring_smt_tx_size; /* number of entries in the SMT
+ * transmit descriptor ring (obtained
+ * from the INIT command)
+ */
+ int ring_smt_tx_index; /* index to the SMT transmit descriptor
+ * ring for the next transmission
+ */
+
+ struct fza_ring_smt __iomem *ring_smt_rx;
+ /* pointer to the SMT receive
+ * descriptor ring (obtained from the
+ * INIT command)
+ */
+ int ring_smt_rx_size; /* number of entries in the SMT
+ * receive descriptor ring (obtained
+ * from the INIT command)
+ */
+ int ring_smt_rx_index; /* index to the SMT receive descriptor
+ * ring for the next event to receive
+ */
+
+ struct fza_buffer_tx __iomem *buffer_tx;
+ /* pointer to the RMC transmit buffers
+ */
+
+ uint state; /* adapter expected state */
+
+ spinlock_t lock; /* for device & private data access */
+ uint int_mask; /* interrupt source selector */
+
+ int cmd_done_flag; /* command completion trigger */
+ wait_queue_head_t cmd_done_wait;
+
+ int state_chg_flag; /* state change trigger */
+ wait_queue_head_t state_chg_wait;
+
+ struct timer_list reset_timer; /* RESET time-out trigger */
+ int timer_state; /* RESET trigger state */
+
+ int queue_active; /* whether to enable queueing */
+
+ struct net_device_stats stats;
+
+ uint irq_count_flush_tx; /* transmit flush irqs */
+ uint irq_count_uns_poll; /* unsolicited event irqs */
+ uint irq_count_smt_tx_poll; /* SMT transmit irqs */
+ uint irq_count_rx_poll; /* host receive irqs */
+ uint irq_count_tx_done; /* transmit done irqs */
+ uint irq_count_cmd_done; /* command done irqs */
+ uint irq_count_state_chg; /* state change irqs */
+ uint irq_count_link_st_chg; /* link status change irqs */
+
+ uint t_max; /* T_MAX */
+ uint t_req; /* T_REQ */
+ uint tvx; /* TVX */
+ uint lem_threshold; /* LEM threshold */
+ uint station_id[2]; /* station ID */
+ uint rtoken_timeout; /* restricted token timeout */
+ uint ring_purger; /* ring purger enable flag */
+};
+
+struct fza_fddihdr {
+ u8 pa[2]; /* preamble */
+ u8 sd; /* starting delimiter */
+ struct fddihdr hdr;
+} __packed;
diff --git a/drivers/net/fddi/skfp/h/cmtdef.h b/drivers/net/fddi/skfp/h/cmtdef.h
index a12f464941ed..448d66c2e372 100644
--- a/drivers/net/fddi/skfp/h/cmtdef.h
+++ b/drivers/net/fddi/skfp/h/cmtdef.h
@@ -655,14 +655,6 @@ void dump_hex(char *p, int len);
#ifndef PNMI_INIT
#define PNMI_INIT(smc) /* Nothing */
#endif
-#ifndef PNMI_GET_ID
-#define PNMI_GET_ID( smc, ndis_oid, buf, len, BytesWritten, BytesNeeded ) \
- ( 1 ? (-1) : (-1) )
-#endif
-#ifndef PNMI_SET_ID
-#define PNMI_SET_ID( smc, ndis_oid, buf, len, BytesRead, BytesNeeded, \
- set_type) ( 1 ? (-1) : (-1) )
-#endif
/*
* SMT_PANIC defines
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 6625fabe2c88..a0cd1c41cf5f 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -831,12 +831,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
if (IS_ERR(rt))
return PTR_ERR(rt);
- if (skb_dst(skb)) {
- int mtu = dst_mtu(&rt->dst) - GENEVE_IPV4_HLEN -
- info->options_len;
-
- skb_dst_update_pmtu(skb, mtu);
- }
+ skb_tunnel_check_pmtu(skb, &rt->dst,
+ GENEVE_IPV4_HLEN + info->options_len);
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
if (geneve->collect_md) {
@@ -881,11 +877,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
if (IS_ERR(dst))
return PTR_ERR(dst);
- if (skb_dst(skb)) {
- int mtu = dst_mtu(dst) - GENEVE_IPV6_HLEN - info->options_len;
-
- skb_dst_update_pmtu(skb, mtu);
- }
+ skb_tunnel_check_pmtu(skb, dst, GENEVE_IPV6_HLEN + info->options_len);
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
if (geneve->collect_md) {
@@ -1325,11 +1317,15 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
info->key.tun_id = tunid;
}
- if (data[IFLA_GENEVE_TTL])
+ if (data[IFLA_GENEVE_TTL_INHERIT]) {
+ if (nla_get_u8(data[IFLA_GENEVE_TTL_INHERIT]))
+ *ttl_inherit = true;
+ else
+ *ttl_inherit = false;
+ } else if (data[IFLA_GENEVE_TTL]) {
info->key.ttl = nla_get_u8(data[IFLA_GENEVE_TTL]);
-
- if (data[IFLA_GENEVE_TTL_INHERIT])
- *ttl_inherit = true;
+ *ttl_inherit = false;
+ }
if (data[IFLA_GENEVE_TOS])
info->key.tos = nla_get_u8(data[IFLA_GENEVE_TOS]);
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index d79a69dd2146..17e6dcd2eb42 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -34,7 +34,6 @@
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/semaphore.h>
-#include <linux/compat.h>
#include <linux/refcount.h>
#define SIXPACK_VERSION "Revision: 0.3.0"
@@ -752,23 +751,6 @@ static int sixpack_ioctl(struct tty_struct *tty, struct file *file,
return err;
}
-#ifdef CONFIG_COMPAT
-static long sixpack_compat_ioctl(struct tty_struct * tty, struct file * file,
- unsigned int cmd, unsigned long arg)
-{
- switch (cmd) {
- case SIOCGIFNAME:
- case SIOCGIFENCAP:
- case SIOCSIFENCAP:
- case SIOCSIFHWADDR:
- return sixpack_ioctl(tty, file, cmd,
- (unsigned long)compat_ptr(arg));
- }
-
- return -ENOIOCTLCMD;
-}
-#endif
-
static struct tty_ldisc_ops sp_ldisc = {
.owner = THIS_MODULE,
.magic = TTY_LDISC_MAGIC,
@@ -776,9 +758,6 @@ static struct tty_ldisc_ops sp_ldisc = {
.open = sixpack_open,
.close = sixpack_close,
.ioctl = sixpack_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = sixpack_compat_ioctl,
-#endif
.receive_buf = sixpack_receive_buf,
.write_wakeup = sixpack_write_wakeup,
};
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 13e4c1eff353..802233d41b25 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -35,7 +35,6 @@
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/jiffies.h>
-#include <linux/compat.h>
#include <net/ax25.h>
@@ -875,23 +874,6 @@ static int mkiss_ioctl(struct tty_struct *tty, struct file *file,
return err;
}
-#ifdef CONFIG_COMPAT
-static long mkiss_compat_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- switch (cmd) {
- case SIOCGIFNAME:
- case SIOCGIFENCAP:
- case SIOCSIFENCAP:
- case SIOCSIFHWADDR:
- return mkiss_ioctl(tty, file, cmd,
- (unsigned long)compat_ptr(arg));
- }
-
- return -ENOIOCTLCMD;
-}
-#endif
-
/*
* Handle the 'receiver data ready' interrupt.
* This function is called by the 'tty_io' module in the kernel when
@@ -966,9 +948,6 @@ static struct tty_ldisc_ops ax_ldisc = {
.open = mkiss_open,
.close = mkiss_close,
.ioctl = mkiss_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = mkiss_compat_ioctl,
-#endif
.receive_buf = mkiss_receive_buf,
.write_wakeup = mkiss_write_wakeup
};
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 16ec7af6ab7b..ba9df430fca6 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -966,6 +966,8 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
sizeof(struct yamdrv_ioctl_mcs));
if (IS_ERR(ym))
return PTR_ERR(ym);
+ if (ym->cmd != SIOCYAMSMCS)
+ return -EINVAL;
if (ym->bitrate > YAM_MAXBITRATE) {
kfree(ym);
return -EINVAL;
@@ -981,6 +983,8 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (copy_from_user(&yi, ifr->ifr_data, sizeof(struct yamdrv_ioctl_cfg)))
return -EFAULT;
+ if (yi.cmd != SIOCYAMSCFG)
+ return -EINVAL;
if ((yi.cfg.mask & YAM_IOBASE) && netif_running(dev))
return -EINVAL; /* Cannot change this parameter when up */
if ((yi.cfg.mask & YAM_IRQ) && netif_running(dev))
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 9bcaf204a7d4..cf36e7ff3191 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -2030,14 +2030,15 @@ static void netvsc_vf_setup(struct work_struct *w)
rtnl_unlock();
}
-/* Find netvsc by VMBus serial number.
- * The PCI hyperv controller records the serial number as the slot.
+/* Find netvsc by VF serial number.
+ * The PCI hyperv controller records the serial number as the slot kobj name.
*/
static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
{
struct device *parent = vf_netdev->dev.parent;
struct net_device_context *ndev_ctx;
struct pci_dev *pdev;
+ u32 serial;
if (!parent || !dev_is_pci(parent))
return NULL; /* not a PCI device */
@@ -2048,16 +2049,22 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
return NULL;
}
+ if (kstrtou32(pci_slot_name(pdev->slot), 10, &serial)) {
+ netdev_notice(vf_netdev, "Invalid vf serial:%s\n",
+ pci_slot_name(pdev->slot));
+ return NULL;
+ }
+
list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
if (!ndev_ctx->vf_alloc)
continue;
- if (ndev_ctx->vf_serial == pdev->slot->number)
+ if (ndev_ctx->vf_serial == serial)
return hv_get_drvdata(ndev_ctx->device_ctx);
}
netdev_notice(vf_netdev,
- "no netdev found for slot %u\n", pdev->slot->number);
+ "no netdev found for vf serial:%u\n", serial);
return NULL;
}
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index 23a52b9293f3..cd1d8faccca5 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -1308,8 +1308,7 @@ static int adf7242_remove(struct spi_device *spi)
{
struct adf7242_local *lp = spi_get_drvdata(spi);
- if (!IS_ERR_OR_NULL(lp->debugfs_root))
- debugfs_remove_recursive(lp->debugfs_root);
+ debugfs_remove_recursive(lp->debugfs_root);
cancel_delayed_work_sync(&lp->work);
destroy_workqueue(lp->wqueue);
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 58299fb666ed..0ff5a403a8dc 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -634,10 +634,9 @@ static int ca8210_test_int_driver_write(
for (i = 0; i < len; i++)
dev_dbg(&priv->spi->dev, "%#03x\n", buf[i]);
- fifo_buffer = kmalloc(len, GFP_KERNEL);
+ fifo_buffer = kmemdup(buf, len, GFP_KERNEL);
if (!fifo_buffer)
return -ENOMEM;
- memcpy(fifo_buffer, buf, len);
kfifo_in(&test->up_fifo, &fifo_buffer, 4);
wake_up_interruptible(&priv->test.readq);
@@ -3044,8 +3043,7 @@ static void ca8210_test_interface_clear(struct ca8210_priv *priv)
{
struct ca8210_test *test = &priv->test;
- if (!IS_ERR(test->ca8210_dfs_spi_int))
- debugfs_remove(test->ca8210_dfs_spi_int);
+ debugfs_remove(test->ca8210_dfs_spi_int);
kfifo_free(&test->up_fifo);
dev_info(&priv->spi->dev, "Test interface removed\n");
}
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index bf70ab892e69..51b5198d5943 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -37,8 +37,6 @@ MODULE_LICENSE("GPL");
static LIST_HEAD(hwsim_phys);
static DEFINE_MUTEX(hwsim_phys_lock);
-static LIST_HEAD(hwsim_ifup_phys);
-
static struct platform_device *mac802154hwsim_dev;
/* MAC802154_HWSIM netlink family */
@@ -85,7 +83,6 @@ struct hwsim_phy {
struct list_head edges;
struct list_head list;
- struct list_head list_ifup;
};
static int hwsim_add_one(struct genl_info *info, struct device *dev,
@@ -159,9 +156,6 @@ static int hwsim_hw_start(struct ieee802154_hw *hw)
struct hwsim_phy *phy = hw->priv;
phy->suspended = false;
- list_add_rcu(&phy->list_ifup, &hwsim_ifup_phys);
- synchronize_rcu();
-
return 0;
}
@@ -170,8 +164,6 @@ static void hwsim_hw_stop(struct ieee802154_hw *hw)
struct hwsim_phy *phy = hw->priv;
phy->suspended = true;
- list_del_rcu(&phy->list_ifup);
- synchronize_rcu();
}
static int
diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c
index e428277781ac..44de81e5f140 100644
--- a/drivers/net/ieee802154/mcr20a.c
+++ b/drivers/net/ieee802154/mcr20a.c
@@ -132,11 +132,6 @@ static const struct reg_sequence mar20a_iar_overwrites[] = {
};
#define MCR20A_VALID_CHANNELS (0x07FFF800)
-
-struct mcr20a_platform_data {
- int rst_gpio;
-};
-
#define MCR20A_MAX_BUF (127)
#define printdev(X) (&X->spi->dev)
@@ -412,7 +407,6 @@ struct mcr20a_local {
struct spi_device *spi;
struct ieee802154_hw *hw;
- struct mcr20a_platform_data *pdata;
struct regmap *regmap_dar;
struct regmap *regmap_iar;
@@ -903,19 +897,19 @@ mcr20a_irq_clean_complete(void *context)
switch (seq_state) {
/* TX IRQ, RX IRQ and SEQ IRQ */
- case (0x03):
+ case (DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ):
if (lp->is_tx) {
lp->is_tx = 0;
dev_dbg(printdev(lp), "TX is done. No ACK\n");
mcr20a_handle_tx_complete(lp);
}
break;
- case (0x05):
+ case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_SEQIRQ):
/* rx is starting */
dev_dbg(printdev(lp), "RX is starting\n");
mcr20a_handle_rx(lp);
break;
- case (0x07):
+ case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ):
if (lp->is_tx) {
/* tx is done */
lp->is_tx = 0;
@@ -927,7 +921,7 @@ mcr20a_irq_clean_complete(void *context)
mcr20a_handle_rx(lp);
}
break;
- case (0x01):
+ case (DAR_IRQSTS1_SEQIRQ):
if (lp->is_tx) {
dev_dbg(printdev(lp), "TX is starting\n");
mcr20a_handle_tx(lp);
@@ -976,20 +970,6 @@ static irqreturn_t mcr20a_irq_isr(int irq, void *data)
return IRQ_HANDLED;
}
-static int mcr20a_get_platform_data(struct spi_device *spi,
- struct mcr20a_platform_data *pdata)
-{
- int ret = 0;
-
- if (!spi->dev.of_node)
- return -EINVAL;
-
- pdata->rst_gpio = of_get_named_gpio(spi->dev.of_node, "rst_b-gpio", 0);
- dev_dbg(&spi->dev, "rst_b-gpio: %d\n", pdata->rst_gpio);
-
- return ret;
-}
-
static void mcr20a_hw_setup(struct mcr20a_local *lp)
{
u8 i;
@@ -1249,7 +1229,7 @@ mcr20a_probe(struct spi_device *spi)
{
struct ieee802154_hw *hw;
struct mcr20a_local *lp;
- struct mcr20a_platform_data *pdata;
+ struct gpio_desc *rst_b;
int irq_type;
int ret = -ENOMEM;
@@ -1260,48 +1240,32 @@ mcr20a_probe(struct spi_device *spi)
return -EINVAL;
}
- pdata = kmalloc(sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
-
- /* set mcr20a platform data */
- ret = mcr20a_get_platform_data(spi, pdata);
- if (ret < 0) {
- dev_crit(&spi->dev, "mcr20a_get_platform_data failed.\n");
- goto free_pdata;
- }
-
- /* init reset gpio */
- if (gpio_is_valid(pdata->rst_gpio)) {
- ret = devm_gpio_request_one(&spi->dev, pdata->rst_gpio,
- GPIOF_OUT_INIT_HIGH, "reset");
- if (ret)
- goto free_pdata;
+ rst_b = devm_gpiod_get(&spi->dev, "rst_b", GPIOD_OUT_HIGH);
+ if (IS_ERR(rst_b)) {
+ ret = PTR_ERR(rst_b);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&spi->dev, "Failed to get 'rst_b' gpio: %d", ret);
+ return ret;
}
/* reset mcr20a */
- if (gpio_is_valid(pdata->rst_gpio)) {
- usleep_range(10, 20);
- gpio_set_value_cansleep(pdata->rst_gpio, 0);
- usleep_range(10, 20);
- gpio_set_value_cansleep(pdata->rst_gpio, 1);
- usleep_range(120, 240);
- }
+ usleep_range(10, 20);
+ gpiod_set_value_cansleep(rst_b, 1);
+ usleep_range(10, 20);
+ gpiod_set_value_cansleep(rst_b, 0);
+ usleep_range(120, 240);
/* allocate ieee802154_hw and private data */
hw = ieee802154_alloc_hw(sizeof(*lp), &mcr20a_hw_ops);
if (!hw) {
dev_crit(&spi->dev, "ieee802154_alloc_hw failed\n");
- ret = -ENOMEM;
- goto free_pdata;
+ return ret;
}
/* init mcr20a local data */
lp = hw->priv;
lp->hw = hw;
lp->spi = spi;
- lp->spi->dev.platform_data = pdata;
- lp->pdata = pdata;
/* init ieee802154_hw */
hw->parent = &spi->dev;
@@ -1370,8 +1334,6 @@ mcr20a_probe(struct spi_device *spi)
free_dev:
ieee802154_free_hw(lp->hw);
-free_pdata:
- kfree(pdata);
return ret;
}
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index a7207fa7e451..2df7f60fe052 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -69,6 +69,10 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
int len;
skb_tx_timestamp(skb);
+
+ /* do not fool net_timestamp_check() with various clock bases */
+ skb->tstamp = 0;
+
skb_orphan(skb);
/* Before queueing this packet to netif_rx(),
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 4bb90b6867a2..64a982563d59 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -2812,9 +2812,6 @@ static int macsec_dev_open(struct net_device *dev)
struct net_device *real_dev = macsec->real_dev;
int err;
- if (!(real_dev->flags & IFF_UP))
- return -ENETDOWN;
-
err = dev_uc_add(real_dev, dev->dev_addr);
if (err < 0)
return err;
@@ -3306,6 +3303,9 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
if (err < 0)
goto del_dev;
+ netif_stacked_transfer_operstate(real_dev, dev);
+ linkwatch_fire_event(dev);
+
macsec_generation++;
return 0;
@@ -3490,6 +3490,20 @@ static int macsec_notify(struct notifier_block *this, unsigned long event,
return NOTIFY_DONE;
switch (event) {
+ case NETDEV_DOWN:
+ case NETDEV_UP:
+ case NETDEV_CHANGE: {
+ struct macsec_dev *m, *n;
+ struct macsec_rxh_data *rxd;
+
+ rxd = macsec_data_rtnl(real_dev);
+ list_for_each_entry_safe(m, n, &rxd->secys, secys) {
+ struct net_device *dev = m->secy.netdev;
+
+ netif_stacked_transfer_operstate(real_dev, dev);
+ }
+ break;
+ }
case NETDEV_UNREGISTER: {
struct macsec_dev *m, *n;
struct macsec_rxh_data *rxd;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index cfda146f3b3b..fc8d5f1ee1ad 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1077,7 +1077,7 @@ static void macvlan_dev_netpoll_cleanup(struct net_device *dev)
vlan->netpoll = NULL;
- __netpoll_free_async(netpoll);
+ __netpoll_free(netpoll);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c
index 81444208b216..cb3518474f0e 100644
--- a/drivers/net/netdevsim/bpf.c
+++ b/drivers/net/netdevsim/bpf.c
@@ -86,8 +86,14 @@ nsim_bpf_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn)
return 0;
}
+static int nsim_bpf_finalize(struct bpf_verifier_env *env)
+{
+ return 0;
+}
+
static const struct bpf_prog_offload_ops nsim_bpf_analyzer_ops = {
- .insn_hook = nsim_bpf_verify_insn,
+ .insn_hook = nsim_bpf_verify_insn,
+ .finalize = nsim_bpf_finalize,
};
static bool nsim_xdp_offload_active(struct netdevsim *ns)
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index b12023bc2cab..a5bab614ff84 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -71,7 +71,6 @@ static unsigned int tx_start = 10;
static unsigned int tx_stop = 5;
struct ntb_netdev {
- struct list_head list;
struct pci_dev *pdev;
struct net_device *ndev;
struct ntb_transport_qp *qp;
@@ -81,8 +80,6 @@ struct ntb_netdev {
#define NTB_TX_TIMEOUT_MS 1000
#define NTB_RXQ_SIZE 100
-static LIST_HEAD(dev_list);
-
static void ntb_netdev_event_handler(void *data, int link_is_up)
{
struct net_device *ndev = data;
@@ -236,7 +233,7 @@ static void ntb_netdev_tx_timer(struct timer_list *t)
struct net_device *ndev = dev->ndev;
if (ntb_transport_tx_free_entry(dev->qp) < tx_stop) {
- mod_timer(&dev->tx_timer, jiffies + msecs_to_jiffies(tx_time));
+ mod_timer(&dev->tx_timer, jiffies + usecs_to_jiffies(tx_time));
} else {
/* Make sure anybody stopping the queue after this sees the new
* value of ntb_transport_tx_free_entry()
@@ -452,7 +449,7 @@ static int ntb_netdev_probe(struct device *client_dev)
if (rc)
goto err1;
- list_add(&dev->list, &dev_list);
+ dev_set_drvdata(client_dev, ndev);
dev_info(&pdev->dev, "%s created\n", ndev->name);
return 0;
@@ -465,27 +462,8 @@ err:
static void ntb_netdev_remove(struct device *client_dev)
{
- struct ntb_dev *ntb;
- struct net_device *ndev;
- struct pci_dev *pdev;
- struct ntb_netdev *dev;
- bool found = false;
-
- ntb = dev_ntb(client_dev->parent);
- pdev = ntb->pdev;
-
- list_for_each_entry(dev, &dev_list, list) {
- if (dev->pdev == pdev) {
- found = true;
- break;
- }
- }
- if (!found)
- return;
-
- list_del(&dev->list);
-
- ndev = dev->ndev;
+ struct net_device *ndev = dev_get_drvdata(client_dev);
+ struct ntb_netdev *dev = netdev_priv(ndev);
unregister_netdev(ndev);
ntb_transport_free_queue(dev->qp);
diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c
index c017486e9b86..696bdf1e4576 100644
--- a/drivers/net/phy/mdio-mux-bcm-iproc.c
+++ b/drivers/net/phy/mdio-mux-bcm-iproc.c
@@ -289,8 +289,7 @@ static int mdio_mux_iproc_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int mdio_mux_iproc_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct iproc_mdiomux_desc *md = platform_get_drvdata(pdev);
+ struct iproc_mdiomux_desc *md = dev_get_drvdata(dev);
clk_disable_unprepare(md->core_clk);
@@ -299,8 +298,7 @@ static int mdio_mux_iproc_suspend(struct device *dev)
static int mdio_mux_iproc_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct iproc_mdiomux_desc *md = platform_get_drvdata(pdev);
+ struct iproc_mdiomux_desc *md = dev_get_drvdata(dev);
clk_prepare_enable(md->core_clk);
mdio_mux_iproc_config(md);
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c
index bc90764a8b8d..fe34576262bd 100644
--- a/drivers/net/phy/mdio-mux-gpio.c
+++ b/drivers/net/phy/mdio-mux-gpio.c
@@ -20,23 +20,21 @@
struct mdio_mux_gpio_state {
struct gpio_descs *gpios;
void *mux_handle;
- int values[];
};
static int mdio_mux_gpio_switch_fn(int current_child, int desired_child,
void *data)
{
struct mdio_mux_gpio_state *s = data;
- unsigned int n;
+ DECLARE_BITMAP(values, BITS_PER_TYPE(desired_child));
if (current_child == desired_child)
return 0;
- for (n = 0; n < s->gpios->ndescs; n++)
- s->values[n] = (desired_child >> n) & 1;
+ values[0] = desired_child;
gpiod_set_array_value_cansleep(s->gpios->ndescs, s->gpios->desc,
- s->values);
+ s->gpios->info, values);
return 0;
}
@@ -51,8 +49,7 @@ static int mdio_mux_gpio_probe(struct platform_device *pdev)
if (IS_ERR(gpios))
return PTR_ERR(gpios);
- s = devm_kzalloc(&pdev->dev, struct_size(s, values, gpios->ndescs),
- GFP_KERNEL);
+ s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
if (!s) {
gpiod_put_array(gpios);
return -ENOMEM;
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 3db06b40580d..9265dea79412 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -14,7 +14,7 @@
* option) any later version.
*
* Support : Micrel Phys:
- * Giga phys: ksz9021, ksz9031
+ * Giga phys: ksz9021, ksz9031, ksz9131
* 100/10 Phys : ksz8001, ksz8721, ksz8737, ksz8041
* ksz8021, ksz8031, ksz8051,
* ksz8081, ksz8091,
@@ -609,6 +609,116 @@ err_force_master:
return result;
}
+#define KSZ9131_SKEW_5BIT_MAX 2400
+#define KSZ9131_SKEW_4BIT_MAX 800
+#define KSZ9131_OFFSET 700
+#define KSZ9131_STEP 100
+
+static int ksz9131_of_load_skew_values(struct phy_device *phydev,
+ struct device_node *of_node,
+ u16 reg, size_t field_sz,
+ char *field[], u8 numfields)
+{
+ int val[4] = {-(1 + KSZ9131_OFFSET), -(2 + KSZ9131_OFFSET),
+ -(3 + KSZ9131_OFFSET), -(4 + KSZ9131_OFFSET)};
+ int skewval, skewmax = 0;
+ int matches = 0;
+ u16 maxval;
+ u16 newval;
+ u16 mask;
+ int i;
+
+ /* psec properties in dts should mean x pico seconds */
+ if (field_sz == 5)
+ skewmax = KSZ9131_SKEW_5BIT_MAX;
+ else
+ skewmax = KSZ9131_SKEW_4BIT_MAX;
+
+ for (i = 0; i < numfields; i++)
+ if (!of_property_read_s32(of_node, field[i], &skewval)) {
+ if (skewval < -KSZ9131_OFFSET)
+ skewval = -KSZ9131_OFFSET;
+ else if (skewval > skewmax)
+ skewval = skewmax;
+
+ val[i] = skewval + KSZ9131_OFFSET;
+ matches++;
+ }
+
+ if (!matches)
+ return 0;
+
+ if (matches < numfields)
+ newval = ksz9031_extended_read(phydev, OP_DATA, 2, reg);
+ else
+ newval = 0;
+
+ maxval = (field_sz == 4) ? 0xf : 0x1f;
+ for (i = 0; i < numfields; i++)
+ if (val[i] != -(i + 1 + KSZ9131_OFFSET)) {
+ mask = 0xffff;
+ mask ^= maxval << (field_sz * i);
+ newval = (newval & mask) |
+ (((val[i] / KSZ9131_STEP) & maxval)
+ << (field_sz * i));
+ }
+
+ return ksz9031_extended_write(phydev, OP_DATA, 2, reg, newval);
+}
+
+static int ksz9131_config_init(struct phy_device *phydev)
+{
+ const struct device *dev = &phydev->mdio.dev;
+ struct device_node *of_node = dev->of_node;
+ char *clk_skews[2] = {"rxc-skew-psec", "txc-skew-psec"};
+ char *rx_data_skews[4] = {
+ "rxd0-skew-psec", "rxd1-skew-psec",
+ "rxd2-skew-psec", "rxd3-skew-psec"
+ };
+ char *tx_data_skews[4] = {
+ "txd0-skew-psec", "txd1-skew-psec",
+ "txd2-skew-psec", "txd3-skew-psec"
+ };
+ char *control_skews[2] = {"txen-skew-psec", "rxdv-skew-psec"};
+ const struct device *dev_walker;
+ int ret;
+
+ dev_walker = &phydev->mdio.dev;
+ do {
+ of_node = dev_walker->of_node;
+ dev_walker = dev_walker->parent;
+ } while (!of_node && dev_walker);
+
+ if (!of_node)
+ return 0;
+
+ ret = ksz9131_of_load_skew_values(phydev, of_node,
+ MII_KSZ9031RN_CLK_PAD_SKEW, 5,
+ clk_skews, 2);
+ if (ret < 0)
+ return ret;
+
+ ret = ksz9131_of_load_skew_values(phydev, of_node,
+ MII_KSZ9031RN_CONTROL_PAD_SKEW, 4,
+ control_skews, 2);
+ if (ret < 0)
+ return ret;
+
+ ret = ksz9131_of_load_skew_values(phydev, of_node,
+ MII_KSZ9031RN_RX_DATA_PAD_SKEW, 4,
+ rx_data_skews, 4);
+ if (ret < 0)
+ return ret;
+
+ ret = ksz9131_of_load_skew_values(phydev, of_node,
+ MII_KSZ9031RN_TX_DATA_PAD_SKEW, 4,
+ tx_data_skews, 4);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
#define KSZ8873MLL_GLOBAL_CONTROL_4 0x06
#define KSZ8873MLL_GLOBAL_CONTROL_4_DUPLEX BIT(6)
#define KSZ8873MLL_GLOBAL_CONTROL_4_SPEED BIT(4)
@@ -975,6 +1085,23 @@ static struct phy_driver ksphy_driver[] = {
.suspend = genphy_suspend,
.resume = kszphy_resume,
}, {
+ .phy_id = PHY_ID_KSZ9131,
+ .phy_id_mask = MICREL_PHY_ID_MASK,
+ .name = "Microchip KSZ9131 Gigabit PHY",
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .driver_data = &ksz9021_type,
+ .probe = kszphy_probe,
+ .config_init = ksz9131_config_init,
+ .read_status = ksz9031_read_status,
+ .ack_interrupt = kszphy_ack_interrupt,
+ .config_intr = kszphy_config_intr,
+ .get_sset_count = kszphy_get_sset_count,
+ .get_strings = kszphy_get_strings,
+ .get_stats = kszphy_get_stats,
+ .suspend = genphy_suspend,
+ .resume = kszphy_resume,
+}, {
.phy_id = PHY_ID_KSZ8873MLL,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8873MLL Switch",
@@ -1022,6 +1149,7 @@ MODULE_LICENSE("GPL");
static struct mdio_device_id __maybe_unused micrel_tbl[] = {
{ PHY_ID_KSZ9021, 0x000ffffe },
{ PHY_ID_KSZ9031, MICREL_PHY_ID_MASK },
+ { PHY_ID_KSZ9131, MICREL_PHY_ID_MASK },
{ PHY_ID_KSZ8001, 0x00fffffc },
{ PHY_ID_KS8737, MICREL_PHY_ID_MASK },
{ PHY_ID_KSZ8021, 0x00ffffff },
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
index 7d0384e26c99..a2e59f4f6f01 100644
--- a/drivers/net/phy/mscc.c
+++ b/drivers/net/phy/mscc.c
@@ -6,6 +6,8 @@
* Copyright (c) 2016 Microsemi Corporation
*/
+#include <linux/firmware.h>
+#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mdio.h>
@@ -32,6 +34,15 @@ enum rgmii_rx_clock_delay {
#define DISABLE_HP_AUTO_MDIX_MASK 0x0080
#define DISABLE_PAIR_SWAP_CORR_MASK 0x0020
#define DISABLE_POLARITY_CORR_MASK 0x0010
+#define PARALLEL_DET_IGNORE_ADVERTISED 0x0008
+
+#define MSCC_PHY_EXT_CNTL_STATUS 22
+#define SMI_BROADCAST_WR_EN 0x0001
+
+#define MSCC_PHY_ERR_RX_CNT 19
+#define MSCC_PHY_ERR_FALSE_CARRIER_CNT 20
+#define MSCC_PHY_ERR_LINK_DISCONNECT_CNT 21
+#define ERR_CNT_MASK GENMASK(7, 0)
#define MSCC_PHY_EXT_PHY_CNTL_1 23
#define MAC_IF_SELECTION_MASK 0x1800
@@ -39,7 +50,22 @@ enum rgmii_rx_clock_delay {
#define MAC_IF_SELECTION_RMII 1
#define MAC_IF_SELECTION_RGMII 2
#define MAC_IF_SELECTION_POS 11
+#define VSC8584_MAC_IF_SELECTION_MASK 0x1000
+#define VSC8584_MAC_IF_SELECTION_SGMII 0
+#define VSC8584_MAC_IF_SELECTION_1000BASEX 1
+#define VSC8584_MAC_IF_SELECTION_POS 12
#define FAR_END_LOOPBACK_MODE_MASK 0x0008
+#define MEDIA_OP_MODE_MASK 0x0700
+#define MEDIA_OP_MODE_COPPER 0
+#define MEDIA_OP_MODE_SERDES 1
+#define MEDIA_OP_MODE_1000BASEX 2
+#define MEDIA_OP_MODE_100BASEFX 3
+#define MEDIA_OP_MODE_AMS_COPPER_SERDES 5
+#define MEDIA_OP_MODE_AMS_COPPER_1000BASEX 6
+#define MEDIA_OP_MODE_AMS_COPPER_100BASEFX 7
+#define MEDIA_OP_MODE_POS 8
+
+#define MSCC_PHY_EXT_PHY_CNTL_2 24
#define MII_VSC85XX_INT_MASK 25
#define MII_VSC85XX_INT_MASK_MASK 0xa000
@@ -62,19 +88,40 @@ enum rgmii_rx_clock_delay {
#define MSCC_PHY_PAGE_STANDARD 0x0000 /* Standard registers */
#define MSCC_PHY_PAGE_EXTENDED 0x0001 /* Extended registers */
#define MSCC_PHY_PAGE_EXTENDED_2 0x0002 /* Extended reg - page 2 */
+#define MSCC_PHY_PAGE_EXTENDED_3 0x0003 /* Extended reg - page 3 */
+#define MSCC_PHY_PAGE_EXTENDED_4 0x0004 /* Extended reg - page 4 */
+/* Extended reg - GPIO; this is a bank of registers that are shared for all PHYs
+ * in the same package.
+ */
+#define MSCC_PHY_PAGE_EXTENDED_GPIO 0x0010 /* Extended reg - GPIO */
+#define MSCC_PHY_PAGE_TEST 0x2a30 /* Test reg */
+#define MSCC_PHY_PAGE_TR 0x52b5 /* Token ring registers */
/* Extended Page 1 Registers */
+#define MSCC_PHY_CU_MEDIA_CRC_VALID_CNT 18
+#define VALID_CRC_CNT_CRC_MASK GENMASK(13, 0)
+
#define MSCC_PHY_EXT_MODE_CNTL 19
#define FORCE_MDI_CROSSOVER_MASK 0x000C
#define FORCE_MDI_CROSSOVER_MDIX 0x000C
#define FORCE_MDI_CROSSOVER_MDI 0x0008
#define MSCC_PHY_ACTIPHY_CNTL 20
+#define PHY_ADDR_REVERSED 0x0200
#define DOWNSHIFT_CNTL_MASK 0x001C
#define DOWNSHIFT_EN 0x0010
#define DOWNSHIFT_CNTL_POS 2
+#define MSCC_PHY_EXT_PHY_CNTL_4 23
+#define PHY_CNTL_4_ADDR_POS 11
+
+#define MSCC_PHY_VERIPHY_CNTL_2 25
+
+#define MSCC_PHY_VERIPHY_CNTL_3 26
+
/* Extended Page 2 Registers */
+#define MSCC_PHY_CU_PMD_TX_CNTL 16
+
#define MSCC_PHY_RGMII_CNTL 20
#define RGMII_RX_CLK_DELAY_MASK 0x0070
#define RGMII_RX_CLK_DELAY_POS 4
@@ -90,11 +137,90 @@ enum rgmii_rx_clock_delay {
#define SECURE_ON_ENABLE 0x8000
#define SECURE_ON_PASSWD_LEN_4 0x4000
+/* Extended Page 3 Registers */
+#define MSCC_PHY_SERDES_TX_VALID_CNT 21
+#define MSCC_PHY_SERDES_TX_CRC_ERR_CNT 22
+#define MSCC_PHY_SERDES_RX_VALID_CNT 28
+#define MSCC_PHY_SERDES_RX_CRC_ERR_CNT 29
+
+/* Extended page GPIO Registers */
+#define MSCC_DW8051_CNTL_STATUS 0
+#define MICRO_NSOFT_RESET 0x8000
+#define RUN_FROM_INT_ROM 0x4000
+#define AUTOINC_ADDR 0x2000
+#define PATCH_RAM_CLK 0x1000
+#define MICRO_PATCH_EN 0x0080
+#define DW8051_CLK_EN 0x0010
+#define MICRO_CLK_EN 0x0008
+#define MICRO_CLK_DIVIDE(x) ((x) >> 1)
+#define MSCC_DW8051_VLD_MASK 0xf1ff
+
+/* x Address in range 1-4 */
+#define MSCC_TRAP_ROM_ADDR(x) ((x) * 2 + 1)
+#define MSCC_PATCH_RAM_ADDR(x) (((x) + 1) * 2)
+#define MSCC_INT_MEM_ADDR 11
+
+#define MSCC_INT_MEM_CNTL 12
+#define READ_SFR 0x6000
+#define READ_PRAM 0x4000
+#define READ_ROM 0x2000
+#define READ_RAM 0x0000
+#define INT_MEM_WRITE_EN 0x1000
+#define EN_PATCH_RAM_TRAP_ADDR(x) (0x0100 << ((x) - 1))
+#define INT_MEM_DATA_M 0x00ff
+#define INT_MEM_DATA(x) (INT_MEM_DATA_M & (x))
+
+#define MSCC_PHY_PROC_CMD 18
+#define PROC_CMD_NCOMPLETED 0x8000
+#define PROC_CMD_FAILED 0x4000
+#define PROC_CMD_SGMII_PORT(x) ((x) << 8)
+#define PROC_CMD_FIBER_PORT(x) (0x0100 << (x) % 4)
+#define PROC_CMD_QSGMII_PORT 0x0c00
+#define PROC_CMD_RST_CONF_PORT 0x0080
+#define PROC_CMD_RECONF_PORT 0x0000
+#define PROC_CMD_READ_MOD_WRITE_PORT 0x0040
+#define PROC_CMD_WRITE 0x0040
+#define PROC_CMD_READ 0x0000
+#define PROC_CMD_FIBER_DISABLE 0x0020
+#define PROC_CMD_FIBER_100BASE_FX 0x0010
+#define PROC_CMD_FIBER_1000BASE_X 0x0000
+#define PROC_CMD_SGMII_MAC 0x0030
+#define PROC_CMD_QSGMII_MAC 0x0020
+#define PROC_CMD_NO_MAC_CONF 0x0000
+#define PROC_CMD_1588_DEFAULT_INIT 0x0010
+#define PROC_CMD_NOP 0x000f
+#define PROC_CMD_PHY_INIT 0x000a
+#define PROC_CMD_CRC16 0x0008
+#define PROC_CMD_FIBER_MEDIA_CONF 0x0001
+#define PROC_CMD_MCB_ACCESS_MAC_CONF 0x0000
+#define PROC_CMD_NCOMPLETED_TIMEOUT_MS 500
+
+#define MSCC_PHY_MAC_CFG_FASTLINK 19
+#define MAC_CFG_MASK 0xc000
+#define MAC_CFG_SGMII 0x0000
+#define MAC_CFG_QSGMII 0x4000
+
+/* Test page Registers */
+#define MSCC_PHY_TEST_PAGE_5 5
+#define MSCC_PHY_TEST_PAGE_8 8
+#define MSCC_PHY_TEST_PAGE_9 9
+#define MSCC_PHY_TEST_PAGE_20 20
+#define MSCC_PHY_TEST_PAGE_24 24
+
+/* Token ring page Registers */
+#define MSCC_PHY_TR_CNTL 16
+#define TR_WRITE 0x8000
+#define TR_ADDR(x) (0x7fff & (x))
+#define MSCC_PHY_TR_LSB 17
+#define MSCC_PHY_TR_MSB 18
+
/* Microsemi PHY ID's */
#define PHY_ID_VSC8530 0x00070560
#define PHY_ID_VSC8531 0x00070570
#define PHY_ID_VSC8540 0x00070760
#define PHY_ID_VSC8541 0x00070770
+#define PHY_ID_VSC8574 0x000704a0
+#define PHY_ID_VSC8584 0x000707c0
#define MSCC_VDDMAC_1500 1500
#define MSCC_VDDMAC_1800 1800
@@ -104,6 +230,24 @@ enum rgmii_rx_clock_delay {
#define DOWNSHIFT_COUNT_MAX 5
#define MAX_LEDS 4
+
+#define VSC8584_SUPP_LED_MODES (BIT(VSC8531_LINK_ACTIVITY) | \
+ BIT(VSC8531_LINK_1000_ACTIVITY) | \
+ BIT(VSC8531_LINK_100_ACTIVITY) | \
+ BIT(VSC8531_LINK_10_ACTIVITY) | \
+ BIT(VSC8531_LINK_100_1000_ACTIVITY) | \
+ BIT(VSC8531_LINK_10_1000_ACTIVITY) | \
+ BIT(VSC8531_LINK_10_100_ACTIVITY) | \
+ BIT(VSC8584_LINK_100FX_1000X_ACTIVITY) | \
+ BIT(VSC8531_DUPLEX_COLLISION) | \
+ BIT(VSC8531_COLLISION) | \
+ BIT(VSC8531_ACTIVITY) | \
+ BIT(VSC8584_100FX_1000X_ACTIVITY) | \
+ BIT(VSC8531_AUTONEG_FAULT) | \
+ BIT(VSC8531_SERIAL_MODE) | \
+ BIT(VSC8531_FORCE_LED_OFF) | \
+ BIT(VSC8531_FORCE_LED_ON))
+
#define VSC85XX_SUPP_LED_MODES (BIT(VSC8531_LINK_ACTIVITY) | \
BIT(VSC8531_LINK_1000_ACTIVITY) | \
BIT(VSC8531_LINK_100_ACTIVITY) | \
@@ -119,11 +263,120 @@ enum rgmii_rx_clock_delay {
BIT(VSC8531_FORCE_LED_OFF) | \
BIT(VSC8531_FORCE_LED_ON))
+#define MSCC_VSC8584_REVB_INT8051_FW "mscc_vsc8584_revb_int8051_fb48.bin"
+#define MSCC_VSC8584_REVB_INT8051_FW_START_ADDR 0xe800
+#define MSCC_VSC8584_REVB_INT8051_FW_CRC 0xfb48
+
+#define MSCC_VSC8574_REVB_INT8051_FW "mscc_vsc8574_revb_int8051_29e8.bin"
+#define MSCC_VSC8574_REVB_INT8051_FW_START_ADDR 0x4000
+#define MSCC_VSC8574_REVB_INT8051_FW_CRC 0x29e8
+
+#define VSC8584_REVB 0x0001
+#define MSCC_DEV_REV_MASK GENMASK(3, 0)
+
+struct reg_val {
+ u16 reg;
+ u32 val;
+};
+
+struct vsc85xx_hw_stat {
+ const char *string;
+ u8 reg;
+ u16 page;
+ u16 mask;
+};
+
+static const struct vsc85xx_hw_stat vsc85xx_hw_stats[] = {
+ {
+ .string = "phy_receive_errors",
+ .reg = MSCC_PHY_ERR_RX_CNT,
+ .page = MSCC_PHY_PAGE_STANDARD,
+ .mask = ERR_CNT_MASK,
+ }, {
+ .string = "phy_false_carrier",
+ .reg = MSCC_PHY_ERR_FALSE_CARRIER_CNT,
+ .page = MSCC_PHY_PAGE_STANDARD,
+ .mask = ERR_CNT_MASK,
+ }, {
+ .string = "phy_cu_media_link_disconnect",
+ .reg = MSCC_PHY_ERR_LINK_DISCONNECT_CNT,
+ .page = MSCC_PHY_PAGE_STANDARD,
+ .mask = ERR_CNT_MASK,
+ }, {
+ .string = "phy_cu_media_crc_good_count",
+ .reg = MSCC_PHY_CU_MEDIA_CRC_VALID_CNT,
+ .page = MSCC_PHY_PAGE_EXTENDED,
+ .mask = VALID_CRC_CNT_CRC_MASK,
+ }, {
+ .string = "phy_cu_media_crc_error_count",
+ .reg = MSCC_PHY_EXT_PHY_CNTL_4,
+ .page = MSCC_PHY_PAGE_EXTENDED,
+ .mask = ERR_CNT_MASK,
+ },
+};
+
+static const struct vsc85xx_hw_stat vsc8584_hw_stats[] = {
+ {
+ .string = "phy_receive_errors",
+ .reg = MSCC_PHY_ERR_RX_CNT,
+ .page = MSCC_PHY_PAGE_STANDARD,
+ .mask = ERR_CNT_MASK,
+ }, {
+ .string = "phy_false_carrier",
+ .reg = MSCC_PHY_ERR_FALSE_CARRIER_CNT,
+ .page = MSCC_PHY_PAGE_STANDARD,
+ .mask = ERR_CNT_MASK,
+ }, {
+ .string = "phy_cu_media_link_disconnect",
+ .reg = MSCC_PHY_ERR_LINK_DISCONNECT_CNT,
+ .page = MSCC_PHY_PAGE_STANDARD,
+ .mask = ERR_CNT_MASK,
+ }, {
+ .string = "phy_cu_media_crc_good_count",
+ .reg = MSCC_PHY_CU_MEDIA_CRC_VALID_CNT,
+ .page = MSCC_PHY_PAGE_EXTENDED,
+ .mask = VALID_CRC_CNT_CRC_MASK,
+ }, {
+ .string = "phy_cu_media_crc_error_count",
+ .reg = MSCC_PHY_EXT_PHY_CNTL_4,
+ .page = MSCC_PHY_PAGE_EXTENDED,
+ .mask = ERR_CNT_MASK,
+ }, {
+ .string = "phy_serdes_tx_good_pkt_count",
+ .reg = MSCC_PHY_SERDES_TX_VALID_CNT,
+ .page = MSCC_PHY_PAGE_EXTENDED_3,
+ .mask = VALID_CRC_CNT_CRC_MASK,
+ }, {
+ .string = "phy_serdes_tx_bad_crc_count",
+ .reg = MSCC_PHY_SERDES_TX_CRC_ERR_CNT,
+ .page = MSCC_PHY_PAGE_EXTENDED_3,
+ .mask = ERR_CNT_MASK,
+ }, {
+ .string = "phy_serdes_rx_good_pkt_count",
+ .reg = MSCC_PHY_SERDES_RX_VALID_CNT,
+ .page = MSCC_PHY_PAGE_EXTENDED_3,
+ .mask = VALID_CRC_CNT_CRC_MASK,
+ }, {
+ .string = "phy_serdes_rx_bad_crc_count",
+ .reg = MSCC_PHY_SERDES_RX_CRC_ERR_CNT,
+ .page = MSCC_PHY_PAGE_EXTENDED_3,
+ .mask = ERR_CNT_MASK,
+ },
+};
+
struct vsc8531_private {
int rate_magic;
u16 supp_led_modes;
u32 leds_mode[MAX_LEDS];
u8 nleds;
+ const struct vsc85xx_hw_stat *hw_stats;
+ u64 *stats;
+ int nstats;
+ bool pkg_init;
+ /* For multiple port PHYs; the MDIO address of the base PHY in the
+ * package.
+ */
+ unsigned int base_addr;
};
#ifdef CONFIG_OF_MDIO
@@ -140,12 +393,66 @@ static const struct vsc8531_edge_rate_table edge_table[] = {
};
#endif /* CONFIG_OF_MDIO */
-static int vsc85xx_phy_page_set(struct phy_device *phydev, u16 page)
+static int vsc85xx_phy_read_page(struct phy_device *phydev)
{
- int rc;
+ return __phy_read(phydev, MSCC_EXT_PAGE_ACCESS);
+}
- rc = phy_write(phydev, MSCC_EXT_PAGE_ACCESS, page);
- return rc;
+static int vsc85xx_phy_write_page(struct phy_device *phydev, int page)
+{
+ return __phy_write(phydev, MSCC_EXT_PAGE_ACCESS, page);
+}
+
+static int vsc85xx_get_sset_count(struct phy_device *phydev)
+{
+ struct vsc8531_private *priv = phydev->priv;
+
+ if (!priv)
+ return 0;
+
+ return priv->nstats;
+}
+
+static void vsc85xx_get_strings(struct phy_device *phydev, u8 *data)
+{
+ struct vsc8531_private *priv = phydev->priv;
+ int i;
+
+ if (!priv)
+ return;
+
+ for (i = 0; i < priv->nstats; i++)
+ strlcpy(data + i * ETH_GSTRING_LEN, priv->hw_stats[i].string,
+ ETH_GSTRING_LEN);
+}
+
+static u64 vsc85xx_get_stat(struct phy_device *phydev, int i)
+{
+ struct vsc8531_private *priv = phydev->priv;
+ int val;
+
+ val = phy_read_paged(phydev, priv->hw_stats[i].page,
+ priv->hw_stats[i].reg);
+ if (val < 0)
+ return U64_MAX;
+
+ val = val & priv->hw_stats[i].mask;
+ priv->stats[i] += val;
+
+ return priv->stats[i];
+}
+
+static void vsc85xx_get_stats(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct vsc8531_private *priv = phydev->priv;
+ int i;
+
+ if (!priv)
+ return;
+
+ for (i = 0; i < priv->nstats; i++)
+ data[i] = vsc85xx_get_stat(phydev, i);
}
static int vsc85xx_led_cntl_set(struct phy_device *phydev,
@@ -184,7 +491,7 @@ static int vsc85xx_mdix_set(struct phy_device *phydev, u8 mdix)
u16 reg_val;
reg_val = phy_read(phydev, MSCC_PHY_BYPASS_CONTROL);
- if ((mdix == ETH_TP_MDI) || (mdix == ETH_TP_MDI_X)) {
+ if (mdix == ETH_TP_MDI || mdix == ETH_TP_MDI_X) {
reg_val |= (DISABLE_PAIR_SWAP_CORR_MASK |
DISABLE_POLARITY_CORR_MASK |
DISABLE_HP_AUTO_MDIX_MASK);
@@ -194,25 +501,20 @@ static int vsc85xx_mdix_set(struct phy_device *phydev, u8 mdix)
DISABLE_HP_AUTO_MDIX_MASK);
}
rc = phy_write(phydev, MSCC_PHY_BYPASS_CONTROL, reg_val);
- if (rc != 0)
+ if (rc)
return rc;
- rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED);
- if (rc != 0)
- return rc;
+ reg_val = 0;
- reg_val = phy_read(phydev, MSCC_PHY_EXT_MODE_CNTL);
- reg_val &= ~(FORCE_MDI_CROSSOVER_MASK);
if (mdix == ETH_TP_MDI)
- reg_val |= FORCE_MDI_CROSSOVER_MDI;
+ reg_val = FORCE_MDI_CROSSOVER_MDI;
else if (mdix == ETH_TP_MDI_X)
- reg_val |= FORCE_MDI_CROSSOVER_MDIX;
- rc = phy_write(phydev, MSCC_PHY_EXT_MODE_CNTL, reg_val);
- if (rc != 0)
- return rc;
+ reg_val = FORCE_MDI_CROSSOVER_MDIX;
- rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD);
- if (rc != 0)
+ rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED,
+ MSCC_PHY_EXT_MODE_CNTL, FORCE_MDI_CROSSOVER_MASK,
+ reg_val);
+ if (rc < 0)
return rc;
return genphy_restart_aneg(phydev);
@@ -220,30 +522,24 @@ static int vsc85xx_mdix_set(struct phy_device *phydev, u8 mdix)
static int vsc85xx_downshift_get(struct phy_device *phydev, u8 *count)
{
- int rc;
- u16 reg_val;
+ int reg_val;
- rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED);
- if (rc != 0)
- goto out;
+ reg_val = phy_read_paged(phydev, MSCC_PHY_PAGE_EXTENDED,
+ MSCC_PHY_ACTIPHY_CNTL);
+ if (reg_val < 0)
+ return reg_val;
- reg_val = phy_read(phydev, MSCC_PHY_ACTIPHY_CNTL);
reg_val &= DOWNSHIFT_CNTL_MASK;
if (!(reg_val & DOWNSHIFT_EN))
*count = DOWNSHIFT_DEV_DISABLE;
else
*count = ((reg_val & ~DOWNSHIFT_EN) >> DOWNSHIFT_CNTL_POS) + 2;
- rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD);
-out:
- return rc;
+ return 0;
}
static int vsc85xx_downshift_set(struct phy_device *phydev, u8 count)
{
- int rc;
- u16 reg_val;
-
if (count == DOWNSHIFT_DEV_DEFAULT_COUNT) {
/* Default downshift count 3 (i.e. Bit3:2 = 0b01) */
count = ((1 << DOWNSHIFT_CNTL_POS) | DOWNSHIFT_EN);
@@ -255,21 +551,9 @@ static int vsc85xx_downshift_set(struct phy_device *phydev, u8 count)
count = (((count - 2) << DOWNSHIFT_CNTL_POS) | DOWNSHIFT_EN);
}
- rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED);
- if (rc != 0)
- goto out;
-
- reg_val = phy_read(phydev, MSCC_PHY_ACTIPHY_CNTL);
- reg_val &= ~(DOWNSHIFT_CNTL_MASK);
- reg_val |= count;
- rc = phy_write(phydev, MSCC_PHY_ACTIPHY_CNTL, reg_val);
- if (rc != 0)
- goto out;
-
- rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD);
-
-out:
- return rc;
+ return phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED,
+ MSCC_PHY_ACTIPHY_CNTL, DOWNSHIFT_CNTL_MASK,
+ count);
}
static int vsc85xx_wol_set(struct phy_device *phydev,
@@ -283,46 +567,48 @@ static int vsc85xx_wol_set(struct phy_device *phydev,
u8 *mac_addr = phydev->attached_dev->dev_addr;
mutex_lock(&phydev->lock);
- rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED_2);
- if (rc != 0)
+ rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2);
+ if (rc < 0) {
+ rc = phy_restore_page(phydev, rc, rc);
goto out_unlock;
+ }
if (wol->wolopts & WAKE_MAGIC) {
/* Store the device address for the magic packet */
for (i = 0; i < ARRAY_SIZE(pwd); i++)
pwd[i] = mac_addr[5 - (i * 2 + 1)] << 8 |
mac_addr[5 - i * 2];
- phy_write(phydev, MSCC_PHY_WOL_LOWER_MAC_ADDR, pwd[0]);
- phy_write(phydev, MSCC_PHY_WOL_MID_MAC_ADDR, pwd[1]);
- phy_write(phydev, MSCC_PHY_WOL_UPPER_MAC_ADDR, pwd[2]);
+ __phy_write(phydev, MSCC_PHY_WOL_LOWER_MAC_ADDR, pwd[0]);
+ __phy_write(phydev, MSCC_PHY_WOL_MID_MAC_ADDR, pwd[1]);
+ __phy_write(phydev, MSCC_PHY_WOL_UPPER_MAC_ADDR, pwd[2]);
} else {
- phy_write(phydev, MSCC_PHY_WOL_LOWER_MAC_ADDR, 0);
- phy_write(phydev, MSCC_PHY_WOL_MID_MAC_ADDR, 0);
- phy_write(phydev, MSCC_PHY_WOL_UPPER_MAC_ADDR, 0);
+ __phy_write(phydev, MSCC_PHY_WOL_LOWER_MAC_ADDR, 0);
+ __phy_write(phydev, MSCC_PHY_WOL_MID_MAC_ADDR, 0);
+ __phy_write(phydev, MSCC_PHY_WOL_UPPER_MAC_ADDR, 0);
}
if (wol_conf->wolopts & WAKE_MAGICSECURE) {
for (i = 0; i < ARRAY_SIZE(pwd); i++)
pwd[i] = wol_conf->sopass[5 - (i * 2 + 1)] << 8 |
wol_conf->sopass[5 - i * 2];
- phy_write(phydev, MSCC_PHY_WOL_LOWER_PASSWD, pwd[0]);
- phy_write(phydev, MSCC_PHY_WOL_MID_PASSWD, pwd[1]);
- phy_write(phydev, MSCC_PHY_WOL_UPPER_PASSWD, pwd[2]);
+ __phy_write(phydev, MSCC_PHY_WOL_LOWER_PASSWD, pwd[0]);
+ __phy_write(phydev, MSCC_PHY_WOL_MID_PASSWD, pwd[1]);
+ __phy_write(phydev, MSCC_PHY_WOL_UPPER_PASSWD, pwd[2]);
} else {
- phy_write(phydev, MSCC_PHY_WOL_LOWER_PASSWD, 0);
- phy_write(phydev, MSCC_PHY_WOL_MID_PASSWD, 0);
- phy_write(phydev, MSCC_PHY_WOL_UPPER_PASSWD, 0);
+ __phy_write(phydev, MSCC_PHY_WOL_LOWER_PASSWD, 0);
+ __phy_write(phydev, MSCC_PHY_WOL_MID_PASSWD, 0);
+ __phy_write(phydev, MSCC_PHY_WOL_UPPER_PASSWD, 0);
}
- reg_val = phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL);
+ reg_val = __phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL);
if (wol_conf->wolopts & WAKE_MAGICSECURE)
reg_val |= SECURE_ON_ENABLE;
else
reg_val &= ~SECURE_ON_ENABLE;
- phy_write(phydev, MSCC_PHY_WOL_MAC_CONTROL, reg_val);
+ __phy_write(phydev, MSCC_PHY_WOL_MAC_CONTROL, reg_val);
- rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD);
- if (rc != 0)
+ rc = phy_restore_page(phydev, rc, rc > 0 ? 0 : rc);
+ if (rc < 0)
goto out_unlock;
if (wol->wolopts & WAKE_MAGIC) {
@@ -330,14 +616,14 @@ static int vsc85xx_wol_set(struct phy_device *phydev,
reg_val = phy_read(phydev, MII_VSC85XX_INT_MASK);
reg_val |= MII_VSC85XX_INT_MASK_WOL;
rc = phy_write(phydev, MII_VSC85XX_INT_MASK, reg_val);
- if (rc != 0)
+ if (rc)
goto out_unlock;
} else {
/* Disable the WOL interrupt */
reg_val = phy_read(phydev, MII_VSC85XX_INT_MASK);
reg_val &= (~MII_VSC85XX_INT_MASK_WOL);
rc = phy_write(phydev, MII_VSC85XX_INT_MASK, reg_val);
- if (rc != 0)
+ if (rc)
goto out_unlock;
}
/* Clear WOL iterrupt status */
@@ -359,17 +645,17 @@ static void vsc85xx_wol_get(struct phy_device *phydev,
struct ethtool_wolinfo *wol_conf = wol;
mutex_lock(&phydev->lock);
- rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED_2);
- if (rc != 0)
+ rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2);
+ if (rc < 0)
goto out_unlock;
- reg_val = phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL);
+ reg_val = __phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL);
if (reg_val & SECURE_ON_ENABLE)
wol_conf->wolopts |= WAKE_MAGICSECURE;
if (wol_conf->wolopts & WAKE_MAGICSECURE) {
- pwd[0] = phy_read(phydev, MSCC_PHY_WOL_LOWER_PASSWD);
- pwd[1] = phy_read(phydev, MSCC_PHY_WOL_MID_PASSWD);
- pwd[2] = phy_read(phydev, MSCC_PHY_WOL_UPPER_PASSWD);
+ pwd[0] = __phy_read(phydev, MSCC_PHY_WOL_LOWER_PASSWD);
+ pwd[1] = __phy_read(phydev, MSCC_PHY_WOL_MID_PASSWD);
+ pwd[2] = __phy_read(phydev, MSCC_PHY_WOL_UPPER_PASSWD);
for (i = 0; i < ARRAY_SIZE(pwd); i++) {
wol_conf->sopass[5 - i * 2] = pwd[i] & 0x00ff;
wol_conf->sopass[5 - (i * 2 + 1)] = (pwd[i] & 0xff00)
@@ -377,9 +663,8 @@ static void vsc85xx_wol_get(struct phy_device *phydev,
}
}
- rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD);
-
out_unlock:
+ phy_restore_page(phydev, rc, rc > 0 ? 0 : rc);
mutex_unlock(&phydev->lock);
}
@@ -387,7 +672,7 @@ out_unlock:
static int vsc85xx_edge_rate_magic_get(struct phy_device *phydev)
{
u32 vdd, sd;
- int rc, i, j;
+ int i, j;
struct device *dev = &phydev->mdio.dev;
struct device_node *of_node = dev->of_node;
u8 sd_array_size = ARRAY_SIZE(edge_table[0].slowdown);
@@ -395,12 +680,10 @@ static int vsc85xx_edge_rate_magic_get(struct phy_device *phydev)
if (!of_node)
return -ENODEV;
- rc = of_property_read_u32(of_node, "vsc8531,vddmac", &vdd);
- if (rc != 0)
+ if (of_property_read_u32(of_node, "vsc8531,vddmac", &vdd))
vdd = MSCC_VDDMAC_3300;
- rc = of_property_read_u32(of_node, "vsc8531,edge-slowdown", &sd);
- if (rc != 0)
+ if (of_property_read_u32(of_node, "vsc8531,edge-slowdown", &sd))
sd = 0;
for (i = 0; i < ARRAY_SIZE(edge_table); i++)
@@ -474,21 +757,11 @@ static int vsc85xx_dt_led_modes_get(struct phy_device *phydev,
static int vsc85xx_edge_rate_cntl_set(struct phy_device *phydev, u8 edge_rate)
{
int rc;
- u16 reg_val;
mutex_lock(&phydev->lock);
- rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED_2);
- if (rc != 0)
- goto out_unlock;
- reg_val = phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL);
- reg_val &= ~(EDGE_RATE_CNTL_MASK);
- reg_val |= (edge_rate << EDGE_RATE_CNTL_POS);
- rc = phy_write(phydev, MSCC_PHY_WOL_MAC_CONTROL, reg_val);
- if (rc != 0)
- goto out_unlock;
- rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD);
-
-out_unlock:
+ rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED_2,
+ MSCC_PHY_WOL_MAC_CONTROL, EDGE_RATE_CNTL_MASK,
+ edge_rate << EDGE_RATE_CNTL_POS);
mutex_unlock(&phydev->lock);
return rc;
@@ -519,7 +792,7 @@ static int vsc85xx_mac_if_set(struct phy_device *phydev,
goto out_unlock;
}
rc = phy_write(phydev, MSCC_PHY_EXT_PHY_CNTL_1, reg_val);
- if (rc != 0)
+ if (rc)
goto out_unlock;
rc = genphy_soft_reset(phydev);
@@ -537,17 +810,17 @@ static int vsc85xx_default_config(struct phy_device *phydev)
phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
mutex_lock(&phydev->lock);
- rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED_2);
- if (rc != 0)
+ rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2);
+ if (rc < 0)
goto out_unlock;
reg_val = phy_read(phydev, MSCC_PHY_RGMII_CNTL);
reg_val &= ~(RGMII_RX_CLK_DELAY_MASK);
reg_val |= (RGMII_RX_CLK_DELAY_1_1_NS << RGMII_RX_CLK_DELAY_POS);
phy_write(phydev, MSCC_PHY_RGMII_CNTL, reg_val);
- rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD);
out_unlock:
+ rc = phy_restore_page(phydev, rc, rc > 0 ? 0 : rc);
mutex_unlock(&phydev->lock);
return rc;
@@ -576,6 +849,809 @@ static int vsc85xx_set_tunable(struct phy_device *phydev,
}
}
+/* mdiobus lock should be locked when using this function */
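+/* The TR registers are accessed indirectly: the 32-bit value is staged in the
+ * TR MSB/LSB data registers and committed to the target address by a write
+ * command to the TR control register.
+ */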
+static void vsc85xx_tr_write(struct phy_device *phydev, u16 addr, u32 val)
+{
+ __phy_write(phydev, MSCC_PHY_TR_MSB, val >> 16);
+ __phy_write(phydev, MSCC_PHY_TR_LSB, val & GENMASK(15, 0));
+ __phy_write(phydev, MSCC_PHY_TR_CNTL, TR_WRITE | TR_ADDR(addr));
+}
+
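+/* EEE init sequence: a fixed table of TR-page register writes applied in one
+ * shot under the PHY lock. The values are applied verbatim.
+ */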
+static int vsc85xx_eee_init_seq_set(struct phy_device *phydev)
+{
+ const struct reg_val init_eee[] = {
+ {0x0f82, 0x0012b00a},
+ {0x1686, 0x00000004},
+ {0x168c, 0x00d2c46f},
+ {0x17a2, 0x00000620},
+ {0x16a0, 0x00eeffdd},
+ {0x16a6, 0x00071448},
+ {0x16a4, 0x0013132f},
+ {0x16a8, 0x00000000},
+ {0x0ffc, 0x00c0a028},
+ {0x0fe8, 0x0091b06c},
+ {0x0fea, 0x00041600},
+ {0x0f80, 0x00000af4},
+ {0x0fec, 0x00901809},
+ {0x0fee, 0x0000a6a1},
+ {0x0ffe, 0x00b01007},
+ {0x16b0, 0x00eeff00},
+ {0x16b2, 0x00007000},
+ {0x16b4, 0x00000814},
+ };
+ unsigned int i;
+ int oldpage;
+
+ mutex_lock(&phydev->lock);
+ oldpage = phy_select_page(phydev, MSCC_PHY_PAGE_TR);
+ if (oldpage < 0)
+ goto out_unlock;
+
+ for (i = 0; i < ARRAY_SIZE(init_eee); i++)
+ vsc85xx_tr_write(phydev, init_eee[i].reg, init_eee[i].val);
+
+out_unlock:
+ oldpage = phy_restore_page(phydev, oldpage, oldpage);
+ mutex_unlock(&phydev->lock);
+
+ return oldpage;
+}
+
+/* phydev->bus->mdio_lock should be locked when using this function */
+static int phy_base_write(struct phy_device *phydev, u32 regnum, u16 val)
+{
+ struct vsc8531_private *priv = phydev->priv;
+
+ if (unlikely(!mutex_is_locked(&phydev->mdio.bus->mdio_lock))) {
+ dev_err(&phydev->mdio.dev, "MDIO bus lock not held!\n");
+ dump_stack();
+ }
+
+ return __mdiobus_write(phydev->mdio.bus, priv->base_addr, regnum, val);
+}
+
+/* phydev->bus->mdio_lock should be locked when using this function */
+static int phy_base_read(struct phy_device *phydev, u32 regnum)
+{
+ struct vsc8531_private *priv = phydev->priv;
+
+ if (unlikely(!mutex_is_locked(&phydev->mdio.bus->mdio_lock))) {
+ dev_err(&phydev->mdio.dev, "MDIO bus lock not held!\n");
+ dump_stack();
+ }
+
+ return __mdiobus_read(phydev->mdio.bus, priv->base_addr, regnum);
+}
+
+/* bus->mdio_lock should be locked when using this function */
+static void vsc8584_csr_write(struct phy_device *phydev, u16 addr, u32 val)
+{
+ phy_base_write(phydev, MSCC_PHY_TR_MSB, val >> 16);
+ phy_base_write(phydev, MSCC_PHY_TR_LSB, val & GENMASK(15, 0));
+ phy_base_write(phydev, MSCC_PHY_TR_CNTL, TR_WRITE | TR_ADDR(addr));
+}
+
+/* bus->mdio_lock should be locked when using this function */
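+/* Issue a command to the embedded 8051 micro via the GPIO-page PROC_CMD
+ * register and poll until the micro reports completion or failure, or until
+ * PROC_CMD_NCOMPLETED_TIMEOUT_MS expires.
+ */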
+static int vsc8584_cmd(struct phy_device *phydev, u16 val)
+{
+ unsigned long deadline;
+ u16 reg_val;
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+ MSCC_PHY_PAGE_EXTENDED_GPIO);
+
+ phy_base_write(phydev, MSCC_PHY_PROC_CMD, PROC_CMD_NCOMPLETED | val);
+
+ deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+ do {
+ reg_val = phy_base_read(phydev, MSCC_PHY_PROC_CMD);
+ } while (time_before(jiffies, deadline) &&
+ (reg_val & PROC_CMD_NCOMPLETED) &&
+ !(reg_val & PROC_CMD_FAILED));
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+ if (reg_val & PROC_CMD_FAILED)
+ return -EIO;
+
+ if (reg_val & PROC_CMD_NCOMPLETED)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+/* bus->mdio_lock should be locked when using this function */
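+/* Take the 8051 micro out of software reset, optionally with the patch RAM
+ * enabled so that a previously downloaded firmware patch takes effect.
+ */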
+static int vsc8584_micro_deassert_reset(struct phy_device *phydev,
+ bool patch_en)
+{
+ u32 enable, release;
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+ MSCC_PHY_PAGE_EXTENDED_GPIO);
+
+ enable = RUN_FROM_INT_ROM | MICRO_CLK_EN | DW8051_CLK_EN;
+ release = MICRO_NSOFT_RESET | RUN_FROM_INT_ROM | DW8051_CLK_EN |
+ MICRO_CLK_EN;
+
+ if (patch_en) {
+ enable |= MICRO_PATCH_EN;
+ release |= MICRO_PATCH_EN;
+
+ /* Clear all patches */
+ phy_base_write(phydev, MSCC_INT_MEM_CNTL, READ_RAM);
+ }
+
+ /* Enable 8051 Micro clock; CLEAR/SET patch present; disable PRAM clock
+ * override and addr. auto-incr; operate at 125 MHz
+ */
+ phy_base_write(phydev, MSCC_DW8051_CNTL_STATUS, enable);
+ /* Release 8051 Micro SW reset */
+ phy_base_write(phydev, MSCC_DW8051_CNTL_STATUS, release);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+ return 0;
+}
+
+/* bus->mdio_lock should be locked when using this function */
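+/* Put the 8051 micro back into software reset: install a temporary trap at
+ * patch vector 4, clear MICRO_NSOFT_RESET, then remove the trap again.
+ */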
+static int vsc8584_micro_assert_reset(struct phy_device *phydev)
+{
+ int ret;
+ u16 reg;
+
+ ret = vsc8584_cmd(phydev, PROC_CMD_NOP);
+ if (ret)
+ return ret;
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+ MSCC_PHY_PAGE_EXTENDED_GPIO);
+
+ reg = phy_base_read(phydev, MSCC_INT_MEM_CNTL);
+ reg &= ~EN_PATCH_RAM_TRAP_ADDR(4);
+ phy_base_write(phydev, MSCC_INT_MEM_CNTL, reg);
+
+ phy_base_write(phydev, MSCC_TRAP_ROM_ADDR(4), 0x005b);
+ phy_base_write(phydev, MSCC_PATCH_RAM_ADDR(4), 0x005b);
+
+ reg = phy_base_read(phydev, MSCC_INT_MEM_CNTL);
+ reg |= EN_PATCH_RAM_TRAP_ADDR(4);
+ phy_base_write(phydev, MSCC_INT_MEM_CNTL, reg);
+
+ phy_base_write(phydev, MSCC_PHY_PROC_CMD, PROC_CMD_NOP);
+
+ reg = phy_base_read(phydev, MSCC_DW8051_CNTL_STATUS);
+ reg &= ~MICRO_NSOFT_RESET;
+ phy_base_write(phydev, MSCC_DW8051_CNTL_STATUS, reg);
+
+ phy_base_write(phydev, MSCC_PHY_PROC_CMD, PROC_CMD_MCB_ACCESS_MAC_CONF |
+ PROC_CMD_SGMII_PORT(0) | PROC_CMD_NO_MAC_CONF |
+ PROC_CMD_READ);
+
+ reg = phy_base_read(phydev, MSCC_INT_MEM_CNTL);
+ reg &= ~EN_PATCH_RAM_TRAP_ADDR(4);
+ phy_base_write(phydev, MSCC_INT_MEM_CNTL, reg);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+ return 0;
+}
+
+/* bus->mdio_lock should be locked when using this function */
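+/* Ask the 8051 micro to compute a CRC-16 over 'size' bytes of its firmware
+ * starting at 'start' and read the result back from the VeriPHY control
+ * register once the command completes.
+ */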
+static int vsc8584_get_fw_crc(struct phy_device *phydev, u16 start, u16 size,
+ u16 *crc)
+{
+ int ret;
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED);
+
+ phy_base_write(phydev, MSCC_PHY_VERIPHY_CNTL_2, start);
+ phy_base_write(phydev, MSCC_PHY_VERIPHY_CNTL_3, size);
+
+ /* Start Micro command */
+ ret = vsc8584_cmd(phydev, PROC_CMD_CRC16);
+ if (ret)
+ goto out;
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED);
+
+ *crc = phy_base_read(phydev, MSCC_PHY_VERIPHY_CNTL_2);
+
+out:
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+ return ret;
+}
+
+/* bus->mdio_lock should be locked when using this function */
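+/* Download the 8051 firmware patch into program RAM one byte at a time, with
+ * the micro held in reset and auto-increment addressing enabled, then clear
+ * internal memory access again.
+ */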
+static int vsc8584_patch_fw(struct phy_device *phydev,
+ const struct firmware *fw)
+{
+ int i, ret;
+
+ ret = vsc8584_micro_assert_reset(phydev);
+ if (ret) {
+ dev_err(&phydev->mdio.dev,
+ "%s: failed to assert reset of micro\n", __func__);
+ return ret;
+ }
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+ MSCC_PHY_PAGE_EXTENDED_GPIO);
+
+ /* Hold 8051 Micro in SW Reset, Enable auto incr address and patch clock
+ * Disable the 8051 Micro clock
+ */
+ phy_base_write(phydev, MSCC_DW8051_CNTL_STATUS, RUN_FROM_INT_ROM |
+ AUTOINC_ADDR | PATCH_RAM_CLK | MICRO_CLK_EN |
+ MICRO_CLK_DIVIDE(2));
+ phy_base_write(phydev, MSCC_INT_MEM_CNTL, READ_PRAM | INT_MEM_WRITE_EN |
+ INT_MEM_DATA(2));
+ phy_base_write(phydev, MSCC_INT_MEM_ADDR, 0x0000);
+
+ for (i = 0; i < fw->size; i++)
+ phy_base_write(phydev, MSCC_INT_MEM_CNTL, READ_PRAM |
+ INT_MEM_WRITE_EN | fw->data[i]);
+
+ /* Clear internal memory access */
+ phy_base_write(phydev, MSCC_INT_MEM_CNTL, READ_RAM);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+ return 0;
+}
+
+/* bus->mdio_lock should be locked when using this function */
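+/* Infer whether the serdes part of the micro init was already done by checking
+ * that the trap/patch RAM registers and the 8051 control/status register still
+ * hold the values programmed by a previous init.
+ */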
+static bool vsc8574_is_serdes_init(struct phy_device *phydev)
+{
+ u16 reg;
+ bool ret;
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+ MSCC_PHY_PAGE_EXTENDED_GPIO);
+
+ reg = phy_base_read(phydev, MSCC_TRAP_ROM_ADDR(1));
+ if (reg != 0x3eb7) {
+ ret = false;
+ goto out;
+ }
+
+ reg = phy_base_read(phydev, MSCC_PATCH_RAM_ADDR(1));
+ if (reg != 0x4012) {
+ ret = false;
+ goto out;
+ }
+
+ reg = phy_base_read(phydev, MSCC_INT_MEM_CNTL);
+ if (reg != EN_PATCH_RAM_TRAP_ADDR(1)) {
+ ret = false;
+ goto out;
+ }
+
+ reg = phy_base_read(phydev, MSCC_DW8051_CNTL_STATUS);
+ if ((MICRO_NSOFT_RESET | RUN_FROM_INT_ROM | DW8051_CLK_EN |
+ MICRO_CLK_EN) != (reg & MSCC_DW8051_VLD_MASK)) {
+ ret = false;
+ goto out;
+ }
+
+ ret = true;
+out:
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+ return ret;
+}
+
+/* bus->mdio_lock should be locked when using this function */
+static int vsc8574_config_pre_init(struct phy_device *phydev)
+{
+ const struct reg_val pre_init1[] = {
+ {0x0fae, 0x000401bd},
+ {0x0fac, 0x000f000f},
+ {0x17a0, 0x00a0f147},
+ {0x0fe4, 0x00052f54},
+ {0x1792, 0x0027303d},
+ {0x07fe, 0x00000704},
+ {0x0fe0, 0x00060150},
+ {0x0f82, 0x0012b00a},
+ {0x0f80, 0x00000d74},
+ {0x02e0, 0x00000012},
+ {0x03a2, 0x00050208},
+ {0x03b2, 0x00009186},
+ {0x0fb0, 0x000e3700},
+ {0x1688, 0x00049f81},
+ {0x0fd2, 0x0000ffff},
+ {0x168a, 0x00039fa2},
+ {0x1690, 0x0020640b},
+ {0x0258, 0x00002220},
+ {0x025a, 0x00002a20},
+ {0x025c, 0x00003060},
+ {0x025e, 0x00003fa0},
+ {0x03a6, 0x0000e0f0},
+ {0x0f92, 0x00001489},
+ {0x16a2, 0x00007000},
+ {0x16a6, 0x00071448},
+ {0x16a0, 0x00eeffdd},
+ {0x0fe8, 0x0091b06c},
+ {0x0fea, 0x00041600},
+ {0x16b0, 0x00eeff00},
+ {0x16b2, 0x00007000},
+ {0x16b4, 0x00000814},
+ {0x0f90, 0x00688980},
+ {0x03a4, 0x0000d8f0},
+ {0x0fc0, 0x00000400},
+ {0x07fa, 0x0050100f},
+ {0x0796, 0x00000003},
+ {0x07f8, 0x00c3ff98},
+ {0x0fa4, 0x0018292a},
+ {0x168c, 0x00d2c46f},
+ {0x17a2, 0x00000620},
+ {0x16a4, 0x0013132f},
+ {0x16a8, 0x00000000},
+ {0x0ffc, 0x00c0a028},
+ {0x0fec, 0x00901c09},
+ {0x0fee, 0x0004a6a1},
+ {0x0ffe, 0x00b01807},
+ };
+ const struct reg_val pre_init2[] = {
+ {0x0486, 0x0008a518},
+ {0x0488, 0x006dc696},
+ {0x048a, 0x00000912},
+ {0x048e, 0x00000db6},
+ {0x049c, 0x00596596},
+ {0x049e, 0x00000514},
+ {0x04a2, 0x00410280},
+ {0x04a4, 0x00000000},
+ {0x04a6, 0x00000000},
+ {0x04a8, 0x00000000},
+ {0x04aa, 0x00000000},
+ {0x04ae, 0x007df7dd},
+ {0x04b0, 0x006d95d4},
+ {0x04b2, 0x00492410},
+ };
+ struct device *dev = &phydev->mdio.dev;
+ const struct firmware *fw;
+ unsigned int i;
+ u16 crc, reg;
+ bool serdes_init;
+ int ret;
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+ /* all writes below are broadcasted to all PHYs in the same package */
+ reg = phy_base_read(phydev, MSCC_PHY_EXT_CNTL_STATUS);
+ reg |= SMI_BROADCAST_WR_EN;
+ phy_base_write(phydev, MSCC_PHY_EXT_CNTL_STATUS, reg);
+
+ phy_base_write(phydev, MII_VSC85XX_INT_MASK, 0);
+
+ /* The register writes below tweak analog and electrical configuration
+ * that was determined through characterization by PHY engineers. The
+ * values don't mean anything more than "these are the best values".
+ */
+ phy_base_write(phydev, MSCC_PHY_EXT_PHY_CNTL_2, 0x0040);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TEST);
+
+ phy_base_write(phydev, MSCC_PHY_TEST_PAGE_20, 0x4320);
+ phy_base_write(phydev, MSCC_PHY_TEST_PAGE_24, 0x0c00);
+ phy_base_write(phydev, MSCC_PHY_TEST_PAGE_9, 0x18ca);
+ phy_base_write(phydev, MSCC_PHY_TEST_PAGE_5, 0x1b20);
+
+ reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8);
+ reg |= 0x8000;
+ phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TR);
+
+ for (i = 0; i < ARRAY_SIZE(pre_init1); i++)
+ vsc8584_csr_write(phydev, pre_init1[i].reg, pre_init1[i].val);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED_2);
+
+ phy_base_write(phydev, MSCC_PHY_CU_PMD_TX_CNTL, 0x028e);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TR);
+
+ for (i = 0; i < ARRAY_SIZE(pre_init2); i++)
+ vsc8584_csr_write(phydev, pre_init2[i].reg, pre_init2[i].val);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TEST);
+
+ reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8);
+ reg &= ~0x8000;
+ phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+ /* end of write broadcasting */
+ reg = phy_base_read(phydev, MSCC_PHY_EXT_CNTL_STATUS);
+ reg &= ~SMI_BROADCAST_WR_EN;
+ phy_base_write(phydev, MSCC_PHY_EXT_CNTL_STATUS, reg);
+
+ ret = request_firmware(&fw, MSCC_VSC8574_REVB_INT8051_FW, dev);
+ if (ret) {
+ dev_err(dev, "failed to load firmware %s, ret: %d\n",
+ MSCC_VSC8574_REVB_INT8051_FW, ret);
+ return ret;
+ }
+
+ /* Add one byte to size for the one added by the patch_fw function */
+ ret = vsc8584_get_fw_crc(phydev,
+ MSCC_VSC8574_REVB_INT8051_FW_START_ADDR,
+ fw->size + 1, &crc);
+ if (ret)
+ goto out;
+
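+ /* If the embedded firmware already carries the expected CRC, only make
+ * sure the serdes/micro setup is in place; otherwise download the patch.
+ */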
+ if (crc == MSCC_VSC8574_REVB_INT8051_FW_CRC) {
+ serdes_init = vsc8574_is_serdes_init(phydev);
+
+ if (!serdes_init) {
+ ret = vsc8584_micro_assert_reset(phydev);
+ if (ret) {
+ dev_err(dev,
+ "%s: failed to assert reset of micro\n",
+ __func__);
+ goto out;
+ }
+ }
+ } else {
+ dev_dbg(dev, "FW CRC is not the expected one, patching FW\n");
+
+ serdes_init = false;
+
+ if (vsc8584_patch_fw(phydev, fw))
+ dev_warn(dev,
+ "failed to patch FW, expect non-optimal device\n");
+ }
+
+ if (!serdes_init) {
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+ MSCC_PHY_PAGE_EXTENDED_GPIO);
+
+ phy_base_write(phydev, MSCC_TRAP_ROM_ADDR(1), 0x3eb7);
+ phy_base_write(phydev, MSCC_PATCH_RAM_ADDR(1), 0x4012);
+ phy_base_write(phydev, MSCC_INT_MEM_CNTL,
+ EN_PATCH_RAM_TRAP_ADDR(1));
+
+ vsc8584_micro_deassert_reset(phydev, false);
+
+ /* Add one byte to size for the one added by the patch_fw
+ * function
+ */
+ ret = vsc8584_get_fw_crc(phydev,
+ MSCC_VSC8574_REVB_INT8051_FW_START_ADDR,
+ fw->size + 1, &crc);
+ if (ret)
+ goto out;
+
+ if (crc != MSCC_VSC8574_REVB_INT8051_FW_CRC)
+ dev_warn(dev,
+ "FW CRC after patching is not the expected one, expect non-optimal device\n");
+ }
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+ MSCC_PHY_PAGE_EXTENDED_GPIO);
+
+ ret = vsc8584_cmd(phydev, PROC_CMD_1588_DEFAULT_INIT |
+ PROC_CMD_PHY_INIT);
+
+out:
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+ release_firmware(fw);
+
+ return ret;
+}
+
+/* bus->mdio_lock should be locked when using this function */
+static int vsc8584_config_pre_init(struct phy_device *phydev)
+{
+ const struct reg_val pre_init1[] = {
+ {0x07fa, 0x0050100f},
+ {0x1688, 0x00049f81},
+ {0x0f90, 0x00688980},
+ {0x03a4, 0x0000d8f0},
+ {0x0fc0, 0x00000400},
+ {0x0f82, 0x0012b002},
+ {0x1686, 0x00000004},
+ {0x168c, 0x00d2c46f},
+ {0x17a2, 0x00000620},
+ {0x16a0, 0x00eeffdd},
+ {0x16a6, 0x00071448},
+ {0x16a4, 0x0013132f},
+ {0x16a8, 0x00000000},
+ {0x0ffc, 0x00c0a028},
+ {0x0fe8, 0x0091b06c},
+ {0x0fea, 0x00041600},
+ {0x0f80, 0x00fffaff},
+ {0x0fec, 0x00901809},
+ {0x0ffe, 0x00b01007},
+ {0x16b0, 0x00eeff00},
+ {0x16b2, 0x00007000},
+ {0x16b4, 0x00000814},
+ };
+ const struct reg_val pre_init2[] = {
+ {0x0486, 0x0008a518},
+ {0x0488, 0x006dc696},
+ {0x048a, 0x00000912},
+ };
+ const struct firmware *fw;
+ struct device *dev = &phydev->mdio.dev;
+ unsigned int i;
+ u16 crc, reg;
+ int ret;
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+ /* all writes below are broadcasted to all PHYs in the same package */
+ reg = phy_base_read(phydev, MSCC_PHY_EXT_CNTL_STATUS);
+ reg |= SMI_BROADCAST_WR_EN;
+ phy_base_write(phydev, MSCC_PHY_EXT_CNTL_STATUS, reg);
+
+ phy_base_write(phydev, MII_VSC85XX_INT_MASK, 0);
+
+ reg = phy_base_read(phydev, MSCC_PHY_BYPASS_CONTROL);
+ reg |= PARALLEL_DET_IGNORE_ADVERTISED;
+ phy_base_write(phydev, MSCC_PHY_BYPASS_CONTROL, reg);
+
+ /* The register writes below tweak analog and electrical configuration
+ * that was determined through characterization by PHY engineers. The
+ * values don't mean anything more than "these are the best values".
+ */
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED_3);
+
+ phy_base_write(phydev, MSCC_PHY_SERDES_TX_CRC_ERR_CNT, 0x2000);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TEST);
+
+ phy_base_write(phydev, MSCC_PHY_TEST_PAGE_5, 0x1f20);
+
+ reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8);
+ reg |= 0x8000;
+ phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TR);
+
+ phy_base_write(phydev, MSCC_PHY_TR_CNTL, TR_WRITE | TR_ADDR(0x2fa4));
+
+ reg = phy_base_read(phydev, MSCC_PHY_TR_MSB);
+ reg &= ~0x007f;
+ reg |= 0x0019;
+ phy_base_write(phydev, MSCC_PHY_TR_MSB, reg);
+
+ phy_base_write(phydev, MSCC_PHY_TR_CNTL, TR_WRITE | TR_ADDR(0x0fa4));
+
+ for (i = 0; i < ARRAY_SIZE(pre_init1); i++)
+ vsc8584_csr_write(phydev, pre_init1[i].reg, pre_init1[i].val);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED_2);
+
+ phy_base_write(phydev, MSCC_PHY_CU_PMD_TX_CNTL, 0x028e);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TR);
+
+ for (i = 0; i < ARRAY_SIZE(pre_init2); i++)
+ vsc8584_csr_write(phydev, pre_init2[i].reg, pre_init2[i].val);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TEST);
+
+ reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8);
+ reg &= ~0x8000;
+ phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+ /* end of write broadcasting */
+ reg = phy_base_read(phydev, MSCC_PHY_EXT_CNTL_STATUS);
+ reg &= ~SMI_BROADCAST_WR_EN;
+ phy_base_write(phydev, MSCC_PHY_EXT_CNTL_STATUS, reg);
+
+ ret = request_firmware(&fw, MSCC_VSC8584_REVB_INT8051_FW, dev);
+ if (ret) {
+ dev_err(dev, "failed to load firmware %s, ret: %d\n",
+ MSCC_VSC8584_REVB_INT8051_FW, ret);
+ return ret;
+ }
+
+ /* Add one byte to size for the one added by the patch_fw function */
+ ret = vsc8584_get_fw_crc(phydev,
+ MSCC_VSC8584_REVB_INT8051_FW_START_ADDR,
+ fw->size + 1, &crc);
+ if (ret)
+ goto out;
+
+ if (crc != MSCC_VSC8584_REVB_INT8051_FW_CRC) {
+ dev_dbg(dev, "FW CRC is not the expected one, patching FW\n");
+ if (vsc8584_patch_fw(phydev, fw))
+ dev_warn(dev,
+ "failed to patch FW, expect non-optimal device\n");
+ }
+
+ vsc8584_micro_deassert_reset(phydev, false);
+
+ /* Add one byte to size for the one added by the patch_fw function */
+ ret = vsc8584_get_fw_crc(phydev,
+ MSCC_VSC8584_REVB_INT8051_FW_START_ADDR,
+ fw->size + 1, &crc);
+ if (ret)
+ goto out;
+
+ if (crc != MSCC_VSC8584_REVB_INT8051_FW_CRC)
+ dev_warn(dev,
+ "FW CRC after patching is not the expected one, expect non-optimal device\n");
+
+ ret = vsc8584_micro_assert_reset(phydev);
+ if (ret)
+ goto out;
+
+ vsc8584_micro_deassert_reset(phydev, true);
+
+out:
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+ release_firmware(fw);
+
+ return ret;
+}
+
+/* Check if one PHY has already done the init of the parts common to all PHYs
+ * in the Quad PHY package.
+ */
+static bool vsc8584_is_pkg_init(struct phy_device *phydev, bool reversed)
+{
+ struct mdio_device **map = phydev->mdio.bus->mdio_map;
+ struct vsc8531_private *vsc8531;
+ struct phy_device *phy;
+ int i, addr;
+
+ /* VSC8584 is a Quad PHY */
+ for (i = 0; i < 4; i++) {
+ vsc8531 = phydev->priv;
+
+ if (reversed)
+ addr = vsc8531->base_addr - i;
+ else
+ addr = vsc8531->base_addr + i;
+
+ phy = container_of(map[addr], struct phy_device, mdio);
+
+ if ((phy->phy_id & phydev->drv->phy_id_mask) !=
+ (phydev->drv->phy_id & phydev->drv->phy_id_mask))
+ continue;
+
+ vsc8531 = phy->priv;
+
+ if (vsc8531 && vsc8531->pkg_init)
+ return true;
+ }
+
+ return false;
+}
+
+static int vsc8584_config_init(struct phy_device *phydev)
+{
+ struct vsc8531_private *vsc8531 = phydev->priv;
+ u16 addr, val;
+ int ret, i;
+
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+
+ mutex_lock(&phydev->mdio.bus->mdio_lock);
+
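+ /* Compute the MDIO address of the base PHY of the package (internal
+ * address 0) from this PHY's position in the package and the
+ * address-reversal strap reported in ACTIPHY_CNTL.
+ */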
+ __mdiobus_write(phydev->mdio.bus, phydev->mdio.addr,
+ MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED);
+ addr = __mdiobus_read(phydev->mdio.bus, phydev->mdio.addr,
+ MSCC_PHY_EXT_PHY_CNTL_4);
+ addr >>= PHY_CNTL_4_ADDR_POS;
+
+ val = __mdiobus_read(phydev->mdio.bus, phydev->mdio.addr,
+ MSCC_PHY_ACTIPHY_CNTL);
+ if (val & PHY_ADDR_REVERSED)
+ vsc8531->base_addr = phydev->mdio.addr + addr;
+ else
+ vsc8531->base_addr = phydev->mdio.addr - addr;
+
+ /* Some parts of the init sequence are identical for every PHY in the
+ * package. Some parts modify the GPIO register bank, a set of registers
+ * that affects all PHYs, and a few reset the microprocessor common to
+ * all PHYs. The CRC check responsible for checking the firmware within
+ * the 8051 microprocessor can only be accessed via the PHY whose
+ * internal address in the package is 0. All PHYs' interrupt mask
+ * registers have to be zeroed before enabling any PHY's interrupt in
+ * this register.
+ * For all these reasons, the init sequence has to be done once and only
+ * once, by whichever PHY in the package is initialized first, and
+ * everything that is package-critical is handled in this pre-init
+ * function.
+ */
+ if (!vsc8584_is_pkg_init(phydev, val & PHY_ADDR_REVERSED ? 1 : 0)) {
+ if ((phydev->phy_id & phydev->drv->phy_id_mask) ==
+ (PHY_ID_VSC8574 & phydev->drv->phy_id_mask))
+ ret = vsc8574_config_pre_init(phydev);
+ else if ((phydev->phy_id & phydev->drv->phy_id_mask) ==
+ (PHY_ID_VSC8584 & phydev->drv->phy_id_mask))
+ ret = vsc8584_config_pre_init(phydev);
+ else
+ ret = -EINVAL;
+
+ if (ret)
+ goto err;
+ }
+
+ vsc8531->pkg_init = true;
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+ MSCC_PHY_PAGE_EXTENDED_GPIO);
+
+ val = phy_base_read(phydev, MSCC_PHY_MAC_CFG_FASTLINK);
+ val &= ~MAC_CFG_MASK;
+ if (phydev->interface == PHY_INTERFACE_MODE_QSGMII)
+ val |= MAC_CFG_QSGMII;
+ else
+ val |= MAC_CFG_SGMII;
+
+ ret = phy_base_write(phydev, MSCC_PHY_MAC_CFG_FASTLINK, val);
+ if (ret)
+ goto err;
+
+ val = PROC_CMD_MCB_ACCESS_MAC_CONF | PROC_CMD_RST_CONF_PORT |
+ PROC_CMD_READ_MOD_WRITE_PORT;
+ if (phydev->interface == PHY_INTERFACE_MODE_QSGMII)
+ val |= PROC_CMD_QSGMII_MAC;
+ else
+ val |= PROC_CMD_SGMII_MAC;
+
+ ret = vsc8584_cmd(phydev, val);
+ if (ret)
+ goto err;
+
+ usleep_range(10000, 20000);
+
+ /* Disable SerDes for 100Base-FX */
+ ret = vsc8584_cmd(phydev, PROC_CMD_FIBER_MEDIA_CONF |
+ PROC_CMD_FIBER_PORT(addr) | PROC_CMD_FIBER_DISABLE |
+ PROC_CMD_READ_MOD_WRITE_PORT |
+ PROC_CMD_RST_CONF_PORT | PROC_CMD_FIBER_100BASE_FX);
+ if (ret)
+ goto err;
+
+ /* Disable SerDes for 1000Base-X */
+ ret = vsc8584_cmd(phydev, PROC_CMD_FIBER_MEDIA_CONF |
+ PROC_CMD_FIBER_PORT(addr) | PROC_CMD_FIBER_DISABLE |
+ PROC_CMD_READ_MOD_WRITE_PORT |
+ PROC_CMD_RST_CONF_PORT | PROC_CMD_FIBER_1000BASE_X);
+ if (ret)
+ goto err;
+
+ mutex_unlock(&phydev->mdio.bus->mdio_lock);
+
+ phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+ val = phy_read(phydev, MSCC_PHY_EXT_PHY_CNTL_1);
+ val &= ~(MEDIA_OP_MODE_MASK | VSC8584_MAC_IF_SELECTION_MASK);
+ val |= MEDIA_OP_MODE_COPPER | (VSC8584_MAC_IF_SELECTION_SGMII <<
+ VSC8584_MAC_IF_SELECTION_POS);
+ ret = phy_write(phydev, MSCC_PHY_EXT_PHY_CNTL_1, val);
+
+ ret = genphy_soft_reset(phydev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < vsc8531->nleds; i++) {
+ ret = vsc85xx_led_cntl_set(phydev, i, vsc8531->leds_mode[i]);
+ if (ret)
+ return ret;
+ }
+
+ return genphy_config_init(phydev);
+
+err:
+ mutex_unlock(&phydev->mdio.bus->mdio_lock);
+ return ret;
+}
+
static int vsc85xx_config_init(struct phy_device *phydev)
{
int rc, i;
@@ -593,15 +1669,27 @@ static int vsc85xx_config_init(struct phy_device *phydev)
if (rc)
return rc;
+ rc = vsc85xx_eee_init_seq_set(phydev);
+ if (rc)
+ return rc;
+
for (i = 0; i < vsc8531->nleds; i++) {
rc = vsc85xx_led_cntl_set(phydev, i, vsc8531->leds_mode[i]);
if (rc)
return rc;
}
- rc = genphy_config_init(phydev);
+ return genphy_config_init(phydev);
+}
- return rc;
+static int vsc8584_did_interrupt(struct phy_device *phydev)
+{
+ int rc = 0;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ rc = phy_read(phydev, MII_VSC85XX_INT_STATUS);
+
+ return (rc < 0) ? 0 : rc & MII_VSC85XX_INT_MASK_MASK;
}
static int vsc85xx_ack_interrupt(struct phy_device *phydev)
@@ -653,6 +1741,61 @@ static int vsc85xx_read_status(struct phy_device *phydev)
return genphy_read_status(phydev);
}
+static int vsc8574_probe(struct phy_device *phydev)
+{
+ struct vsc8531_private *vsc8531;
+ u32 default_mode[4] = {VSC8531_LINK_1000_ACTIVITY,
+ VSC8531_LINK_100_ACTIVITY, VSC8531_LINK_ACTIVITY,
+ VSC8531_DUPLEX_COLLISION};
+
+ vsc8531 = devm_kzalloc(&phydev->mdio.dev, sizeof(*vsc8531), GFP_KERNEL);
+ if (!vsc8531)
+ return -ENOMEM;
+
+ phydev->priv = vsc8531;
+
+ vsc8531->nleds = 4;
+ vsc8531->supp_led_modes = VSC8584_SUPP_LED_MODES;
+ vsc8531->hw_stats = vsc8584_hw_stats;
+ vsc8531->nstats = ARRAY_SIZE(vsc8584_hw_stats);
+ vsc8531->stats = devm_kmalloc_array(&phydev->mdio.dev, vsc8531->nstats,
+ sizeof(u64), GFP_KERNEL);
+ if (!vsc8531->stats)
+ return -ENOMEM;
+
+ return vsc85xx_dt_led_modes_get(phydev, default_mode);
+}
+
+static int vsc8584_probe(struct phy_device *phydev)
+{
+ struct vsc8531_private *vsc8531;
+ u32 default_mode[4] = {VSC8531_LINK_1000_ACTIVITY,
+ VSC8531_LINK_100_ACTIVITY, VSC8531_LINK_ACTIVITY,
+ VSC8531_DUPLEX_COLLISION};
+
+ if ((phydev->phy_id & MSCC_DEV_REV_MASK) != VSC8584_REVB) {
+ dev_err(&phydev->mdio.dev, "Only VSC8584 revB is supported.\n");
+ return -ENOTSUPP;
+ }
+
+ vsc8531 = devm_kzalloc(&phydev->mdio.dev, sizeof(*vsc8531), GFP_KERNEL);
+ if (!vsc8531)
+ return -ENOMEM;
+
+ phydev->priv = vsc8531;
+
+ vsc8531->nleds = 4;
+ vsc8531->supp_led_modes = VSC8584_SUPP_LED_MODES;
+ vsc8531->hw_stats = vsc8584_hw_stats;
+ vsc8531->nstats = ARRAY_SIZE(vsc8584_hw_stats);
+ vsc8531->stats = devm_kmalloc_array(&phydev->mdio.dev, vsc8531->nstats,
+ sizeof(u64), GFP_KERNEL);
+ if (!vsc8531->stats)
+ return -ENOMEM;
+
+ return vsc85xx_dt_led_modes_get(phydev, default_mode);
+}
+
static int vsc85xx_probe(struct phy_device *phydev)
{
struct vsc8531_private *vsc8531;
@@ -673,6 +1816,12 @@ static int vsc85xx_probe(struct phy_device *phydev)
vsc8531->rate_magic = rate_magic;
vsc8531->nleds = 2;
vsc8531->supp_led_modes = VSC85XX_SUPP_LED_MODES;
+ vsc8531->hw_stats = vsc85xx_hw_stats;
+ vsc8531->nstats = ARRAY_SIZE(vsc85xx_hw_stats);
+ vsc8531->stats = devm_kmalloc_array(&phydev->mdio.dev, vsc8531->nstats,
+ sizeof(u64), GFP_KERNEL);
+ if (!vsc8531->stats)
+ return -ENOMEM;
return vsc85xx_dt_led_modes_get(phydev, default_mode);
}
@@ -699,6 +1848,11 @@ static struct phy_driver vsc85xx_driver[] = {
.get_wol = &vsc85xx_wol_get,
.get_tunable = &vsc85xx_get_tunable,
.set_tunable = &vsc85xx_set_tunable,
+ .read_page = &vsc85xx_phy_read_page,
+ .write_page = &vsc85xx_phy_write_page,
+ .get_sset_count = &vsc85xx_get_sset_count,
+ .get_strings = &vsc85xx_get_strings,
+ .get_stats = &vsc85xx_get_stats,
},
{
.phy_id = PHY_ID_VSC8531,
@@ -720,6 +1874,11 @@ static struct phy_driver vsc85xx_driver[] = {
.get_wol = &vsc85xx_wol_get,
.get_tunable = &vsc85xx_get_tunable,
.set_tunable = &vsc85xx_set_tunable,
+ .read_page = &vsc85xx_phy_read_page,
+ .write_page = &vsc85xx_phy_write_page,
+ .get_sset_count = &vsc85xx_get_sset_count,
+ .get_strings = &vsc85xx_get_strings,
+ .get_stats = &vsc85xx_get_stats,
},
{
.phy_id = PHY_ID_VSC8540,
@@ -741,6 +1900,11 @@ static struct phy_driver vsc85xx_driver[] = {
.get_wol = &vsc85xx_wol_get,
.get_tunable = &vsc85xx_get_tunable,
.set_tunable = &vsc85xx_set_tunable,
+ .read_page = &vsc85xx_phy_read_page,
+ .write_page = &vsc85xx_phy_write_page,
+ .get_sset_count = &vsc85xx_get_sset_count,
+ .get_strings = &vsc85xx_get_strings,
+ .get_stats = &vsc85xx_get_stats,
},
{
.phy_id = PHY_ID_VSC8541,
@@ -762,6 +1926,63 @@ static struct phy_driver vsc85xx_driver[] = {
.get_wol = &vsc85xx_wol_get,
.get_tunable = &vsc85xx_get_tunable,
.set_tunable = &vsc85xx_set_tunable,
+ .read_page = &vsc85xx_phy_read_page,
+ .write_page = &vsc85xx_phy_write_page,
+ .get_sset_count = &vsc85xx_get_sset_count,
+ .get_strings = &vsc85xx_get_strings,
+ .get_stats = &vsc85xx_get_stats,
+},
+{
+ .phy_id = PHY_ID_VSC8574,
+ .name = "Microsemi GE VSC8574 SyncE",
+ .phy_id_mask = 0xfffffff0,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .soft_reset = &genphy_soft_reset,
+ .config_init = &vsc8584_config_init,
+ .config_aneg = &vsc85xx_config_aneg,
+ .aneg_done = &genphy_aneg_done,
+ .read_status = &vsc85xx_read_status,
+ .ack_interrupt = &vsc85xx_ack_interrupt,
+ .config_intr = &vsc85xx_config_intr,
+ .did_interrupt = &vsc8584_did_interrupt,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
+ .probe = &vsc8574_probe,
+ .set_wol = &vsc85xx_wol_set,
+ .get_wol = &vsc85xx_wol_get,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+ .read_page = &vsc85xx_phy_read_page,
+ .write_page = &vsc85xx_phy_write_page,
+ .get_sset_count = &vsc85xx_get_sset_count,
+ .get_strings = &vsc85xx_get_strings,
+ .get_stats = &vsc85xx_get_stats,
+},
+{
+ .phy_id = PHY_ID_VSC8584,
+ .name = "Microsemi GE VSC8584 SyncE",
+ .phy_id_mask = 0xfffffff0,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .soft_reset = &genphy_soft_reset,
+ .config_init = &vsc8584_config_init,
+ .config_aneg = &vsc85xx_config_aneg,
+ .aneg_done = &genphy_aneg_done,
+ .read_status = &vsc85xx_read_status,
+ .ack_interrupt = &vsc85xx_ack_interrupt,
+ .config_intr = &vsc85xx_config_intr,
+ .did_interrupt = &vsc8584_did_interrupt,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
+ .probe = &vsc8584_probe,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+ .read_page = &vsc85xx_phy_read_page,
+ .write_page = &vsc85xx_phy_write_page,
+ .get_sset_count = &vsc85xx_get_sset_count,
+ .get_strings = &vsc85xx_get_strings,
+ .get_stats = &vsc85xx_get_stats,
}
};
@@ -773,6 +1994,8 @@ static struct mdio_device_id __maybe_unused vsc85xx_tbl[] = {
{ PHY_ID_VSC8531, 0xfffffff0, },
{ PHY_ID_VSC8540, 0xfffffff0, },
{ PHY_ID_VSC8541, 0xfffffff0, },
+ { PHY_ID_VSC8574, 0xfffffff0, },
+ { PHY_ID_VSC8584, 0xfffffff0, },
{ }
};
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index e1225545362d..d7636ff03bc7 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -329,7 +329,7 @@ struct phy_driver genphy_10g_driver = {
.name = "Generic 10G PHY",
.soft_reset = gen10g_no_soft_reset,
.config_init = gen10g_config_init,
- .features = 0,
+ .features = PHY_10GBIT_FEATURES,
.config_aneg = gen10g_config_aneg,
.read_status = gen10g_read_status,
.suspend = gen10g_suspend,
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 14509a8903c6..1d73ac3309ce 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -482,16 +482,15 @@ static int phy_config_aneg(struct phy_device *phydev)
}
/**
- * phy_start_aneg_priv - start auto-negotiation for this PHY device
+ * phy_start_aneg - start auto-negotiation for this PHY device
* @phydev: the phy_device struct
- * @sync: indicate whether we should wait for the workqueue cancelation
*
* Description: Sanitizes the settings (if we're not autonegotiating
* them), and then calls the driver's config_aneg function.
* If the PHYCONTROL Layer is operating, we change the state to
* reflect the beginning of Auto-negotiation or forcing.
*/
-static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
+int phy_start_aneg(struct phy_device *phydev)
{
bool trigger = 0;
int err;
@@ -541,20 +540,6 @@ out_unlock:
return err;
}
-
-/**
- * phy_start_aneg - start auto-negotiation for this PHY device
- * @phydev: the phy_device struct
- *
- * Description: Sanitizes the settings (if we're not autonegotiating
- * them), and then calls the driver's config_aneg function.
- * If the PHYCONTROL Layer is operating, we change the state to
- * reflect the beginning of Auto-negotiation or forcing.
- */
-int phy_start_aneg(struct phy_device *phydev)
-{
- return phy_start_aneg_priv(phydev, true);
-}
EXPORT_SYMBOL(phy_start_aneg);
static int phy_poll_aneg_done(struct phy_device *phydev)
@@ -654,7 +639,7 @@ static void phy_queue_state_machine(struct phy_device *phydev,
*/
void phy_start_machine(struct phy_device *phydev)
{
- phy_queue_state_machine(phydev, 1);
+ phy_trigger_machine(phydev);
}
EXPORT_SYMBOL_GPL(phy_start_machine);
@@ -941,7 +926,6 @@ void phy_state_machine(struct work_struct *work)
bool needs_aneg = false, do_suspend = false;
enum phy_state old_state;
int err = 0;
- int old_link;
mutex_lock(&phydev->lock);
@@ -1025,26 +1009,16 @@ void phy_state_machine(struct work_struct *work)
}
break;
case PHY_RUNNING:
- /* Only register a CHANGE if we are polling and link changed
- * since latest checking.
- */
- if (phy_polling_mode(phydev)) {
- old_link = phydev->link;
- err = phy_read_status(phydev);
- if (err)
- break;
+ if (!phy_polling_mode(phydev))
+ break;
- if (old_link != phydev->link)
- phydev->state = PHY_CHANGELINK;
- }
- /*
- * Failsafe: check that nobody set phydev->link=0 between two
- * poll cycles, otherwise we won't leave RUNNING state as long
- * as link remains down.
- */
- if (!phydev->link && phydev->state == PHY_RUNNING) {
- phydev->state = PHY_CHANGELINK;
- phydev_err(phydev, "no link in PHY_RUNNING\n");
+ err = phy_read_status(phydev);
+ if (err)
+ break;
+
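+ /* In polling mode, re-read the link on every cycle and drop straight
+ * to NOLINK if it went away.
+ */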
+ if (!phydev->link) {
+ phydev->state = PHY_NOLINK;
+ phy_link_down(phydev, true);
}
break;
case PHY_CHANGELINK:
@@ -1070,48 +1044,33 @@ void phy_state_machine(struct work_struct *work)
case PHY_RESUMING:
if (AUTONEG_ENABLE == phydev->autoneg) {
err = phy_aneg_done(phydev);
- if (err < 0)
+ if (err < 0) {
break;
-
- /* err > 0 if AN is done.
- * Otherwise, it's 0, and we're still waiting for AN
- */
- if (err > 0) {
- err = phy_read_status(phydev);
- if (err)
- break;
-
- if (phydev->link) {
- phydev->state = PHY_RUNNING;
- phy_link_up(phydev);
- } else {
- phydev->state = PHY_NOLINK;
- phy_link_down(phydev, false);
- }
- } else {
+ } else if (!err) {
phydev->state = PHY_AN;
phydev->link_timeout = PHY_AN_TIMEOUT;
- }
- } else {
- err = phy_read_status(phydev);
- if (err)
break;
-
- if (phydev->link) {
- phydev->state = PHY_RUNNING;
- phy_link_up(phydev);
- } else {
- phydev->state = PHY_NOLINK;
- phy_link_down(phydev, false);
}
}
+
+ err = phy_read_status(phydev);
+ if (err)
+ break;
+
+ if (phydev->link) {
+ phydev->state = PHY_RUNNING;
+ phy_link_up(phydev);
+ } else {
+ phydev->state = PHY_NOLINK;
+ phy_link_down(phydev, false);
+ }
break;
}
mutex_unlock(&phydev->lock);
if (needs_aneg)
- err = phy_start_aneg_priv(phydev, false);
+ err = phy_start_aneg(phydev);
else if (do_suspend)
phy_suspend(phydev);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index f53ce65f45c5..ab33d1777132 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -237,7 +237,12 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
if (!netdev)
return !phydev->suspended;
- /* Don't suspend PHY if the attached netdev parent may wakeup.
+ if (netdev->wol_enabled)
+ return false;
+
+ /* As long as not all affected network drivers support the
+ * wol_enabled flag, let's check for hints that WoL is enabled.
+ * Don't suspend PHY if the attached netdev parent may wake up.
* The parent may point to a PCI device, as in tg3 driver.
*/
if (netdev->dev.parent && device_may_wakeup(netdev->dev.parent))
@@ -1274,9 +1279,9 @@ void phy_detach(struct phy_device *phydev)
sysfs_remove_link(&dev->dev.kobj, "phydev");
sysfs_remove_link(&phydev->mdio.dev.kobj, "attached_dev");
}
+ phy_suspend(phydev);
phydev->attached_dev->phydev = NULL;
phydev->attached_dev = NULL;
- phy_suspend(phydev);
phydev->phylink = NULL;
phy_led_triggers_unregister(phydev);
@@ -1310,12 +1315,13 @@ EXPORT_SYMBOL(phy_detach);
int phy_suspend(struct phy_device *phydev)
{
struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);
+ struct net_device *netdev = phydev->attached_dev;
struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
int ret = 0;
/* If the device has WOL enabled, we cannot suspend the PHY */
phy_ethtool_get_wol(phydev, &wol);
- if (wol.wolopts)
+ if (wol.wolopts || (netdev && netdev->wol_enabled))
return -EBUSY;
if (phydev->drv && phydrv->suspend)
@@ -1934,6 +1940,7 @@ EXPORT_SYMBOL(phy_remove_link_mode);
*/
void phy_support_sym_pause(struct phy_device *phydev)
{
+ phydev->supported &= ~SUPPORTED_Asym_Pause;
phydev->supported |= SUPPORTED_Pause;
phydev->advertising = phydev->supported;
}
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index b6993af5c9e4..9b8dd0d0ee42 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -690,6 +690,30 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
return 0;
}
+static int __phylink_connect_phy(struct phylink *pl, struct phy_device *phy,
+ phy_interface_t interface)
+{
+ int ret;
+
+ if (WARN_ON(pl->link_an_mode == MLO_AN_FIXED ||
+ (pl->link_an_mode == MLO_AN_INBAND &&
+ phy_interface_mode_is_8023z(interface))))
+ return -EINVAL;
+
+ if (pl->phydev)
+ return -EBUSY;
+
+ ret = phy_attach_direct(pl->netdev, phy, 0, interface);
+ if (ret)
+ return ret;
+
+ ret = phylink_bringup_phy(pl, phy);
+ if (ret)
+ phy_detach(phy);
+
+ return ret;
+}
+
/**
* phylink_connect_phy() - connect a PHY to the phylink instance
* @pl: a pointer to a &struct phylink returned from phylink_create()
@@ -707,31 +731,13 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
*/
int phylink_connect_phy(struct phylink *pl, struct phy_device *phy)
{
- int ret;
-
- if (WARN_ON(pl->link_an_mode == MLO_AN_FIXED ||
- (pl->link_an_mode == MLO_AN_INBAND &&
- phy_interface_mode_is_8023z(pl->link_interface))))
- return -EINVAL;
-
- if (pl->phydev)
- return -EBUSY;
-
/* Use PHY device/driver interface */
if (pl->link_interface == PHY_INTERFACE_MODE_NA) {
pl->link_interface = phy->interface;
pl->link_config.interface = pl->link_interface;
}
- ret = phy_attach_direct(pl->netdev, phy, 0, pl->link_interface);
- if (ret)
- return ret;
-
- ret = phylink_bringup_phy(pl, phy);
- if (ret)
- phy_detach(phy);
-
- return ret;
+ return __phylink_connect_phy(pl, phy, pl->link_interface);
}
EXPORT_SYMBOL_GPL(phylink_connect_phy);
@@ -1648,7 +1654,9 @@ static void phylink_sfp_link_up(void *upstream)
static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy)
{
- return phylink_connect_phy(upstream, phy);
+ struct phylink *pl = upstream;
+
+ return __phylink_connect_phy(upstream, phy, pl->link_config.interface);
}
static void phylink_sfp_disconnect_phy(void *upstream)
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 52fffb98fde9..fd8bb998ae52 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -163,8 +163,6 @@ static const enum gpiod_flags gpio_flags[] = {
/* Give this long for the PHY to reset. */
#define T_PHY_RESET_MS 50
-static DEFINE_MUTEX(sfp_mutex);
-
struct sff_data {
unsigned int gpios;
bool (*module_supported)(const struct sfp_eeprom_id *id);
@@ -1098,8 +1096,11 @@ static int sfp_hwmon_insert(struct sfp *sfp)
static void sfp_hwmon_remove(struct sfp *sfp)
{
- hwmon_device_unregister(sfp->hwmon_dev);
- kfree(sfp->hwmon_name);
+ if (!IS_ERR_OR_NULL(sfp->hwmon_dev)) {
+ hwmon_device_unregister(sfp->hwmon_dev);
+ sfp->hwmon_dev = NULL;
+ kfree(sfp->hwmon_name);
+ }
}
#else
static int sfp_hwmon_insert(struct sfp *sfp)
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
index a205750b431b..7ccdc62c6052 100644
--- a/drivers/net/ppp/ppp_mppe.c
+++ b/drivers/net/ppp/ppp_mppe.c
@@ -95,7 +95,7 @@ static inline void sha_pad_init(struct sha_pad *shapad)
* State for an MPPE (de)compressor.
*/
struct ppp_mppe_state {
- struct crypto_skcipher *arc4;
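+ /* arc4 is only ever used synchronously; a sync tfm keeps the on-stack
+ * skcipher request size bounded.
+ */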
+ struct crypto_sync_skcipher *arc4;
struct shash_desc *sha1;
unsigned char *sha1_digest;
unsigned char master_key[MPPE_MAX_KEY_LEN];
@@ -155,15 +155,15 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state)
static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
{
struct scatterlist sg_in[1], sg_out[1];
- SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
- skcipher_request_set_tfm(req, state->arc4);
+ skcipher_request_set_sync_tfm(req, state->arc4);
skcipher_request_set_callback(req, 0, NULL, NULL);
get_new_key_from_sha(state);
if (!initial_key) {
- crypto_skcipher_setkey(state->arc4, state->sha1_digest,
- state->keylen);
+ crypto_sync_skcipher_setkey(state->arc4, state->sha1_digest,
+ state->keylen);
sg_init_table(sg_in, 1);
sg_init_table(sg_out, 1);
setup_sg(sg_in, state->sha1_digest, state->keylen);
@@ -181,7 +181,8 @@ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
state->session_key[1] = 0x26;
state->session_key[2] = 0x9e;
}
- crypto_skcipher_setkey(state->arc4, state->session_key, state->keylen);
+ crypto_sync_skcipher_setkey(state->arc4, state->session_key,
+ state->keylen);
skcipher_request_zero(req);
}
@@ -203,7 +204,7 @@ static void *mppe_alloc(unsigned char *options, int optlen)
goto out;
- state->arc4 = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+ state->arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(state->arc4)) {
state->arc4 = NULL;
goto out_free;
@@ -250,7 +251,7 @@ out_free:
crypto_free_shash(state->sha1->tfm);
kzfree(state->sha1);
}
- crypto_free_skcipher(state->arc4);
+ crypto_free_sync_skcipher(state->arc4);
kfree(state);
out:
return NULL;
@@ -266,7 +267,7 @@ static void mppe_free(void *arg)
kfree(state->sha1_digest);
crypto_free_shash(state->sha1->tfm);
kzfree(state->sha1);
- crypto_free_skcipher(state->arc4);
+ crypto_free_sync_skcipher(state->arc4);
kfree(state);
}
}
@@ -366,7 +367,7 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
int isize, int osize)
{
struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
- SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
int proto;
int err;
struct scatterlist sg_in[1], sg_out[1];
@@ -426,7 +427,7 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
setup_sg(sg_in, ibuf, isize);
setup_sg(sg_out, obuf, osize);
- skcipher_request_set_tfm(req, state->arc4);
+ skcipher_request_set_sync_tfm(req, state->arc4);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg_in, sg_out, isize, NULL);
err = crypto_skcipher_encrypt(req);
@@ -480,7 +481,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
int osize)
{
struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
- SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
unsigned ccount;
int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED;
struct scatterlist sg_in[1], sg_out[1];
@@ -615,7 +616,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
setup_sg(sg_in, ibuf, 1);
setup_sg(sg_out, obuf, 1);
- skcipher_request_set_tfm(req, state->arc4);
+ skcipher_request_set_sync_tfm(req, state->arc4);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg_in, sg_out, 1, NULL);
if (crypto_skcipher_decrypt(req)) {
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index b008266e91ea..9757f1fc104f 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -79,7 +79,6 @@
#include <linux/rtnetlink.h>
#include <linux/if_arp.h>
#include <linux/if_slip.h>
-#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -1167,27 +1166,6 @@ static int slip_ioctl(struct tty_struct *tty, struct file *file,
}
}
-#ifdef CONFIG_COMPAT
-static long slip_compat_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- switch (cmd) {
- case SIOCGIFNAME:
- case SIOCGIFENCAP:
- case SIOCSIFENCAP:
- case SIOCSIFHWADDR:
- case SIOCSKEEPALIVE:
- case SIOCGKEEPALIVE:
- case SIOCSOUTFILL:
- case SIOCGOUTFILL:
- return slip_ioctl(tty, file, cmd,
- (unsigned long)compat_ptr(arg));
- }
-
- return -ENOIOCTLCMD;
-}
-#endif
-
/* VSV changes start here */
#ifdef CONFIG_SLIP_SMART
/* function do_ioctl called from net/core/dev.c
@@ -1280,9 +1258,6 @@ static struct tty_ldisc_ops sl_ldisc = {
.close = slip_close,
.hangup = slip_hangup,
.ioctl = slip_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = slip_compat_ioctl,
-#endif
.receive_buf = slip_receive_buf,
.write_wakeup = slip_write_wakeup,
};
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 6a047d30e8c6..db633ae9f784 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1104,10 +1104,7 @@ static void team_port_disable_netpoll(struct team_port *port)
return;
port->np = NULL;
- /* Wait for transmitting packets to finish before freeing. */
- synchronize_rcu_bh();
- __netpoll_cleanup(np);
- kfree(np);
+ __netpoll_free(np);
}
#else
static int team_port_enable_netpoll(struct team_port *port)
@@ -1167,6 +1164,12 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
return -EBUSY;
}
+ if (dev == port_dev) {
+ NL_SET_ERR_MSG(extack, "Cannot enslave team device to itself");
+ netdev_err(dev, "Cannot enslave team device to itself\n");
+ return -EINVAL;
+ }
+
if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
vlan_uses_dev(dev)) {
NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 3eb88b7147f0..060135ceaf0e 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -180,6 +180,7 @@ struct tun_file {
};
struct napi_struct napi;
bool napi_enabled;
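+ /* true when IFF_NAPI_FRAGS was requested for this queue at attach time */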
+ bool napi_frags_enabled;
struct mutex napi_mutex; /* Protects access to the above napi */
struct list_head next;
struct tun_struct *detached;
@@ -312,32 +313,32 @@ static int tun_napi_poll(struct napi_struct *napi, int budget)
}
static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
- bool napi_en)
+ bool napi_en, bool napi_frags)
{
tfile->napi_enabled = napi_en;
+ tfile->napi_frags_enabled = napi_en && napi_frags;
if (napi_en) {
netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
NAPI_POLL_WEIGHT);
napi_enable(&tfile->napi);
- mutex_init(&tfile->napi_mutex);
}
}
-static void tun_napi_disable(struct tun_struct *tun, struct tun_file *tfile)
+static void tun_napi_disable(struct tun_file *tfile)
{
if (tfile->napi_enabled)
napi_disable(&tfile->napi);
}
-static void tun_napi_del(struct tun_struct *tun, struct tun_file *tfile)
+static void tun_napi_del(struct tun_file *tfile)
{
if (tfile->napi_enabled)
netif_napi_del(&tfile->napi);
}
-static bool tun_napi_frags_enabled(const struct tun_struct *tun)
+static bool tun_napi_frags_enabled(const struct tun_file *tfile)
{
- return READ_ONCE(tun->flags) & IFF_NAPI_FRAGS;
+ return tfile->napi_frags_enabled;
}
#ifdef CONFIG_TUN_VNET_CROSS_LE
@@ -561,12 +562,11 @@ static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
e->rps_rxhash = hash;
}
-/* We try to identify a flow through its rxhash first. The reason that
+/* We try to identify a flow through its rxhash. The reason that
 * we do not check rxq no. is that some cards (e.g. 82599) choose
 * the rxq based on the txq where the last packet of the flow comes. As
 * the userspace application moves between processors, we may get a
- * different rxq no. here. If we could not get rxhash, then we would
- * hope the rxq no. may help here.
+ * different rxq no. here.
*/
static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
@@ -577,18 +577,13 @@ static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
numqueues = READ_ONCE(tun->numqueues);
txq = __skb_get_hash_symmetric(skb);
- if (txq) {
- e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
- if (e) {
- tun_flow_save_rps_rxhash(e, txq);
- txq = e->queue_index;
- } else
- /* use multiply and shift instead of expensive divide */
- txq = ((u64)txq * numqueues) >> 32;
- } else if (likely(skb_rx_queue_recorded(skb))) {
- txq = skb_get_rx_queue(skb);
- while (unlikely(txq >= numqueues))
- txq -= numqueues;
+ e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
+ if (e) {
+ tun_flow_save_rps_rxhash(e, txq);
+ txq = e->queue_index;
+ } else {
+ /* use multiply and shift instead of expensive divide */
+ txq = ((u64)txq * numqueues) >> 32;
}
return txq;
@@ -689,8 +684,8 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
tun = rtnl_dereference(tfile->tun);
if (tun && clean) {
- tun_napi_disable(tun, tfile);
- tun_napi_del(tun, tfile);
+ tun_napi_disable(tfile);
+ tun_napi_del(tfile);
}
if (tun && !tfile->detached) {
@@ -757,7 +752,7 @@ static void tun_detach_all(struct net_device *dev)
for (i = 0; i < n; i++) {
tfile = rtnl_dereference(tun->tfiles[i]);
BUG_ON(!tfile);
- tun_napi_disable(tun, tfile);
+ tun_napi_disable(tfile);
tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
tfile->socket.sk->sk_data_ready(tfile->socket.sk);
RCU_INIT_POINTER(tfile->tun, NULL);
@@ -773,7 +768,7 @@ static void tun_detach_all(struct net_device *dev)
synchronize_net();
for (i = 0; i < n; i++) {
tfile = rtnl_dereference(tun->tfiles[i]);
- tun_napi_del(tun, tfile);
+ tun_napi_del(tfile);
/* Drop read queue */
tun_queue_purge(tfile);
xdp_rxq_info_unreg(&tfile->xdp_rxq);
@@ -792,7 +787,7 @@ static void tun_detach_all(struct net_device *dev)
}
static int tun_attach(struct tun_struct *tun, struct file *file,
- bool skip_filter, bool napi)
+ bool skip_filter, bool napi, bool napi_frags)
{
struct tun_file *tfile = file->private_data;
struct net_device *dev = tun->dev;
@@ -865,7 +860,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
tun_enable_queue(tfile);
} else {
sock_hold(&tfile->sk);
- tun_napi_init(tun, tfile, napi);
+ tun_napi_init(tun, tfile, napi, napi_frags);
}
if (rtnl_dereference(tun->xdp_prog))
@@ -1046,16 +1041,13 @@ static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
/* Select queue was not called for the skbuff, so we extract the
* RPS hash and save it into the flow_table here.
*/
+ struct tun_flow_entry *e;
__u32 rxhash;
rxhash = __skb_get_hash_symmetric(skb);
- if (rxhash) {
- struct tun_flow_entry *e;
- e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)],
- rxhash);
- if (e)
- tun_flow_save_rps_rxhash(e, rxhash);
- }
+ e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash);
+ if (e)
+ tun_flow_save_rps_rxhash(e, rxhash);
}
#endif
}
@@ -1743,7 +1735,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
int err;
u32 rxhash = 0;
int skb_xdp = 1;
- bool frags = tun_napi_frags_enabled(tun);
+ bool frags = tun_napi_frags_enabled(tfile);
if (!(tun->dev->flags & IFF_UP))
return -EIO;
@@ -2297,6 +2289,8 @@ static void tun_setup(struct net_device *dev)
static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
+ if (!data)
+ return 0;
return -EINVAL;
}
@@ -2683,7 +2677,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
return err;
err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
- ifr->ifr_flags & IFF_NAPI);
+ ifr->ifr_flags & IFF_NAPI,
+ ifr->ifr_flags & IFF_NAPI_FRAGS);
if (err < 0)
return err;
@@ -2781,7 +2776,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
(ifr->ifr_flags & TUN_FEATURES);
INIT_LIST_HEAD(&tun->disabled);
- err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI);
+ err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
+ ifr->ifr_flags & IFF_NAPI_FRAGS);
if (err < 0)
goto err_free_flow;
@@ -2930,7 +2926,8 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
ret = security_tun_dev_attach_queue(tun->security);
if (ret < 0)
goto unlock;
- ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI);
+ ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
+ tun->flags & IFF_NAPI_FRAGS);
} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
tun = rtnl_dereference(tfile->tun);
if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
@@ -3348,6 +3345,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
return -ENOMEM;
}
+ mutex_init(&tfile->napi_mutex);
RCU_INIT_POINTER(tfile->tun, NULL);
tfile->flags = 0;
tfile->ifindex = 0;
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index e95dd12edec4..023b8d0bf175 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -607,6 +607,9 @@ int asix_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
struct usbnet *dev = netdev_priv(net);
u8 opt = 0;
+ if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+ return -EINVAL;
+
if (wolinfo->wolopts & WAKE_PHY)
opt |= AX_MONITOR_LINK;
if (wolinfo->wolopts & WAKE_MAGIC)
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 9e8ad372f419..2207f7a7d1ff 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -566,6 +566,9 @@ ax88179_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
struct usbnet *dev = netdev_priv(net);
u8 opt = 0;
+ if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+ return -EINVAL;
+
if (wolinfo->wolopts & WAKE_PHY)
opt |= AX_MONITOR_MODE_RWLC;
if (wolinfo->wolopts & WAKE_MAGIC)
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 1eaec648bd1f..50c05d0f44cb 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -779,8 +779,7 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
hrtimer_init(&ctx->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ctx->tx_timer.function = &cdc_ncm_tx_timer_cb;
- ctx->bh.data = (unsigned long)dev;
- ctx->bh.func = cdc_ncm_txpath_bh;
+ tasklet_init(&ctx->bh, cdc_ncm_txpath_bh, (unsigned long)dev);
atomic_set(&ctx->stop, 0);
spin_lock_init(&ctx->mtx);
@@ -1601,11 +1600,8 @@ cdc_ncm_speed_change(struct usbnet *dev,
static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
{
- struct cdc_ncm_ctx *ctx;
struct usb_cdc_notification *event;
- ctx = (struct cdc_ncm_ctx *)dev->data[0];
-
if (urb->actual_length < sizeof(*event))
return;
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 656441d9a955..be1917be28f2 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1387,19 +1387,10 @@ static int lan78xx_set_wol(struct net_device *netdev,
if (ret < 0)
return ret;
- pdata->wol = 0;
- if (wol->wolopts & WAKE_UCAST)
- pdata->wol |= WAKE_UCAST;
- if (wol->wolopts & WAKE_MCAST)
- pdata->wol |= WAKE_MCAST;
- if (wol->wolopts & WAKE_BCAST)
- pdata->wol |= WAKE_BCAST;
- if (wol->wolopts & WAKE_MAGIC)
- pdata->wol |= WAKE_MAGIC;
- if (wol->wolopts & WAKE_PHY)
- pdata->wol |= WAKE_PHY;
- if (wol->wolopts & WAKE_ARP)
- pdata->wol |= WAKE_ARP;
+ if (wol->wolopts & ~WAKE_ALL)
+ return -EINVAL;
+
+ pdata->wol = wol->wolopts;
device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
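The lan78xx hunk above, like the asix and ax88179 hunks before it and the r8152/smsc/sr9800 hunks that follow, applies the same fix: reject wake-on-LAN options the hardware cannot honour instead of silently dropping them. A hedged sketch of the pattern, with foo_priv and FOO_SUPPORTED_WAKE as illustrative names only:

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/pm_wakeup.h>

#define FOO_SUPPORTED_WAKE	(WAKE_PHY | WAKE_MAGIC)

struct foo_priv {
	u32 wolopts;
};

static int foo_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
{
	struct foo_priv *priv = netdev_priv(net);

	/* unsupported bits requested: fail rather than ignore them */
	if (wolinfo->wolopts & ~FOO_SUPPORTED_WAKE)
		return -EINVAL;

	priv->wolopts = wolinfo->wolopts;
	return device_set_wakeup_enable(net->dev.parent, !!priv->wolopts);
}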
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 533b6fb8d923..72a55b6b4211 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1241,6 +1241,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */
{QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
{QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
+ {QMI_FIXED_INTF(0x1e2d, 0x0063, 10)}, /* Cinterion ALASxx (1 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0082, 5)}, /* Cinterion PHxx,PXxx (2 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 2cd71bdb6484..f1b5201cc320 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -4506,6 +4506,9 @@ static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
if (!rtl_can_wakeup(tp))
return -EOPNOTSUPP;
+ if (wol->wolopts & ~WAKE_ANY)
+ return -EINVAL;
+
ret = usb_autopm_get_interface(tp->intf);
if (ret < 0)
goto out_set_wol;
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 05553d252446..ec287c9741e8 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -731,6 +731,9 @@ static int smsc75xx_ethtool_set_wol(struct net_device *net,
struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
int ret;
+ if (wolinfo->wolopts & ~SUPPORTED_WAKE)
+ return -EINVAL;
+
pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts);
@@ -1517,6 +1520,7 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
{
struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
if (pdata) {
+ cancel_work_sync(&pdata->set_multicast);
netif_dbg(dev, ifdown, dev->net, "free pdata\n");
kfree(pdata);
pdata = NULL;
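The cancel_work_sync() added above closes a use-after-free: the set_multicast work item lives inside pdata, so it must be flushed before pdata is freed. A generic sketch of that rule, with illustrative names:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct baz_priv {
	struct work_struct set_multicast;
	/* ... other driver state ... */
};

static void baz_unbind(struct baz_priv *priv)
{
	/* wait for queued or running work before its container goes away */
	cancel_work_sync(&priv->set_multicast);
	kfree(priv);
}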
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 06b4d290784d..262e7a3c23cb 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -774,6 +774,9 @@ static int smsc95xx_ethtool_set_wol(struct net_device *net,
struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
int ret;
+ if (wolinfo->wolopts & ~SUPPORTED_WAKE)
+ return -EINVAL;
+
pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts);
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
index 9277a0f228df..35f39f23d881 100644
--- a/drivers/net/usb/sr9800.c
+++ b/drivers/net/usb/sr9800.c
@@ -421,6 +421,9 @@ sr_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
struct usbnet *dev = netdev_priv(net);
u8 opt = 0;
+ if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+ return -EINVAL;
+
if (wolinfo->wolopts & WAKE_PHY)
opt |= SR_MONITOR_LINK;
if (wolinfo->wolopts & WAKE_MAGIC)
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 224c56a4e2b1..890fa5b905e2 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -37,11 +37,19 @@
#define VETH_XDP_TX BIT(0)
#define VETH_XDP_REDIR BIT(1)
+struct veth_rq_stats {
+ u64 xdp_packets;
+ u64 xdp_bytes;
+ u64 xdp_drops;
+ struct u64_stats_sync syncp;
+};
+
struct veth_rq {
struct napi_struct xdp_napi;
struct net_device *dev;
struct bpf_prog __rcu *xdp_prog;
struct xdp_mem_info xdp_mem;
+ struct veth_rq_stats stats;
bool rx_notify_masked;
struct ptr_ring xdp_ring;
struct xdp_rxq_info xdp_rxq;
@@ -59,6 +67,21 @@ struct veth_priv {
* ethtool interface
*/
+struct veth_q_stat_desc {
+ char desc[ETH_GSTRING_LEN];
+ size_t offset;
+};
+
+#define VETH_RQ_STAT(m) offsetof(struct veth_rq_stats, m)
+
+static const struct veth_q_stat_desc veth_rq_stats_desc[] = {
+ { "xdp_packets", VETH_RQ_STAT(xdp_packets) },
+ { "xdp_bytes", VETH_RQ_STAT(xdp_bytes) },
+ { "xdp_drops", VETH_RQ_STAT(xdp_drops) },
+};
+
+#define VETH_RQ_STATS_LEN ARRAY_SIZE(veth_rq_stats_desc)
+
static struct {
const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
@@ -83,9 +106,20 @@ static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *inf
static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
+ char *p = (char *)buf;
+ int i, j;
+
switch(stringset) {
case ETH_SS_STATS:
- memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
+ memcpy(p, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
+ p += sizeof(ethtool_stats_keys);
+ for (i = 0; i < dev->real_num_rx_queues; i++) {
+ for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
+ snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_%s",
+ i, veth_rq_stats_desc[j].desc);
+ p += ETH_GSTRING_LEN;
+ }
+ }
break;
}
}
@@ -94,7 +128,8 @@ static int veth_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
- return ARRAY_SIZE(ethtool_stats_keys);
+ return ARRAY_SIZE(ethtool_stats_keys) +
+ VETH_RQ_STATS_LEN * dev->real_num_rx_queues;
default:
return -EOPNOTSUPP;
}
@@ -105,8 +140,25 @@ static void veth_get_ethtool_stats(struct net_device *dev,
{
struct veth_priv *priv = netdev_priv(dev);
struct net_device *peer = rtnl_dereference(priv->peer);
+ int i, j, idx;
data[0] = peer ? peer->ifindex : 0;
+ idx = 1;
+ for (i = 0; i < dev->real_num_rx_queues; i++) {
+ const struct veth_rq_stats *rq_stats = &priv->rq[i].stats;
+ const void *stats_base = (void *)rq_stats;
+ unsigned int start;
+ size_t offset;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
+ for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
+ offset = veth_rq_stats_desc[j].offset;
+ data[idx + j] = *(u64 *)(stats_base + offset);
+ }
+ } while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
+ idx += VETH_RQ_STATS_LEN;
+ }
}
static int veth_get_ts_info(struct net_device *dev,
@@ -211,12 +263,14 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
if (likely(veth_forward_skb(rcv, skb, rq, rcv_xdp) == NET_RX_SUCCESS)) {
- struct pcpu_lstats *stats = this_cpu_ptr(dev->lstats);
+ if (!rcv_xdp) {
+ struct pcpu_lstats *stats = this_cpu_ptr(dev->lstats);
- u64_stats_update_begin(&stats->syncp);
- stats->bytes += length;
- stats->packets++;
- u64_stats_update_end(&stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ stats->bytes += length;
+ stats->packets++;
+ u64_stats_update_end(&stats->syncp);
+ }
} else {
drop:
atomic64_inc(&priv->dropped);
@@ -230,7 +284,7 @@ drop:
return NETDEV_TX_OK;
}
-static u64 veth_stats_one(struct pcpu_lstats *result, struct net_device *dev)
+static u64 veth_stats_tx(struct pcpu_lstats *result, struct net_device *dev)
{
struct veth_priv *priv = netdev_priv(dev);
int cpu;
@@ -253,23 +307,58 @@ static u64 veth_stats_one(struct pcpu_lstats *result, struct net_device *dev)
return atomic64_read(&priv->dropped);
}
+static void veth_stats_rx(struct veth_rq_stats *result, struct net_device *dev)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+ int i;
+
+ result->xdp_packets = 0;
+ result->xdp_bytes = 0;
+ result->xdp_drops = 0;
+ for (i = 0; i < dev->num_rx_queues; i++) {
+ struct veth_rq_stats *stats = &priv->rq[i].stats;
+ u64 packets, bytes, drops;
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
+ packets = stats->xdp_packets;
+ bytes = stats->xdp_bytes;
+ drops = stats->xdp_drops;
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ result->xdp_packets += packets;
+ result->xdp_bytes += bytes;
+ result->xdp_drops += drops;
+ }
+}
+
static void veth_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *tot)
{
struct veth_priv *priv = netdev_priv(dev);
struct net_device *peer;
- struct pcpu_lstats one;
+ struct veth_rq_stats rx;
+ struct pcpu_lstats tx;
- tot->tx_dropped = veth_stats_one(&one, dev);
- tot->tx_bytes = one.bytes;
- tot->tx_packets = one.packets;
+ tot->tx_dropped = veth_stats_tx(&tx, dev);
+ tot->tx_bytes = tx.bytes;
+ tot->tx_packets = tx.packets;
+
+ veth_stats_rx(&rx, dev);
+ tot->rx_dropped = rx.xdp_drops;
+ tot->rx_bytes = rx.xdp_bytes;
+ tot->rx_packets = rx.xdp_packets;
rcu_read_lock();
peer = rcu_dereference(priv->peer);
if (peer) {
- tot->rx_dropped = veth_stats_one(&one, peer);
- tot->rx_bytes = one.bytes;
- tot->rx_packets = one.packets;
+ tot->rx_dropped += veth_stats_tx(&tx, peer);
+ tot->rx_bytes += tx.bytes;
+ tot->rx_packets += tx.packets;
+
+ veth_stats_rx(&rx, peer);
+ tot->tx_bytes += rx.xdp_bytes;
+ tot->tx_packets += rx.xdp_packets;
}
rcu_read_unlock();
}
@@ -308,16 +397,20 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
{
struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
struct net_device *rcv;
+ int i, ret, drops = n;
unsigned int max_len;
struct veth_rq *rq;
- int i, drops = 0;
- if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
- return -EINVAL;
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
+ ret = -EINVAL;
+ goto drop;
+ }
rcv = rcu_dereference(priv->peer);
- if (unlikely(!rcv))
- return -ENXIO;
+ if (unlikely(!rcv)) {
+ ret = -ENXIO;
+ goto drop;
+ }
rcv_priv = netdev_priv(rcv);
rq = &rcv_priv->rq[veth_select_rxq(rcv)];
@@ -325,9 +418,12 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
* side. This means an XDP program is loaded on the peer and the peer
* device is up.
*/
- if (!rcu_access_pointer(rq->xdp_prog))
- return -ENXIO;
+ if (!rcu_access_pointer(rq->xdp_prog)) {
+ ret = -ENXIO;
+ goto drop;
+ }
+ drops = 0;
max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;
spin_lock(&rq->xdp_ring.producer_lock);
@@ -346,7 +442,14 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
if (flags & XDP_XMIT_FLUSH)
__veth_xdp_flush(rq);
- return n - drops;
+ if (likely(!drops))
+ return n;
+
+ ret = n - drops;
+drop:
+ atomic64_add(drops, &priv->dropped);
+
+ return ret;
}
static void veth_xdp_flush(struct net_device *dev)
@@ -595,28 +698,42 @@ xdp_xmit:
static int veth_xdp_rcv(struct veth_rq *rq, int budget, unsigned int *xdp_xmit)
{
- int i, done = 0;
+ int i, done = 0, drops = 0, bytes = 0;
for (i = 0; i < budget; i++) {
void *ptr = __ptr_ring_consume(&rq->xdp_ring);
+ unsigned int xdp_xmit_one = 0;
struct sk_buff *skb;
if (!ptr)
break;
if (veth_is_xdp_frame(ptr)) {
- skb = veth_xdp_rcv_one(rq, veth_ptr_to_xdp(ptr),
- xdp_xmit);
+ struct xdp_frame *frame = veth_ptr_to_xdp(ptr);
+
+ bytes += frame->len;
+ skb = veth_xdp_rcv_one(rq, frame, &xdp_xmit_one);
} else {
- skb = veth_xdp_rcv_skb(rq, ptr, xdp_xmit);
+ skb = ptr;
+ bytes += skb->len;
+ skb = veth_xdp_rcv_skb(rq, skb, &xdp_xmit_one);
}
+ *xdp_xmit |= xdp_xmit_one;
if (skb)
napi_gro_receive(&rq->xdp_napi, skb);
+ else if (!xdp_xmit_one)
+ drops++;
done++;
}
+ u64_stats_update_begin(&rq->stats.syncp);
+ rq->stats.xdp_packets += done;
+ rq->stats.xdp_bytes += bytes;
+ rq->stats.xdp_drops += drops;
+ u64_stats_update_end(&rq->stats.syncp);
+
return done;
}
@@ -807,8 +924,10 @@ static int veth_alloc_queues(struct net_device *dev)
if (!priv->rq)
return -ENOMEM;
- for (i = 0; i < dev->num_rx_queues; i++)
+ for (i = 0; i < dev->num_rx_queues; i++) {
priv->rq[i].dev = dev;
+ u64_stats_init(&priv->rq[i].stats.syncp);
+ }
return 0;
}
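The veth changes above export per-queue XDP counters through ethtool (they appear as rx_queue_<n>_xdp_packets and friends in ethtool -S output). The counters rely on the u64_stats_sync idiom; a self-contained sketch of that writer/reader pairing, using the same 4.19-era API names as the diff:

#include <linux/u64_stats_sync.h>

struct rx_counters {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

/* NAPI-context writer: bracket the updates */
static void rx_counters_add(struct rx_counters *c, u64 pkts, u64 bytes)
{
	u64_stats_update_begin(&c->syncp);
	c->packets += pkts;
	c->bytes += bytes;
	u64_stats_update_end(&c->syncp);
}

/* reader: retry until a consistent snapshot is seen (matters on 32-bit) */
static void rx_counters_read(const struct rx_counters *c, u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&c->syncp);
		*pkts = c->packets;
		*bytes = c->bytes;
	} while (u64_stats_fetch_retry_irq(&c->syncp, start));
}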
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 765920905226..3e2c041d76ac 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1699,17 +1699,6 @@ static void virtnet_stats(struct net_device *dev,
tot->rx_frame_errors = dev->stats.rx_frame_errors;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void virtnet_netpoll(struct net_device *dev)
-{
- struct virtnet_info *vi = netdev_priv(dev);
- int i;
-
- for (i = 0; i < vi->curr_queue_pairs; i++)
- napi_schedule(&vi->rq[i].napi);
-}
-#endif
-
static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
rtnl_lock();
@@ -2181,6 +2170,53 @@ static int virtnet_get_link_ksettings(struct net_device *dev,
return 0;
}
+static int virtnet_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec)
+{
+ struct ethtool_coalesce ec_default = {
+ .cmd = ETHTOOL_SCOALESCE,
+ .rx_max_coalesced_frames = 1,
+ };
+ struct virtnet_info *vi = netdev_priv(dev);
+ int i, napi_weight;
+
+ if (ec->tx_max_coalesced_frames > 1)
+ return -EINVAL;
+
+ ec_default.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
+ napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
+
+ /* disallow changes to fields not explicitly tested above */
+ if (memcmp(ec, &ec_default, sizeof(ec_default)))
+ return -EINVAL;
+
+ if (napi_weight ^ vi->sq[0].napi.weight) {
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ vi->sq[i].napi.weight = napi_weight;
+ }
+
+ return 0;
+}
+
+static int virtnet_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec)
+{
+ struct ethtool_coalesce ec_default = {
+ .cmd = ETHTOOL_GCOALESCE,
+ .rx_max_coalesced_frames = 1,
+ };
+ struct virtnet_info *vi = netdev_priv(dev);
+
+ memcpy(ec, &ec_default, sizeof(ec_default));
+
+ if (vi->sq[0].napi.weight)
+ ec->tx_max_coalesced_frames = 1;
+
+ return 0;
+}
+
static void virtnet_init_settings(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
@@ -2219,6 +2255,8 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
.get_link_ksettings = virtnet_get_link_ksettings,
.set_link_ksettings = virtnet_set_link_ksettings,
+ .set_coalesce = virtnet_set_coalesce,
+ .get_coalesce = virtnet_get_coalesce,
};
static void virtnet_freeze_down(struct virtio_device *vdev)
@@ -2229,8 +2267,9 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
/* Make sure no work handler is accessing the device */
flush_work(&vi->config_work);
+ netif_tx_lock_bh(vi->dev);
netif_device_detach(vi->dev);
- netif_tx_disable(vi->dev);
+ netif_tx_unlock_bh(vi->dev);
cancel_delayed_work_sync(&vi->refill);
if (netif_running(vi->dev)) {
@@ -2266,7 +2305,9 @@ static int virtnet_restore_up(struct virtio_device *vdev)
}
}
+ netif_tx_lock_bh(vi->dev);
netif_device_attach(vi->dev);
+ netif_tx_unlock_bh(vi->dev);
return err;
}
@@ -2447,9 +2488,6 @@ static const struct net_device_ops virtnet_netdev = {
.ndo_get_stats64 = virtnet_stats,
.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = virtnet_netpoll,
-#endif
.ndo_bpf = virtnet_xdp,
.ndo_xdp_xmit = virtnet_xdp_xmit,
.ndo_features_check = passthru_features_check,
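virtnet_set_coalesce() above only implements tx-frames 0/1 (toggling tx NAPI) and guards against every other ethtool_coalesce field by comparing the request with a defaults struct. A sketch of that "copy the handled fields, memcmp the rest" pattern, with bar_set_coalesce as an illustrative name:

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>

static int bar_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct ethtool_coalesce supported = {
		.cmd = ETHTOOL_SCOALESCE,
		/* copy over only the fields this driver acts on */
		.rx_max_coalesced_frames = ec->rx_max_coalesced_frames,
	};

	if (ec->rx_max_coalesced_frames > 64)
		return -EINVAL;	/* illustrative limit for the handled field */

	/* any other non-zero field is something the hardware cannot do */
	if (memcmp(ec, &supported, sizeof(supported)))
		return -EINVAL;

	/* ... apply ec->rx_max_coalesced_frames to the device here ... */
	return 0;
}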
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index e5d236595206..297cdeaef479 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -103,22 +103,6 @@ bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
}
-static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
-{
- if (ipa->sa.sa_family == AF_INET6)
- return ipv6_addr_any(&ipa->sin6.sin6_addr);
- else
- return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
-}
-
-static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
-{
- if (ipa->sa.sa_family == AF_INET6)
- return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr);
- else
- return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
-}
-
static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
if (nla_len(nla) >= sizeof(struct in6_addr)) {
@@ -151,16 +135,6 @@ bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
}
-static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
-{
- return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
-}
-
-static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
-{
- return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
-}
-
static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
if (nla_len(nla) >= sizeof(struct in6_addr)) {
@@ -298,6 +272,8 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
ndm->ndm_state = fdb->state;
ndm->ndm_ifindex = vxlan->dev->ifindex;
ndm->ndm_flags = fdb->flags;
+ if (rdst->offloaded)
+ ndm->ndm_flags |= NTF_OFFLOADED;
ndm->ndm_type = RTN_UNICAST;
if (!net_eq(dev_net(vxlan->dev), vxlan->net) &&
@@ -353,8 +329,8 @@ static inline size_t vxlan_nlmsg_size(void)
+ nla_total_size(sizeof(struct nda_cacheinfo));
}
-static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
- struct vxlan_rdst *rd, int type)
+static void __vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
+ struct vxlan_rdst *rd, int type)
{
struct net *net = dev_net(vxlan->dev);
struct sk_buff *skb;
@@ -379,6 +355,49 @@ errout:
rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
+static void vxlan_fdb_switchdev_call_notifiers(struct vxlan_dev *vxlan,
+ struct vxlan_fdb *fdb,
+ struct vxlan_rdst *rd,
+ bool adding)
+{
+ struct switchdev_notifier_vxlan_fdb_info info;
+ enum switchdev_notifier_type notifier_type;
+
+ if (WARN_ON(!rd))
+ return;
+
+ notifier_type = adding ? SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE
+ : SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE;
+
+ info = (struct switchdev_notifier_vxlan_fdb_info){
+ .remote_ip = rd->remote_ip,
+ .remote_port = rd->remote_port,
+ .remote_vni = rd->remote_vni,
+ .remote_ifindex = rd->remote_ifindex,
+ .vni = fdb->vni,
+ .offloaded = rd->offloaded,
+ };
+ memcpy(info.eth_addr, fdb->eth_addr, ETH_ALEN);
+
+ call_switchdev_notifiers(notifier_type, vxlan->dev,
+ &info.info);
+}
+
+static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
+ struct vxlan_rdst *rd, int type)
+{
+ switch (type) {
+ case RTM_NEWNEIGH:
+ vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd, true);
+ break;
+ case RTM_DELNEIGH:
+ vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd, false);
+ break;
+ }
+
+ __vxlan_fdb_notify(vxlan, fdb, rd, type);
+}
+
static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
@@ -488,6 +507,47 @@ static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
return NULL;
}
+int vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni,
+ struct switchdev_notifier_vxlan_fdb_info *fdb_info)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ u8 eth_addr[ETH_ALEN + 2] = { 0 };
+ struct vxlan_rdst *rdst;
+ struct vxlan_fdb *f;
+ int rc = 0;
+
+ if (is_multicast_ether_addr(mac) ||
+ is_zero_ether_addr(mac))
+ return -EINVAL;
+
+ ether_addr_copy(eth_addr, mac);
+
+ rcu_read_lock();
+
+ f = __vxlan_find_mac(vxlan, eth_addr, vni);
+ if (!f) {
+ rc = -ENOENT;
+ goto out;
+ }
+
+ rdst = first_remote_rcu(f);
+
+ memset(fdb_info, 0, sizeof(*fdb_info));
+ fdb_info->info.dev = dev;
+ fdb_info->remote_ip = rdst->remote_ip;
+ fdb_info->remote_port = rdst->remote_port;
+ fdb_info->remote_vni = rdst->remote_vni;
+ fdb_info->remote_ifindex = rdst->remote_ifindex;
+ fdb_info->vni = vni;
+ fdb_info->offloaded = rdst->offloaded;
+ ether_addr_copy(fdb_info->eth_addr, mac);
+
+out:
+ rcu_read_unlock();
+ return rc;
+}
+EXPORT_SYMBOL_GPL(vxlan_fdb_find_uc);
+
/* Replace destination of unicast mac */
static int vxlan_fdb_replace(struct vxlan_fdb *f,
union vxlan_addr *ip, __be16 port, __be32 vni,
@@ -533,6 +593,7 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
rd->remote_ip = *ip;
rd->remote_port = port;
+ rd->offloaded = false;
rd->remote_vni = vni;
rd->remote_ifindex = ifindex;
@@ -697,6 +758,7 @@ static int vxlan_fdb_update(struct vxlan_dev *vxlan,
__be16 port, __be32 src_vni, __be32 vni,
__u32 ifindex, __u8 ndm_flags)
{
+ __u8 fdb_flags = (ndm_flags & ~NTF_USE);
struct vxlan_rdst *rd = NULL;
struct vxlan_fdb *f;
int notify = 0;
@@ -714,8 +776,8 @@ static int vxlan_fdb_update(struct vxlan_dev *vxlan,
f->updated = jiffies;
notify = 1;
}
- if (f->flags != ndm_flags) {
- f->flags = ndm_flags;
+ if (f->flags != fdb_flags) {
+ f->flags = fdb_flags;
f->updated = jiffies;
notify = 1;
}
@@ -737,6 +799,9 @@ static int vxlan_fdb_update(struct vxlan_dev *vxlan,
return rc;
notify |= rc;
}
+
+ if (ndm_flags & NTF_USE)
+ f->used = jiffies;
} else {
if (!(flags & NLM_F_CREATE))
return -ENOENT;
@@ -748,7 +813,7 @@ static int vxlan_fdb_update(struct vxlan_dev *vxlan,
netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni,
- vni, ifindex, ndm_flags, &f);
+ vni, ifindex, fdb_flags, &f);
if (rc < 0)
return rc;
notify = 1;
@@ -778,12 +843,15 @@ static void vxlan_fdb_free(struct rcu_head *head)
static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
bool do_notify)
{
+ struct vxlan_rdst *rd;
+
netdev_dbg(vxlan->dev,
"delete %pM\n", f->eth_addr);
--vxlan->addrcnt;
if (do_notify)
- vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);
+ list_for_each_entry(rd, &f->remotes, list)
+ vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
hlist_del_rcu(&f->hlist);
call_rcu(&f->rcu, vxlan_fdb_free);
@@ -2194,11 +2262,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
}
ndst = &rt->dst;
- if (skb_dst(skb)) {
- int mtu = dst_mtu(ndst) - VXLAN_HEADROOM;
-
- skb_dst_update_pmtu(skb, mtu);
- }
+ skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM);
tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
@@ -2235,11 +2299,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
goto out_unlock;
}
- if (skb_dst(skb)) {
- int mtu = dst_mtu(ndst) - VXLAN6_HEADROOM;
-
- skb_dst_update_pmtu(skb, mtu);
- }
+ skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM);
tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
ttl = ttl ? : ip6_dst_hoplimit(ndst);
@@ -3539,6 +3599,7 @@ static size_t vxlan_get_size(const struct net_device *dev)
nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */
nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */
+ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL_INHERIT */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */
nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */
@@ -3603,6 +3664,8 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
}
if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) ||
+ nla_put_u8(skb, IFLA_VXLAN_TTL_INHERIT,
+ !!(vxlan->cfg.flags & VXLAN_F_TTL_INHERIT)) ||
nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) ||
nla_put_u8(skb, IFLA_VXLAN_LEARNING,
@@ -3754,6 +3817,51 @@ static struct notifier_block vxlan_notifier_block __read_mostly = {
.notifier_call = vxlan_netdevice_event,
};
+static void
+vxlan_fdb_offloaded_set(struct net_device *dev,
+ struct switchdev_notifier_vxlan_fdb_info *fdb_info)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_rdst *rdst;
+ struct vxlan_fdb *f;
+
+ spin_lock_bh(&vxlan->hash_lock);
+
+ f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni);
+ if (!f)
+ goto out;
+
+ rdst = vxlan_fdb_find_rdst(f, &fdb_info->remote_ip,
+ fdb_info->remote_port,
+ fdb_info->remote_vni,
+ fdb_info->remote_ifindex);
+ if (!rdst)
+ goto out;
+
+ rdst->offloaded = fdb_info->offloaded;
+
+out:
+ spin_unlock_bh(&vxlan->hash_lock);
+}
+
+static int vxlan_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+
+ switch (event) {
+ case SWITCHDEV_VXLAN_FDB_OFFLOADED:
+ vxlan_fdb_offloaded_set(dev, ptr);
+ break;
+ }
+
+ return 0;
+}
+
+static struct notifier_block vxlan_switchdev_notifier_block __read_mostly = {
+ .notifier_call = vxlan_switchdev_event,
+};
+
static __net_init int vxlan_init_net(struct net *net)
{
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
@@ -3827,11 +3935,17 @@ static int __init vxlan_init_module(void)
if (rc)
goto out2;
- rc = rtnl_link_register(&vxlan_link_ops);
+ rc = register_switchdev_notifier(&vxlan_switchdev_notifier_block);
if (rc)
goto out3;
+ rc = rtnl_link_register(&vxlan_link_ops);
+ if (rc)
+ goto out4;
+
return 0;
+out4:
+ unregister_switchdev_notifier(&vxlan_switchdev_notifier_block);
out3:
unregister_netdevice_notifier(&vxlan_notifier_block);
out2:
@@ -3844,6 +3958,7 @@ late_initcall(vxlan_init_module);
static void __exit vxlan_cleanup_module(void)
{
rtnl_link_unregister(&vxlan_link_ops);
+ unregister_switchdev_notifier(&vxlan_switchdev_notifier_block);
unregister_netdevice_notifier(&vxlan_notifier_block);
unregister_pernet_subsys(&vxlan_net_ops);
/* rcu_barrier() is called by netns */
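The vxlan changes above publish FDB add/del events on the switchdev notifier chain so offloading drivers can mirror the software FDB into hardware. A hedged sketch of the consuming side (a real consumer such as mlxsw is far more involved; all names except the notifier types and the fdb_info struct are illustrative):

#include <net/switchdev.h>
#include <net/vxlan.h>

static int demo_switchdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct switchdev_notifier_vxlan_fdb_info *fdb_info = ptr;

	switch (event) {
	case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
		/* program a {eth_addr, vni} -> remote_ip tunnel entry,
		 * then report it back via SWITCHDEV_VXLAN_FDB_OFFLOADED
		 */
		netdev_dbg(dev, "offload fdb %pM vni %u\n",
			   fdb_info->eth_addr, be32_to_cpu(fdb_info->vni));
		break;
	case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
		/* remove the corresponding hardware entry */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block demo_switchdev_nb __read_mostly = {
	.notifier_call = demo_switchdev_event,
};
/* hooked up with register_switchdev_notifier(&demo_switchdev_nb) */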
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 8523ade16030..4d6409605207 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -36,6 +36,7 @@
#define DRV_NAME "ucc_hdlc"
#define TDM_PPPOHT_SLIC_MAXIN
+#define RX_BD_ERRORS (R_CD_S | R_OV_S | R_CR_S | R_AB_S | R_NO_S | R_LG_S)
static struct ucc_tdm_info utdm_primary_info = {
.uf_info = {
@@ -430,12 +431,25 @@ static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+static int hdlc_tx_restart(struct ucc_hdlc_private *priv)
+{
+ u32 cecr_subblock;
+
+ cecr_subblock =
+ ucc_fast_get_qe_cr_subblock(priv->ut_info->uf_info.ucc_num);
+
+ qe_issue_cmd(QE_RESTART_TX, cecr_subblock,
+ QE_CR_PROTOCOL_UNSPECIFIED, 0);
+ return 0;
+}
+
static int hdlc_tx_done(struct ucc_hdlc_private *priv)
{
/* Start from the next BD that should be filled */
struct net_device *dev = priv->ndev;
struct qe_bd *bd; /* BD pointer */
u16 bd_status;
+ int tx_restart = 0;
bd = priv->dirty_tx;
bd_status = ioread16be(&bd->status);
@@ -444,6 +458,15 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv)
while ((bd_status & T_R_S) == 0) {
struct sk_buff *skb;
+ if (bd_status & T_UN_S) { /* Underrun */
+ dev->stats.tx_fifo_errors++;
+ tx_restart = 1;
+ }
+ if (bd_status & T_CT_S) { /* Carrier lost */
+ dev->stats.tx_carrier_errors++;
+ tx_restart = 1;
+ }
+
/* BD contains already transmitted buffer. */
/* Handle the transmitted buffer and release */
/* the BD to be used with the current frame */
@@ -475,6 +498,9 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv)
}
priv->dirty_tx = bd;
+ if (tx_restart)
+ hdlc_tx_restart(priv);
+
return 0;
}
@@ -493,11 +519,22 @@ static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
/* while there are received buffers and BD is full (~R_E) */
while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) {
- if (bd_status & R_OV_S)
- dev->stats.rx_over_errors++;
- if (bd_status & R_CR_S) {
- dev->stats.rx_crc_errors++;
- dev->stats.rx_dropped++;
+ if (bd_status & (RX_BD_ERRORS)) {
+ dev->stats.rx_errors++;
+
+ if (bd_status & R_CD_S)
+ dev->stats.collisions++;
+ if (bd_status & R_OV_S)
+ dev->stats.rx_fifo_errors++;
+ if (bd_status & R_CR_S)
+ dev->stats.rx_crc_errors++;
+ if (bd_status & R_AB_S)
+ dev->stats.rx_over_errors++;
+ if (bd_status & R_NO_S)
+ dev->stats.rx_frame_errors++;
+ if (bd_status & R_LG_S)
+ dev->stats.rx_length_errors++;
+
goto recycle;
}
bdbuffer = priv->rx_buffer +
@@ -546,7 +583,7 @@ static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
netif_receive_skb(skb);
recycle:
- iowrite16be(bd_status | R_E_S | R_I_S, &bd->status);
+ iowrite16be((bd_status & R_W_S) | R_E_S | R_I_S, &bd->status);
/* update to point at the next bd */
if (bd_status & R_W_S) {
@@ -622,7 +659,7 @@ static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
/* Errors and other events */
if (ucce >> 16 & UCC_HDLC_UCCE_BSY)
- dev->stats.rx_errors++;
+ dev->stats.rx_missed_errors++;
if (ucce >> 16 & UCC_HDLC_UCCE_TXE)
dev->stats.tx_errors++;
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 74c06a5f586f..1098263ab862 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -33,7 +33,6 @@
#include <linux/lapb.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
-#include <linux/compat.h>
#include <linux/slab.h>
#include <net/x25device.h>
#include "x25_asy.h"
@@ -703,21 +702,6 @@ static int x25_asy_ioctl(struct tty_struct *tty, struct file *file,
}
}
-#ifdef CONFIG_COMPAT
-static long x25_asy_compat_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- switch (cmd) {
- case SIOCGIFNAME:
- case SIOCSIFHWADDR:
- return x25_asy_ioctl(tty, file, cmd,
- (unsigned long)compat_ptr(arg));
- }
-
- return -ENOIOCTLCMD;
-}
-#endif
-
static int x25_asy_open_dev(struct net_device *dev)
{
struct x25_asy *sl = netdev_priv(dev);
@@ -769,9 +753,6 @@ static struct tty_ldisc_ops x25_ldisc = {
.open = x25_asy_open_tty,
.close = x25_asy_close_tty,
.ioctl = x25_asy_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = x25_asy_compat_ioctl,
-#endif
.receive_buf = x25_asy_receive_buf,
.write_wakeup = x25_asy_write_wakeup,
};
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index 094cea775d0c..ef298d8525c5 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -257,7 +257,7 @@ static const struct
[I2400M_MS_ACCESSIBILITY_ERROR] = { "accesibility error", -EIO },
[I2400M_MS_BUSY] = { "busy", -EBUSY },
[I2400M_MS_CORRUPTED_TLV] = { "corrupted TLV", -EILSEQ },
- [I2400M_MS_UNINITIALIZED] = { "not unitialized", -EILSEQ },
+ [I2400M_MS_UNINITIALIZED] = { "uninitialized", -EILSEQ },
[I2400M_MS_UNKNOWN_ERROR] = { "unknown error", -EIO },
[I2400M_MS_PRODUCTION_ERROR] = { "production error", -EIO },
[I2400M_MS_NO_RF] = { "no RF", -EIO },
diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c
index d113bd997f4b..dfc4c34298d4 100644
--- a/drivers/net/wireless/broadcom/b43/dma.c
+++ b/drivers/net/wireless/broadcom/b43/dma.c
@@ -1518,13 +1518,15 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
}
} else {
/* More than a single header/data pair were missed.
- * Report this error, and reset the controller to
+ * Report this error. If running with open-source
+ * firmware, then reset the controller to
* revive operation.
*/
b43dbg(dev->wl,
"Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
ring->index, firstused, slot);
- b43_controller_restart(dev, "Out of order TX");
+ if (dev->fw.opensource)
+ b43_controller_restart(dev, "Out of order TX");
return;
}
}
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index 4daa1ce8cba3..74be3c809225 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -5493,13 +5493,11 @@ err_powerdown:
static void b43_one_core_detach(struct b43_bus_dev *dev)
{
struct b43_wldev *wldev;
- struct b43_wl *wl;
/* Do not cancel ieee80211-workqueue based work here.
* See comment in b43_remove(). */
wldev = b43_bus_get_wldev(dev);
- wl = wldev->wl;
b43_debugfs_remove_device(wldev);
b43_wireless_core_detach(wldev);
list_del(&wldev->list);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
index 1f5a9b948abf..22fd95a736a8 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
@@ -54,3 +54,5 @@ brcmfmac-$(CONFIG_BRCM_TRACING) += \
tracepoint.o
brcmfmac-$(CONFIG_OF) += \
of.o
+brcmfmac-$(CONFIG_DMI) += \
+ dmi.o
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index 94044a7a6021..e738112ed87c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -214,7 +214,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
err = brcmf_fil_iovar_data_get(ifp, "cur_etheraddr", ifp->mac_addr,
sizeof(ifp->mac_addr));
if (err < 0) {
- brcmf_err("Retreiving cur_etheraddr failed, %d\n", err);
+ brcmf_err("Retrieving cur_etheraddr failed, %d\n", err);
goto done;
}
memcpy(ifp->drvr->wiphy->perm_addr, ifp->drvr->mac, ETH_ALEN);
@@ -269,7 +269,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
strcpy(buf, "ver");
err = brcmf_fil_iovar_data_get(ifp, "ver", buf, sizeof(buf));
if (err < 0) {
- brcmf_err("Retreiving version information failed, %d\n",
+ brcmf_err("Retrieving version information failed, %d\n",
err);
goto done;
}
@@ -448,8 +448,9 @@ struct brcmf_mp_device *brcmf_get_module_param(struct device *dev,
}
}
if (!found) {
- /* No platform data for this device, try OF (Open Firwmare) */
+ /* No platform data for this device, try OF and DMI data */
brcmf_of_probe(dev, bus_type, settings);
+ brcmf_dmi_probe(settings, chip, chiprev);
}
return settings;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
index a34642cb4d2f..4ce56be90b74 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
@@ -59,6 +59,7 @@ struct brcmf_mp_device {
bool iapp;
bool ignore_probe_fail;
struct brcmfmac_pd_cc *country_codes;
+ const char *board_type;
union {
struct brcmfmac_sdio_pd sdio;
} bus;
@@ -74,4 +75,11 @@ void brcmf_release_module_param(struct brcmf_mp_device *module_param);
/* Sets dongle media info (drv_version, mac address). */
int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
+#ifdef CONFIG_DMI
+void brcmf_dmi_probe(struct brcmf_mp_device *settings, u32 chip, u32 chiprev);
+#else
+static inline void
+brcmf_dmi_probe(struct brcmf_mp_device *settings, u32 chip, u32 chiprev) {}
+#endif
+
#endif /* BRCMFMAC_COMMON_H */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
new file mode 100644
index 000000000000..51d76ac45075
--- /dev/null
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2018 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/dmi.h>
+#include <linux/mod_devicetable.h>
+#include "core.h"
+#include "common.h"
+#include "brcm_hw_ids.h"
+
+/* The DMI data never changes so we can use a static buf for this */
+static char dmi_board_type[128];
+
+struct brcmf_dmi_data {
+ u32 chip;
+ u32 chiprev;
+ const char *board_type;
+};
+
+/* NOTE: Please keep all entries sorted alphabetically */
+
+static const struct brcmf_dmi_data gpd_win_pocket_data = {
+ BRCM_CC_4356_CHIP_ID, 2, "gpd-win-pocket"
+};
+
+static const struct brcmf_dmi_data jumper_ezpad_mini3_data = {
+ BRCM_CC_43430_CHIP_ID, 0, "jumper-ezpad-mini3"
+};
+
+static const struct brcmf_dmi_data meegopad_t08_data = {
+ BRCM_CC_43340_CHIP_ID, 2, "meegopad-t08"
+};
+
+static const struct dmi_system_id dmi_platform_data[] = {
+ {
+ /* Match for the GPDwin which unfortunately uses somewhat
+ * generic dmi strings, which is why we test for 4 strings.
+ * Comparing against 23 other byt/cht boards, board_vendor
+ * and board_name are unique to the GPDwin, whereas only one
+ * other board has the same board_serial and 3 others have
+ * the same default product_name. Also the GPDwin is the
+ * only device to have both board_ and product_name not set.
+ */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "Default string"),
+ DMI_MATCH(DMI_BOARD_SERIAL, "Default string"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
+ },
+ .driver_data = (void *)&gpd_win_pocket_data,
+ },
+ {
+ /* Jumper EZpad mini3 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CherryTrail"),
+ /* jumperx.T87.KFBNEEA02 with the version-nr dropped */
+ DMI_MATCH(DMI_BIOS_VERSION, "jumperx.T87.KFBNEEA"),
+ },
+ .driver_data = (void *)&jumper_ezpad_mini3_data,
+ },
+ {
+ /* Meegopad T08 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Default string"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
+ DMI_MATCH(DMI_BOARD_NAME, "T3 MRD"),
+ DMI_MATCH(DMI_BOARD_VERSION, "V1.1"),
+ },
+ .driver_data = (void *)&meegopad_t08_data,
+ },
+ {}
+};
+
+void brcmf_dmi_probe(struct brcmf_mp_device *settings, u32 chip, u32 chiprev)
+{
+ const struct dmi_system_id *match;
+ const struct brcmf_dmi_data *data;
+ const char *sys_vendor;
+ const char *product_name;
+
+ /* Some models have DMI strings which are too generic, e.g.
+ * "Default string", we use a quirk table for these.
+ */
+ for (match = dmi_first_match(dmi_platform_data);
+ match;
+ match = dmi_first_match(match + 1)) {
+ data = match->driver_data;
+
+ if (data->chip == chip && data->chiprev == chiprev) {
+ settings->board_type = data->board_type;
+ return;
+ }
+ }
+
+ /* Not found in the quirk-table, use sys_vendor-product_name */
+ sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
+ product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
+ if (sys_vendor && product_name) {
+ snprintf(dmi_board_type, sizeof(dmi_board_type), "%s-%s",
+ sys_vendor, product_name);
+ settings->board_type = dmi_board_type;
+ }
+}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index 9095b830ae4d..72d8c0c3c3a1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -14,6 +14,7 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/efi.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>
@@ -445,6 +446,75 @@ struct brcmf_fw {
static void brcmf_fw_request_done(const struct firmware *fw, void *ctx);
+#ifdef CONFIG_EFI
+/* In some cases the EFI-var stored nvram contains "ccode=ALL" or "ccode=XV"
+ * to specify "worldwide" compatible settings, but these 2 ccode-s do not work
+ * properly. "ccode=ALL" causes channels 12 and 13 to not be available,
+ * "ccode=XV" causes all 5GHz channels to not be available. So we replace both
+ * with "ccode=X2" which allows channels 12+13 and 5GHz channels in
+ * no-Initiate-Radiation mode. This means that we will never send on these
+ * channels without first having received valid wifi traffic on the channel.
+ */
+static void brcmf_fw_fix_efi_nvram_ccode(char *data, unsigned long data_len)
+{
+ char *ccode;
+
+ ccode = strnstr((char *)data, "ccode=ALL", data_len);
+ if (!ccode)
+ ccode = strnstr((char *)data, "ccode=XV\r", data_len);
+ if (!ccode)
+ return;
+
+ ccode[6] = 'X';
+ ccode[7] = '2';
+ ccode[8] = '\r';
+}
+
+static u8 *brcmf_fw_nvram_from_efi(size_t *data_len_ret)
+{
+ const u16 name[] = { 'n', 'v', 'r', 'a', 'm', 0 };
+ struct efivar_entry *nvram_efivar;
+ unsigned long data_len = 0;
+ u8 *data = NULL;
+ int err;
+
+ nvram_efivar = kzalloc(sizeof(*nvram_efivar), GFP_KERNEL);
+ if (!nvram_efivar)
+ return NULL;
+
+ memcpy(&nvram_efivar->var.VariableName, name, sizeof(name));
+ nvram_efivar->var.VendorGuid = EFI_GUID(0x74b00bd9, 0x805a, 0x4d61,
+ 0xb5, 0x1f, 0x43, 0x26,
+ 0x81, 0x23, 0xd1, 0x13);
+
+ err = efivar_entry_size(nvram_efivar, &data_len);
+ if (err)
+ goto fail;
+
+ data = kmalloc(data_len, GFP_KERNEL);
+ if (!data)
+ goto fail;
+
+ err = efivar_entry_get(nvram_efivar, NULL, &data_len, data);
+ if (err)
+ goto fail;
+
+ brcmf_fw_fix_efi_nvram_ccode(data, data_len);
+ brcmf_info("Using nvram EFI variable\n");
+
+ kfree(nvram_efivar);
+ *data_len_ret = data_len;
+ return data;
+
+fail:
+ kfree(data);
+ kfree(nvram_efivar);
+ return NULL;
+}
+#else
+static u8 *brcmf_fw_nvram_from_efi(size_t *data_len) { return NULL; }
+#endif
+
static void brcmf_fw_free_request(struct brcmf_fw_request *req)
{
struct brcmf_fw_item *item;
@@ -463,11 +533,12 @@ static int brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
{
struct brcmf_fw *fwctx = ctx;
struct brcmf_fw_item *cur;
+ bool free_bcm47xx_nvram = false;
+ bool kfree_nvram = false;
u32 nvram_length = 0;
void *nvram = NULL;
u8 *data = NULL;
size_t data_len;
- bool raw_nvram;
brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev));
@@ -476,12 +547,13 @@ static int brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
if (fw && fw->data) {
data = (u8 *)fw->data;
data_len = fw->size;
- raw_nvram = false;
} else {
- data = bcm47xx_nvram_get_contents(&data_len);
- if (!data && !(cur->flags & BRCMF_FW_REQF_OPTIONAL))
+ if ((data = bcm47xx_nvram_get_contents(&data_len)))
+ free_bcm47xx_nvram = true;
+ else if ((data = brcmf_fw_nvram_from_efi(&data_len)))
+ kfree_nvram = true;
+ else if (!(cur->flags & BRCMF_FW_REQF_OPTIONAL))
goto fail;
- raw_nvram = true;
}
if (data)
@@ -489,8 +561,11 @@ static int brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
fwctx->req->domain_nr,
fwctx->req->bus_nr);
- if (raw_nvram)
+ if (free_bcm47xx_nvram)
bcm47xx_nvram_release_contents(data);
+ if (kfree_nvram)
+ kfree(data);
+
release_firmware(fw);
if (!nvram && !(cur->flags & BRCMF_FW_REQF_OPTIONAL))
goto fail;
@@ -504,90 +579,75 @@ fail:
return -ENOENT;
}
-static int brcmf_fw_request_next_item(struct brcmf_fw *fwctx, bool async)
-{
- struct brcmf_fw_item *cur;
- const struct firmware *fw = NULL;
- int ret;
-
- cur = &fwctx->req->items[fwctx->curpos];
-
- brcmf_dbg(TRACE, "%srequest for %s\n", async ? "async " : "",
- cur->path);
-
- if (async)
- ret = request_firmware_nowait(THIS_MODULE, true, cur->path,
- fwctx->dev, GFP_KERNEL, fwctx,
- brcmf_fw_request_done);
- else
- ret = request_firmware(&fw, cur->path, fwctx->dev);
-
- if (ret < 0) {
- brcmf_fw_request_done(NULL, fwctx);
- } else if (!async && fw) {
- brcmf_dbg(TRACE, "firmware %s %sfound\n", cur->path,
- fw ? "" : "not ");
- if (cur->type == BRCMF_FW_TYPE_BINARY)
- cur->binary = fw;
- else if (cur->type == BRCMF_FW_TYPE_NVRAM)
- brcmf_fw_request_nvram_done(fw, fwctx);
- else
- release_firmware(fw);
-
- return -EAGAIN;
- }
- return 0;
-}
-
-static void brcmf_fw_request_done(const struct firmware *fw, void *ctx)
+static int brcmf_fw_complete_request(const struct firmware *fw,
+ struct brcmf_fw *fwctx)
{
- struct brcmf_fw *fwctx = ctx;
- struct brcmf_fw_item *cur;
+ struct brcmf_fw_item *cur = &fwctx->req->items[fwctx->curpos];
int ret = 0;
- cur = &fwctx->req->items[fwctx->curpos];
-
- brcmf_dbg(TRACE, "enter: firmware %s %sfound\n", cur->path,
- fw ? "" : "not ");
-
- if (!fw)
- ret = -ENOENT;
+ brcmf_dbg(TRACE, "firmware %s %sfound\n", cur->path, fw ? "" : "not ");
switch (cur->type) {
case BRCMF_FW_TYPE_NVRAM:
ret = brcmf_fw_request_nvram_done(fw, fwctx);
break;
case BRCMF_FW_TYPE_BINARY:
- cur->binary = fw;
+ if (fw)
+ cur->binary = fw;
+ else
+ ret = -ENOENT;
break;
default:
/* something fishy here so bail out early */
brcmf_err("unknown fw type: %d\n", cur->type);
release_firmware(fw);
ret = -EINVAL;
- goto fail;
}
- if (ret < 0 && !(cur->flags & BRCMF_FW_REQF_OPTIONAL))
- goto fail;
+ return (cur->flags & BRCMF_FW_REQF_OPTIONAL) ? 0 : ret;
+}
- do {
- if (++fwctx->curpos == fwctx->req->n_items) {
- ret = 0;
- goto done;
- }
+static int brcmf_fw_request_firmware(const struct firmware **fw,
+ struct brcmf_fw *fwctx)
+{
+ struct brcmf_fw_item *cur = &fwctx->req->items[fwctx->curpos];
+ int ret;
- ret = brcmf_fw_request_next_item(fwctx, false);
- } while (ret == -EAGAIN);
+ /* nvram files are board-specific, first try a board-specific path */
+ if (cur->type == BRCMF_FW_TYPE_NVRAM && fwctx->req->board_type) {
+ char alt_path[BRCMF_FW_NAME_LEN];
- return;
+ strlcpy(alt_path, cur->path, BRCMF_FW_NAME_LEN);
+ /* strip .txt at the end */
+ alt_path[strlen(alt_path) - 4] = 0;
+ strlcat(alt_path, ".", BRCMF_FW_NAME_LEN);
+ strlcat(alt_path, fwctx->req->board_type, BRCMF_FW_NAME_LEN);
+ strlcat(alt_path, ".txt", BRCMF_FW_NAME_LEN);
-fail:
- brcmf_dbg(TRACE, "failed err=%d: dev=%s, fw=%s\n", ret,
- dev_name(fwctx->dev), cur->path);
- brcmf_fw_free_request(fwctx->req);
- fwctx->req = NULL;
-done:
+ ret = request_firmware(fw, alt_path, fwctx->dev);
+ if (ret == 0)
+ return ret;
+ }
+
+ return request_firmware(fw, cur->path, fwctx->dev);
+}
+
+static void brcmf_fw_request_done(const struct firmware *fw, void *ctx)
+{
+ struct brcmf_fw *fwctx = ctx;
+ int ret;
+
+ ret = brcmf_fw_complete_request(fw, fwctx);
+
+ while (ret == 0 && ++fwctx->curpos < fwctx->req->n_items) {
+ brcmf_fw_request_firmware(&fw, fwctx);
+ ret = brcmf_fw_complete_request(fw, ctx);
+ }
+
+ if (ret) {
+ brcmf_fw_free_request(fwctx->req);
+ fwctx->req = NULL;
+ }
fwctx->done(fwctx->dev, ret, fwctx->req);
kfree(fwctx);
}
@@ -611,7 +671,9 @@ int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req,
void (*fw_cb)(struct device *dev, int err,
struct brcmf_fw_request *req))
{
+ struct brcmf_fw_item *first = &req->items[0];
struct brcmf_fw *fwctx;
+ int ret;
brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(dev));
if (!fw_cb)
@@ -628,7 +690,12 @@ int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req,
fwctx->req = req;
fwctx->done = fw_cb;
- brcmf_fw_request_next_item(fwctx, true);
+ ret = request_firmware_nowait(THIS_MODULE, true, first->path,
+ fwctx->dev, GFP_KERNEL, fwctx,
+ brcmf_fw_request_done);
+ if (ret < 0)
+ brcmf_fw_request_done(NULL, fwctx);
+
return 0;
}
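With the board_type plumbing above, a board identified (via DT or the new DMI quirks) as, say, "jumper-ezpad-mini3" with a 43430 SDIO chip first requests brcmfmac43430-sdio.jumper-ezpad-mini3.txt and only falls back to brcmfmac43430-sdio.txt if that file is absent. A tiny standalone sketch of the name the alt_path logic builds (the helper name is illustrative):

#include <stdio.h>
#include <string.h>

/* strip the trailing ".txt" and splice in the board type, as
 * brcmf_fw_request_firmware() above does with strlcpy/strlcat
 */
static void board_nvram_name(char *out, size_t len,
			     const char *path, const char *board_type)
{
	snprintf(out, len, "%.*s.%s.txt",
		 (int)(strlen(path) - 4), path, board_type);
}

int main(void)
{
	char name[128];

	board_nvram_name(name, sizeof(name),
			 "brcmfmac43430-sdio.txt", "jumper-ezpad-mini3");
	printf("%s\n", name);	/* brcmfmac43430-sdio.jumper-ezpad-mini3.txt */
	return 0;
}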
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
index 2893e56910f0..a0834be8864e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
@@ -70,6 +70,7 @@ struct brcmf_fw_request {
u16 domain_nr;
u16 bus_nr;
u32 n_items;
+ const char *board_type;
struct brcmf_fw_item items[0];
};
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
index aee6e5937c41..84e3373289eb 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
@@ -27,11 +27,20 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
struct brcmf_mp_device *settings)
{
struct brcmfmac_sdio_pd *sdio = &settings->bus.sdio;
- struct device_node *np = dev->of_node;
+ struct device_node *root, *np = dev->of_node;
+ struct property *prop;
int irq;
u32 irqf;
u32 val;
+ /* Set board-type to the first string of the machine compatible prop */
+ root = of_find_node_by_path("/");
+ if (root) {
+ prop = of_find_property(root, "compatible", NULL);
+ settings->board_type = of_prop_next_string(prop, NULL);
+ of_node_put(root);
+ }
+
if (!np || bus_type != BRCMF_BUSTYPE_SDIO ||
!of_device_is_compatible(np, "brcm,bcm4329-fmac"))
return;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index 3e9c4f2f5dd1..456a1bf008b3 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -74,7 +74,7 @@
#define P2P_AF_MAX_WAIT_TIME msecs_to_jiffies(2000)
#define P2P_INVALID_CHANNEL -1
#define P2P_CHANNEL_SYNC_RETRY 5
-#define P2P_AF_FRM_SCAN_MAX_WAIT msecs_to_jiffies(1500)
+#define P2P_AF_FRM_SCAN_MAX_WAIT msecs_to_jiffies(450)
#define P2P_DEFAULT_SLEEP_TIME_VSDB 200
/* WiFi P2P Public Action Frame OUI Subtypes */
@@ -1134,7 +1134,6 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p)
{
struct afx_hdl *afx_hdl = &p2p->afx_hdl;
struct brcmf_cfg80211_vif *pri_vif;
- unsigned long duration;
s32 retry;
brcmf_dbg(TRACE, "Enter\n");
@@ -1150,7 +1149,6 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p)
* pending action frame tx is cancelled.
*/
retry = 0;
- duration = msecs_to_jiffies(P2P_AF_FRM_SCAN_MAX_WAIT);
while ((retry < P2P_CHANNEL_SYNC_RETRY) &&
(afx_hdl->peer_chan == P2P_INVALID_CHANNEL)) {
afx_hdl->is_listen = false;
@@ -1158,7 +1156,8 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p)
retry);
/* search peer on peer's listen channel */
schedule_work(&afx_hdl->afx_work);
- wait_for_completion_timeout(&afx_hdl->act_frm_scan, duration);
+ wait_for_completion_timeout(&afx_hdl->act_frm_scan,
+ P2P_AF_FRM_SCAN_MAX_WAIT);
if ((afx_hdl->peer_chan != P2P_INVALID_CHANNEL) ||
(!test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
&p2p->status)))
@@ -1171,7 +1170,7 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p)
afx_hdl->is_listen = true;
schedule_work(&afx_hdl->afx_work);
wait_for_completion_timeout(&afx_hdl->act_frm_scan,
- duration);
+ P2P_AF_FRM_SCAN_MAX_WAIT);
}
if ((afx_hdl->peer_chan != P2P_INVALID_CHANNEL) ||
(!test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
@@ -1458,10 +1457,12 @@ int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp,
return 0;
if (e->event_code == BRCMF_E_ACTION_FRAME_COMPLETE) {
- if (e->status == BRCMF_E_STATUS_SUCCESS)
+ if (e->status == BRCMF_E_STATUS_SUCCESS) {
set_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED,
&p2p->status);
- else {
+ if (!p2p->wait_for_offchan_complete)
+ complete(&p2p->send_af_done);
+ } else {
set_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status);
/* If there is no ack, we don't need to wait for
* WLC_E_ACTION_FRAME_OFFCHAN_COMPLETE event
@@ -1512,6 +1513,17 @@ static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p,
p2p->af_sent_channel = le32_to_cpu(af_params->channel);
p2p->af_tx_sent_jiffies = jiffies;
+ if (test_bit(BRCMF_P2P_STATUS_DISCOVER_LISTEN, &p2p->status) &&
+ p2p->af_sent_channel ==
+ ieee80211_frequency_to_channel(p2p->remain_on_channel.center_freq))
+ p2p->wait_for_offchan_complete = false;
+ else
+ p2p->wait_for_offchan_complete = true;
+
+ brcmf_dbg(TRACE, "Waiting for %s tx completion event\n",
+ (p2p->wait_for_offchan_complete) ?
+ "off-channel" : "on-channel");
+
timeout = wait_for_completion_timeout(&p2p->send_af_done,
P2P_AF_MAX_WAIT_TIME);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
index 0e8b34d2d85c..39f0d0218088 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
@@ -124,6 +124,7 @@ struct afx_hdl {
* @gon_req_action: about to send go negotiation requets frame.
* @block_gon_req_tx: drop tx go negotiation requets frame.
* @p2pdev_dynamically: is p2p device if created by module param or supplicant.
+ * @wait_for_offchan_complete: wait for off-channel tx completion event.
*/
struct brcmf_p2p_info {
struct brcmf_cfg80211_info *cfg;
@@ -144,6 +145,7 @@ struct brcmf_p2p_info {
bool gon_req_action;
bool block_gon_req_tx;
bool p2pdev_dynamically;
+ bool wait_for_offchan_complete;
};
s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg, bool p2pdev_forced);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 5dea569d63ed..956a8b236836 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -1785,6 +1785,7 @@ brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo)
fwreq->items[BRCMF_PCIE_FW_CODE].type = BRCMF_FW_TYPE_BINARY;
fwreq->items[BRCMF_PCIE_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM;
fwreq->items[BRCMF_PCIE_FW_NVRAM].flags = BRCMF_FW_REQF_OPTIONAL;
+ fwreq->board_type = devinfo->settings->board_type;
/* NVRAM reserves PCI domain 0 for Broadcom's SDK faked bus */
fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus) + 1;
fwreq->bus_nr = devinfo->pdev->bus->number;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index b2e1ab5adb64..22646a3b911e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -4174,6 +4174,7 @@ brcmf_sdio_prepare_fw_request(struct brcmf_sdio *bus)
fwreq->items[BRCMF_SDIO_FW_CODE].type = BRCMF_FW_TYPE_BINARY;
fwreq->items[BRCMF_SDIO_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM;
+ fwreq->board_type = bus->sdiodev->settings->board_type;
return fwreq;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index 6255fb6d97a7..6188275b17e5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -502,6 +502,7 @@ brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
spin_lock_bh(&wl->lock);
+ wl->wlc->vif = vif;
wl->mute_tx = false;
brcms_c_mute(wl->wlc, false);
if (vif->type == NL80211_IFTYPE_STATION)
@@ -519,6 +520,11 @@ brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
static void
brcms_ops_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
+ struct brcms_info *wl = hw->priv;
+
+ spin_lock_bh(&wl->lock);
+ wl->wlc->vif = NULL;
+ spin_unlock_bh(&wl->lock);
}
static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed)
@@ -840,8 +846,8 @@ brcms_ops_ampdu_action(struct ieee80211_hw *hw,
status = brcms_c_aggregatable(wl->wlc, tid);
spin_unlock_bh(&wl->lock);
if (!status) {
- brcms_err(wl->wlc->hw->d11core,
- "START: tid %d is not agg\'able\n", tid);
+ brcms_dbg_ht(wl->wlc->hw->d11core,
+ "START: tid %d is not agg\'able\n", tid);
return -EINVAL;
}
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
@@ -937,6 +943,25 @@ static void brcms_ops_set_tsf(struct ieee80211_hw *hw,
spin_unlock_bh(&wl->lock);
}
+static int brcms_ops_beacon_set_tim(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, bool set)
+{
+ struct brcms_info *wl = hw->priv;
+ struct sk_buff *beacon = NULL;
+ u16 tim_offset = 0;
+
+ spin_lock_bh(&wl->lock);
+ if (wl->wlc->vif)
+ beacon = ieee80211_beacon_get_tim(hw, wl->wlc->vif,
+ &tim_offset, NULL);
+ if (beacon)
+ brcms_c_set_new_beacon(wl->wlc, beacon, tim_offset,
+ wl->wlc->vif->bss_conf.dtim_period);
+ spin_unlock_bh(&wl->lock);
+
+ return 0;
+}
+
static const struct ieee80211_ops brcms_ops = {
.tx = brcms_ops_tx,
.start = brcms_ops_start,
@@ -955,6 +980,7 @@ static const struct ieee80211_ops brcms_ops = {
.flush = brcms_ops_flush,
.get_tsf = brcms_ops_get_tsf,
.set_tsf = brcms_ops_set_tsf,
+ .set_tim = brcms_ops_beacon_set_tim,
};
void brcms_dpc(unsigned long data)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h
index c4d135cff04a..9f76b880814e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h
@@ -563,6 +563,7 @@ struct brcms_c_info {
struct wiphy *wiphy;
struct scb pri_scb;
+ struct ieee80211_vif *vif;
struct sk_buff *beacon;
u16 beacon_tim_offset;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
index e7584b842dce..40e94fe72f46 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
@@ -128,7 +128,7 @@ static void brcmu_d11n_decchspec(struct brcmu_chan *ch)
}
break;
default:
- WARN_ON_ONCE(1);
+ WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec);
break;
}
@@ -140,7 +140,7 @@ static void brcmu_d11n_decchspec(struct brcmu_chan *ch)
ch->band = BRCMU_CHAN_BAND_2G;
break;
default:
- WARN_ON_ONCE(1);
+ WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec);
break;
}
}
@@ -167,7 +167,7 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
ch->sb = BRCMU_CHAN_SB_U;
ch->control_ch_num += CH_10MHZ_APART;
} else {
- WARN_ON_ONCE(1);
+ WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec);
}
break;
case BRCMU_CHSPEC_D11AC_BW_80:
@@ -188,7 +188,7 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
ch->control_ch_num += CH_30MHZ_APART;
break;
default:
- WARN_ON_ONCE(1);
+ WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec);
break;
}
break;
@@ -219,13 +219,13 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
ch->control_ch_num += CH_70MHZ_APART;
break;
default:
- WARN_ON_ONCE(1);
+ WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec);
break;
}
break;
case BRCMU_CHSPEC_D11AC_BW_8080:
default:
- WARN_ON_ONCE(1);
+ WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec);
break;
}
@@ -237,7 +237,7 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
ch->band = BRCMU_CHAN_BAND_2G;
break;
default:
- WARN_ON_ONCE(1);
+ WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec);
break;
}
}
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index 04dd7a936593..5512c7f73fce 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -5462,7 +5462,7 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
we have to add a spin lock... */
rc = readBSSListRid(ai, doLoseSync, &BSSList_rid);
while(rc == 0 && BSSList_rid.index != cpu_to_le16(0xffff)) {
- ptr += sprintf(ptr, "%pM %*s rssi = %d",
+ ptr += sprintf(ptr, "%pM %.*s rssi = %d",
BSSList_rid.bssid,
(int)BSSList_rid.ssidLen,
BSSList_rid.ssid,
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-rs.c b/drivers/net/wireless/intel/iwlegacy/3945-rs.c
index e8983c6a2b7b..a697edd46e7f 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-rs.c
@@ -781,7 +781,7 @@ il3945_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
switch (scale_action) {
case -1:
- /* Decrese rate */
+ /* Decrease rate */
if (low != RATE_INVALID)
idx = low;
break;
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index 280cd8ae1696..6b4488a178a7 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -559,7 +559,7 @@ il4965_translate_rx_status(struct il_priv *il, u32 decrypt_in)
decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
break;
}
- /* fall through if TTAK OK */
+ /* fall through - if TTAK OK */
default:
if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
diff --git a/drivers/net/wireless/intel/iwlegacy/4965.c b/drivers/net/wireless/intel/iwlegacy/4965.c
index c3c638ed0ed7..ce4144a89217 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965.c
@@ -1297,6 +1297,8 @@ il4965_send_rxon_assoc(struct il_priv *il)
const struct il_rxon_cmd *rxon1 = &il->staging;
const struct il_rxon_cmd *rxon2 = &il->active;
+ lockdep_assert_held(&il->mutex);
+
if (rxon1->flags == rxon2->flags &&
rxon1->filter_flags == rxon2->filter_flags &&
rxon1->cck_basic_rates == rxon2->cck_basic_rates &&
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index 6514baf799fe..a2f86cbcc740 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -2695,6 +2695,7 @@ il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
RX_RES_STATUS_BAD_KEY_TTAK)
break;
+ /* fall through */
case RX_RES_STATUS_SEC_TYPE_WEP:
if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
@@ -2704,6 +2705,7 @@ il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
D_RX("Packet destroyed\n");
return -1;
}
+ /* fall through */
case RX_RES_STATUS_SEC_TYPE_CCMP:
if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
RX_RES_STATUS_DECRYPT_OK) {
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
index 46686ee88ff4..76b5ddb20248 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
@@ -47,6 +47,7 @@
static const struct iwl_base_params iwl1000_base_params = {
.num_of_queues = IWLAGN_NUM_QUEUES,
+ .max_tfd_queue_size = 256,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.pll_cfg = true,
.max_ll_items = OTP_MAX_LL_ITEMS_1000,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index a8acc755a02c..da5d5f9b2573 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -56,7 +56,7 @@
#include "iwl-config.h"
/* Highest firmware API version supported */
-#define IWL_22000_UCODE_API_MAX 38
+#define IWL_22000_UCODE_API_MAX 41
/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 39
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index 37deaf4fd7b3..d55fd23cafe6 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -57,7 +57,7 @@
#include "fw/file.h"
/* Highest firmware API version supported */
-#define IWL9000_UCODE_API_MAX 38
+#define IWL9000_UCODE_API_MAX 41
/* Lowest firmware API version supported */
#define IWL9000_UCODE_API_MIN 30
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index 6c5338364794..93b392f0c6a4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -165,7 +165,7 @@ struct iwl_nvm_access_resp {
*/
struct iwl_nvm_get_info {
__le32 reserved;
-} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_S_VER_1 */
+} __packed; /* REGULATORY_NVM_GET_INFO_CMD_API_S_VER_1 */
/**
* enum iwl_nvm_info_general_flags - flags in NVM_GET_INFO resp
@@ -180,14 +180,14 @@ enum iwl_nvm_info_general_flags {
* @flags: bit 0: 1 - empty, 0 - non-empty
* @nvm_version: nvm version
* @board_type: board type
- * @reserved: reserved
+ * @n_hw_addrs: number of reserved MAC addresses
*/
struct iwl_nvm_get_info_general {
__le32 flags;
__le16 nvm_version;
u8 board_type;
- u8 reserved;
-} __packed; /* GRP_REGULATORY_NVM_GET_INFO_GENERAL_S_VER_1 */
+ u8 n_hw_addrs;
+} __packed; /* REGULATORY_NVM_GET_INFO_GENERAL_S_VER_2 */
/**
* enum iwl_nvm_mac_sku_flags - flags in &iwl_nvm_get_info_sku
@@ -231,7 +231,7 @@ struct iwl_nvm_get_info_sku {
struct iwl_nvm_get_info_phy {
__le32 tx_chains;
__le32 rx_chains;
-} __packed; /* GRP_REGULATORY_NVM_GET_INFO_PHY_SKU_SECTION_S_VER_1 */
+} __packed; /* REGULATORY_NVM_GET_INFO_PHY_SKU_SECTION_S_VER_1 */
#define IWL_NUM_CHANNELS (51)
@@ -245,7 +245,7 @@ struct iwl_nvm_get_info_regulatory {
__le32 lar_enabled;
__le16 channel_profile[IWL_NUM_CHANNELS];
__le16 reserved;
-} __packed; /* GRP_REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_1 */
+} __packed; /* REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_1 */
/**
* struct iwl_nvm_get_info_rsp - response to get NVM data
@@ -259,7 +259,7 @@ struct iwl_nvm_get_info_rsp {
struct iwl_nvm_get_info_sku mac_sku;
struct iwl_nvm_get_info_phy phy_sku;
struct iwl_nvm_get_info_regulatory regulatory;
-} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_RSP_S_VER_2 */
+} __packed; /* REGULATORY_NVM_GET_INFO_RSP_API_S_VER_3 */
/**
* struct iwl_nvm_access_complete_cmd - NVM_ACCESS commands are completed
@@ -270,22 +270,6 @@ struct iwl_nvm_access_complete_cmd {
} __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */
/**
- * struct iwl_mcc_update_cmd_v1 - Request the device to update geographic
- * regulatory profile according to the given MCC (Mobile Country Code).
- * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain.
- * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the
- * MCC in the cmd response will be the relevant MCC in the NVM.
- * @mcc: given mobile country code
- * @source_id: the source from where we got the MCC, see iwl_mcc_source
- * @reserved: reserved for alignment
- */
-struct iwl_mcc_update_cmd_v1 {
- __le16 mcc;
- u8 source_id;
- u8 reserved;
-} __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_1 */
-
-/**
* struct iwl_mcc_update_cmd - Request the device to update geographic
* regulatory profile according to the given MCC (Mobile Country Code).
* The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain.
@@ -306,7 +290,18 @@ struct iwl_mcc_update_cmd {
} __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_2 */
/**
- * struct iwl_mcc_update_resp_v1 - response to MCC_UPDATE_CMD.
+ * enum iwl_geo_information - geographic information.
+ * @GEO_NO_INFO: no special info for this geo profile.
+ * @GEO_WMM_ETSI_5GHZ_INFO: this geo profile limits the WMM params
+ * for the 5 GHz band.
+ */
+enum iwl_geo_information {
+ GEO_NO_INFO = 0,
+ GEO_WMM_ETSI_5GHZ_INFO = BIT(0),
+};
+
+/**
+ * struct iwl_mcc_update_resp_v3 - response to MCC_UPDATE_CMD.
* Contains the new channel control profile map, if changed, and the new MCC
* (mobile country code).
* The new MCC may be different than what was requested in MCC_UPDATE_CMD.
@@ -314,30 +309,23 @@ struct iwl_mcc_update_cmd {
* @mcc: the new applied MCC
* @cap: capabilities for all channels which matches the MCC
* @source_id: the MCC source, see iwl_mcc_source
- * @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51
- * channels, depending on platform)
+ * @time: time elapsed from the MCC test start (in units of 30 seconds)
+ * @geo_info: geographic specific profile information
+ * see &enum iwl_geo_information.
+ * @n_channels: number of channels in @channels.
* @channels: channel control data map, DWORD for each channel. Only the first
* 16bits are used.
*/
-struct iwl_mcc_update_resp_v1 {
+struct iwl_mcc_update_resp_v3 {
__le32 status;
__le16 mcc;
u8 cap;
u8 source_id;
+ __le16 time;
+ __le16 geo_info;
__le32 n_channels;
__le32 channels[0];
-} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_1 */
-
-/**
- * enum iwl_geo_information - geographic information.
- * @GEO_NO_INFO: no special info for this geo profile.
- * @GEO_WMM_ETSI_5GHZ_INFO: this geo profile limits the WMM params
- * for the 5 GHz band.
- */
-enum iwl_geo_information {
- GEO_NO_INFO = 0,
- GEO_WMM_ETSI_5GHZ_INFO = BIT(0),
-};
+} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_3 */
/**
* struct iwl_mcc_update_resp - response to MCC_UPDATE_CMD.
@@ -347,25 +335,26 @@ enum iwl_geo_information {
* @status: see &enum iwl_mcc_update_status
* @mcc: the new applied MCC
* @cap: capabilities for all channels which matches the MCC
- * @source_id: the MCC source, see iwl_mcc_source
- * @time: time elapsed from the MCC test start (in 30 seconds TU)
+ * @time: time elapsed from the MCC test start (in units of 30 seconds)
* @geo_info: geographic specific profile information
* see &enum iwl_geo_information.
- * @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51
- * channels, depending on platform)
+ * @source_id: the MCC source, see iwl_mcc_source
+ * @reserved: for four-byte alignment.
+ * @n_channels: number of channels in @channels.
* @channels: channel control data map, DWORD for each channel. Only the first
* 16bits are used.
*/
struct iwl_mcc_update_resp {
__le32 status;
__le16 mcc;
- u8 cap;
- u8 source_id;
+ __le16 cap;
__le16 time;
__le16 geo_info;
+ u8 source_id;
+ u8 reserved[3];
__le32 n_channels;
__le32 channels[0];
-} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_3 */
+} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_4 */
/**
* struct iwl_mcc_chub_notif - chub notifies of mcc change
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
index 415b8842b426..0537496b6eb1 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -368,10 +368,10 @@ enum iwl_rx_he_phy {
/* trigger encoded */
IWL_RX_HE_PHY_RU_ALLOC_MASK = 0xfe0000000000ULL,
IWL_RX_HE_PHY_INFO_TYPE_MASK = 0xf000000000000000ULL,
- IWL_RX_HE_PHY_INFO_TYPE_SU = 0x0,
- IWL_RX_HE_PHY_INFO_TYPE_MU = 0x1,
- IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO = 0x2,
- IWL_RX_HE_PHY_INFO_TYPE_TB_EXT_INFO = 0x3,
+ IWL_RX_HE_PHY_INFO_TYPE_SU = 0x0, /* TSF low valid (first DW) */
+ IWL_RX_HE_PHY_INFO_TYPE_MU = 0x1, /* TSF low/high valid (both DWs) */
+ IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO = 0x2, /* same + SIGB-common0/1/2 valid */
+ IWL_RX_HE_PHY_INFO_TYPE_TB = 0x3, /* TSF low/high valid (both DWs) */
/* second dword - MU data */
IWL_RX_HE_PHY_MU_SIGB_COMPRESSION = BIT_ULL(32 + 0),
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
index 310b01e3cce1..18741889ec30 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -596,9 +596,12 @@ enum iwl_umac_scan_general_flags {
* enum iwl_umac_scan_general_flags2 - UMAC scan general flags #2
* @IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL: Whether to send a complete
* notification per channel or not.
+ * @IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER: Whether to allow channel
+ * reorder optimization or not.
*/
enum iwl_umac_scan_general_flags2 {
- IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL = BIT(0),
+ IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL = BIT(0),
+ IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER = BIT(1),
};
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 0dcf1a673478..c16757051f16 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -240,7 +240,7 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt,
if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
return;
- if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) {
+ if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) {
/* Pull RXF1 */
iwl_fwrt_dump_rxf(fwrt, dump_data,
cfg->lmac[0].rxfifo1_size, 0, 0);
@@ -254,7 +254,7 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt,
LMAC2_PRPH_OFFSET, 2);
}
- if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) {
+ if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) {
/* Pull TXF data from LMAC1 */
for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
/* Mark the number of TXF we're pulling now */
@@ -279,7 +279,7 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt,
}
}
- if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
+ if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
/* Pull UMAC internal TXF data from all TXFs */
@@ -573,103 +573,95 @@ static int iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt)
static void iwl_fw_dump_mem(struct iwl_fw_runtime *fwrt,
struct iwl_fw_error_dump_data **dump_data,
- u32 sram_len, u32 sram_ofs, u32 smem_len,
- u32 sram2_len)
+ u32 len, u32 ofs, u32 type)
{
- const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem = fwrt->fw->dbg_mem_tlv;
struct iwl_fw_error_dump_mem *dump_mem;
+
+ if (!len)
+ return;
+
+ (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
+ (*dump_data)->len = cpu_to_le32(len + sizeof(*dump_mem));
+ dump_mem = (void *)(*dump_data)->data;
+ dump_mem->type = cpu_to_le32(type);
+ dump_mem->offset = cpu_to_le32(ofs);
+ iwl_trans_read_mem_bytes(fwrt->trans, ofs, dump_mem->data, len);
+ *dump_data = iwl_fw_error_next_data(*dump_data);
+
+ IWL_DEBUG_INFO(fwrt, "WRT memory dump. Type=%u\n", dump_mem->type);
+}
+
+#define ADD_LEN(len, item_len, const_len) \
+ do {size_t item = item_len; len += (!!item) * const_len + item; } \
+ while (0)
+
+static int iwl_fw_fifo_len(struct iwl_fw_runtime *fwrt,
+ struct iwl_fwrt_shared_mem_cfg *mem_cfg)
+{
+ size_t hdr_len = sizeof(struct iwl_fw_error_dump_data) +
+ sizeof(struct iwl_fw_error_dump_fifo);
+ u32 fifo_len = 0;
int i;
- if (!fwrt->fw->n_dbg_mem_tlv) {
- (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
- (*dump_data)->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
- dump_mem = (void *)(*dump_data)->data;
- dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
- dump_mem->offset = cpu_to_le32(sram_ofs);
- iwl_trans_read_mem_bytes(fwrt->trans, sram_ofs, dump_mem->data,
- sram_len);
- *dump_data = iwl_fw_error_next_data(*dump_data);
- }
+ if (!(fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)))
+ goto dump_txf;
- for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) {
- u32 len = le32_to_cpu(fw_dbg_mem[i].len);
- u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs);
+ /* Count RXF2 size */
+ ADD_LEN(fifo_len, mem_cfg->rxfifo2_size, hdr_len);
- (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
- (*dump_data)->len = cpu_to_le32(len + sizeof(*dump_mem));
- dump_mem = (void *)(*dump_data)->data;
- dump_mem->type = fw_dbg_mem[i].data_type;
- dump_mem->offset = cpu_to_le32(ofs);
+ /* Count RXF1 sizes */
+ for (i = 0; i < mem_cfg->num_lmacs; i++)
+ ADD_LEN(fifo_len, mem_cfg->lmac[i].rxfifo1_size, hdr_len);
- IWL_DEBUG_INFO(fwrt, "WRT memory dump. Type=%u\n",
- dump_mem->type);
+dump_txf:
+ if (!(fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)))
+ goto dump_internal_txf;
- iwl_trans_read_mem_bytes(fwrt->trans, ofs, dump_mem->data, len);
- *dump_data = iwl_fw_error_next_data(*dump_data);
- }
+ /* Count TXF sizes */
+ for (i = 0; i < mem_cfg->num_lmacs; i++) {
+ int j;
- if (smem_len) {
- IWL_DEBUG_INFO(fwrt, "WRT SMEM dump\n");
- (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
- (*dump_data)->len = cpu_to_le32(smem_len + sizeof(*dump_mem));
- dump_mem = (void *)(*dump_data)->data;
- dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SMEM);
- dump_mem->offset = cpu_to_le32(fwrt->trans->cfg->smem_offset);
- iwl_trans_read_mem_bytes(fwrt->trans,
- fwrt->trans->cfg->smem_offset,
- dump_mem->data, smem_len);
- *dump_data = iwl_fw_error_next_data(*dump_data);
+ for (j = 0; j < mem_cfg->num_txfifo_entries; j++)
+ ADD_LEN(fifo_len, mem_cfg->lmac[i].txfifo_size[j],
+ hdr_len);
}
- if (sram2_len) {
- IWL_DEBUG_INFO(fwrt, "WRT SRAM dump\n");
- (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
- (*dump_data)->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
- dump_mem = (void *)(*dump_data)->data;
- dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
- dump_mem->offset = cpu_to_le32(fwrt->trans->cfg->dccm2_offset);
- iwl_trans_read_mem_bytes(fwrt->trans,
- fwrt->trans->cfg->dccm2_offset,
- dump_mem->data, sram2_len);
- *dump_data = iwl_fw_error_next_data(*dump_data);
- }
+dump_internal_txf:
+ if (!((fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF)) &&
+ fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)))
+ goto out;
+
+ for (i = 0; i < ARRAY_SIZE(mem_cfg->internal_txfifo_size); i++)
+ ADD_LEN(fifo_len, mem_cfg->internal_txfifo_size[i], hdr_len);
+
+out:
+ return fifo_len;
}
-void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
+static struct iwl_fw_error_dump_file *
+_iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_dump_ptrs *fw_error_dump)
{
struct iwl_fw_error_dump_file *dump_file;
struct iwl_fw_error_dump_data *dump_data;
struct iwl_fw_error_dump_info *dump_info;
struct iwl_fw_error_dump_smem_cfg *dump_smem_cfg;
struct iwl_fw_error_dump_trigger_desc *dump_trig;
- struct iwl_fw_dump_ptrs *fw_error_dump;
- struct scatterlist *sg_dump_data;
u32 sram_len, sram_ofs;
- const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem = fwrt->fw->dbg_mem_tlv;
+ const struct iwl_fw_dbg_mem_seg_tlv *fw_mem = fwrt->fw->dbg.mem_tlv;
struct iwl_fwrt_shared_mem_cfg *mem_cfg = &fwrt->smem_cfg;
- u32 file_len, fifo_data_len = 0, prph_len = 0, radio_len = 0;
- u32 smem_len = fwrt->fw->n_dbg_mem_tlv ? 0 : fwrt->trans->cfg->smem_len;
- u32 sram2_len = fwrt->fw->n_dbg_mem_tlv ?
+ u32 file_len, fifo_len = 0, prph_len = 0, radio_len = 0;
+ u32 smem_len = fwrt->fw->dbg.n_mem_tlv ? 0 : fwrt->trans->cfg->smem_len;
+ u32 sram2_len = fwrt->fw->dbg.n_mem_tlv ?
0 : fwrt->trans->cfg->dccm2_len;
bool monitor_dump_only = false;
int i;
- IWL_DEBUG_INFO(fwrt, "WRT dump start\n");
-
- /* there's no point in fw dump if the bus is dead */
- if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) {
- IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n");
- goto out;
- }
-
if (fwrt->dump.trig &&
fwrt->dump.trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)
monitor_dump_only = true;
- fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
- if (!fw_error_dump)
- goto out;
-
/* SRAM - include stack CCM if driver knows the values for it */
if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) {
const struct fw_img *img;
@@ -684,112 +676,43 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
/* reading RXF/TXF sizes */
if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
- fifo_data_len = 0;
-
- if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) {
-
- /* Count RXF2 size */
- if (mem_cfg->rxfifo2_size) {
- /* Add header info */
- fifo_data_len +=
- mem_cfg->rxfifo2_size +
- sizeof(*dump_data) +
- sizeof(struct iwl_fw_error_dump_fifo);
- }
-
- /* Count RXF1 sizes */
- for (i = 0; i < mem_cfg->num_lmacs; i++) {
- if (!mem_cfg->lmac[i].rxfifo1_size)
- continue;
-
- /* Add header info */
- fifo_data_len +=
- mem_cfg->lmac[i].rxfifo1_size +
- sizeof(*dump_data) +
- sizeof(struct iwl_fw_error_dump_fifo);
- }
- }
-
- if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) {
- size_t fifo_const_len = sizeof(*dump_data) +
- sizeof(struct iwl_fw_error_dump_fifo);
-
- /* Count TXF sizes */
- for (i = 0; i < mem_cfg->num_lmacs; i++) {
- int j;
-
- for (j = 0; j < mem_cfg->num_txfifo_entries;
- j++) {
- if (!mem_cfg->lmac[i].txfifo_size[j])
- continue;
-
- /* Add header info */
- fifo_data_len +=
- fifo_const_len +
- mem_cfg->lmac[i].txfifo_size[j];
- }
- }
- }
-
- if ((fwrt->fw->dbg_dump_mask &
- BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF)) &&
- fw_has_capa(&fwrt->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
- for (i = 0;
- i < ARRAY_SIZE(mem_cfg->internal_txfifo_size);
- i++) {
- if (!mem_cfg->internal_txfifo_size[i])
- continue;
-
- /* Add header info */
- fifo_data_len +=
- mem_cfg->internal_txfifo_size[i] +
- sizeof(*dump_data) +
- sizeof(struct iwl_fw_error_dump_fifo);
- }
- }
+ fifo_len = iwl_fw_fifo_len(fwrt, mem_cfg);
/* Make room for PRPH registers */
if (!fwrt->trans->cfg->gen2 &&
- fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH))
+ fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH))
prph_len += iwl_fw_get_prph_len(fwrt);
if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 &&
- fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RADIO_REG))
+ fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_RADIO_REG))
radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ;
}
- file_len = sizeof(*dump_file) +
- fifo_data_len +
- prph_len +
- radio_len;
+ file_len = sizeof(*dump_file) + fifo_len + prph_len + radio_len;
- if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO))
+ if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO))
file_len += sizeof(*dump_data) + sizeof(*dump_info);
- if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG))
+ if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG))
file_len += sizeof(*dump_data) + sizeof(*dump_smem_cfg);
- if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
- /* Make room for the SMEM, if it exists */
- if (smem_len)
- file_len += sizeof(*dump_data) + smem_len +
- sizeof(struct iwl_fw_error_dump_mem);
-
- /* Make room for the secondary SRAM, if it exists */
- if (sram2_len)
- file_len += sizeof(*dump_data) + sram2_len +
- sizeof(struct iwl_fw_error_dump_mem);
-
- /* Make room for MEM segments */
- for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) {
- file_len += sizeof(*dump_data) +
- le32_to_cpu(fw_dbg_mem[i].len) +
- sizeof(struct iwl_fw_error_dump_mem);
- }
+ if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
+ size_t hdr_len = sizeof(*dump_data) +
+ sizeof(struct iwl_fw_error_dump_mem);
+
+ /* Dump SRAM only if no mem_tlvs */
+ if (!fwrt->fw->dbg.n_mem_tlv)
+ ADD_LEN(file_len, sram_len, hdr_len);
+
+ /* Make room for all mem types that exist */
+ ADD_LEN(file_len, smem_len, hdr_len);
+ ADD_LEN(file_len, sram2_len, hdr_len);
+
+ for (i = 0; i < fwrt->fw->dbg.n_mem_tlv; i++)
+ ADD_LEN(file_len, le32_to_cpu(fw_mem[i].len), hdr_len);
}
/* Make room for fw's virtual image pages, if it exists */
- if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
+ if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
!fwrt->trans->cfg->gen2 &&
fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
fwrt->fw_paging_db[0].fw_paging_block)
@@ -809,28 +732,21 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
sizeof(*dump_info) + sizeof(*dump_smem_cfg);
}
- if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
+ if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
fwrt->dump.desc)
file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
fwrt->dump.desc->len;
- if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM) &&
- !fwrt->fw->n_dbg_mem_tlv)
- file_len += sizeof(*dump_data) + sram_len +
- sizeof(struct iwl_fw_error_dump_mem);
-
dump_file = vzalloc(file_len);
- if (!dump_file) {
- kfree(fw_error_dump);
- goto out;
- }
+ if (!dump_file)
+ return NULL;
fw_error_dump->fwrt_ptr = dump_file;
dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
dump_data = (void *)dump_file->data;
- if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO)) {
+ if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO)) {
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
dump_data->len = cpu_to_le32(sizeof(*dump_info));
dump_info = (void *)dump_data->data;
@@ -851,7 +767,7 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
dump_data = iwl_fw_error_next_data(dump_data);
}
- if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG)) {
+ if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG)) {
/* Dump shared memory configuration */
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG);
dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg));
@@ -882,13 +798,13 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
}
/* We only dump the FIFOs if the FW is in error state */
- if (fifo_data_len) {
+ if (fifo_len) {
iwl_fw_dump_fifos(fwrt, &dump_data);
if (radio_len)
iwl_read_radio_regs(fwrt, &dump_data);
}
- if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
+ if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
fwrt->dump.desc) {
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
@@ -902,12 +818,32 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
/* In case we only want monitor dump, skip to dump trasport data */
if (monitor_dump_only)
- goto dump_trans_data;
+ goto out;
- if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM))
- iwl_fw_dump_mem(fwrt, &dump_data, sram_len, sram_ofs, smem_len,
- sram2_len);
+ if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
+ const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem =
+ fwrt->fw->dbg.mem_tlv;
+ if (!fwrt->fw->dbg.n_mem_tlv)
+ iwl_fw_dump_mem(fwrt, &dump_data, sram_len, sram_ofs,
+ IWL_FW_ERROR_DUMP_MEM_SRAM);
+
+ for (i = 0; i < fwrt->fw->dbg.n_mem_tlv; i++) {
+ u32 len = le32_to_cpu(fw_dbg_mem[i].len);
+ u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs);
+
+ iwl_fw_dump_mem(fwrt, &dump_data, len, ofs,
+ le32_to_cpu(fw_dbg_mem[i].data_type));
+ }
+
+ iwl_fw_dump_mem(fwrt, &dump_data, smem_len,
+ fwrt->trans->cfg->smem_offset,
+ IWL_FW_ERROR_DUMP_MEM_SMEM);
+
+ iwl_fw_dump_mem(fwrt, &dump_data, sram2_len,
+ fwrt->trans->cfg->dccm2_offset,
+ IWL_FW_ERROR_DUMP_MEM_SRAM);
+ }
if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) {
u32 addr = fwrt->trans->cfg->d3_debug_data_base_addr;
@@ -929,7 +865,7 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
}
/* Dump fw's virtual image */
- if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
+ if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
!fwrt->trans->cfg->gen2 &&
fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
fwrt->fw_paging_db[0].fw_paging_block) {
@@ -965,13 +901,44 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
ARRAY_SIZE(iwl_prph_dump_addr_9000));
}
-dump_trans_data:
+out:
+ dump_file->file_len = cpu_to_le32(file_len);
+ return dump_file;
+}
+
+void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
+{
+ struct iwl_fw_dump_ptrs *fw_error_dump;
+ struct iwl_fw_error_dump_file *dump_file;
+ struct scatterlist *sg_dump_data;
+ u32 file_len;
+
+ IWL_DEBUG_INFO(fwrt, "WRT dump start\n");
+
+ /* there's no point in fw dump if the bus is dead */
+ if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) {
+ IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n");
+ goto out;
+ }
+
+ fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
+ if (!fw_error_dump)
+ goto out;
+
+ dump_file = _iwl_fw_error_dump(fwrt, fw_error_dump);
+ if (!dump_file) {
+ kfree(fw_error_dump);
+ goto out;
+ }
+
fw_error_dump->trans_ptr = iwl_trans_dump_data(fwrt->trans,
fwrt->dump.trig);
+ file_len = le32_to_cpu(dump_file->file_len);
fw_error_dump->fwrt_len = file_len;
- if (fw_error_dump->trans_ptr)
+ if (fw_error_dump->trans_ptr) {
file_len += fw_error_dump->trans_ptr->len;
- dump_file->file_len = cpu_to_le32(file_len);
+ dump_file->file_len = cpu_to_le32(file_len);
+ }
sg_dump_data = alloc_sgtable(file_len);
if (sg_dump_data) {
@@ -1006,15 +973,34 @@ const struct iwl_fw_dump_desc iwl_dump_desc_assert = {
};
IWL_EXPORT_SYMBOL(iwl_dump_desc_assert);
-int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
- const struct iwl_fw_dump_desc *desc,
- const struct iwl_fw_dbg_trigger_tlv *trigger)
+void iwl_fw_alive_error_dump(struct iwl_fw_runtime *fwrt)
{
- unsigned int delay = 0;
+ struct iwl_fw_dump_desc *iwl_dump_desc_no_alive =
+ kmalloc(sizeof(*iwl_dump_desc_no_alive), GFP_KERNEL);
- if (trigger)
- delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
+ if (!iwl_dump_desc_no_alive)
+ return;
+
+ iwl_dump_desc_no_alive->trig_desc.type =
+ cpu_to_le32(FW_DBG_TRIGGER_NO_ALIVE);
+ iwl_dump_desc_no_alive->len = 0;
+
+ if (WARN_ON(fwrt->dump.desc))
+ iwl_fw_free_dump_desc(fwrt);
+
+ IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n",
+ FW_DBG_TRIGGER_NO_ALIVE);
+ fwrt->dump.desc = iwl_dump_desc_no_alive;
+ iwl_fw_error_dump(fwrt);
+ clear_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &fwrt->status);
+}
+IWL_EXPORT_SYMBOL(iwl_fw_alive_error_dump);
+
+int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
+ const struct iwl_fw_dump_desc *desc, void *trigger,
+ unsigned int delay)
+{
/*
* If the loading of the FW completed successfully, the next step is to
* get the SMEM config data. Thus, if fwrt->smem_cfg.num_lmacs is non
@@ -1031,7 +1017,8 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
fwrt->smem_cfg.num_lmacs)
return -EIO;
- if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status))
+ if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status) ||
+ test_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &fwrt->status))
return -EBUSY;
if (WARN_ON(fwrt->dump.desc))
@@ -1052,25 +1039,38 @@ IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_desc);
int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
enum iwl_fw_dbg_trigger trig,
const char *str, size_t len,
- const struct iwl_fw_dbg_trigger_tlv *trigger)
+ struct iwl_fw_dbg_trigger_tlv *trigger)
{
struct iwl_fw_dump_desc *desc;
+ unsigned int delay = 0;
- if (trigger && trigger->flags & IWL_FW_DBG_FORCE_RESTART) {
- IWL_WARN(fwrt, "Force restart: trigger %d fired.\n", trig);
- iwl_force_nmi(fwrt->trans);
- return 0;
+ if (trigger) {
+ u16 occurrences = le16_to_cpu(trigger->occurrences) - 1;
+
+ if (!le16_to_cpu(trigger->occurrences))
+ return 0;
+
+ if (trigger->flags & IWL_FW_DBG_FORCE_RESTART) {
+ IWL_WARN(fwrt, "Force restart: trigger %d fired.\n",
+ trig);
+ iwl_force_nmi(fwrt->trans);
+ return 0;
+ }
+
+ trigger->occurrences = cpu_to_le16(occurrences);
+ delay = le16_to_cpu(trigger->trig_dis_ms);
}
desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
if (!desc)
return -ENOMEM;
+
desc->len = len;
desc->trig_desc.type = cpu_to_le32(trig);
memcpy(desc->trig_desc.data, str, len);
- return iwl_fw_dbg_collect_desc(fwrt, desc, trigger);
+ return iwl_fw_dbg_collect_desc(fwrt, desc, trigger, delay);
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect);
@@ -1078,13 +1078,9 @@ int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dbg_trigger_tlv *trigger,
const char *fmt, ...)
{
- u16 occurrences = le16_to_cpu(trigger->occurrences);
int ret, len = 0;
char buf[64];
- if (!occurrences)
- return 0;
-
if (fmt) {
va_list ap;
@@ -1107,7 +1103,6 @@ int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
if (ret)
return ret;
- trigger->occurrences = cpu_to_le16(occurrences - 1);
return 0;
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_trig);
@@ -1118,17 +1113,17 @@ int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id)
int ret;
int i;
- if (WARN_ONCE(conf_id >= ARRAY_SIZE(fwrt->fw->dbg_conf_tlv),
+ if (WARN_ONCE(conf_id >= ARRAY_SIZE(fwrt->fw->dbg.conf_tlv),
"Invalid configuration %d\n", conf_id))
return -EINVAL;
/* EARLY START - firmware's configuration is hard coded */
- if ((!fwrt->fw->dbg_conf_tlv[conf_id] ||
- !fwrt->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) &&
+ if ((!fwrt->fw->dbg.conf_tlv[conf_id] ||
+ !fwrt->fw->dbg.conf_tlv[conf_id]->num_of_hcmds) &&
conf_id == FW_DBG_START_FROM_ALIVE)
return 0;
- if (!fwrt->fw->dbg_conf_tlv[conf_id])
+ if (!fwrt->fw->dbg.conf_tlv[conf_id])
return -EINVAL;
if (fwrt->dump.conf != FW_DBG_INVALID)
@@ -1136,8 +1131,8 @@ int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id)
fwrt->dump.conf);
/* Send all HCMDs for configuring the FW debug */
- ptr = (void *)&fwrt->fw->dbg_conf_tlv[conf_id]->hcmd;
- for (i = 0; i < fwrt->fw->dbg_conf_tlv[conf_id]->num_of_hcmds; i++) {
+ ptr = (void *)&fwrt->fw->dbg.conf_tlv[conf_id]->hcmd;
+ for (i = 0; i < fwrt->fw->dbg.conf_tlv[conf_id]->num_of_hcmds; i++) {
struct iwl_fw_dbg_conf_hcmd *cmd = (void *)ptr;
struct iwl_host_cmd hcmd = {
.id = cmd->id,
@@ -1159,14 +1154,14 @@ int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id)
}
IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf);
-void iwl_fw_error_dump_wk(struct work_struct *work)
+/* this function assumes dump_start was called beforehand and dump_end will be
+ * called afterwards
+ */
+void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt)
{
- struct iwl_fw_runtime *fwrt =
- container_of(work, struct iwl_fw_runtime, dump.wk.work);
struct iwl_fw_dbg_params params = {0};
- if (fwrt->ops && fwrt->ops->dump_start &&
- fwrt->ops->dump_start(fwrt->ops_ctx))
+ if (!test_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status))
return;
if (fwrt->ops && fwrt->ops->fw_running &&
@@ -1174,7 +1169,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
IWL_ERR(fwrt, "Firmware not running - cannot dump error\n");
iwl_fw_free_dump_desc(fwrt);
clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
- goto out;
+ return;
}
iwl_fw_dbg_stop_recording(fwrt, &params);
@@ -1183,12 +1178,25 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
/* start recording again if the firmware is not crashed */
if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) &&
- fwrt->fw->dbg_dest_tlv) {
+ fwrt->fw->dbg.dest_tlv) {
/* wait before we collect the data till the DBGC stop */
udelay(500);
iwl_fw_dbg_restart_recording(fwrt, &params);
}
-out:
+}
+IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_sync);
+
+void iwl_fw_error_dump_wk(struct work_struct *work)
+{
+ struct iwl_fw_runtime *fwrt =
+ container_of(work, struct iwl_fw_runtime, dump.wk.work);
+
+ if (fwrt->ops && fwrt->ops->dump_start &&
+ fwrt->ops->dump_start(fwrt->ops_ctx))
+ return;
+
+ iwl_fw_dbg_collect_sync(fwrt);
+
if (fwrt->ops && fwrt->ops->dump_end)
fwrt->ops->dump_end(fwrt->ops_ctx);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index 3c89230fae6a..6f8d3256f7b0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -107,25 +107,25 @@ static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt)
void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt);
int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
const struct iwl_fw_dump_desc *desc,
- const struct iwl_fw_dbg_trigger_tlv *trigger);
+ void *trigger, unsigned int delay);
int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
enum iwl_fw_dbg_trigger trig,
const char *str, size_t len,
- const struct iwl_fw_dbg_trigger_tlv *trigger);
+ struct iwl_fw_dbg_trigger_tlv *trigger);
int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dbg_trigger_tlv *trigger,
const char *fmt, ...) __printf(3, 4);
int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 id);
#define iwl_fw_dbg_trigger_enabled(fw, id) ({ \
- void *__dbg_trigger = (fw)->dbg_trigger_tlv[(id)]; \
+ void *__dbg_trigger = (fw)->dbg.trigger_tlv[(id)]; \
unlikely(__dbg_trigger); \
})
static inline struct iwl_fw_dbg_trigger_tlv*
_iwl_fw_dbg_get_trigger(const struct iwl_fw *fw, enum iwl_fw_dbg_trigger id)
{
- return fw->dbg_trigger_tlv[id];
+ return fw->dbg.trigger_tlv[id];
}
#define iwl_fw_dbg_get_trigger(fw, id) ({ \
@@ -154,12 +154,9 @@ iwl_fw_dbg_trigger_stop_conf_match(struct iwl_fw_runtime *fwrt,
}
static inline bool
-iwl_fw_dbg_no_trig_window(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_dbg_trigger_tlv *trig)
+iwl_fw_dbg_no_trig_window(struct iwl_fw_runtime *fwrt, u32 id, u32 dis_ms)
{
- unsigned long wind_jiff =
- msecs_to_jiffies(le16_to_cpu(trig->trig_dis_ms));
- u32 id = le32_to_cpu(trig->id);
+ unsigned long wind_jiff = msecs_to_jiffies(dis_ms);
/* If this is the first event checked, jump to update start ts */
if (fwrt->dump.non_collect_ts_start[id] &&
@@ -179,7 +176,8 @@ iwl_fw_dbg_trigger_check_stop(struct iwl_fw_runtime *fwrt,
if (wdev && !iwl_fw_dbg_trigger_vif_match(trig, wdev))
return false;
- if (iwl_fw_dbg_no_trig_window(fwrt, trig)) {
+ if (iwl_fw_dbg_no_trig_window(fwrt, le32_to_cpu(trig->id),
+ le16_to_cpu(trig->trig_dis_ms))) {
IWL_WARN(fwrt, "Trigger %d occurred while no-collect window.\n",
trig->id);
return false;
@@ -188,6 +186,30 @@ iwl_fw_dbg_trigger_check_stop(struct iwl_fw_runtime *fwrt,
return iwl_fw_dbg_trigger_stop_conf_match(fwrt, trig);
}
+static inline struct iwl_fw_dbg_trigger_tlv*
+_iwl_fw_dbg_trigger_on(struct iwl_fw_runtime *fwrt,
+ struct wireless_dev *wdev,
+ const enum iwl_fw_dbg_trigger id)
+{
+ struct iwl_fw_dbg_trigger_tlv *trig;
+
+ if (!iwl_fw_dbg_trigger_enabled(fwrt->fw, id))
+ return NULL;
+
+ trig = _iwl_fw_dbg_get_trigger(fwrt->fw, id);
+
+ if (!iwl_fw_dbg_trigger_check_stop(fwrt, wdev, trig))
+ return NULL;
+
+ return trig;
+}
+
+#define iwl_fw_dbg_trigger_on(fwrt, wdev, id) ({ \
+ BUILD_BUG_ON(!__builtin_constant_p(id)); \
+ BUILD_BUG_ON((id) >= FW_DBG_TRIGGER_MAX); \
+ _iwl_fw_dbg_trigger_on((fwrt), (wdev), (id)); \
+})
+
static inline void
_iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt,
struct wireless_dev *wdev,
@@ -293,7 +315,7 @@ static inline bool iwl_fw_dbg_is_d3_debug_enabled(struct iwl_fw_runtime *fwrt)
return fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_D3_DEBUG) &&
fwrt->trans->cfg->d3_debug_data_length &&
- fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_D3_DEBUG_DATA);
+ fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_D3_DEBUG_DATA);
}
void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt);
@@ -344,4 +366,6 @@ static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {}
#endif /* CONFIG_IWLWIFI_DEBUGFS */
+void iwl_fw_alive_error_dump(struct iwl_fw_runtime *fwrt);
+void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt);
#endif /* __iwl_fw_dbg_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
index 1049bdfe1e69..3e120dd47305 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
@@ -258,11 +258,75 @@ static ssize_t iwl_dbgfs_timestamp_marker_read(struct iwl_fw_runtime *fwrt,
FWRT_DEBUGFS_READ_WRITE_FILE_OPS(timestamp_marker, 16);
+struct hcmd_write_data {
+ __be32 cmd_id;
+ __be32 flags;
+ __be16 length;
+ u8 data[0];
+} __packed;
+
+static ssize_t iwl_dbgfs_send_hcmd_write(struct iwl_fw_runtime *fwrt, char *buf,
+ size_t count)
+{
+ size_t header_size = (sizeof(u32) * 2 + sizeof(u16)) * 2;
+ size_t data_size = (count - 1) / 2;
+ int ret;
+ struct hcmd_write_data *data;
+ struct iwl_host_cmd hcmd = {
+ .len = { 0, },
+ .data = { NULL, },
+ };
+
+ if (fwrt->ops && fwrt->ops->fw_running &&
+ !fwrt->ops->fw_running(fwrt->ops_ctx))
+ return -EIO;
+
+ if (count < header_size + 1 || count > 1024 * 4)
+ return -EINVAL;
+
+ data = kmalloc(data_size, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ ret = hex2bin((u8 *)data, buf, data_size);
+ if (ret)
+ goto out;
+
+ hcmd.id = be32_to_cpu(data->cmd_id);
+ hcmd.flags = be32_to_cpu(data->flags);
+ hcmd.len[0] = be16_to_cpu(data->length);
+ hcmd.data[0] = data->data;
+
+ if (count != header_size + hcmd.len[0] * 2 + 1) {
+ IWL_ERR(fwrt,
+ "host command data size does not match header length\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (fwrt->ops && fwrt->ops->send_hcmd)
+ ret = fwrt->ops->send_hcmd(fwrt->ops_ctx, &hcmd);
+ else
+ ret = -EPERM;
+
+ if (ret < 0)
+ goto out;
+
+ if (hcmd.flags & CMD_WANT_SKB)
+ iwl_free_resp(&hcmd);
+out:
+ kfree(data);
+ return ret ?: count;
+}
+
+FWRT_DEBUGFS_WRITE_FILE_OPS(send_hcmd, 512);
+
int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
struct dentry *dbgfs_dir)
{
INIT_DELAYED_WORK(&fwrt->timestamp.wk, iwl_fw_timestamp_marker_wk);
FWRT_DEBUGFS_ADD_FILE(timestamp_marker, dbgfs_dir, 0200);
+ FWRT_DEBUGFS_ADD_FILE(send_hcmd, dbgfs_dir, 0200);
return 0;
err:
IWL_ERR(fwrt, "Can't create the fwrt debugfs directory\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
index 6d3ef331b7d5..6fede174c664 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -328,6 +328,7 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data)
* @FW_DBG_TDLS: trigger log collection upon TDLS related events.
* @FW_DBG_TRIGGER_TX_STATUS: trigger log collection upon tx status when
* the firmware sends a tx reply.
+ * @FW_DBG_TRIGGER_NO_ALIVE: trigger log collection if alive flow fails
*/
enum iwl_fw_dbg_trigger {
FW_DBG_TRIGGER_INVALID = 0,
@@ -345,6 +346,7 @@ enum iwl_fw_dbg_trigger {
FW_DBG_TRIGGER_TX_LATENCY,
FW_DBG_TRIGGER_TDLS,
FW_DBG_TRIGGER_TX_STATUS,
+ FW_DBG_TRIGGER_NO_ALIVE,
/* must be last */
FW_DBG_TRIGGER_MAX,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 63e277b07b8a..6005a41c53d1 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -337,7 +337,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* antenna the beacon should be transmitted
* @IWL_UCODE_TLV_CAPA_BEACON_STORING: firmware will store the latest beacon
* from AP and will send it upon d0i3 exit.
- * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2: support LAR API V2
+ * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V3: support LAR API V3
* @IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW: firmware responsible for CT-kill
* @IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT: supports temperature
* thresholds reporting
@@ -352,6 +352,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* power reduction.
* @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload
* @IWL_UCODE_TLV_CAPA_D3_DEBUG: supports debug recording during D3
+ * @IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT: MCC response supports 11ax
+ * capability.
*
* @NUM_IWL_UCODE_TLV_CAPA: number of bits used
*/
@@ -392,7 +394,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD = (__force iwl_ucode_tlv_capa_t)70,
IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION = (__force iwl_ucode_tlv_capa_t)71,
IWL_UCODE_TLV_CAPA_BEACON_STORING = (__force iwl_ucode_tlv_capa_t)72,
- IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2 = (__force iwl_ucode_tlv_capa_t)73,
+ IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V3 = (__force iwl_ucode_tlv_capa_t)73,
IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW = (__force iwl_ucode_tlv_capa_t)74,
IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT = (__force iwl_ucode_tlv_capa_t)75,
IWL_UCODE_TLV_CAPA_CTDP_SUPPORT = (__force iwl_ucode_tlv_capa_t)76,
@@ -402,6 +404,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_TX_POWER_ACK = (__force iwl_ucode_tlv_capa_t)84,
IWL_UCODE_TLV_CAPA_D3_DEBUG = (__force iwl_ucode_tlv_capa_t)87,
IWL_UCODE_TLV_CAPA_LED_CMD_SUPPORT = (__force iwl_ucode_tlv_capa_t)88,
+ IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT = (__force iwl_ucode_tlv_capa_t)89,
IWL_UCODE_TLV_CAPA_MLME_OFFLOAD = (__force iwl_ucode_tlv_capa_t)96,
NUM_IWL_UCODE_TLV_CAPA
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
index 9cc8fe8908ac..54dbbd998abf 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -198,6 +198,29 @@ enum iwl_fw_type {
};
/**
+ * struct iwl_fw_dbg - debug data
+ *
+ * @dest_tlv: points to debug destination TLV (typically SRAM or DRAM)
+ * @n_dest_reg: num of reg_ops in dest_tlv
+ * @conf_tlv: array of pointers to configuration HCMDs
+ * @trigger_tlv: array of pointers to triggers TLVs
+ * @trigger_tlv_len: lengths of the @trigger_tlv entries
+ * @mem_tlv: Runtime addresses to dump
+ * @n_mem_tlv: number of runtime addresses
+ * @dump_mask: bitmask of dump regions
+*/
+struct iwl_fw_dbg {
+ struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv;
+ u8 n_dest_reg;
+ struct iwl_fw_dbg_conf_tlv *conf_tlv[FW_DBG_CONF_MAX];
+ struct iwl_fw_dbg_trigger_tlv *trigger_tlv[FW_DBG_TRIGGER_MAX];
+ size_t trigger_tlv_len[FW_DBG_TRIGGER_MAX];
+ struct iwl_fw_dbg_mem_seg_tlv *mem_tlv;
+ size_t n_mem_tlv;
+ u32 dump_mask;
+};
+
+/**
* struct iwl_fw - variables associated with the firmware
*
* @ucode_ver: ucode version from the ucode file
@@ -217,12 +240,6 @@ enum iwl_fw_type {
* @cipher_scheme: optional external cipher scheme.
* @human_readable: human readable version
* we get the ALIVE from the uCode
- * @dbg_dest_tlv: points to the destination TLV for debug
- * @dbg_conf_tlv: array of pointers to configuration TLVs for debug
- * @dbg_conf_tlv_len: lengths of the @dbg_conf_tlv entries
- * @dbg_trigger_tlv: array of pointers to triggers TLVs
- * @dbg_trigger_tlv_len: lengths of the @dbg_trigger_tlv entries
- * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
*/
struct iwl_fw {
u32 ucode_ver;
@@ -250,15 +267,7 @@ struct iwl_fw {
struct iwl_fw_cipher_scheme cs[IWL_UCODE_MAX_CS];
u8 human_readable[FW_VER_HUMAN_READABLE_SZ];
- struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv;
- struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
- size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
- struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
- struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv;
- size_t n_dbg_mem_tlv;
- size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
- u8 dbg_dest_reg_num;
- u32 dbg_dump_mask;
+ struct iwl_fw_dbg dbg;
};
static inline const char *get_fw_dbg_mode_string(int mode)
@@ -280,7 +289,7 @@ static inline const char *get_fw_dbg_mode_string(int mode)
static inline bool
iwl_fw_dbg_conf_usniffer(const struct iwl_fw *fw, u8 id)
{
- const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg_conf_tlv[id];
+ const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg.conf_tlv[id];
if (!conf_tlv)
return false;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index 9ed5819defaf..6b95d0e75889 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -71,6 +71,7 @@ struct iwl_fw_runtime_ops {
int (*dump_start)(void *ctx);
void (*dump_end)(void *ctx);
bool (*fw_running)(void *ctx);
+ int (*send_hcmd)(void *ctx, struct iwl_host_cmd *host_cmd);
};
#define MAX_NUM_LMAC 2
@@ -88,6 +89,7 @@ struct iwl_fwrt_shared_mem_cfg {
enum iwl_fw_runtime_status {
IWL_FWRT_STATUS_DUMPING = 0,
+ IWL_FWRT_STATUS_WAIT_ALIVE,
};
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
index 2cc6c019d0e1..420e6d745f77 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
@@ -30,38 +30,20 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM iwlwifi_data
-TRACE_EVENT(iwlwifi_dev_tx_data,
- TP_PROTO(const struct device *dev,
- struct sk_buff *skb, u8 hdr_len),
- TP_ARGS(dev, skb, hdr_len),
+TRACE_EVENT(iwlwifi_dev_tx_tb,
+ TP_PROTO(const struct device *dev, struct sk_buff *skb,
+ u8 *data_src, size_t data_len),
+ TP_ARGS(dev, skb, data_src, data_len),
TP_STRUCT__entry(
DEV_ENTRY
__dynamic_array(u8, data,
- iwl_trace_data(skb) ? skb->len - hdr_len : 0)
+ iwl_trace_data(skb) ? data_len : 0)
),
TP_fast_assign(
DEV_ASSIGN;
if (iwl_trace_data(skb))
- skb_copy_bits(skb, hdr_len,
- __get_dynamic_array(data),
- skb->len - hdr_len);
- ),
- TP_printk("[%s] TX frame data", __get_str(dev))
-);
-
-TRACE_EVENT(iwlwifi_dev_tx_tso_chunk,
- TP_PROTO(const struct device *dev,
- u8 *data_src, size_t data_len),
- TP_ARGS(dev, data_src, data_len),
- TP_STRUCT__entry(
- DEV_ENTRY
-
- __dynamic_array(u8, data, data_len)
- ),
- TP_fast_assign(
- DEV_ASSIGN;
- memcpy(__get_dynamic_array(data), data_src, data_len);
+ memcpy(__get_dynamic_array(data), data_src, data_len);
),
TP_printk("[%s] TX frame data", __get_str(dev))
);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index d3a60d1aacb5..ba41d23b4211 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -168,12 +168,12 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
{
int i;
- kfree(drv->fw.dbg_dest_tlv);
- for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++)
- kfree(drv->fw.dbg_conf_tlv[i]);
- for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++)
- kfree(drv->fw.dbg_trigger_tlv[i]);
- kfree(drv->fw.dbg_mem_tlv);
+ kfree(drv->fw.dbg.dest_tlv);
+ for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.conf_tlv); i++)
+ kfree(drv->fw.dbg.conf_tlv[i]);
+ for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.trigger_tlv); i++)
+ kfree(drv->fw.dbg.trigger_tlv[i]);
+ kfree(drv->fw.dbg.mem_tlv);
kfree(drv->fw.iml);
for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
@@ -303,7 +303,7 @@ struct iwl_firmware_pieces {
struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv;
- size_t n_dbg_mem_tlv;
+ size_t n_mem_tlv;
};
/*
@@ -936,7 +936,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
IWL_INFO(drv, "Found debug destination: %s\n",
get_fw_dbg_mode_string(mon_mode));
- drv->fw.dbg_dest_reg_num = (dest_v1) ?
+ drv->fw.dbg.n_dest_reg = (dest_v1) ?
tlv_len -
offsetof(struct iwl_fw_dbg_dest_tlv_v1,
reg_ops) :
@@ -944,8 +944,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
offsetof(struct iwl_fw_dbg_dest_tlv,
reg_ops);
- drv->fw.dbg_dest_reg_num /=
- sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]);
+ drv->fw.dbg.n_dest_reg /=
+ sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]);
break;
}
@@ -959,7 +959,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
break;
}
- if (conf->id >= ARRAY_SIZE(drv->fw.dbg_conf_tlv)) {
+ if (conf->id >= ARRAY_SIZE(drv->fw.dbg.conf_tlv)) {
IWL_ERR(drv,
"Skip unknown configuration: %d\n",
conf->id);
@@ -988,7 +988,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
(void *)tlv_data;
u32 trigger_id = le32_to_cpu(trigger->id);
- if (trigger_id >= ARRAY_SIZE(drv->fw.dbg_trigger_tlv)) {
+ if (trigger_id >= ARRAY_SIZE(drv->fw.dbg.trigger_tlv)) {
IWL_ERR(drv,
"Skip unknown trigger: %u\n",
trigger->id);
@@ -1015,7 +1015,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
break;
}
- drv->fw.dbg_dump_mask =
+ drv->fw.dbg.dump_mask =
le32_to_cpup((__le32 *)tlv_data);
break;
}
@@ -1070,13 +1070,13 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
dbg_mem->data_type);
size = sizeof(*pieces->dbg_mem_tlv) *
- (pieces->n_dbg_mem_tlv + 1);
+ (pieces->n_mem_tlv + 1);
n = krealloc(pieces->dbg_mem_tlv, size, GFP_KERNEL);
if (!n)
return -ENOMEM;
pieces->dbg_mem_tlv = n;
- pieces->dbg_mem_tlv[pieces->n_dbg_mem_tlv] = *dbg_mem;
- pieces->n_dbg_mem_tlv++;
+ pieces->dbg_mem_tlv[pieces->n_mem_tlv] = *dbg_mem;
+ pieces->n_mem_tlv++;
break;
}
case IWL_UCODE_TLV_IML: {
@@ -1256,7 +1256,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS;
/* dump all fw memory areas by default except d3 debug data */
- fw->dbg_dump_mask = 0xfffdffff;
+ fw->dbg.dump_mask = 0xfffdffff;
pieces = kzalloc(sizeof(*pieces), GFP_KERNEL);
if (!pieces)
@@ -1323,21 +1323,21 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
goto out_free_fw;
if (pieces->dbg_dest_tlv_init) {
- size_t dbg_dest_size = sizeof(*drv->fw.dbg_dest_tlv) +
- sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]) *
- drv->fw.dbg_dest_reg_num;
+ size_t dbg_dest_size = sizeof(*drv->fw.dbg.dest_tlv) +
+ sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]) *
+ drv->fw.dbg.n_dest_reg;
- drv->fw.dbg_dest_tlv = kmalloc(dbg_dest_size, GFP_KERNEL);
+ drv->fw.dbg.dest_tlv = kmalloc(dbg_dest_size, GFP_KERNEL);
- if (!drv->fw.dbg_dest_tlv)
+ if (!drv->fw.dbg.dest_tlv)
goto out_free_fw;
if (*pieces->dbg_dest_ver == 0) {
- memcpy(drv->fw.dbg_dest_tlv, pieces->dbg_dest_tlv_v1,
+ memcpy(drv->fw.dbg.dest_tlv, pieces->dbg_dest_tlv_v1,
dbg_dest_size);
} else {
struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv =
- drv->fw.dbg_dest_tlv;
+ drv->fw.dbg.dest_tlv;
dest_tlv->version = pieces->dbg_dest_tlv->version;
dest_tlv->monitor_mode =
@@ -1352,8 +1352,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
pieces->dbg_dest_tlv->base_shift;
memcpy(dest_tlv->reg_ops,
pieces->dbg_dest_tlv->reg_ops,
- sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]) *
- drv->fw.dbg_dest_reg_num);
+ sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]) *
+ drv->fw.dbg.n_dest_reg);
/* In version 1 of the destination tlv, which is
* relevant for internal buffer exclusively,
@@ -1369,15 +1369,13 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
}
}
- for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++) {
+ for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.conf_tlv); i++) {
if (pieces->dbg_conf_tlv[i]) {
- drv->fw.dbg_conf_tlv_len[i] =
- pieces->dbg_conf_tlv_len[i];
- drv->fw.dbg_conf_tlv[i] =
+ drv->fw.dbg.conf_tlv[i] =
kmemdup(pieces->dbg_conf_tlv[i],
- drv->fw.dbg_conf_tlv_len[i],
+ pieces->dbg_conf_tlv_len[i],
GFP_KERNEL);
- if (!drv->fw.dbg_conf_tlv[i])
+ if (!pieces->dbg_conf_tlv_len[i])
goto out_free_fw;
}
}
@@ -1404,7 +1402,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
trigger_tlv_sz[FW_DBG_TRIGGER_TDLS] =
sizeof(struct iwl_fw_dbg_trigger_tdls);
- for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++) {
+ for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.trigger_tlv); i++) {
if (pieces->dbg_trigger_tlv[i]) {
/*
* If the trigger isn't long enough, WARN and exit.
@@ -1417,22 +1415,22 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
(trigger_tlv_sz[i] +
sizeof(struct iwl_fw_dbg_trigger_tlv))))
goto out_free_fw;
- drv->fw.dbg_trigger_tlv_len[i] =
+ drv->fw.dbg.trigger_tlv_len[i] =
pieces->dbg_trigger_tlv_len[i];
- drv->fw.dbg_trigger_tlv[i] =
+ drv->fw.dbg.trigger_tlv[i] =
kmemdup(pieces->dbg_trigger_tlv[i],
- drv->fw.dbg_trigger_tlv_len[i],
+ drv->fw.dbg.trigger_tlv_len[i],
GFP_KERNEL);
- if (!drv->fw.dbg_trigger_tlv[i])
+ if (!drv->fw.dbg.trigger_tlv[i])
goto out_free_fw;
}
}
/* Now that we can no longer fail, copy information */
- drv->fw.dbg_mem_tlv = pieces->dbg_mem_tlv;
+ drv->fw.dbg.mem_tlv = pieces->dbg_mem_tlv;
pieces->dbg_mem_tlv = NULL;
- drv->fw.n_dbg_mem_tlv = pieces->n_dbg_mem_tlv;
+ drv->fw.dbg.n_mem_tlv = pieces->n_mem_tlv;
/*
* The (size - 16) / 12 formula is based on the information recorded
@@ -1473,6 +1471,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
break;
default:
WARN(1, "Invalid fw type %d\n", fw->type);
+ /* fall through */
case IWL_FW_MVM:
op = &iwlwifi_opmode_table[MVM_OP_MODE];
break;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index ec300d388694..96e101d79662 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -1335,6 +1335,7 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
bool lar_fw_supported = !iwlwifi_mod_params.lar_disable &&
fw_has_capa(&fw->ucode_capa,
IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
+ bool empty_otp;
u32 mac_flags;
u32 sbands_flags = 0;
@@ -1350,7 +1351,9 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
}
rsp = (void *)hcmd.resp_pkt->data;
- if (le32_to_cpu(rsp->general.flags) & NVM_GENERAL_FLAGS_EMPTY_OTP)
+ empty_otp = !!(le32_to_cpu(rsp->general.flags) &
+ NVM_GENERAL_FLAGS_EMPTY_OTP);
+ if (empty_otp)
IWL_INFO(trans, "OTP is empty\n");
nvm = kzalloc(sizeof(*nvm) +
@@ -1374,6 +1377,11 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
/* Initialize general data */
nvm->nvm_version = le16_to_cpu(rsp->general.nvm_version);
+ nvm->n_hw_addrs = rsp->general.n_hw_addrs;
+ if (nvm->n_hw_addrs == 0)
+ IWL_WARN(trans,
+ "Firmware declares no reserved mac addresses. OTP is empty: %d\n",
+ empty_otp);
/* Initialize MAC sku data */
mac_flags = le32_to_cpu(rsp->mac_sku.mac_sku_flags);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 6c636b2a6b43..26b3c73051ca 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -725,7 +725,7 @@ struct iwl_dram_data {
* @dbg_dest_tlv: points to the destination TLV for debug
* @dbg_conf_tlv: array of pointers to configuration TLVs for debug
* @dbg_trigger_tlv: array of pointers to triggers TLVs for debug
- * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
+ * @dbg_n_dest_reg: num of reg_ops in %dbg_dest_tlv
* @num_blocks: number of blocks in fw_mon
* @fw_mon: address of the buffers for firmware monitor
* @system_pm_mode: the system-wide power management mode in use.
@@ -778,7 +778,7 @@ struct iwl_trans {
const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
u32 dbg_dump_mask;
- u8 dbg_dest_reg_num;
+ u8 dbg_n_dest_reg;
int num_blocks;
struct iwl_dram_data fw_mon[IWL_MAX_DEBUG_ALLOCATIONS];
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 210be26aadaa..843f3b41b72e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -722,8 +722,10 @@ int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
{
struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
+ bool unified = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
struct wowlan_key_data key_data = {
- .configure_keys = !d0i3,
+ .configure_keys = !d0i3 && !unified,
.use_rsc_tsc = false,
.tkip = &tkip_cmd,
.use_tkip = false,
@@ -1636,32 +1638,10 @@ out_free_resp:
}
static struct iwl_wowlan_status *
-iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm)
{
- u32 base = mvm->error_event_table[0];
- struct error_table_start {
- /* cf. struct iwl_error_event_table */
- u32 valid;
- u32 error_id;
- } err_info;
int ret;
- iwl_trans_read_mem_bytes(mvm->trans, base,
- &err_info, sizeof(err_info));
-
- if (err_info.valid) {
- IWL_INFO(mvm, "error table is valid (%d) with error (%d)\n",
- err_info.valid, err_info.error_id);
- if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
- struct cfg80211_wowlan_wakeup wakeup = {
- .rfkill_release = true,
- };
- ieee80211_report_wowlan_wakeup(vif, &wakeup,
- GFP_KERNEL);
- }
- return ERR_PTR(-EIO);
- }
-
/* only for tracing for now */
ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL);
if (ret)
@@ -1680,7 +1660,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
bool keep;
struct iwl_mvm_sta *mvm_ap_sta;
- fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
+ fw_status = iwl_mvm_get_wakeup_status(mvm);
if (IS_ERR_OR_NULL(fw_status))
goto out_unlock;
@@ -1805,7 +1785,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
u32 reasons = 0;
int i, j, n_matches, ret;
- fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
+ fw_status = iwl_mvm_get_wakeup_status(mvm);
if (!IS_ERR_OR_NULL(fw_status)) {
reasons = le32_to_cpu(fw_status->wakeup_reasons);
kfree(fw_status);
@@ -1918,6 +1898,29 @@ static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
ieee80211_resume_disconnect(vif);
}
+static int iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ u32 base = mvm->error_event_table[0];
+ struct error_table_start {
+ /* cf. struct iwl_error_event_table */
+ u32 valid;
+ u32 error_id;
+ } err_info;
+
+ iwl_trans_read_mem_bytes(mvm->trans, base,
+ &err_info, sizeof(err_info));
+
+ if (err_info.valid &&
+ err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
+ struct cfg80211_wowlan_wakeup wakeup = {
+ .rfkill_release = true,
+ };
+ ieee80211_report_wowlan_wakeup(vif, &wakeup, GFP_KERNEL);
+ }
+ return err_info.valid;
+}
+
static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
{
struct ieee80211_vif *vif = NULL;
@@ -1949,6 +1952,15 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
/* query SRAM first in case we want event logging */
iwl_mvm_read_d3_sram(mvm);
+ if (iwl_mvm_check_rt_status(mvm, vif)) {
+ set_bit(STATUS_FW_ERROR, &mvm->trans->status);
+ iwl_mvm_dump_nic_error_log(mvm);
+ iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
+ NULL, 0);
+ ret = 1;
+ goto err;
+ }
+
if (d0i3_first) {
ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
if (ret < 0) {
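
The new iwl_mvm_check_rt_status() above factors the error-table peek out of the wakeup-status query: read {valid, error_id} from device memory, report an rfkill wakeup if the error is the WoWLAN rfkill indicator, and let the resume path treat any valid error as a firmware failure. A hedged user-space sketch of that decision flow; the structure layout mirrors the snippet above, while the constant value and the memory read are faked for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RF_KILL_INDICATOR_FOR_WOWLAN 0x87	/* illustrative value only */

struct error_table_start {	/* cf. struct iwl_error_event_table */
	uint32_t valid;
	uint32_t error_id;
};

/* Returns true when the firmware logged an error across suspend;
 * *rfkill_wakeup tells the caller whether to report an rfkill wakeup. */
static bool check_rt_status(const struct error_table_start *err,
			    bool *rfkill_wakeup)
{
	*rfkill_wakeup = err->valid &&
			 err->error_id == RF_KILL_INDICATOR_FOR_WOWLAN;
	return err->valid;
}

int main(void)
{
	struct error_table_start err = { .valid = 1, .error_id = 0x10 };
	bool rfkill;

	if (check_rt_status(&err, &rfkill))
		printf("fw error on resume, rfkill wakeup: %d\n", rfkill);
	return 0;
}
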
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index de40752aa67e..3b6b3d8fb961 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -666,16 +666,11 @@ iwl_dbgfs_bt_force_ant_write(struct iwl_mvm *mvm, char *buf,
};
int ret, bt_force_ant_mode;
- for (bt_force_ant_mode = 0;
- bt_force_ant_mode < ARRAY_SIZE(modes_str);
- bt_force_ant_mode++) {
- if (!strcmp(buf, modes_str[bt_force_ant_mode]))
- break;
- }
-
- if (bt_force_ant_mode >= ARRAY_SIZE(modes_str))
- return -EINVAL;
+ ret = match_string(modes_str, ARRAY_SIZE(modes_str), buf);
+ if (ret < 0)
+ return ret;
+ bt_force_ant_mode = ret;
ret = 0;
mutex_lock(&mvm->mutex);
if (mvm->bt_force_ant_mode == bt_force_ant_mode)
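
The debugfs hunk above replaces an open-coded strcmp() loop with match_string(), which returns the matching index or a negative errno. A small user-space equivalent of what that helper does for a counted string array; find_mode() is an illustrative stand-in, not the kernel function, and the mode names are only examples.

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* user-space equivalent of the kernel's match_string() */
static int find_mode(const char *const *array, size_t n, const char *s)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (array[i] && !strcmp(array[i], s))
			return (int)i;
	return -EINVAL;
}

int main(void)
{
	static const char *const modes_str[] = { "disable", "bt", "wifi" };
	int idx = find_mode(modes_str, 3, "bt");

	if (idx < 0)
		return 1;
	printf("bt_force_ant_mode = %d\n", idx);
	return 0;
}

The call site keeps the same behaviour as before, but the lookup and the range check collapse into one call whose negative return can be passed straight back to user space.
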
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 96d26b749952..dade206d5511 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -299,6 +299,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
static const u16 alive_cmd[] = { MVM_ALIVE };
+ set_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status);
if (ucode_type == IWL_UCODE_REGULAR &&
iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
!(fw_has_capa(&mvm->fw->ucode_capa,
@@ -363,12 +364,20 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
*/
memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
- mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1;
+ /*
+ * Set a 'fake' TID for the command queue, since we use the
+ * hweight() of the tid_bitmap as a refcount now. Not that
+ * we ever even consider the command queue as one we might
+ * want to reuse, but be safe nevertheless.
+ */
+ mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].tid_bitmap =
+ BIT(IWL_MAX_TID_COUNT + 2);
for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
+ clear_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status);
return 0;
}
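
The comment in the hunk above spells out the accounting change this series makes: instead of a dedicated hw_queue_refcount, a queue's users are tracked as bits in tid_bitmap and the refcount is simply hweight() of that bitmap, with the command queue given a fake TID bit so it never looks free. A user-space sketch of that bookkeeping, assuming a shortened TID space; __builtin_popcount() stands in for hweight16().

#include <stdint.h>
#include <stdio.h>

#define MAX_TID_COUNT	8	/* shortened for illustration */
#define CMD_QUEUE_TID	(MAX_TID_COUNT + 2)

struct txq_info {
	uint16_t tid_bitmap;	/* one bit per TID using this queue */
};

static int queue_refcount(const struct txq_info *q)
{
	return __builtin_popcount(q->tid_bitmap);	/* ~ hweight16() */
}

int main(void)
{
	struct txq_info cmd_queue = { .tid_bitmap = 1u << CMD_QUEUE_TID };
	struct txq_info data_queue = { 0 };

	data_queue.tid_bitmap |= 1u << 0;	/* TID 0 attaches */
	data_queue.tid_bitmap |= 1u << 5;	/* TID 5 shares the queue */
	data_queue.tid_bitmap &= ~(1u << 0);	/* TID 0 detaches */

	printf("cmd queue users: %d\n", queue_refcount(&cmd_queue));
	printf("data queue users: %d, shared: %d\n",
	       queue_refcount(&data_queue), queue_refcount(&data_queue) > 1);
	return 0;
}
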
@@ -699,8 +708,12 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
enabled = !!(wifi_pkg->package.elements[1].integer.value);
n_profiles = wifi_pkg->package.elements[2].integer.value;
- /* in case of BIOS bug */
- if (n_profiles <= 0) {
+ /*
+ * Check the validity of n_profiles. The EWRD profiles start
+ * from index 1, so the maximum value allowed here is
+ * ACPI_SAR_PROFILE_NUM - 1.
+ */
+ if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) {
ret = -EINVAL;
goto out_free;
}
@@ -1022,7 +1035,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
mvm->fwrt.dump.conf = FW_DBG_INVALID;
/* if we have a destination, assume EARLY START */
- if (mvm->fw->dbg_dest_tlv)
+ if (mvm->fw->dbg.dest_tlv)
mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE;
iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 781f30356720..6486cfb33f40 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -1487,12 +1487,11 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
IWL_MVM_MISSED_BEACONS_THRESHOLD)
ieee80211_beacon_loss(vif);
- if (!iwl_fw_dbg_trigger_enabled(mvm->fw,
- FW_DBG_TRIGGER_MISSED_BEACONS))
+ trigger = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+ FW_DBG_TRIGGER_MISSED_BEACONS);
+ if (!trigger)
return;
- trigger = iwl_fw_dbg_get_trigger(mvm->fw,
- FW_DBG_TRIGGER_MISSED_BEACONS);
bcon_trig = (void *)trigger->data;
stop_trig_missed_bcon = le32_to_cpu(bcon_trig->stop_consec_missed_bcon);
stop_trig_missed_bcon_since_rx =
@@ -1500,11 +1499,6 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
/* TODO: implement start trigger */
- if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
- ieee80211_vif_to_wdev(vif),
- trigger))
- return;
-
if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx ||
rx_missed_bcon >= stop_trig_missed_bcon)
iwl_fw_dbg_collect_trig(&mvm->fwrt, trigger, NULL);
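
This is the first of several hunks that collapse the old three-step trigger dance (trigger_enabled, get_trigger, trigger_check_stop) into a single iwl_fw_dbg_trigger_on() call that returns the trigger pointer or NULL. The sketch below shows the consolidated shape of such a helper under the assumption that it merely wraps the enabled and stop checks; the names and types are simplified stand-ins, not the driver's definitions.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct dbg_trigger {
	int id;
	const void *data;
};

/* stand-ins for the two checks the old call sites made separately */
static bool trigger_enabled(const struct dbg_trigger *t)
{
	return t && t->id;
}

static bool trigger_stopped(const struct dbg_trigger *t)
{
	(void)t;		/* always armed in this sketch */
	return false;
}

/* one call: returns the trigger when it is armed, NULL otherwise */
static const struct dbg_trigger *
trigger_on(const struct dbg_trigger *triggers, size_t n, int id)
{
	size_t i;

	for (i = 0; i < n; i++) {
		const struct dbg_trigger *t = &triggers[i];

		if (t->id == id && trigger_enabled(t) && !trigger_stopped(t))
			return t;
	}
	return NULL;
}

int main(void)
{
	struct dbg_trigger trigs[] = { { .id = 3 }, { .id = 7 } };
	const struct dbg_trigger *t = trigger_on(trigs, 2, 7);

	printf("trigger armed: %s\n", t ? "yes" : "no");
	return 0;
}

The same shape repeats in the mac80211.c, ops.c, rx.c and rxmq.c hunks below: test the single return value, then read trig->data.
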
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index c78d017749d3..505b0385d800 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -857,16 +857,13 @@ iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_fw_dbg_trigger_tlv *trig;
struct iwl_fw_dbg_trigger_ba *ba_trig;
- if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+ FW_DBG_TRIGGER_BA);
+ if (!trig)
return;
- trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
ba_trig = (void *)trig->data;
- if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
- ieee80211_vif_to_wdev(vif), trig))
- return;
-
switch (action) {
case IEEE80211_AMPDU_TX_OPERATIONAL: {
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
@@ -1231,12 +1228,15 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
iwl_mvm_del_aux_sta(mvm);
/*
- * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
- * won't be called in this case).
+ * Clear IN_HW_RESTART and HW_RESTART_REQUESTED flag when stopping the
+ * hw (as restart_complete() won't be called in this case) and mac80211
+ * won't execute the restart.
* But make sure to cleanup interfaces that have gone down before/during
* HW restart was requested.
*/
- if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
+ test_and_clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+ &mvm->status))
ieee80211_iterate_interfaces(mvm->hw, 0,
iwl_mvm_cleanup_iterator, mvm);
@@ -2802,14 +2802,12 @@ iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm,
struct iwl_fw_dbg_trigger_tlv *trig;
struct iwl_fw_dbg_trigger_tdls *tdls_trig;
- if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TDLS))
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+ FW_DBG_TRIGGER_TDLS);
+ if (!trig)
return;
- trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TDLS);
tdls_trig = (void *)trig->data;
- if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
- ieee80211_vif_to_wdev(vif), trig))
- return;
if (!(tdls_trig->action_bitmap & BIT(action)))
return;
@@ -4491,14 +4489,12 @@ static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
struct iwl_fw_dbg_trigger_tlv *trig;
struct iwl_fw_dbg_trigger_mlme *trig_mlme;
- if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+ FW_DBG_TRIGGER_MLME);
+ if (!trig)
return;
- trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
trig_mlme = (void *)trig->data;
- if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
- ieee80211_vif_to_wdev(vif), trig))
- return;
if (event->u.mlme.data == ASSOC_EVENT) {
if (event->u.mlme.status == MLME_DENIED)
@@ -4533,14 +4529,12 @@ static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm,
struct iwl_fw_dbg_trigger_tlv *trig;
struct iwl_fw_dbg_trigger_ba *ba_trig;
- if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+ FW_DBG_TRIGGER_BA);
+ if (!trig)
return;
- trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
ba_trig = (void *)trig->data;
- if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
- ieee80211_vif_to_wdev(vif), trig))
- return;
if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid)))
return;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 8f71eeed50d9..7ba5bc2ed1c4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -512,6 +512,7 @@ enum iwl_mvm_scan_type {
IWL_SCAN_TYPE_WILD,
IWL_SCAN_TYPE_MILD,
IWL_SCAN_TYPE_FRAGMENTED,
+ IWL_SCAN_TYPE_FAST_BALANCE,
};
enum iwl_mvm_sched_scan_pass_all_states {
@@ -753,24 +754,12 @@ iwl_mvm_baid_data_from_reorder_buf(struct iwl_mvm_reorder_buffer *buf)
* This is a state in which a single queue serves more than one TID, all of
* which are not aggregated. Note that the queue is only associated to one
* RA.
- * @IWL_MVM_QUEUE_INACTIVE: queue is allocated but no traffic on it
- * This is a state of a queue that has had traffic on it, but during the
- * last %IWL_MVM_DQA_QUEUE_TIMEOUT time period there has been no traffic on
- * it. In this state, when a new queue is needed to be allocated but no
- * such free queue exists, an inactive queue might be freed and given to
- * the new RA/TID.
- * @IWL_MVM_QUEUE_RECONFIGURING: queue is being reconfigured
- * This is the state of a queue that has had traffic pass through it, but
- * needs to be reconfigured for some reason, e.g. the queue needs to
- * become unshared and aggregations re-enabled on.
*/
enum iwl_mvm_queue_status {
IWL_MVM_QUEUE_FREE,
IWL_MVM_QUEUE_RESERVED,
IWL_MVM_QUEUE_READY,
IWL_MVM_QUEUE_SHARED,
- IWL_MVM_QUEUE_INACTIVE,
- IWL_MVM_QUEUE_RECONFIGURING,
};
#define IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ)
@@ -787,6 +776,17 @@ struct iwl_mvm_geo_profile {
u8 values[ACPI_GEO_TABLE_SIZE];
};
+struct iwl_mvm_dqa_txq_info {
+ u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
+ bool reserved; /* Is this the TXQ reserved for a STA */
+ u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */
+ u8 txq_tid; /* The TID "owner" of this queue*/
+ u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
+ /* Timestamp for inactivation per TID of this queue */
+ unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1];
+ enum iwl_mvm_queue_status status;
+};
+
struct iwl_mvm {
/* for logger access */
struct device *dev;
@@ -843,17 +843,7 @@ struct iwl_mvm {
u16 hw_queue_to_mac80211[IWL_MAX_TVQM_QUEUES];
- struct {
- u8 hw_queue_refcount;
- u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
- bool reserved; /* Is this the TXQ reserved for a STA */
- u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */
- u8 txq_tid; /* The TID "owner" of this queue*/
- u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
- /* Timestamp for inactivation per TID of this queue */
- unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1];
- enum iwl_mvm_queue_status status;
- } queue_info[IWL_MAX_HW_QUEUES];
+ struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES];
spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
struct work_struct add_stream_wk; /* To add streams to queues */
@@ -1883,17 +1873,6 @@ void iwl_mvm_vif_set_low_latency(struct iwl_mvm_vif *mvmvif, bool set,
mvmvif->low_latency &= ~cause;
}
-/* hw scheduler queue config */
-bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
- u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
- unsigned int wdg_timeout);
-int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
- u8 sta_id, u8 tid, unsigned int timeout);
-
-int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
- u8 tid, u8 flags);
-int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq);
-
/* Return a bitmask with all the hw supported queues, except for the
* command queue, which can't be flushed.
*/
@@ -1905,6 +1884,11 @@ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
{
+ lockdep_assert_held(&mvm->mutex);
+ /* calling this function without using dump_start/end since at this
+ * point we already hold the op mode mutex
+ */
+ iwl_fw_dbg_collect_sync(&mvm->fwrt);
iwl_fw_cancel_timestamp(&mvm->fwrt);
iwl_free_fw_paging(&mvm->fwrt);
clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
@@ -1990,8 +1974,6 @@ void iwl_mvm_reorder_timer_expired(struct timer_list *t);
struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm);
-void iwl_mvm_inactivity_check(struct iwl_mvm *mvm);
-
#define MVM_TCM_PERIOD_MSEC 500
#define MVM_TCM_PERIOD (HZ * MVM_TCM_PERIOD_MSEC / 1000)
#define MVM_LL_PERIOD (10 * HZ)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index fff98fed35ed..3633f27d048a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -477,15 +477,11 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
u32 status;
int resp_len, n_channels;
u16 mcc;
- bool resp_v2 = fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
return ERR_PTR(-EOPNOTSUPP);
cmd.len[0] = sizeof(struct iwl_mcc_update_cmd);
- if (!resp_v2)
- cmd.len[0] = sizeof(struct iwl_mcc_update_cmd_v1);
IWL_DEBUG_LAR(mvm, "send MCC update to FW with '%c%c' src = %d\n",
alpha2[0], alpha2[1], src_id);
@@ -497,7 +493,8 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
pkt = cmd.resp_pkt;
/* Extract MCC response */
- if (resp_v2) {
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT)) {
struct iwl_mcc_update_resp *mcc_resp = (void *)pkt->data;
n_channels = __le32_to_cpu(mcc_resp->n_channels);
@@ -509,9 +506,9 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
goto exit;
}
} else {
- struct iwl_mcc_update_resp_v1 *mcc_resp_v1 = (void *)pkt->data;
+ struct iwl_mcc_update_resp_v3 *mcc_resp_v3 = (void *)pkt->data;
- n_channels = __le32_to_cpu(mcc_resp_v1->n_channels);
+ n_channels = __le32_to_cpu(mcc_resp_v3->n_channels);
resp_len = sizeof(struct iwl_mcc_update_resp) +
n_channels * sizeof(__le32);
resp_cp = kzalloc(resp_len, GFP_KERNEL);
@@ -520,12 +517,14 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
goto exit;
}
- resp_cp->status = mcc_resp_v1->status;
- resp_cp->mcc = mcc_resp_v1->mcc;
- resp_cp->cap = mcc_resp_v1->cap;
- resp_cp->source_id = mcc_resp_v1->source_id;
- resp_cp->n_channels = mcc_resp_v1->n_channels;
- memcpy(resp_cp->channels, mcc_resp_v1->channels,
+ resp_cp->status = mcc_resp_v3->status;
+ resp_cp->mcc = mcc_resp_v3->mcc;
+ resp_cp->cap = cpu_to_le16(mcc_resp_v3->cap);
+ resp_cp->source_id = mcc_resp_v3->source_id;
+ resp_cp->time = mcc_resp_v3->time;
+ resp_cp->geo_info = mcc_resp_v3->geo_info;
+ resp_cp->n_channels = mcc_resp_v3->n_channels;
+ memcpy(resp_cp->channels, mcc_resp_v3->channels,
n_channels * sizeof(__le32));
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 0599d323cbeb..0e2092526fae 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -565,10 +565,23 @@ static bool iwl_mvm_fwrt_fw_running(void *ctx)
return iwl_mvm_firmware_running(ctx);
}
+static int iwl_mvm_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd)
+{
+ struct iwl_mvm *mvm = (struct iwl_mvm *)ctx;
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_send_cmd(mvm, host_cmd);
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
.dump_start = iwl_mvm_fwrt_dump_start,
.dump_end = iwl_mvm_fwrt_dump_end,
.fw_running = iwl_mvm_fwrt_fw_running,
+ .send_hcmd = iwl_mvm_fwrt_send_hcmd,
};
static struct iwl_op_mode *
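
The new send_hcmd op lets the firmware runtime send host commands through the op mode while the op mode supplies the locking, here by taking mvm->mutex around iwl_mvm_send_cmd(). A hedged user-space sketch of that callback-table pattern using pthreads; only the op name mirrors the struct above, everything else is illustrative.

#include <pthread.h>
#include <stdio.h>

struct host_cmd { int id; };

struct op_mode {
	pthread_mutex_t mutex;
};

struct fw_runtime_ops {
	int (*send_hcmd)(void *ctx, struct host_cmd *cmd);
};

static int do_send_cmd(struct host_cmd *cmd)
{
	printf("sending host command 0x%x\n", cmd->id);
	return 0;
}

/* op-mode callback: take the op-mode lock around the actual send */
static int op_send_hcmd(void *ctx, struct host_cmd *cmd)
{
	struct op_mode *om = ctx;
	int ret;

	pthread_mutex_lock(&om->mutex);
	ret = do_send_cmd(cmd);
	pthread_mutex_unlock(&om->mutex);
	return ret;
}

static const struct fw_runtime_ops fwrt_ops = {
	.send_hcmd = op_send_hcmd,
};

int main(void)
{
	struct op_mode om = { .mutex = PTHREAD_MUTEX_INITIALIZER };
	struct host_cmd cmd = { .id = 0xd0 };

	return fwrt_ops.send_hcmd(&om, &cmd);
}
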
@@ -604,9 +617,13 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (cfg->max_rx_agg_size)
hw->max_rx_aggregation_subframes = cfg->max_rx_agg_size;
+ else
+ hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
if (cfg->max_tx_agg_size)
hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;
+ else
+ hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
op_mode = hw->priv;
@@ -748,12 +765,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_trans_configure(mvm->trans, &trans_cfg);
trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
- trans->dbg_dest_tlv = mvm->fw->dbg_dest_tlv;
- trans->dbg_dest_reg_num = mvm->fw->dbg_dest_reg_num;
- memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv,
+ trans->dbg_dest_tlv = mvm->fw->dbg.dest_tlv;
+ trans->dbg_n_dest_reg = mvm->fw->dbg.n_dest_reg;
+ memcpy(trans->dbg_conf_tlv, mvm->fw->dbg.conf_tlv,
sizeof(trans->dbg_conf_tlv));
- trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv;
- trans->dbg_dump_mask = mvm->fw->dbg_dump_mask;
+ trans->dbg_trigger_tlv = mvm->fw->dbg.trigger_tlv;
+ trans->dbg_dump_mask = mvm->fw->dbg.dump_mask;
trans->iml = mvm->fw->iml;
trans->iml_len = mvm->fw->iml_len;
@@ -784,6 +801,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mutex_lock(&mvm->mutex);
iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
err = iwl_run_init_mvm_ucode(mvm, true);
+ if (test_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status))
+ iwl_fw_alive_error_dump(&mvm->fwrt);
if (!iwlmvm_mod_params.init_dbg || !err)
iwl_mvm_stop_device(mvm);
iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
@@ -953,15 +972,13 @@ static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
struct iwl_fw_dbg_trigger_cmd *cmds_trig;
int i;
- if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF))
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL,
+ FW_DBG_TRIGGER_FW_NOTIF);
+ if (!trig)
return;
- trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF);
cmds_trig = (void *)trig->data;
- if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
- return;
-
for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) {
/* don't collect on CMD 0 */
if (!cmds_trig->cmds[i].cmd_id)
@@ -1223,7 +1240,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
*/
if (!mvm->fw_restart && fw_error) {
iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
- NULL);
+ NULL, 0);
} else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
struct iwl_mvm_reprobe *reprobe;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 2c75f51a04e4..089972280daa 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -1239,7 +1239,11 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
!(info->flags & IEEE80211_TX_STAT_AMPDU))
return;
- rs_rate_from_ucode_rate(tx_resp_hwrate, info->band, &tx_resp_rate);
+ if (rs_rate_from_ucode_rate(tx_resp_hwrate, info->band,
+ &tx_resp_rate)) {
+ WARN_ON_ONCE(1);
+ return;
+ }
#ifdef CONFIG_MAC80211_DEBUGFS
/* Disable last tx check if we are debugging with fixed rate but
@@ -1290,7 +1294,10 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
*/
table = &lq_sta->lq;
lq_hwrate = le32_to_cpu(table->rs_table[0]);
- rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate);
+ if (rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate)) {
+ WARN_ON_ONCE(1);
+ return;
+ }
/* Here we actually compare this rate to the latest LQ command */
if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) {
@@ -1392,8 +1399,12 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
/* Collect data for each rate used during failed TX attempts */
for (i = 0; i <= retries; ++i) {
lq_hwrate = le32_to_cpu(table->rs_table[i]);
- rs_rate_from_ucode_rate(lq_hwrate, info->band,
- &lq_rate);
+ if (rs_rate_from_ucode_rate(lq_hwrate, info->band,
+ &lq_rate)) {
+ WARN_ON_ONCE(1);
+ return;
+ }
+
/*
* Only collect stats if retried rate is in the same RS
* table as active/search.
@@ -3260,7 +3271,10 @@ static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm,
for (i = 0; i < num_rates; i++)
lq_cmd->rs_table[i] = ucode_rate_le32;
- rs_rate_from_ucode_rate(ucode_rate, band, &rate);
+ if (rs_rate_from_ucode_rate(ucode_rate, band, &rate)) {
+ WARN_ON_ONCE(1);
+ return;
+ }
if (is_mimo(&rate))
lq_cmd->mimo_delim = num_rates - 1;
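
The rs.c hunks above change the rs_rate_from_ucode_rate() call sites to check the return value and warn-and-bail instead of continuing with a partially decoded rate. A small sketch of that defensive pattern; decode_rate() and the warn macro are illustrative stand-ins (the macro does not latch like the kernel's WARN_ON_ONCE).

#include <stdint.h>
#include <stdio.h>

#define WARN_ON_ONCE(cond) \
	((cond) ? (fprintf(stderr, "WARN: %s\n", #cond), 1) : 0)

struct rate { unsigned int mcs; };

/* pretend decode: fails for reserved encodings */
static int decode_rate(uint32_t hwrate, struct rate *r)
{
	if ((hwrate & 0xff) == 0xff)
		return -1;
	r->mcs = hwrate & 0xf;
	return 0;
}

static void handle_tx_status(uint32_t hwrate)
{
	struct rate r;

	if (decode_rate(hwrate, &r)) {
		WARN_ON_ONCE(1);
		return;		/* don't use a half-initialized rate */
	}
	printf("tx used mcs %u\n", r.mcs);
}

int main(void)
{
	handle_tx_status(0x0005);	/* valid */
	handle_tx_status(0x00ff);	/* rejected */
	return 0;
}
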
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index a050220da678..ef624833cf1b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -433,13 +433,14 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct ieee80211_vif *tx_blocked_vif =
rcu_dereference(mvm->csa_tx_blocked_vif);
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct ieee80211_vif *vif = mvmsta->vif;
/* We have tx blocked stations (with CS bit). If we heard
* frames from a blocked station on a new channel we can
* TX to it again.
*/
- if (unlikely(tx_blocked_vif) &&
- mvmsta->vif == tx_blocked_vif) {
+ if (unlikely(tx_blocked_vif) && vif == tx_blocked_vif) {
struct iwl_mvm_vif *mvmvif =
iwl_mvm_vif_from_mac80211(tx_blocked_vif);
@@ -450,23 +451,18 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
rs_update_last_rssi(mvm, mvmsta, rx_status);
- if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
- ieee80211_is_beacon(hdr->frame_control)) {
- struct iwl_fw_dbg_trigger_tlv *trig;
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt,
+ ieee80211_vif_to_wdev(vif),
+ FW_DBG_TRIGGER_RSSI);
+
+ if (trig && ieee80211_is_beacon(hdr->frame_control)) {
struct iwl_fw_dbg_trigger_low_rssi *rssi_trig;
- bool trig_check;
s32 rssi;
- trig = iwl_fw_dbg_get_trigger(mvm->fw,
- FW_DBG_TRIGGER_RSSI);
rssi_trig = (void *)trig->data;
rssi = le32_to_cpu(rssi_trig->rssi);
- trig_check =
- iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
- ieee80211_vif_to_wdev(mvmsta->vif),
- trig);
- if (trig_check && rx_status->signal < rssi)
+ if (rx_status->signal < rssi)
iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
NULL);
}
@@ -693,15 +689,12 @@ iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
struct iwl_fw_dbg_trigger_stats *trig_stats;
u32 trig_offset, trig_thold;
- if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_STATS))
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_STATS);
+ if (!trig)
return;
- trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_STATS);
trig_stats = (void *)trig->data;
- if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
- return;
-
trig_offset = le32_to_cpu(trig_stats->stop_offset);
trig_thold = le32_to_cpu(trig_stats->stop_threshold);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 894dd6379b9a..26ac9402568d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -923,6 +923,185 @@ static void iwl_mvm_decode_he_sigb(struct iwl_mvm *mvm,
}
}
+static void
+iwl_mvm_decode_he_phy_ru_alloc(u64 he_phy_data, u32 rate_n_flags,
+ struct ieee80211_radiotap_he *he,
+ struct ieee80211_radiotap_he_mu *he_mu,
+ struct ieee80211_rx_status *rx_status)
+{
+ /*
+ * Unfortunately, we have to leave the mac80211 data
+ * incorrect for the case that we receive an HE-MU
+ * transmission and *don't* have the HE phy data (due
+ * to the bits being used for TSF). This shouldn't
+ * happen though as management frames where we need
+ * the TSF/timers are not be transmitted in HE-MU.
+ */
+ u8 ru = FIELD_GET(IWL_RX_HE_PHY_RU_ALLOC_MASK, he_phy_data);
+ u8 offs = 0;
+
+ rx_status->bw = RATE_INFO_BW_HE_RU;
+
+ he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
+
+ switch (ru) {
+ case 0 ... 36:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+ offs = ru;
+ break;
+ case 37 ... 52:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
+ offs = ru - 37;
+ break;
+ case 53 ... 60:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ offs = ru - 53;
+ break;
+ case 61 ... 64:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
+ offs = ru - 61;
+ break;
+ case 65 ... 66:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
+ offs = ru - 65;
+ break;
+ case 67:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
+ break;
+ case 68:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
+ break;
+ }
+ he->data2 |= le16_encode_bits(offs,
+ IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
+ he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN);
+ if (he_phy_data & IWL_RX_HE_PHY_RU_ALLOC_SEC80)
+ he->data2 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC);
+
+ if (he_mu) {
+#define CHECK_BW(bw) \
+ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_ ## bw ## MHZ != \
+ RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS)
+ CHECK_BW(20);
+ CHECK_BW(40);
+ CHECK_BW(80);
+ CHECK_BW(160);
+ he_mu->flags2 |=
+ le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK,
+ rate_n_flags),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW);
+ }
+}
+
+static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm,
+ struct iwl_rx_mpdu_desc *desc,
+ struct ieee80211_radiotap_he *he,
+ struct ieee80211_radiotap_he_mu *he_mu,
+ struct ieee80211_rx_status *rx_status,
+ u64 he_phy_data, u32 rate_n_flags,
+ int queue)
+{
+ u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
+ bool sigb_data;
+ u16 d1known = IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN;
+ u16 d2known = IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN;
+
+ he->data1 |= cpu_to_le16(d1known);
+ he->data2 |= cpu_to_le16(d2known);
+ he->data3 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_BSS_COLOR_MASK,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR);
+ he->data3 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_UPLINK,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_DATA3_UL_DL);
+ he->data3 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_LDPC_EXT_SYM,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG);
+ he->data4 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SPATIAL_REUSE_MASK,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE);
+ he->data5 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_PRE_FEC_PAD_MASK,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD);
+ he->data5 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_PE_DISAMBIG,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG);
+ he->data6 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_TXOP_DUR_MASK,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_DATA6_TXOP);
+ he->data6 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_DOPPLER,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_DATA6_DOPPLER);
+
+ switch (he_type) {
+ case RATE_MCS_HE_TYPE_MU:
+ he_mu->flags1 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_DCM,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM);
+ he_mu->flags1 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_MCS_MASK,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS);
+ he_mu->flags2 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIBG_SYM_OR_USER_NUM_MASK,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS);
+ he_mu->flags2 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_COMPRESSION,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP);
+ he_mu->flags2 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_PREAMBLE_PUNC_TYPE_MASK,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW);
+
+ sigb_data = FIELD_GET(IWL_RX_HE_PHY_INFO_TYPE_MASK,
+ he_phy_data) ==
+ IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO;
+ if (sigb_data)
+ iwl_mvm_decode_he_sigb(mvm, desc, rate_n_flags, he_mu);
+ /* fall through */
+ case RATE_MCS_HE_TYPE_TRIG:
+ he->data2 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
+ he->data5 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_HE_LTF_NUM_MASK,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS);
+ break;
+ case RATE_MCS_HE_TYPE_SU:
+ case RATE_MCS_HE_TYPE_EXT_SU:
+ he->data1 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN);
+ he->data3 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_BEAM_CHNG,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE);
+ break;
+ }
+
+ switch (FIELD_GET(IWL_RX_HE_PHY_INFO_TYPE_MASK, he_phy_data)) {
+ case IWL_RX_HE_PHY_INFO_TYPE_MU:
+ case IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO:
+ case IWL_RX_HE_PHY_INFO_TYPE_TB:
+ iwl_mvm_decode_he_phy_ru_alloc(he_phy_data, rate_n_flags,
+ he, he_mu, rx_status);
+ break;
+ default:
+ /* nothing */
+ break;
+ }
+}
+
static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
struct iwl_rx_mpdu_desc *desc,
u32 rate_n_flags, u16 phy_info, int queue)
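
iwl_mvm_decode_he_phy_ru_alloc() above maps the RU allocation index from the HE PHY data onto radiotap RU sizes plus an offset within that size class (0-36 maps to 26-tone, 37-52 to 52-tone, and so on). A user-space sketch of that index-to-size mapping, keeping the same breakpoints as the switch above; the size labels are plain strings here rather than the NL80211 enum values.

#include <stdio.h>

struct ru_alloc { const char *size; unsigned int offs; };

/* Same breakpoints as the driver's switch over the RU index. */
static struct ru_alloc decode_ru(unsigned int ru)
{
	if (ru <= 36)
		return (struct ru_alloc){ "26-tone", ru };
	if (ru <= 52)
		return (struct ru_alloc){ "52-tone", ru - 37 };
	if (ru <= 60)
		return (struct ru_alloc){ "106-tone", ru - 53 };
	if (ru <= 64)
		return (struct ru_alloc){ "242-tone", ru - 61 };
	if (ru <= 66)
		return (struct ru_alloc){ "484-tone", ru - 65 };
	if (ru == 67)
		return (struct ru_alloc){ "996-tone", 0 };
	return (struct ru_alloc){ "2x996-tone", 0 };
}

int main(void)
{
	unsigned int samples[] = { 5, 40, 61, 68 };

	for (unsigned int i = 0; i < 4; i++) {
		struct ru_alloc a = decode_ru(samples[i]);

		printf("ru %u -> %s, offset %u\n", samples[i], a.size, a.offs);
	}
	return 0;
}
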
@@ -933,9 +1112,8 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
u64 he_phy_data = HE_PHY_DATA_INVAL;
struct ieee80211_radiotap_he *he = NULL;
struct ieee80211_radiotap_he_mu *he_mu = NULL;
- u32 he_type = 0xffffffff;
+ u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
u8 stbc, ltf;
-
static const struct ieee80211_radiotap_he known = {
.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
@@ -953,25 +1131,19 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN),
};
unsigned int radiotap_len = 0;
- bool overload = phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD;
- bool sigb_data = false;
he = skb_put_data(skb, &known, sizeof(known));
radiotap_len += sizeof(known);
rx_status->flag |= RX_FLAG_RADIOTAP_HE;
- he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
-
if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) {
- if (mvm->trans->cfg->device_family >=
- IWL_DEVICE_FAMILY_22560)
+ if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
else
he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
if (he_type == RATE_MCS_HE_TYPE_MU) {
- he_mu = skb_put_data(skb, &mu_known,
- sizeof(mu_known));
+ he_mu = skb_put_data(skb, &mu_known, sizeof(mu_known));
radiotap_len += sizeof(mu_known);
rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU;
}
@@ -980,60 +1152,21 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
/* temporarily hide the radiotap data */
__skb_pull(skb, radiotap_len);
- if (overload && he_type == RATE_MCS_HE_TYPE_SU) {
- he->data1 |=
- cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN);
- if (FIELD_GET(IWL_RX_HE_PHY_UPLINK, he_phy_data))
- he->data3 |=
- cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA3_UL_DL);
-
+ if (he_phy_data != HE_PHY_DATA_INVAL &&
+ he_type == RATE_MCS_HE_TYPE_SU) {
+ /* report the AMPDU-EOF bit on single frames */
if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF, he_phy_data))
rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
}
- } else if (overload && he_mu && he_phy_data != HE_PHY_DATA_INVAL) {
- he_mu->flags1 |=
- le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIBG_SYM_OR_USER_NUM_MASK,
- he_phy_data),
- IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS);
- he_mu->flags1 |=
- le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_DCM,
- he_phy_data),
- IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM);
- he_mu->flags1 |=
- le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_MCS_MASK,
- he_phy_data),
- IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS);
- he_mu->flags2 |=
- le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_COMPRESSION,
- he_phy_data),
- IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP);
- he_mu->flags2 |=
- le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_PREAMBLE_PUNC_TYPE_MASK,
- he_phy_data),
- IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW);
-
- sigb_data = FIELD_GET(IWL_RX_HE_PHY_INFO_TYPE_MASK,
- he_phy_data) ==
- IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO;
- if (sigb_data)
- iwl_mvm_decode_he_sigb(mvm, desc, rate_n_flags, he_mu);
- }
- if (he_phy_data != HE_PHY_DATA_INVAL &&
- (he_type == RATE_MCS_HE_TYPE_SU ||
- he_type == RATE_MCS_HE_TYPE_MU)) {
- u8 bss_color = FIELD_GET(IWL_RX_HE_PHY_BSS_COLOR_MASK,
- he_phy_data);
-
- if (bss_color) {
- he->data1 |=
- cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN);
- he->data3 |= cpu_to_le16(bss_color);
- }
}
+ if (he_phy_data != HE_PHY_DATA_INVAL)
+ iwl_mvm_decode_he_phy_data(mvm, desc, he, he_mu, rx_status,
+ he_phy_data, rate_n_flags, queue);
+
/* update aggregation data for monitor sake on default queue */
if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
@@ -1056,84 +1189,12 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
}
- if (he_phy_data != HE_PHY_DATA_INVAL &&
- (FIELD_GET(IWL_RX_HE_PHY_INFO_TYPE_MASK, he_phy_data) ==
- IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO ||
- FIELD_GET(IWL_RX_HE_PHY_INFO_TYPE_MASK, he_phy_data) ==
- IWL_RX_HE_PHY_INFO_TYPE_TB_EXT_INFO)) {
- /*
- * Unfortunately, we have to leave the mac80211 data
- * incorrect for the case that we receive an HE-MU
- * transmission and *don't* have the HE phy data (due
- * to the bits being used for TSF). This shouldn't
- * happen though as management frames where we need
- * the TSF/timers are not be transmitted in HE-MU.
- */
- u8 ru = FIELD_GET(IWL_RX_HE_PHY_RU_ALLOC_MASK, he_phy_data);
- u8 offs = 0;
-
- rx_status->bw = RATE_INFO_BW_HE_RU;
-
+ /* the actual data is filled in by mac80211 */
+ if (he_type == RATE_MCS_HE_TYPE_SU ||
+ he_type == RATE_MCS_HE_TYPE_EXT_SU)
he->data1 |=
cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
- switch (ru) {
- case 0 ... 36:
- rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
- offs = ru;
- break;
- case 37 ... 52:
- rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
- offs = ru - 37;
- break;
- case 53 ... 60:
- rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
- offs = ru - 53;
- break;
- case 61 ... 64:
- rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
- offs = ru - 61;
- break;
- case 65 ... 66:
- rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
- offs = ru - 65;
- break;
- case 67:
- rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
- break;
- case 68:
- rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
- break;
- }
- he->data2 |=
- le16_encode_bits(offs,
- IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
- he->data2 |=
- cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN |
- IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN);
- if (he_phy_data & IWL_RX_HE_PHY_RU_ALLOC_SEC80)
- he->data2 |=
- cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC);
-
- if (he_mu) {
-#define CHECK_BW(bw) \
- BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_ ## bw ## MHZ != \
- RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS)
- CHECK_BW(20);
- CHECK_BW(40);
- CHECK_BW(80);
- CHECK_BW(160);
- he->data2 |=
- le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK,
- rate_n_flags),
- IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW);
- }
- } else if (he_type == RATE_MCS_HE_TYPE_SU ||
- he_type == RATE_MCS_HE_TYPE_EXT_SU) {
- he->data1 |=
- cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
- }
-
stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS;
rx_status->nss =
((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
@@ -1202,9 +1263,8 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
he->data5 |= le16_encode_bits(ltf, IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
- switch (he_type) {
- case RATE_MCS_HE_TYPE_SU:
- case RATE_MCS_HE_TYPE_EXT_SU: {
+ if (he_type == RATE_MCS_HE_TYPE_SU ||
+ he_type == RATE_MCS_HE_TYPE_EXT_SU) {
u16 val;
/* LTF syms correspond to streams */
@@ -1234,31 +1294,10 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
rx_status->nss);
val = 0;
}
+
he->data5 |=
le16_encode_bits(val,
IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS);
- }
- break;
- case RATE_MCS_HE_TYPE_MU: {
- u16 val;
-
- if (he_phy_data == HE_PHY_DATA_INVAL)
- break;
-
- val = FIELD_GET(IWL_RX_HE_PHY_HE_LTF_NUM_MASK,
- he_phy_data);
-
- he->data2 |=
- cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
- he->data5 |=
- cpu_to_le16(FIELD_PREP(
- IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS,
- val));
- }
- break;
- case RATE_MCS_HE_TYPE_TRIG:
- /* not supported */
- break;
}
}
@@ -1424,6 +1463,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
u8 baid = (u8)((le32_to_cpu(desc->reorder_data) &
IWL_RX_MPDU_REORDER_BAID_MASK) >>
IWL_RX_MPDU_REORDER_BAID_SHIFT);
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct ieee80211_vif *vif = mvmsta->vif;
if (!mvm->tcm.paused && len >= sizeof(*hdr) &&
!is_multicast_ether_addr(hdr->addr1) &&
@@ -1436,8 +1477,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
* frames from a blocked station on a new channel we can
* TX to it again.
*/
- if (unlikely(tx_blocked_vif) &&
- tx_blocked_vif == mvmsta->vif) {
+ if (unlikely(tx_blocked_vif) && tx_blocked_vif == vif) {
struct iwl_mvm_vif *mvmvif =
iwl_mvm_vif_from_mac80211(tx_blocked_vif);
@@ -1448,23 +1488,18 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rs_update_last_rssi(mvm, mvmsta, rx_status);
- if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
- ieee80211_is_beacon(hdr->frame_control)) {
- struct iwl_fw_dbg_trigger_tlv *trig;
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt,
+ ieee80211_vif_to_wdev(vif),
+ FW_DBG_TRIGGER_RSSI);
+
+ if (trig && ieee80211_is_beacon(hdr->frame_control)) {
struct iwl_fw_dbg_trigger_low_rssi *rssi_trig;
- bool trig_check;
s32 rssi;
- trig = iwl_fw_dbg_get_trigger(mvm->fw,
- FW_DBG_TRIGGER_RSSI);
rssi_trig = (void *)trig->data;
rssi = le32_to_cpu(rssi_trig->rssi);
- trig_check =
- iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
- ieee80211_vif_to_wdev(mvmsta->vif),
- trig);
- if (trig_check && rx_status->signal < rssi)
+ if (rx_status->signal < rssi)
iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
NULL);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index e9048a98e793..cfb784fea77b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -110,6 +110,10 @@ static struct iwl_mvm_scan_timing_params scan_timing[] = {
.suspend_time = 95,
.max_out_time = 44,
},
+ [IWL_SCAN_TYPE_FAST_BALANCE] = {
+ .suspend_time = 30,
+ .max_out_time = 37,
+ },
};
struct iwl_mvm_scan_params {
@@ -235,8 +239,32 @@ iwl_mvm_get_traffic_load_band(struct iwl_mvm *mvm, enum nl80211_band band)
return mvm->tcm.result.band_load[band];
}
+struct iwl_is_dcm_with_go_iterator_data {
+ struct ieee80211_vif *current_vif;
+ bool is_dcm_with_p2p_go;
+};
+
+static void iwl_mvm_is_dcm_with_go_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_is_dcm_with_go_iterator_data *data = _data;
+ struct iwl_mvm_vif *other_mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif *curr_mvmvif =
+ iwl_mvm_vif_from_mac80211(data->current_vif);
+
+ /* exclude the given vif */
+ if (vif == data->current_vif)
+ return;
+
+ if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
+ other_mvmvif->phy_ctxt && curr_mvmvif->phy_ctxt &&
+ other_mvmvif->phy_ctxt->id != curr_mvmvif->phy_ctxt->id)
+ data->is_dcm_with_p2p_go = true;
+}
+
static enum
-iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device,
+iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
enum iwl_mvm_traffic_load load,
bool low_latency)
{
@@ -249,9 +277,30 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device,
if (!global_cnt)
return IWL_SCAN_TYPE_UNASSOC;
- if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) && !p2p_device &&
- fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAGMENTED_SCAN))
- return IWL_SCAN_TYPE_FRAGMENTED;
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
+ if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) &&
+ (!vif || vif->type != NL80211_IFTYPE_P2P_DEVICE))
+ return IWL_SCAN_TYPE_FRAGMENTED;
+
+ /* In case of DCM with a P2P GO where the BSS DTIM interval is
+ * below 220 msec, set all scan requests as fast-balance scans
+ */
+ if (vif && vif->type == NL80211_IFTYPE_STATION &&
+ vif->bss_conf.dtim_period < 220) {
+ struct iwl_is_dcm_with_go_iterator_data data = {
+ .current_vif = vif,
+ .is_dcm_with_p2p_go = false,
+ };
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_is_dcm_with_go_iterator,
+ &data);
+ if (data.is_dcm_with_p2p_go)
+ return IWL_SCAN_TYPE_FAST_BALANCE;
+ }
+ }
if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency)
return IWL_SCAN_TYPE_MILD;
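
The scan-type selection above now threads the vif through so it can pick IWL_SCAN_TYPE_FAST_BALANCE when a station vif with a short DTIM period (below 220 msec) shares the device with a P2P GO on a different phy context (DCM). A simplified sketch of that decision order under those assumptions; the input names are made up and the firmware-capability check is folded away.

#include <stdbool.h>
#include <stdio.h>

enum scan_type { SCAN_UNASSOC, SCAN_FRAGMENTED, SCAN_FAST_BALANCE,
		 SCAN_MILD, SCAN_WILD };

struct scan_inputs {
	int assoc_count;	/* associated vifs */
	bool high_load;
	bool low_latency;
	bool p2p_device_scan;	/* scanning on a P2P_DEVICE vif */
	bool sta_short_dtim;	/* station DTIM interval < 220 msec */
	bool dcm_with_p2p_go;	/* a P2P GO on a different phy context */
};

static enum scan_type pick_scan_type(const struct scan_inputs *in)
{
	if (!in->assoc_count)
		return SCAN_UNASSOC;
	if ((in->high_load || in->low_latency) && !in->p2p_device_scan)
		return SCAN_FRAGMENTED;
	if (in->sta_short_dtim && in->dcm_with_p2p_go)
		return SCAN_FAST_BALANCE;
	if (in->high_load || in->low_latency)
		return SCAN_MILD;
	return SCAN_WILD;
}

int main(void)
{
	struct scan_inputs in = {
		.assoc_count = 1, .sta_short_dtim = true,
		.dcm_with_p2p_go = true,
	};

	printf("scan type: %d (2 == fast balance)\n", pick_scan_type(&in));
	return 0;
}

Fast balance sits between fragmented and mild: fragmented dwell times would starve a GO with such a short DTIM, so the shorter suspend_time/max_out_time pair added to scan_timing[] is used instead.
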
@@ -260,7 +309,8 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device,
}
static enum
-iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device)
+iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
{
enum iwl_mvm_traffic_load load;
bool low_latency;
@@ -268,12 +318,12 @@ iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device)
load = iwl_mvm_get_traffic_load(mvm);
low_latency = iwl_mvm_low_latency(mvm);
- return _iwl_mvm_get_scan_type(mvm, p2p_device, load, low_latency);
+ return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency);
}
static enum
iwl_mvm_scan_type iwl_mvm_get_scan_type_band(struct iwl_mvm *mvm,
- bool p2p_device,
+ struct ieee80211_vif *vif,
enum nl80211_band band)
{
enum iwl_mvm_traffic_load load;
@@ -282,7 +332,7 @@ iwl_mvm_scan_type iwl_mvm_get_scan_type_band(struct iwl_mvm *mvm,
load = iwl_mvm_get_traffic_load_band(mvm, band);
low_latency = iwl_mvm_low_latency_band(mvm, band);
- return _iwl_mvm_get_scan_type(mvm, p2p_device, load, low_latency);
+ return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency);
}
static int
@@ -860,6 +910,12 @@ static inline bool iwl_mvm_is_regular_scan(struct iwl_mvm_scan_params *params)
params->scan_plans[0].iterations == 1;
}
+static bool iwl_mvm_is_scan_fragmented(enum iwl_mvm_scan_type type)
+{
+ return (type == IWL_SCAN_TYPE_FRAGMENTED ||
+ type == IWL_SCAN_TYPE_FAST_BALANCE);
+}
+
static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
struct ieee80211_vif *vif)
@@ -872,7 +928,7 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
- if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
+ if (iwl_mvm_is_scan_fragmented(params->type))
flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
if (iwl_mvm_rrm_scan_needed(mvm) &&
@@ -895,7 +951,7 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
if (iwl_mvm_is_regular_scan(params) &&
vif->type != NL80211_IFTYPE_P2P_DEVICE &&
- params->type != IWL_SCAN_TYPE_FRAGMENTED)
+ !iwl_mvm_is_scan_fragmented(params->type))
flags |= IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL;
return flags;
@@ -1044,7 +1100,7 @@ static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels)
static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
u32 flags, u8 channel_flags)
{
- enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false);
+ enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, NULL);
struct iwl_scan_config_v1 *cfg = config;
cfg->flags = cpu_to_le32(flags);
@@ -1077,9 +1133,9 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
if (iwl_mvm_is_cdb_supported(mvm)) {
enum iwl_mvm_scan_type lb_type, hb_type;
- lb_type = iwl_mvm_get_scan_type_band(mvm, false,
+ lb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
NL80211_BAND_2GHZ);
- hb_type = iwl_mvm_get_scan_type_band(mvm, false,
+ hb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
NL80211_BAND_5GHZ);
cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] =
@@ -1093,7 +1149,7 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
cpu_to_le32(scan_timing[hb_type].suspend_time);
} else {
enum iwl_mvm_scan_type type =
- iwl_mvm_get_scan_type(mvm, false);
+ iwl_mvm_get_scan_type(mvm, NULL);
cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] =
cpu_to_le32(scan_timing[type].max_out_time);
@@ -1130,14 +1186,14 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
return -ENOBUFS;
if (iwl_mvm_is_cdb_supported(mvm)) {
- type = iwl_mvm_get_scan_type_band(mvm, false,
+ type = iwl_mvm_get_scan_type_band(mvm, NULL,
NL80211_BAND_2GHZ);
- hb_type = iwl_mvm_get_scan_type_band(mvm, false,
+ hb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
NL80211_BAND_5GHZ);
if (type == mvm->scan_type && hb_type == mvm->hb_scan_type)
return 0;
} else {
- type = iwl_mvm_get_scan_type(mvm, false);
+ type = iwl_mvm_get_scan_type(mvm, NULL);
if (type == mvm->scan_type)
return 0;
}
@@ -1162,7 +1218,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
SCAN_CONFIG_FLAG_SET_MAC_ADDR |
SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
SCAN_CONFIG_N_CHANNELS(num_channels) |
- (type == IWL_SCAN_TYPE_FRAGMENTED ?
+ (iwl_mvm_is_scan_fragmented(type) ?
SCAN_CONFIG_FLAG_SET_FRAGMENTED :
SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
@@ -1177,7 +1233,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
*/
if (iwl_mvm_cdb_scan_api(mvm)) {
if (iwl_mvm_is_cdb_supported(mvm))
- flags |= (hb_type == IWL_SCAN_TYPE_FRAGMENTED) ?
+ flags |= (iwl_mvm_is_scan_fragmented(hb_type)) ?
SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED :
SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED;
iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags);
@@ -1338,11 +1394,11 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
- if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
+ if (iwl_mvm_is_scan_fragmented(params->type))
flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
if (iwl_mvm_is_cdb_supported(mvm) &&
- params->hb_type == IWL_SCAN_TYPE_FRAGMENTED)
+ iwl_mvm_is_scan_fragmented(params->hb_type))
flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED;
if (iwl_mvm_rrm_scan_needed(mvm) &&
@@ -1380,7 +1436,7 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
*/
if (iwl_mvm_is_regular_scan(params) &&
vif->type != NL80211_IFTYPE_P2P_DEVICE &&
- params->type != IWL_SCAN_TYPE_FRAGMENTED &&
+ !iwl_mvm_is_scan_fragmented(params->type) &&
!iwl_mvm_is_adaptive_dwell_supported(mvm) &&
!iwl_mvm_is_oce_supported(mvm))
flags |= IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL;
@@ -1448,6 +1504,9 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED)
cmd->v8.num_of_fragments[SCAN_HB_LMAC_IDX] =
IWL_SCAN_NUM_OF_FRAGS;
+
+ cmd->v8.general_flags2 =
+ IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER;
}
cmd->scan_start_mac_id = scan_vif->id;
@@ -1586,19 +1645,20 @@ void iwl_mvm_scan_timeout_wk(struct work_struct *work)
static void iwl_mvm_fill_scan_type(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
- bool p2p)
+ struct ieee80211_vif *vif)
{
if (iwl_mvm_is_cdb_supported(mvm)) {
params->type =
- iwl_mvm_get_scan_type_band(mvm, p2p,
+ iwl_mvm_get_scan_type_band(mvm, vif,
NL80211_BAND_2GHZ);
params->hb_type =
- iwl_mvm_get_scan_type_band(mvm, p2p,
+ iwl_mvm_get_scan_type_band(mvm, vif,
NL80211_BAND_5GHZ);
} else {
- params->type = iwl_mvm_get_scan_type(mvm, p2p);
+ params->type = iwl_mvm_get_scan_type(mvm, vif);
}
}
+
int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_scan_request *req,
struct ieee80211_scan_ies *ies)
@@ -1646,8 +1706,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
params.scan_plans = &scan_plan;
params.n_scan_plans = 1;
- iwl_mvm_fill_scan_type(mvm, &params,
- vif->type == NL80211_IFTYPE_P2P_DEVICE);
+ iwl_mvm_fill_scan_type(mvm, &params, vif);
ret = iwl_mvm_get_measurement_dwell(mvm, req, &params);
if (ret < 0)
@@ -1742,8 +1801,7 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
params.n_scan_plans = req->n_scan_plans;
params.scan_plans = req->scan_plans;
- iwl_mvm_fill_scan_type(mvm, &params,
- vif->type == NL80211_IFTYPE_P2P_DEVICE);
+ iwl_mvm_fill_scan_type(mvm, &params, vif);
/* In theory, LMAC scans can handle a 32-bit delay, but since
* waiting for over 18 hours to start the scan is a bit silly
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 8f929c774e70..1887d2b9f185 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -358,6 +358,108 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
return ret;
}
+static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
+ int mac80211_queue, u8 tid, u8 flags)
+{
+ struct iwl_scd_txq_cfg_cmd cmd = {
+ .scd_queue = queue,
+ .action = SCD_CFG_DISABLE_QUEUE,
+ };
+ bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE;
+ int ret;
+
+ if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES))
+ return -EINVAL;
+
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ spin_lock_bh(&mvm->queue_info_lock);
+
+ if (remove_mac_queue)
+ mvm->hw_queue_to_mac80211[queue] &=
+ ~BIT(mac80211_queue);
+
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ iwl_trans_txq_free(mvm->trans, queue);
+
+ return 0;
+ }
+
+ spin_lock_bh(&mvm->queue_info_lock);
+
+ if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0)) {
+ spin_unlock_bh(&mvm->queue_info_lock);
+ return 0;
+ }
+
+ mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
+
+ /*
+ * If there is another TID with the same AC - don't remove the MAC queue
+ * from the mapping
+ */
+ if (tid < IWL_MAX_TID_COUNT) {
+ unsigned long tid_bitmap =
+ mvm->queue_info[queue].tid_bitmap;
+ int ac = tid_to_mac80211_ac[tid];
+ int i;
+
+ for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) {
+ if (tid_to_mac80211_ac[i] == ac)
+ remove_mac_queue = false;
+ }
+ }
+
+ if (remove_mac_queue)
+ mvm->hw_queue_to_mac80211[queue] &=
+ ~BIT(mac80211_queue);
+
+ cmd.action = mvm->queue_info[queue].tid_bitmap ?
+ SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
+ if (cmd.action == SCD_CFG_DISABLE_QUEUE)
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Disabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n",
+ queue,
+ mvm->queue_info[queue].tid_bitmap,
+ mvm->hw_queue_to_mac80211[queue]);
+
+ /* If the queue is still enabled - nothing left to do in this func */
+ if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
+ spin_unlock_bh(&mvm->queue_info_lock);
+ return 0;
+ }
+
+ cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
+ cmd.tid = mvm->queue_info[queue].txq_tid;
+
+ /* Make sure queue info is correct even though we overwrite it */
+ WARN(mvm->queue_info[queue].tid_bitmap ||
+ mvm->hw_queue_to_mac80211[queue],
+ "TXQ #%d info out-of-sync - mac map=0x%x, tids=0x%x\n",
+ queue, mvm->hw_queue_to_mac80211[queue],
+ mvm->queue_info[queue].tid_bitmap);
+
+ /* If we are here - the queue is freed and we can zero out these vals */
+ mvm->queue_info[queue].tid_bitmap = 0;
+ mvm->hw_queue_to_mac80211[queue] = 0;
+
+ /* Regardless of whether this is a reserved TXQ for a STA - mark it as false */
+ mvm->queue_info[queue].reserved = false;
+
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ iwl_trans_txq_disable(mvm->trans, queue, false);
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
+ sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
+
+ if (ret)
+ IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
+ queue, ret);
+ return ret;
+}
+
static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
struct ieee80211_sta *sta;
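
The new static iwl_mvm_disable_txq() above drops one TID from a queue's tid_bitmap, only detaches the mac80211 queue when no remaining TID maps to the same AC, and only tears the hardware queue down once the bitmap is empty. A user-space sketch of just that bitmap/AC bookkeeping; the TID-to-AC table is shortened and illustrative, not the driver's.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_TID	8	/* shortened for illustration */

/* illustrative tid -> AC mapping (the driver has its own table) */
static const int tid_to_ac[MAX_TID] = { 2, 3, 3, 2, 1, 1, 0, 0 };

struct txq {
	uint16_t tid_bitmap;
	uint32_t mac_queue_map;
};

/* Returns true when the hardware queue itself should be disabled. */
static bool disable_tid(struct txq *q, int tid, int mac_queue)
{
	bool remove_mac_queue = true;

	q->tid_bitmap &= ~(1u << tid);

	/* keep the mac80211 queue if another TID shares the same AC */
	for (int i = 0; i < MAX_TID; i++)
		if ((q->tid_bitmap & (1u << i)) &&
		    tid_to_ac[i] == tid_to_ac[tid])
			remove_mac_queue = false;

	if (remove_mac_queue)
		q->mac_queue_map &= ~(1u << mac_queue);

	return q->tid_bitmap == 0;
}

int main(void)
{
	struct txq q = { .tid_bitmap = (1u << 0) | (1u << 3),
			 .mac_queue_map = 1u << 2 };

	printf("disable after TID 0: %d\n", disable_tid(&q, 0, 2));
	printf("disable after TID 3: %d\n", disable_tid(&q, 3, 2));
	return 0;
}
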
@@ -447,11 +549,12 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
}
static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
- bool same_sta)
+ u8 new_sta_id)
{
struct iwl_mvm_sta *mvmsta;
u8 txq_curr_ac, sta_id, tid;
unsigned long disable_agg_tids = 0;
+ bool same_sta;
int ret;
lockdep_assert_held(&mvm->mutex);
@@ -465,6 +568,8 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
tid = mvm->queue_info[queue].txq_tid;
spin_unlock_bh(&mvm->queue_info_lock);
+ same_sta = sta_id == new_sta_id;
+
mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
if (WARN_ON(!mvmsta))
return -EINVAL;
@@ -479,10 +584,6 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
mvmsta->vif->hw_queue[txq_curr_ac],
tid, 0);
if (ret) {
- /* Re-mark the inactive queue as inactive */
- spin_lock_bh(&mvm->queue_info_lock);
- mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
- spin_unlock_bh(&mvm->queue_info_lock);
IWL_ERR(mvm,
"Failed to free inactive queue %d (ret=%d)\n",
queue, ret);
@@ -504,7 +605,13 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
u8 ac_to_queue[IEEE80211_NUM_ACS];
int i;
+ /*
+ * This protects us against grabbing a queue that's being reconfigured
+ * by the inactivity checker.
+ */
+ lockdep_assert_held(&mvm->mutex);
lockdep_assert_held(&mvm->queue_info_lock);
+
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL;
@@ -517,11 +624,6 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
continue;
- /* Don't try and take queues being reconfigured */
- if (mvm->queue_info[queue].status ==
- IWL_MVM_QUEUE_RECONFIGURING)
- continue;
-
ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
}
@@ -562,14 +664,6 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
return -ENOSPC;
}
- /* Make sure the queue isn't in the middle of being reconfigured */
- if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
- IWL_ERR(mvm,
- "TXQ %d is in the middle of re-config - try again\n",
- queue);
- return -EBUSY;
- }
-
return queue;
}
@@ -579,9 +673,9 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
* in such a case, otherwise - if no redirection required - it does nothing,
* unless the %force param is true.
*/
-int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
- int ac, int ssn, unsigned int wdg_timeout,
- bool force)
+static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
+ int ac, int ssn, unsigned int wdg_timeout,
+ bool force)
{
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
@@ -616,7 +710,7 @@ int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
cmd.tid = mvm->queue_info[queue].txq_tid;
mq = mvm->hw_queue_to_mac80211[queue];
- shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
+ shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;
spin_unlock_bh(&mvm->queue_info_lock);
IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
@@ -674,6 +768,57 @@ out:
return ret;
}
+static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
+ u8 minq, u8 maxq)
+{
+ int i;
+
+ lockdep_assert_held(&mvm->queue_info_lock);
+
+ /* This should not be hit with new TX path */
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -ENOSPC;
+
+ /* Start by looking for a free queue */
+ for (i = minq; i <= maxq; i++)
+ if (mvm->queue_info[i].tid_bitmap == 0 &&
+ mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
+ return i;
+
+ return -ENOSPC;
+}
+
+static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
+ u8 sta_id, u8 tid, unsigned int timeout)
+{
+ int queue, size = IWL_DEFAULT_QUEUE_SIZE;
+
+ if (tid == IWL_MAX_TID_COUNT) {
+ tid = IWL_MGMT_TID;
+ size = IWL_MGMT_QUEUE_SIZE;
+ }
+ queue = iwl_trans_txq_alloc(mvm->trans,
+ cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
+ sta_id, tid, SCD_QUEUE_CFG, size, timeout);
+
+ if (queue < 0) {
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Failed allocating TXQ for sta %d tid %d, ret: %d\n",
+ sta_id, tid, queue);
+ return queue;
+ }
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
+ queue, sta_id, tid);
+
+ mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Enabling TXQ #%d (mac80211 map:0x%x)\n",
+ queue, mvm->hw_queue_to_mac80211[queue]);
+
+ return queue;
+}
+
static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
struct ieee80211_sta *sta, u8 ac,
int tid)
@@ -698,12 +843,428 @@ static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
spin_lock_bh(&mvmsta->lock);
mvmsta->tid_data[tid].txq_id = queue;
- mvmsta->tid_data[tid].is_tid_active = true;
spin_unlock_bh(&mvmsta->lock);
return 0;
}
+static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
+ int mac80211_queue, u8 sta_id, u8 tid)
+{
+ bool enable_queue = true;
+
+ spin_lock_bh(&mvm->queue_info_lock);
+
+ /* Make sure this TID isn't already enabled */
+ if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
+ spin_unlock_bh(&mvm->queue_info_lock);
+ IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
+ queue, tid);
+ return false;
+ }
+
+ /* Update mappings and refcounts */
+ if (mvm->queue_info[queue].tid_bitmap)
+ enable_queue = false;
+
+ if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) {
+ WARN(mac80211_queue >=
+ BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]),
+ "cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n",
+ mac80211_queue, queue, sta_id, tid);
+ mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
+ }
+
+ mvm->queue_info[queue].tid_bitmap |= BIT(tid);
+ mvm->queue_info[queue].ra_sta_id = sta_id;
+
+ if (enable_queue) {
+ if (tid != IWL_MAX_TID_COUNT)
+ mvm->queue_info[queue].mac80211_ac =
+ tid_to_mac80211_ac[tid];
+ else
+ mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
+
+ mvm->queue_info[queue].txq_tid = tid;
+ }
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Enabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n",
+ queue, mvm->queue_info[queue].tid_bitmap,
+ mvm->hw_queue_to_mac80211[queue]);
+
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ return enable_queue;
+}
+
+static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue,
+ int mac80211_queue, u16 ssn,
+ const struct iwl_trans_txq_scd_cfg *cfg,
+ unsigned int wdg_timeout)
+{
+ struct iwl_scd_txq_cfg_cmd cmd = {
+ .scd_queue = queue,
+ .action = SCD_CFG_ENABLE_QUEUE,
+ .window = cfg->frame_limit,
+ .sta_id = cfg->sta_id,
+ .ssn = cpu_to_le16(ssn),
+ .tx_fifo = cfg->fifo,
+ .aggregate = cfg->aggregate,
+ .tid = cfg->tid,
+ };
+ bool inc_ssn;
+
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return false;
+
+ /* Send the enabling command if we need to */
+ if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
+ cfg->sta_id, cfg->tid))
+ return false;
+
+ inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
+ NULL, wdg_timeout);
+ if (inc_ssn)
+ le16_add_cpu(&cmd.ssn, 1);
+
+ WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
+ "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
+
+ return inc_ssn;
+}
+
+static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
+{
+ struct iwl_scd_txq_cfg_cmd cmd = {
+ .scd_queue = queue,
+ .action = SCD_CFG_UPDATE_QUEUE_TID,
+ };
+ int tid;
+ unsigned long tid_bitmap;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return;
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
+ return;
+
+ /* Find any TID for queue */
+ tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
+ cmd.tid = tid;
+ cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
+ queue, ret);
+ return;
+ }
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ mvm->queue_info[queue].txq_tid = tid;
+ spin_unlock_bh(&mvm->queue_info_lock);
+ IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
+ queue, tid);
+}
+
+static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
+{
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ u8 sta_id;
+ int tid = -1;
+ unsigned long tid_bitmap;
+ unsigned int wdg_timeout;
+ int ssn;
+ int ret = true;
+
+ /* queue sharing is disabled on new TX path */
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ sta_id = mvm->queue_info[queue].ra_sta_id;
+ tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ /* Find TID for queue, and make sure it is the only one on the queue */
+ tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
+ if (tid_bitmap != BIT(tid)) {
+ IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
+ queue, tid_bitmap);
+ return;
+ }
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
+ tid);
+
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+ return;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+
+ ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
+
+ ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
+ tid_to_mac80211_ac[tid], ssn,
+ wdg_timeout, true);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
+ return;
+ }
+
+ /* If aggs should be turned back on - do it */
+ if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
+ struct iwl_mvm_add_sta_cmd cmd = {0};
+
+ mvmsta->tid_disable_agg &= ~BIT(tid);
+
+ cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
+ cmd.sta_id = mvmsta->sta_id;
+ cmd.add_modify = STA_MODE_MODIFY;
+ cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
+ cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
+ cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
+ iwl_mvm_add_sta_cmd_size(mvm), &cmd);
+ if (!ret) {
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "TXQ #%d is now aggregated again\n",
+ queue);
+
+ /* Mark queue internally as aggregating again */
+ iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
+ }
+ }
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
+ spin_unlock_bh(&mvm->queue_info_lock);
+}
+
+/*
+ * Remove inactive TIDs of a given queue.
+ * If all queue TIDs are inactive - mark the queue as inactive
+ * If only some of the queue TIDs are inactive - unmap them from the queue
+ *
+ * Returns %true if all TIDs were removed and the queue could be reused.
+ */
+static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvmsta, int queue,
+ unsigned long tid_bitmap,
+ unsigned long *unshare_queues,
+ unsigned long *changetid_queues)
+{
+ int tid;
+
+ lockdep_assert_held(&mvmsta->lock);
+ lockdep_assert_held(&mvm->queue_info_lock);
+
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return false;
+
+ /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
+ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+ /* If some TFDs are still queued - don't mark TID as inactive */
+ if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
+ tid_bitmap &= ~BIT(tid);
+
+ /* Don't mark as inactive any TID that has an active BA */
+ if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
+ tid_bitmap &= ~BIT(tid);
+ }
+
+ /* If all TIDs in the queue are inactive - return true, it can be reused */
+ if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
+ IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
+ return true;
+ }
+
+ /*
+ * If we are here, this is a shared queue and not all TIDs timed-out.
+ * Remove the ones that did.
+ */
+ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+ int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]];
+ u16 tid_bitmap;
+
+ mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
+ mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue);
+ mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
+
+ tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+
+ /*
+ * We need to take into account a situation in which a TXQ was
+ * allocated to TID x, and then turned shared by adding TIDs y
+ * and z. If TID x becomes inactive and is removed from the TXQ,
+ * ownership must be given to one of the remaining TIDs.
+ * This is mainly because if TID x continues - a new queue can't
+ * be allocated for it as long as it is an owner of another TXQ.
+ *
+ * Mark this queue in the right bitmap, we'll send the command
+ * to the firmware later.
+ */
+ if (!(tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
+ set_bit(queue, changetid_queues);
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Removing inactive TID %d from shared Q:%d\n",
+ tid, queue);
+ }
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "TXQ #%d left with tid bitmap 0x%x\n", queue,
+ mvm->queue_info[queue].tid_bitmap);
+
+ /*
+ * There may be different TIDs with the same mac queues, so make
+ * sure all TIDs have existing corresponding mac queues enabled
+ */
+ tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+ mvm->hw_queue_to_mac80211[queue] |=
+ BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
+ }
+
+ /* If the queue is marked as shared - "unshare" it */
+ if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
+ mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
+ IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
+ queue);
+ set_bit(queue, unshare_queues);
+ }
+
+ return false;
+}
+
+/*
+ * Check for inactivity - this includes checking if any queue
+ * can be unshared and finding one (and only one) that can be
+ * reused.
+ * This function is also invoked as a sort of clean-up task,
+ * in which case @alloc_for_sta is IWL_MVM_INVALID_STA.
+ *
+ * Returns the queue number, or -ENOSPC.
+ */
+static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
+{
+ unsigned long now = jiffies;
+ unsigned long unshare_queues = 0;
+ unsigned long changetid_queues = 0;
+ int i, ret, free_queue = -ENOSPC;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return -ENOSPC;
+
+ spin_lock_bh(&mvm->queue_info_lock);
+
+ rcu_read_lock();
+
+ /* we skip the CMD queue below by starting at 1 */
+ BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);
+
+ for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ u8 sta_id;
+ int tid;
+ unsigned long inactive_tid_bitmap = 0;
+ unsigned long queue_tid_bitmap;
+
+ queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
+ if (!queue_tid_bitmap)
+ continue;
+
+ /* If TXQ isn't in active use anyway - nothing to do here... */
+ if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
+ mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
+ continue;
+
+ /* Check to see if there are inactive TIDs on this queue */
+ for_each_set_bit(tid, &queue_tid_bitmap,
+ IWL_MAX_TID_COUNT + 1) {
+ if (time_after(mvm->queue_info[i].last_frame_time[tid] +
+ IWL_MVM_DQA_QUEUE_TIMEOUT, now))
+ continue;
+
+ inactive_tid_bitmap |= BIT(tid);
+ }
+
+ /* If all TIDs are active - finish check on this queue */
+ if (!inactive_tid_bitmap)
+ continue;
+
+ /*
+ * If we are here - the queue hadn't been served recently and is
+ * in use
+ */
+
+ sta_id = mvm->queue_info[i].ra_sta_id;
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+ /*
+ * If the STA doesn't exist anymore, it isn't an error. It could
+ * be that it was removed since getting the queues, and in this
+ * case it should've inactivated its queues anyway.
+ */
+ if (IS_ERR_OR_NULL(sta))
+ continue;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ /* this isn't so nice, but works OK due to the way we loop */
+ spin_unlock(&mvm->queue_info_lock);
+
+ /* and we need this locking order */
+ spin_lock(&mvmsta->lock);
+ spin_lock(&mvm->queue_info_lock);
+ ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
+ inactive_tid_bitmap,
+ &unshare_queues,
+ &changetid_queues);
+ if (ret >= 0 && free_queue < 0)
+ free_queue = ret;
+ /* only unlock sta lock - we still need the queue info lock */
+ spin_unlock(&mvmsta->lock);
+ }
+
+ rcu_read_unlock();
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ /* Reconfigure queues requiring reconfiguration */
+ for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
+ iwl_mvm_unshare_queue(mvm, i);
+ for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
+ iwl_mvm_change_queue_tid(mvm, i);
+
+ if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
+ ret = iwl_mvm_free_inactive_queue(mvm, free_queue,
+ alloc_for_sta);
+ if (ret)
+ return ret;
+ }
+
+ return free_queue;
+}
+
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
struct ieee80211_sta *sta, u8 ac, int tid,
struct ieee80211_hdr *hdr)
@@ -719,7 +1280,6 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
u8 mac_queue = mvmsta->vif->hw_queue[ac];
int queue = -1;
- bool using_inactive_queue = false, same_sta = false;
unsigned long disable_agg_tids = 0;
enum iwl_mvm_agg_state queue_state;
bool shared_queue = false, inc_ssn;
@@ -756,9 +1316,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
(mvm->queue_info[mvmsta->reserved_queue].status ==
- IWL_MVM_QUEUE_RESERVED ||
- mvm->queue_info[mvmsta->reserved_queue].status ==
- IWL_MVM_QUEUE_INACTIVE)) {
+ IWL_MVM_QUEUE_RESERVED)) {
queue = mvmsta->reserved_queue;
mvm->queue_info[queue].reserved = true;
IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
@@ -768,21 +1326,13 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
IWL_MVM_DQA_MIN_DATA_QUEUE,
IWL_MVM_DQA_MAX_DATA_QUEUE);
+ if (queue < 0) {
+ spin_unlock_bh(&mvm->queue_info_lock);
- /*
- * Check if this queue is already allocated but inactive.
- * In such a case, we'll need to first free this queue before enabling
- * it again, so we'll mark it as reserved to make sure no new traffic
- * arrives on it
- */
- if (queue > 0 &&
- mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
- mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
- using_inactive_queue = true;
- same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
- IWL_DEBUG_TX_QUEUES(mvm,
- "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
- queue, mvmsta->sta_id, tid);
+ /* try harder - perhaps kill an inactive queue */
+ queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
+
+ spin_lock_bh(&mvm->queue_info_lock);
}
/* No free queue - we'll have to share */
@@ -800,7 +1350,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
* This will allow avoiding re-acquiring the lock at the end of the
* configuration. On error we'll mark it back as free.
*/
- if ((queue > 0) && !shared_queue)
+ if (queue > 0 && !shared_queue)
mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
spin_unlock_bh(&mvm->queue_info_lock);
@@ -821,16 +1371,6 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
- /*
- * If this queue was previously inactive (idle) - we need to free it
- * first
- */
- if (using_inactive_queue) {
- ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
- if (ret)
- return ret;
- }
-
IWL_DEBUG_TX_QUEUES(mvm,
"Allocating %squeue #%d to sta %d on tid %d\n",
shared_queue ? "shared " : "", queue,
@@ -874,7 +1414,6 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
if (inc_ssn)
mvmsta->tid_data[tid].seq_number += 0x10;
mvmsta->tid_data[tid].txq_id = queue;
- mvmsta->tid_data[tid].is_tid_active = true;
mvmsta->tfd_queue_msk |= BIT(queue);
queue_state = mvmsta->tid_data[tid].state;
@@ -909,129 +1448,6 @@ out_err:
return ret;
}
-static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
-{
- struct iwl_scd_txq_cfg_cmd cmd = {
- .scd_queue = queue,
- .action = SCD_CFG_UPDATE_QUEUE_TID,
- };
- int tid;
- unsigned long tid_bitmap;
- int ret;
-
- lockdep_assert_held(&mvm->mutex);
-
- if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
- return;
-
- spin_lock_bh(&mvm->queue_info_lock);
- tid_bitmap = mvm->queue_info[queue].tid_bitmap;
- spin_unlock_bh(&mvm->queue_info_lock);
-
- if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
- return;
-
- /* Find any TID for queue */
- tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
- cmd.tid = tid;
- cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
-
- ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
- if (ret) {
- IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
- queue, ret);
- return;
- }
-
- spin_lock_bh(&mvm->queue_info_lock);
- mvm->queue_info[queue].txq_tid = tid;
- spin_unlock_bh(&mvm->queue_info_lock);
- IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
- queue, tid);
-}
-
-static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
-{
- struct ieee80211_sta *sta;
- struct iwl_mvm_sta *mvmsta;
- u8 sta_id;
- int tid = -1;
- unsigned long tid_bitmap;
- unsigned int wdg_timeout;
- int ssn;
- int ret = true;
-
- /* queue sharing is disabled on new TX path */
- if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
- return;
-
- lockdep_assert_held(&mvm->mutex);
-
- spin_lock_bh(&mvm->queue_info_lock);
- sta_id = mvm->queue_info[queue].ra_sta_id;
- tid_bitmap = mvm->queue_info[queue].tid_bitmap;
- spin_unlock_bh(&mvm->queue_info_lock);
-
- /* Find TID for queue, and make sure it is the only one on the queue */
- tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
- if (tid_bitmap != BIT(tid)) {
- IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
- queue, tid_bitmap);
- return;
- }
-
- IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
- tid);
-
- sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
- lockdep_is_held(&mvm->mutex));
-
- if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
- return;
-
- mvmsta = iwl_mvm_sta_from_mac80211(sta);
- wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
-
- ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
-
- ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
- tid_to_mac80211_ac[tid], ssn,
- wdg_timeout, true);
- if (ret) {
- IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
- return;
- }
-
- /* If aggs should be turned back on - do it */
- if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
- struct iwl_mvm_add_sta_cmd cmd = {0};
-
- mvmsta->tid_disable_agg &= ~BIT(tid);
-
- cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
- cmd.sta_id = mvmsta->sta_id;
- cmd.add_modify = STA_MODE_MODIFY;
- cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
- cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
- cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
-
- ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
- iwl_mvm_add_sta_cmd_size(mvm), &cmd);
- if (!ret) {
- IWL_DEBUG_TX_QUEUES(mvm,
- "TXQ #%d is now aggregated again\n",
- queue);
-
- /* Mark queue intenally as aggregating again */
- iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
- }
- }
-
- spin_lock_bh(&mvm->queue_info_lock);
- mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
- spin_unlock_bh(&mvm->queue_info_lock);
-}
-
static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
if (tid == IWL_MAX_TID_COUNT)
@@ -1100,47 +1516,12 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
unsigned long deferred_tid_traffic;
- int queue, sta_id, tid;
-
- /* Check inactivity of queues */
- iwl_mvm_inactivity_check(mvm);
+ int sta_id, tid;
mutex_lock(&mvm->mutex);
- /* No queue reconfiguration in TVQM mode */
- if (iwl_mvm_has_new_tx_api(mvm))
- goto alloc_queues;
-
- /* Reconfigure queues requiring reconfiguation */
- for (queue = 0; queue < ARRAY_SIZE(mvm->queue_info); queue++) {
- bool reconfig;
- bool change_owner;
-
- spin_lock_bh(&mvm->queue_info_lock);
- reconfig = (mvm->queue_info[queue].status ==
- IWL_MVM_QUEUE_RECONFIGURING);
+ iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
- /*
- * We need to take into account a situation in which a TXQ was
- * allocated to TID x, and then turned shared by adding TIDs y
- * and z. If TID x becomes inactive and is removed from the TXQ,
- * ownership must be given to one of the remaining TIDs.
- * This is mainly because if TID x continues - a new queue can't
- * be allocated for it as long as it is an owner of another TXQ.
- */
- change_owner = !(mvm->queue_info[queue].tid_bitmap &
- BIT(mvm->queue_info[queue].txq_tid)) &&
- (mvm->queue_info[queue].status ==
- IWL_MVM_QUEUE_SHARED);
- spin_unlock_bh(&mvm->queue_info_lock);
-
- if (reconfig)
- iwl_mvm_unshare_queue(mvm, queue);
- else if (change_owner)
- iwl_mvm_change_queue_owner(mvm, queue);
- }
-
-alloc_queues:
/* Go over all stations with deferred traffic */
for_each_set_bit(sta_id, mvm->sta_deferred_frames,
IWL_MVM_STATION_COUNT) {
@@ -1167,23 +1548,19 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
int queue;
- bool using_inactive_queue = false, same_sta = false;
/* queue reserving is disabled on new TX path */
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return 0;
- /*
- * Check for inactive queues, so we don't reach a situation where we
- * can't add a STA due to a shortage in queues that doesn't really exist
- */
- iwl_mvm_inactivity_check(mvm);
+ /* run the general cleanup/unsharing of queues */
+ iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
spin_lock_bh(&mvm->queue_info_lock);
/* Make sure we have free resources for this STA */
if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
- !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
+ !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
(mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
IWL_MVM_QUEUE_FREE))
queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
@@ -1193,16 +1570,13 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
IWL_MVM_DQA_MAX_DATA_QUEUE);
if (queue < 0) {
spin_unlock_bh(&mvm->queue_info_lock);
- IWL_ERR(mvm, "No available queues for new station\n");
- return -ENOSPC;
- } else if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
- /*
- * If this queue is already allocated but inactive we'll need to
- * first free this queue before enabling it again, we'll mark
- * it as reserved to make sure no new traffic arrives on it
- */
- using_inactive_queue = true;
- same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
+ /* try again - this time kick out a queue if needed */
+ queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
+ if (queue < 0) {
+ IWL_ERR(mvm, "No available queues for new station\n");
+ return -ENOSPC;
+ }
+ spin_lock_bh(&mvm->queue_info_lock);
}
mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
@@ -1210,9 +1584,6 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
mvmsta->reserved_queue = queue;
- if (using_inactive_queue)
- iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
-
IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
queue, mvmsta->sta_id);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 0fc211108149..de1a0a2d8723 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -312,9 +312,6 @@ enum iwl_mvm_agg_state {
* Basically when next_reclaimed reaches ssn, we can tell mac80211 that
* we are ready to finish the Tx AGG stop / start flow.
* @tx_time: medium time consumed by this A-MPDU
- * @is_tid_active: has this TID sent traffic in the last
- * %IWL_MVM_DQA_QUEUE_TIMEOUT time period. If %txq_id is invalid, this
- * field should be ignored.
* @tpt_meas_start: time of the throughput measurements start, is reset every HZ
* @tx_count_last: number of frames transmitted during the last second
* @tx_count: counts the number of frames transmitted since the last reset of
@@ -332,7 +329,6 @@ struct iwl_mvm_tid_data {
u16 txq_id;
u16 ssn;
u16 tx_time;
- bool is_tid_active;
unsigned long tpt_meas_start;
u32 tx_count_last;
u32 tx_count;
@@ -572,8 +568,4 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
-int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
- int ac, int ssn, unsigned int wdg_timeout,
- bool force);
-
#endif /* __sta_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index cd91bc44259c..e1a6f4e22253 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -254,17 +254,14 @@ static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm,
struct iwl_fw_dbg_trigger_time_event *te_trig;
int i;
- if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT))
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt,
+ ieee80211_vif_to_wdev(te_data->vif),
+ FW_DBG_TRIGGER_TIME_EVENT);
+ if (!trig)
return;
- trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT);
te_trig = (void *)trig->data;
- if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
- ieee80211_vif_to_wdev(te_data->vif),
- trig))
- return;
-
for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) {
u32 trig_te_id = le32_to_cpu(te_trig->time_events[i].id);
u32 trig_action_bitmap =
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index a6877b3f8037..ec57682efe54 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -79,15 +79,12 @@ iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
struct iwl_fw_dbg_trigger_tlv *trig;
struct iwl_fw_dbg_trigger_ba *ba_trig;
- if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_BA);
+ if (!trig)
return;
- trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
ba_trig = (void *)trig->data;
- if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
- return;
-
if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid)))
return;
@@ -1143,32 +1140,16 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
/* Check if TXQ needs to be allocated or re-activated */
- if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE ||
- !mvmsta->tid_data[tid].is_tid_active)) {
- /* If TXQ needs to be allocated... */
- if (txq_id == IWL_MVM_INVALID_QUEUE) {
- iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
-
- /*
- * The frame is now deferred, and the worker scheduled
- * will re-allocate it, so we can free it for now.
- */
- iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
- spin_unlock(&mvmsta->lock);
- return 0;
- }
-
- /* queue should always be active in new TX path */
- WARN_ON(iwl_mvm_has_new_tx_api(mvm));
+ if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE)) {
+ iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
- /* If we are here - TXQ exists and needs to be re-activated */
- spin_lock(&mvm->queue_info_lock);
- mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
- mvmsta->tid_data[tid].is_tid_active = true;
- spin_unlock(&mvm->queue_info_lock);
-
- IWL_DEBUG_TX_QUEUES(mvm, "Re-activating queue %d for TX\n",
- txq_id);
+ /*
+ * The frame is now deferred, and the worker scheduled
+ * will re-allocate it, so we can free it for now.
+ */
+ iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
+ spin_unlock(&mvmsta->lock);
+ return 0;
}
if (!iwl_mvm_has_new_tx_api(mvm)) {
@@ -1414,15 +1395,13 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
struct iwl_fw_dbg_trigger_tx_status *status_trig;
int i;
- if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TX_STATUS))
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL,
+ FW_DBG_TRIGGER_TX_STATUS);
+ if (!trig)
return;
- trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TX_STATUS);
status_trig = (void *)trig->data;
- if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
- return;
-
for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) {
/* don't collect on status 0 */
if (!status_trig->statuses[i].status)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index dcacc4d11abc..818e1180bbdd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -599,36 +599,6 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
iwl_mvm_dump_umac_error_log(mvm);
}
-int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq)
-{
- int i;
-
- lockdep_assert_held(&mvm->queue_info_lock);
-
- /* This should not be hit with new TX path */
- if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
- return -ENOSPC;
-
- /* Start by looking for a free queue */
- for (i = minq; i <= maxq; i++)
- if (mvm->queue_info[i].hw_queue_refcount == 0 &&
- mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
- return i;
-
- /*
- * If no free queue found - settle for an inactive one to reconfigure
- * Make sure that the inactive queue either already belongs to this STA,
- * or that if it belongs to another one - it isn't the reserved queue
- */
- for (i = minq; i <= maxq; i++)
- if (mvm->queue_info[i].status == IWL_MVM_QUEUE_INACTIVE &&
- (sta_id == mvm->queue_info[i].ra_sta_id ||
- !mvm->queue_info[i].reserved))
- return i;
-
- return -ENOSPC;
-}
-
int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
int tid, int frame_limit, u16 ssn)
{
@@ -649,7 +619,7 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
return -EINVAL;
spin_lock_bh(&mvm->queue_info_lock);
- if (WARN(mvm->queue_info[queue].hw_queue_refcount == 0,
+ if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
"Trying to reconfig unallocated queue %d\n", queue)) {
spin_unlock_bh(&mvm->queue_info_lock);
return -ENXIO;
@@ -665,229 +635,6 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
return ret;
}
-static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
- int mac80211_queue, u8 sta_id, u8 tid)
-{
- bool enable_queue = true;
-
- spin_lock_bh(&mvm->queue_info_lock);
-
- /* Make sure this TID isn't already enabled */
- if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
- spin_unlock_bh(&mvm->queue_info_lock);
- IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
- queue, tid);
- return false;
- }
-
- /* Update mappings and refcounts */
- if (mvm->queue_info[queue].hw_queue_refcount > 0)
- enable_queue = false;
-
- if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) {
- WARN(mac80211_queue >=
- BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]),
- "cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n",
- mac80211_queue, queue, sta_id, tid);
- mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
- }
-
- mvm->queue_info[queue].hw_queue_refcount++;
- mvm->queue_info[queue].tid_bitmap |= BIT(tid);
- mvm->queue_info[queue].ra_sta_id = sta_id;
-
- if (enable_queue) {
- if (tid != IWL_MAX_TID_COUNT)
- mvm->queue_info[queue].mac80211_ac =
- tid_to_mac80211_ac[tid];
- else
- mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
-
- mvm->queue_info[queue].txq_tid = tid;
- }
-
- IWL_DEBUG_TX_QUEUES(mvm,
- "Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
- queue, mvm->queue_info[queue].hw_queue_refcount,
- mvm->hw_queue_to_mac80211[queue]);
-
- spin_unlock_bh(&mvm->queue_info_lock);
-
- return enable_queue;
-}
-
-int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
- u8 sta_id, u8 tid, unsigned int timeout)
-{
- int queue, size = IWL_DEFAULT_QUEUE_SIZE;
-
- if (tid == IWL_MAX_TID_COUNT) {
- tid = IWL_MGMT_TID;
- size = IWL_MGMT_QUEUE_SIZE;
- }
- queue = iwl_trans_txq_alloc(mvm->trans,
- cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
- sta_id, tid, SCD_QUEUE_CFG, size, timeout);
-
- if (queue < 0) {
- IWL_DEBUG_TX_QUEUES(mvm,
- "Failed allocating TXQ for sta %d tid %d, ret: %d\n",
- sta_id, tid, queue);
- return queue;
- }
-
- IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
- queue, sta_id, tid);
-
- mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
- IWL_DEBUG_TX_QUEUES(mvm,
- "Enabling TXQ #%d (mac80211 map:0x%x)\n",
- queue, mvm->hw_queue_to_mac80211[queue]);
-
- return queue;
-}
-
-bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
- u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
- unsigned int wdg_timeout)
-{
- struct iwl_scd_txq_cfg_cmd cmd = {
- .scd_queue = queue,
- .action = SCD_CFG_ENABLE_QUEUE,
- .window = cfg->frame_limit,
- .sta_id = cfg->sta_id,
- .ssn = cpu_to_le16(ssn),
- .tx_fifo = cfg->fifo,
- .aggregate = cfg->aggregate,
- .tid = cfg->tid,
- };
- bool inc_ssn;
-
- if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
- return false;
-
- /* Send the enabling command if we need to */
- if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
- cfg->sta_id, cfg->tid))
- return false;
-
- inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
- NULL, wdg_timeout);
- if (inc_ssn)
- le16_add_cpu(&cmd.ssn, 1);
-
- WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
- "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
-
- return inc_ssn;
-}
-
-int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
- u8 tid, u8 flags)
-{
- struct iwl_scd_txq_cfg_cmd cmd = {
- .scd_queue = queue,
- .action = SCD_CFG_DISABLE_QUEUE,
- };
- bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE;
- int ret;
-
- if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES))
- return -EINVAL;
-
- if (iwl_mvm_has_new_tx_api(mvm)) {
- spin_lock_bh(&mvm->queue_info_lock);
-
- if (remove_mac_queue)
- mvm->hw_queue_to_mac80211[queue] &=
- ~BIT(mac80211_queue);
-
- spin_unlock_bh(&mvm->queue_info_lock);
-
- iwl_trans_txq_free(mvm->trans, queue);
-
- return 0;
- }
-
- spin_lock_bh(&mvm->queue_info_lock);
-
- if (WARN_ON(mvm->queue_info[queue].hw_queue_refcount == 0)) {
- spin_unlock_bh(&mvm->queue_info_lock);
- return 0;
- }
-
- mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
-
- /*
- * If there is another TID with the same AC - don't remove the MAC queue
- * from the mapping
- */
- if (tid < IWL_MAX_TID_COUNT) {
- unsigned long tid_bitmap =
- mvm->queue_info[queue].tid_bitmap;
- int ac = tid_to_mac80211_ac[tid];
- int i;
-
- for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) {
- if (tid_to_mac80211_ac[i] == ac)
- remove_mac_queue = false;
- }
- }
-
- if (remove_mac_queue)
- mvm->hw_queue_to_mac80211[queue] &=
- ~BIT(mac80211_queue);
- mvm->queue_info[queue].hw_queue_refcount--;
-
- cmd.action = mvm->queue_info[queue].hw_queue_refcount ?
- SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
- if (cmd.action == SCD_CFG_DISABLE_QUEUE)
- mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
-
- IWL_DEBUG_TX_QUEUES(mvm,
- "Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
- queue,
- mvm->queue_info[queue].hw_queue_refcount,
- mvm->hw_queue_to_mac80211[queue]);
-
- /* If the queue is still enabled - nothing left to do in this func */
- if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
- spin_unlock_bh(&mvm->queue_info_lock);
- return 0;
- }
-
- cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
- cmd.tid = mvm->queue_info[queue].txq_tid;
-
- /* Make sure queue info is correct even though we overwrite it */
- WARN(mvm->queue_info[queue].hw_queue_refcount ||
- mvm->queue_info[queue].tid_bitmap ||
- mvm->hw_queue_to_mac80211[queue],
- "TXQ #%d info out-of-sync - refcount=%d, mac map=0x%x, tid=0x%x\n",
- queue, mvm->queue_info[queue].hw_queue_refcount,
- mvm->hw_queue_to_mac80211[queue],
- mvm->queue_info[queue].tid_bitmap);
-
- /* If we are here - the queue is freed and we can zero out these vals */
- mvm->queue_info[queue].hw_queue_refcount = 0;
- mvm->queue_info[queue].tid_bitmap = 0;
- mvm->hw_queue_to_mac80211[queue] = 0;
-
- /* Regardless if this is a reserved TXQ for a STA - mark it as false */
- mvm->queue_info[queue].reserved = false;
-
- spin_unlock_bh(&mvm->queue_info_lock);
-
- iwl_trans_txq_disable(mvm->trans, queue, false);
- ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
- sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
-
- if (ret)
- IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
- queue, ret);
- return ret;
-}
-
/**
* iwl_mvm_send_lq_cmd() - Send link quality command
* @sync: This command can be sent synchronously.
@@ -1238,14 +985,12 @@ void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_fw_dbg_trigger_tlv *trig;
struct iwl_fw_dbg_trigger_mlme *trig_mlme;
- if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+ FW_DBG_TRIGGER_MLME);
+ if (!trig)
goto out;
- trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
trig_mlme = (void *)trig->data;
- if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
- ieee80211_vif_to_wdev(vif), trig))
- goto out;
if (trig_mlme->stop_connection_loss &&
--trig_mlme->stop_connection_loss)
@@ -1257,171 +1002,6 @@ out:
ieee80211_connection_loss(vif);
}
-/*
- * Remove inactive TIDs of a given queue.
- * If all queue TIDs are inactive - mark the queue as inactive
- * If only some the queue TIDs are inactive - unmap them from the queue
- */
-static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
- struct iwl_mvm_sta *mvmsta, int queue,
- unsigned long tid_bitmap)
-{
- int tid;
-
- lockdep_assert_held(&mvmsta->lock);
- lockdep_assert_held(&mvm->queue_info_lock);
-
- if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
- return;
-
- /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
- for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
- /* If some TFDs are still queued - don't mark TID as inactive */
- if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
- tid_bitmap &= ~BIT(tid);
-
- /* Don't mark as inactive any TID that has an active BA */
- if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
- tid_bitmap &= ~BIT(tid);
- }
-
- /* If all TIDs in the queue are inactive - mark queue as inactive. */
- if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
- mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
-
- for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1)
- mvmsta->tid_data[tid].is_tid_active = false;
-
- IWL_DEBUG_TX_QUEUES(mvm, "Queue %d marked as inactive\n",
- queue);
- return;
- }
-
- /*
- * If we are here, this is a shared queue and not all TIDs timed-out.
- * Remove the ones that did.
- */
- for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
- int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]];
-
- mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
- mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue);
- mvm->queue_info[queue].hw_queue_refcount--;
- mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
- mvmsta->tid_data[tid].is_tid_active = false;
-
- IWL_DEBUG_TX_QUEUES(mvm,
- "Removing inactive TID %d from shared Q:%d\n",
- tid, queue);
- }
-
- IWL_DEBUG_TX_QUEUES(mvm,
- "TXQ #%d left with tid bitmap 0x%x\n", queue,
- mvm->queue_info[queue].tid_bitmap);
-
- /*
- * There may be different TIDs with the same mac queues, so make
- * sure all TIDs have existing corresponding mac queues enabled
- */
- tid_bitmap = mvm->queue_info[queue].tid_bitmap;
- for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
- mvm->hw_queue_to_mac80211[queue] |=
- BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
- }
-
- /* If the queue is marked as shared - "unshare" it */
- if (mvm->queue_info[queue].hw_queue_refcount == 1 &&
- mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
- mvm->queue_info[queue].status = IWL_MVM_QUEUE_RECONFIGURING;
- IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
- queue);
- }
-}
-
-void iwl_mvm_inactivity_check(struct iwl_mvm *mvm)
-{
- unsigned long timeout_queues_map = 0;
- unsigned long now = jiffies;
- int i;
-
- if (iwl_mvm_has_new_tx_api(mvm))
- return;
-
- spin_lock_bh(&mvm->queue_info_lock);
- for (i = 0; i < IWL_MAX_HW_QUEUES; i++)
- if (mvm->queue_info[i].hw_queue_refcount > 0)
- timeout_queues_map |= BIT(i);
- spin_unlock_bh(&mvm->queue_info_lock);
-
- rcu_read_lock();
-
- /*
- * If a queue time outs - mark it as INACTIVE (don't remove right away
- * if we don't have to.) This is an optimization in case traffic comes
- * later, and we don't HAVE to use a currently-inactive queue
- */
- for_each_set_bit(i, &timeout_queues_map, IWL_MAX_HW_QUEUES) {
- struct ieee80211_sta *sta;
- struct iwl_mvm_sta *mvmsta;
- u8 sta_id;
- int tid;
- unsigned long inactive_tid_bitmap = 0;
- unsigned long queue_tid_bitmap;
-
- spin_lock_bh(&mvm->queue_info_lock);
- queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
-
- /* If TXQ isn't in active use anyway - nothing to do here... */
- if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
- mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED) {
- spin_unlock_bh(&mvm->queue_info_lock);
- continue;
- }
-
- /* Check to see if there are inactive TIDs on this queue */
- for_each_set_bit(tid, &queue_tid_bitmap,
- IWL_MAX_TID_COUNT + 1) {
- if (time_after(mvm->queue_info[i].last_frame_time[tid] +
- IWL_MVM_DQA_QUEUE_TIMEOUT, now))
- continue;
-
- inactive_tid_bitmap |= BIT(tid);
- }
- spin_unlock_bh(&mvm->queue_info_lock);
-
- /* If all TIDs are active - finish check on this queue */
- if (!inactive_tid_bitmap)
- continue;
-
- /*
- * If we are here - the queue hadn't been served recently and is
- * in use
- */
-
- sta_id = mvm->queue_info[i].ra_sta_id;
- sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
-
- /*
- * If the STA doesn't exist anymore, it isn't an error. It could
- * be that it was removed since getting the queues, and in this
- * case it should've inactivated its queues anyway.
- */
- if (IS_ERR_OR_NULL(sta))
- continue;
-
- mvmsta = iwl_mvm_sta_from_mac80211(sta);
-
- spin_lock_bh(&mvmsta->lock);
- spin_lock(&mvm->queue_info_lock);
- iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
- inactive_tid_bitmap);
- spin_unlock(&mvm->queue_info_lock);
- spin_unlock_bh(&mvmsta->lock);
- }
-
- rcu_read_unlock();
-}
-
void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
const struct ieee80211_sta *sta,
@@ -1430,14 +1010,12 @@ void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
struct iwl_fw_dbg_trigger_tlv *trig;
struct iwl_fw_dbg_trigger_ba *ba_trig;
- if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+ FW_DBG_TRIGGER_BA);
+ if (!trig)
return;
- trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
ba_trig = (void *)trig->data;
- if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
- ieee80211_vif_to_wdev(vif), trig))
- return;
if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(tid)))
return;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index d519e7ebdbe8..e965cc588850 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1144,6 +1144,14 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
kfree(trans_pcie->rxq);
}
+static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
+ struct iwl_rb_allocator *rba)
+{
+ spin_lock(&rba->lock);
+ list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
+ spin_unlock(&rba->lock);
+}
+
/*
* iwl_pcie_rx_reuse_rbd - Recycle used RBDs
*
@@ -1175,9 +1183,7 @@ static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
/* Move the 2 RBDs to the allocator ownership.
Allocator has another 6 from pool for the request completion*/
- spin_lock(&rba->lock);
- list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
- spin_unlock(&rba->lock);
+ iwl_pcie_rx_move_to_allocator(rxq, rba);
atomic_inc(&rba->req_pending);
queue_work(rba->alloc_wq, &rba->rx_alloc);
@@ -1400,10 +1406,18 @@ restart:
IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
while (i != r) {
+ struct iwl_rb_allocator *rba = &trans_pcie->rba;
struct iwl_rx_mem_buffer *rxb;
-
- if (unlikely(rxq->used_count == rxq->queue_size / 2))
+ /* number of RBDs still waiting for page allocation */
+ u32 rb_pending_alloc =
+ atomic_read(&trans_pcie->rba.req_pending) *
+ RX_CLAIM_REQ_ALLOC;
+
+ if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 &&
+ !emergency)) {
+ iwl_pcie_rx_move_to_allocator(rxq, rba);
emergency = true;
+ }
rxb = iwl_pcie_get_rxb(trans, rxq, i);
if (!rxb)
@@ -1425,17 +1439,13 @@ restart:
iwl_pcie_rx_allocator_get(trans, rxq);
if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
- struct iwl_rb_allocator *rba = &trans_pcie->rba;
-
/* Add the remaining empty RBDs for allocator use */
- spin_lock(&rba->lock);
- list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
- spin_unlock(&rba->lock);
+ iwl_pcie_rx_move_to_allocator(rxq, rba);
} else if (emergency) {
count++;
if (count == 8) {
count = 0;
- if (rxq->used_count < rxq->queue_size / 3)
+ if (rb_pending_alloc < rxq->queue_size / 3)
emergency = false;
rxq->read = i;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index bc6682a11fa4..5bafb3f46eb8 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -931,7 +931,7 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans)
else
IWL_WARN(trans, "PCI should have external buffer debug\n");
- for (i = 0; i < trans->dbg_dest_reg_num; i++) {
+ for (i = 0; i < trans->dbg_n_dest_reg; i++) {
u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
u32 val = le32_to_cpu(dest->reg_ops[i].val);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index ba9d37bed4c2..e880f69eac26 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -330,7 +330,7 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
goto out_err;
}
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
- trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr, tb_len);
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, tb_len);
/* add this subframe's headers' length to the tx_cmd */
le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
@@ -347,8 +347,8 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
goto out_err;
}
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
- trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data,
- tb_len);
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
+ tb_len);
data_left -= tb_len;
tso_build_data(skb, &tso, tb_len);
@@ -438,6 +438,11 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
return -ENOMEM;
tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
skb_frag_size(frag));
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb,
+ skb_frag_address(frag),
+ skb_frag_size(frag));
+ if (tb_idx < 0)
+ return tb_idx;
out_meta->tbs |= BIT(tb_idx);
}
@@ -452,7 +457,8 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
struct sk_buff *skb,
struct iwl_cmd_meta *out_meta,
int hdr_len,
- int tx_cmd_len)
+ int tx_cmd_len,
+ bool pad)
{
int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
@@ -476,7 +482,10 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
IWL_FIRST_TB_SIZE;
- tb1_len = ALIGN(len, 4);
+ if (pad)
+ tb1_len = ALIGN(len, 4);
+ else
+ tb1_len = len;
/* map the data for TB1 */
tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
@@ -484,6 +493,8 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
goto out_err;
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
+ trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
+ IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
/* set up TFD's third entry to point to remainder of skb's head */
tb2_len = skb_headlen(skb) - hdr_len;
@@ -494,15 +505,14 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
goto out_err;
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb,
+ skb->data + hdr_len,
+ tb2_len);
}
if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta))
goto out_err;
- trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
- IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
- trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len);
-
return tfd;
out_err:
@@ -549,7 +559,7 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
out_meta, hdr_len, len);
return iwl_pcie_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
- hdr_len, len);
+ hdr_len, len, !amsdu);
}
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 67820bfaba64..87b7225fe289 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -1994,6 +1994,9 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
head_tb_len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
return -EINVAL;
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb,
+ skb->data + hdr_len,
+ head_tb_len);
iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false);
}
@@ -2011,8 +2014,13 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
return -EINVAL;
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb,
+ skb_frag_address(frag),
+ skb_frag_size(frag));
tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
skb_frag_size(frag), false);
+ if (tb_idx < 0)
+ return tb_idx;
out_meta->tbs |= BIT(tb_idx);
}
@@ -2188,8 +2196,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
}
iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
hdr_tb_len, false);
- trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr,
- hdr_tb_len);
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
+ hdr_tb_len);
/* add this subframe's headers' length to the tx_cmd */
le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
@@ -2214,8 +2222,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
size, false);
- trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data,
- size);
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
+ size);
data_left -= size;
tso_build_data(skb, &tso, size);
@@ -2396,6 +2404,13 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
goto out_err;
iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
+ trace_iwlwifi_dev_tx(trans->dev, skb,
+ iwl_pcie_get_tfd(trans, txq,
+ txq->write_ptr),
+ trans_pcie->tfd_size,
+ &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
+ hdr_len);
+
/*
* If gso_size wasn't set, don't give the frame "amsdu treatment"
* (adding subframes, etc.).
@@ -2419,14 +2434,6 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
out_meta)))
goto out_err;
}
-
- trace_iwlwifi_dev_tx(trans->dev, skb,
- iwl_pcie_get_tfd(trans, txq,
- txq->write_ptr),
- trans_pcie->tfd_size,
- &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
- hdr_len);
- trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len);
}
/* building the A-MSDU might have changed this data, so memcpy it now */
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
index 21bb68457cfe..40a8b941ad5c 100644
--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
@@ -908,6 +908,7 @@ static int ezusb_access_ltv(struct ezusb_priv *upriv,
case EZUSB_CTX_REQ_SUBMITTED:
if (!ctx->in_rid)
break;
+ /* fall through */
default:
err("%s: Unexpected context state %d", __func__,
state);
diff --git a/drivers/net/wireless/intersil/prism54/isl_38xx.c b/drivers/net/wireless/intersil/prism54/isl_38xx.c
index ce9d4db0d9ca..b0eb58a62c90 100644
--- a/drivers/net/wireless/intersil/prism54/isl_38xx.c
+++ b/drivers/net/wireless/intersil/prism54/isl_38xx.c
@@ -235,6 +235,7 @@ isl38xx_in_queue(isl38xx_control_block *cb, int queue)
/* send queues */
case ISL38XX_CB_TX_MGMTQ:
BUG_ON(delta > ISL38XX_CB_MGMT_QSIZE);
+ /* fall through */
case ISL38XX_CB_TX_DATA_LQ:
case ISL38XX_CB_TX_DATA_HQ:
diff --git a/drivers/net/wireless/intersil/prism54/isl_ioctl.c b/drivers/net/wireless/intersil/prism54/isl_ioctl.c
index 334717b0a2be..3ccf2a4b548c 100644
--- a/drivers/net/wireless/intersil/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/intersil/prism54/isl_ioctl.c
@@ -1691,6 +1691,7 @@ static int prism54_get_encodeext(struct net_device *ndev,
case DOT11_AUTH_BOTH:
case DOT11_AUTH_SK:
wrqu->encoding.flags |= IW_ENCODE_RESTRICTED;
+ /* fall through */
case DOT11_AUTH_OS:
default:
wrqu->encoding.flags |= IW_ENCODE_OPEN;
diff --git a/drivers/net/wireless/intersil/prism54/islpci_dev.c b/drivers/net/wireless/intersil/prism54/islpci_dev.c
index 325176d4d796..ad6d3a56ae06 100644
--- a/drivers/net/wireless/intersil/prism54/islpci_dev.c
+++ b/drivers/net/wireless/intersil/prism54/islpci_dev.c
@@ -932,6 +932,7 @@ islpci_set_state(islpci_private *priv, islpci_state_t new_state)
switch (new_state) {
case PRV_STATE_OFF:
priv->state_off++;
+ /* fall through */
default:
priv->state = new_state;
break;
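
The four hunks above (orinoco_usb, isl_38xx, isl_ioctl, islpci_dev) all add explicit /* fall through */ annotations so that a case which deliberately continues into the next label is not flagged by GCC's -Wimplicit-fallthrough. A minimal sketch of the convention, using hypothetical values rather than anything from the drivers above (later kernels express the same intent with the fallthrough; pseudo-keyword):

	/* Deliberate fall-through, annotated so -Wimplicit-fallthrough stays quiet. */
	static int classify(int state)
	{
		int ret = 0;

		switch (state) {
		case 1:
			ret |= 0x1;		/* state-1 specific work */
			/* fall through */
		default:
			ret |= 0x2;		/* handling shared with the default case */
			break;
		}
		return ret;
	}
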
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index f3863101af78..aa8058264d5b 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -495,7 +495,6 @@ static const struct ieee80211_iface_combination hwsim_if_comb_p2p_dev[] = {
static spinlock_t hwsim_radio_lock;
static LIST_HEAD(hwsim_radios);
-static struct workqueue_struct *hwsim_wq;
static struct rhashtable hwsim_radios_rht;
static int hwsim_radio_idx;
static int hwsim_radios_generation = 1;
@@ -521,7 +520,6 @@ struct mac80211_hwsim_data {
int channels, idx;
bool use_chanctx;
bool destroy_on_close;
- struct work_struct destroy_work;
u32 portid;
char alpha2[2];
const struct ieee80211_regdomain *regd;
@@ -2931,8 +2929,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
hwsim_radios_generation++;
spin_unlock_bh(&hwsim_radio_lock);
- if (idx > 0)
- hwsim_mcast_new_radio(idx, info, param);
+ hwsim_mcast_new_radio(idx, info, param);
return idx;
@@ -3561,30 +3558,27 @@ static struct genl_family hwsim_genl_family __ro_after_init = {
.n_mcgrps = ARRAY_SIZE(hwsim_mcgrps),
};
-static void destroy_radio(struct work_struct *work)
-{
- struct mac80211_hwsim_data *data =
- container_of(work, struct mac80211_hwsim_data, destroy_work);
-
- hwsim_radios_generation++;
- mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy), NULL);
-}
-
static void remove_user_radios(u32 portid)
{
struct mac80211_hwsim_data *entry, *tmp;
+ LIST_HEAD(list);
spin_lock_bh(&hwsim_radio_lock);
list_for_each_entry_safe(entry, tmp, &hwsim_radios, list) {
if (entry->destroy_on_close && entry->portid == portid) {
- list_del(&entry->list);
+ list_move(&entry->list, &list);
rhashtable_remove_fast(&hwsim_radios_rht, &entry->rht,
hwsim_rht_params);
- INIT_WORK(&entry->destroy_work, destroy_radio);
- queue_work(hwsim_wq, &entry->destroy_work);
+ hwsim_radios_generation++;
}
}
spin_unlock_bh(&hwsim_radio_lock);
+
+ list_for_each_entry_safe(entry, tmp, &list, list) {
+ list_del(&entry->list);
+ mac80211_hwsim_del_radio(entry, wiphy_name(entry->hw->wiphy),
+ NULL);
+ }
}
static int mac80211_hwsim_netlink_notify(struct notifier_block *nb,
@@ -3642,6 +3636,7 @@ static __net_init int hwsim_init_net(struct net *net)
static void __net_exit hwsim_exit_net(struct net *net)
{
struct mac80211_hwsim_data *data, *tmp;
+ LIST_HEAD(list);
spin_lock_bh(&hwsim_radio_lock);
list_for_each_entry_safe(data, tmp, &hwsim_radios, list) {
@@ -3652,17 +3647,19 @@ static void __net_exit hwsim_exit_net(struct net *net)
if (data->netgroup == hwsim_net_get_netgroup(&init_net))
continue;
- list_del(&data->list);
+ list_move(&data->list, &list);
rhashtable_remove_fast(&hwsim_radios_rht, &data->rht,
hwsim_rht_params);
hwsim_radios_generation++;
- spin_unlock_bh(&hwsim_radio_lock);
+ }
+ spin_unlock_bh(&hwsim_radio_lock);
+
+ list_for_each_entry_safe(data, tmp, &list, list) {
+ list_del(&data->list);
mac80211_hwsim_del_radio(data,
wiphy_name(data->hw->wiphy),
NULL);
- spin_lock_bh(&hwsim_radio_lock);
}
- spin_unlock_bh(&hwsim_radio_lock);
ida_simple_remove(&hwsim_netgroup_ida, hwsim_net_get_netgroup(net));
}
@@ -3694,13 +3691,9 @@ static int __init init_mac80211_hwsim(void)
spin_lock_init(&hwsim_radio_lock);
- hwsim_wq = alloc_workqueue("hwsim_wq", 0, 0);
- if (!hwsim_wq)
- return -ENOMEM;
-
err = rhashtable_init(&hwsim_radios_rht, &hwsim_rht_params);
if (err)
- goto out_free_wq;
+ return err;
err = register_pernet_device(&hwsim_net_ops);
if (err)
@@ -3831,8 +3824,6 @@ out_unregister_pernet:
unregister_pernet_device(&hwsim_net_ops);
out_free_rht:
rhashtable_destroy(&hwsim_radios_rht);
-out_free_wq:
- destroy_workqueue(hwsim_wq);
return err;
}
module_init(init_mac80211_hwsim);
@@ -3844,12 +3835,10 @@ static void __exit exit_mac80211_hwsim(void)
hwsim_exit_netlink();
mac80211_hwsim_free();
- flush_workqueue(hwsim_wq);
rhashtable_destroy(&hwsim_radios_rht);
unregister_netdev(hwsim_mon);
platform_driver_unregister(&mac80211_hwsim_driver);
unregister_pernet_device(&hwsim_net_ops);
- destroy_workqueue(hwsim_wq);
}
module_exit(exit_mac80211_hwsim);
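
The mac80211_hwsim hunks above remove the destroy workqueue entirely: radios selected for removal are first moved onto a local list while hwsim_radio_lock is held, and the actual teardown runs only after the lock is dropped, so nothing that may sleep happens under the spinlock. A rough sketch of that collect-under-lock, free-after-unlock pattern, with hypothetical types and a hypothetical destructor:

	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct radio {
		struct list_head list;
		bool doomed;
	};

	static LIST_HEAD(radios);
	static DEFINE_SPINLOCK(radios_lock);

	/* Hypothetical teardown that is allowed to sleep. */
	static void destroy_radio_sleeping(struct radio *r)
	{
		kfree(r);
	}

	static void remove_doomed_radios(void)
	{
		struct radio *r, *tmp;
		LIST_HEAD(local);		/* private list, no locking needed */

		spin_lock_bh(&radios_lock);
		list_for_each_entry_safe(r, tmp, &radios, list)
			if (r->doomed)
				list_move(&r->list, &local);	/* unlink under the lock */
		spin_unlock_bh(&radios_lock);

		/* Sleeping teardown is now safe: entries are no longer reachable. */
		list_for_each_entry_safe(r, tmp, &local, list) {
			list_del(&r->list);
			destroy_radio_sleeping(r);
		}
	}
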
diff --git a/drivers/net/wireless/marvell/libertas/if_cs.c b/drivers/net/wireless/marvell/libertas/if_cs.c
index 7d88223f890b..cebf03c6a622 100644
--- a/drivers/net/wireless/marvell/libertas/if_cs.c
+++ b/drivers/net/wireless/marvell/libertas/if_cs.c
@@ -900,8 +900,8 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
/* Make this card known to the libertas driver */
priv = lbs_add_card(card, &p_dev->dev);
- if (!priv) {
- ret = -ENOMEM;
+ if (IS_ERR(priv)) {
+ ret = PTR_ERR(priv);
goto out2;
}
diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c
index 43743c26c071..8d98e7fdd27c 100644
--- a/drivers/net/wireless/marvell/libertas/if_sdio.c
+++ b/drivers/net/wireless/marvell/libertas/if_sdio.c
@@ -1206,8 +1206,8 @@ static int if_sdio_probe(struct sdio_func *func,
priv = lbs_add_card(card, &func->dev);
- if (!priv) {
- ret = -ENOMEM;
+ if (IS_ERR(priv)) {
+ ret = PTR_ERR(priv);
goto free;
}
@@ -1317,6 +1317,10 @@ static int if_sdio_suspend(struct device *dev)
if (priv->wol_criteria == EHS_REMOVE_WAKEUP) {
dev_info(dev, "Suspend without wake params -- powering down card\n");
if (priv->fw_ready) {
+ ret = lbs_suspend(priv);
+ if (ret)
+ return ret;
+
priv->power_up_on_resume = true;
if_sdio_power_off(card);
}
diff --git a/drivers/net/wireless/marvell/libertas/if_spi.c b/drivers/net/wireless/marvell/libertas/if_spi.c
index e9aec6cb1105..7c3224b83ef7 100644
--- a/drivers/net/wireless/marvell/libertas/if_spi.c
+++ b/drivers/net/wireless/marvell/libertas/if_spi.c
@@ -796,15 +796,13 @@ static void if_spi_h2c(struct if_spi_card *card,
{
struct lbs_private *priv = card->priv;
int err = 0;
- u16 int_type, port_reg;
+ u16 port_reg;
switch (type) {
case MVMS_DAT:
- int_type = IF_SPI_CIC_TX_DOWNLOAD_OVER;
port_reg = IF_SPI_DATA_RDWRPORT_REG;
break;
case MVMS_CMD:
- int_type = IF_SPI_CIC_CMD_DOWNLOAD_OVER;
port_reg = IF_SPI_CMD_RDWRPORT_REG;
break;
default:
@@ -1146,8 +1144,8 @@ static int if_spi_probe(struct spi_device *spi)
* This will call alloc_etherdev.
*/
priv = lbs_add_card(card, &spi->dev);
- if (!priv) {
- err = -ENOMEM;
+ if (IS_ERR(priv)) {
+ err = PTR_ERR(priv);
goto free_card;
}
card->priv = priv;
diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c
index c67a8e7be310..220dcdee8d2b 100644
--- a/drivers/net/wireless/marvell/libertas/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas/if_usb.c
@@ -254,8 +254,11 @@ static int if_usb_probe(struct usb_interface *intf,
goto dealloc;
}
- if (!(priv = lbs_add_card(cardp, &intf->dev)))
+ priv = lbs_add_card(cardp, &intf->dev);
+ if (IS_ERR(priv)) {
+ r = PTR_ERR(priv);
goto err_add_card;
+ }
cardp->priv = priv;
@@ -456,8 +459,6 @@ static int __if_usb_submit_rx_urb(struct if_usb_card *cardp,
MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn,
cardp);
- cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET;
-
lbs_deb_usb2(&cardp->udev->dev, "Pointer for rx_urb %p\n", cardp->rx_urb);
if ((ret = usb_submit_urb(cardp->rx_urb, GFP_ATOMIC))) {
lbs_deb_usbd(&cardp->udev->dev, "Submit Rx URB failed: %d\n", ret);
diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c
index f22e1c220cba..f7db60bc7c7f 100644
--- a/drivers/net/wireless/marvell/libertas/main.c
+++ b/drivers/net/wireless/marvell/libertas/main.c
@@ -907,25 +907,29 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
struct net_device *dev;
struct wireless_dev *wdev;
struct lbs_private *priv = NULL;
+ int err;
/* Allocate an Ethernet device and register it */
wdev = lbs_cfg_alloc(dmdev);
if (IS_ERR(wdev)) {
+ err = PTR_ERR(wdev);
pr_err("cfg80211 init failed\n");
- goto done;
+ goto err_cfg;
}
wdev->iftype = NL80211_IFTYPE_STATION;
priv = wdev_priv(wdev);
priv->wdev = wdev;
- if (lbs_init_adapter(priv)) {
+ err = lbs_init_adapter(priv);
+ if (err) {
pr_err("failed to initialize adapter structure\n");
goto err_wdev;
}
dev = alloc_netdev(0, "wlan%d", NET_NAME_UNKNOWN, ether_setup);
if (!dev) {
+ err = -ENOMEM;
dev_err(dmdev, "no memory for network device instance\n");
goto err_adapter;
}
@@ -949,6 +953,7 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
init_waitqueue_head(&priv->waitq);
priv->main_thread = kthread_run(lbs_thread, dev, "lbs_main");
if (IS_ERR(priv->main_thread)) {
+ err = PTR_ERR(priv->main_thread);
lbs_deb_thread("Error creating main thread.\n");
goto err_ndev;
}
@@ -961,7 +966,7 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
priv->wol_gap = 20;
priv->ehs_remove_supported = true;
- goto done;
+ return priv;
err_ndev:
free_netdev(dev);
@@ -972,10 +977,8 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
err_wdev:
lbs_cfg_free(priv);
- priv = NULL;
-
-done:
- return priv;
+ err_cfg:
+ return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(lbs_add_card);
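
With this change lbs_add_card() returns ERR_PTR(err) instead of NULL on failure, and each bus glue driver (if_cs, if_sdio, if_spi, if_usb above) switches from a NULL check to IS_ERR()/PTR_ERR() so the original error code reaches the probe caller instead of a blanket -ENOMEM. The general shape of that pattern, with hypothetical names:

	#include <linux/err.h>
	#include <linux/slab.h>

	struct widget { int id; };

	/* Return a valid pointer on success, an encoded errno on failure. */
	static struct widget *widget_create(int id)
	{
		struct widget *w;

		if (id < 0)
			return ERR_PTR(-EINVAL);	/* propagate a specific error */

		w = kzalloc(sizeof(*w), GFP_KERNEL);
		if (!w)
			return ERR_PTR(-ENOMEM);

		w->id = id;
		return w;
	}

	static int widget_probe(int id)
	{
		struct widget *w = widget_create(id);

		if (IS_ERR(w))
			return PTR_ERR(w);	/* caller sees the original errno */

		/* ... use w ... */
		kfree(w);
		return 0;
	}
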
diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c
index 75cbd609d606..6845eb57b39a 100644
--- a/drivers/net/wireless/marvell/mwifiex/ie.c
+++ b/drivers/net/wireless/marvell/mwifiex/ie.c
@@ -363,6 +363,7 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
(const u8 *)hdr,
hdr->len + sizeof(struct ieee_types_header)))
break;
+ /* fall through */
default:
memcpy(gen_ie->ie_buffer + ie_len, hdr,
hdr->len + sizeof(struct ieee_types_header));
diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig
index 7f24aad94efd..0ccbcd7e887d 100644
--- a/drivers/net/wireless/mediatek/mt76/Kconfig
+++ b/drivers/net/wireless/mediatek/mt76/Kconfig
@@ -13,44 +13,5 @@ config MT76x02_USB
tristate
select MT76_USB
-config MT76x0_COMMON
- tristate
- select MT76x02_LIB
-
-config MT76x2_COMMON
- tristate
- select MT76x02_LIB
-
-config MT76x0U
- tristate "MediaTek MT76x0U (USB) support"
- select MT76x0_COMMON
- select MT76x02_USB
- depends on MAC80211
- depends on USB
- help
- This adds support for MT7610U-based wireless USB dongles.
-
-config MT76x0E
- tristate "MediaTek MT76x0E (PCIe) support"
- select MT76x0_COMMON
- depends on MAC80211
- depends on PCI
- help
- This adds support for MT7610/MT7630-based wireless PCIe devices.
-
-config MT76x2E
- tristate "MediaTek MT76x2E (PCIe) support"
- select MT76x2_COMMON
- depends on MAC80211
- depends on PCI
- ---help---
- This adds support for MT7612/MT7602/MT7662-based wireless PCIe devices.
-
-config MT76x2U
- tristate "MediaTek MT76x2U (USB) support"
- select MT76x2_COMMON
- select MT76x02_USB
- depends on MAC80211
- depends on USB
- help
- This adds support for MT7612U-based wireless USB dongles.
+source "drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig"
+source "drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig"
diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile
index 2346a1b768bc..9b8d7488c545 100644
--- a/drivers/net/wireless/mediatek/mt76/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/Makefile
@@ -1,11 +1,7 @@
obj-$(CONFIG_MT76_CORE) += mt76.o
obj-$(CONFIG_MT76_USB) += mt76-usb.o
-obj-$(CONFIG_MT76x0_COMMON) += mt76x0/
obj-$(CONFIG_MT76x02_LIB) += mt76x02-lib.o
obj-$(CONFIG_MT76x02_USB) += mt76x02-usb.o
-obj-$(CONFIG_MT76x2_COMMON) += mt76x2-common.o
-obj-$(CONFIG_MT76x2E) += mt76x2e.o
-obj-$(CONFIG_MT76x2U) += mt76x2u.o
mt76-y := \
mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o tx.o agg-rx.o
@@ -14,25 +10,13 @@ mt76-usb-y := usb.o usb_trace.o usb_mcu.o
CFLAGS_trace.o := -I$(src)
CFLAGS_usb_trace.o := -I$(src)
+CFLAGS_mt76x02_trace.o := -I$(src)
mt76x02-lib-y := mt76x02_util.o mt76x02_mac.o mt76x02_mcu.o \
- mt76x02_eeprom.o mt76x02_phy.o mt76x02_mmio.o
+ mt76x02_eeprom.o mt76x02_phy.o mt76x02_mmio.o \
+ mt76x02_txrx.o mt76x02_trace.o
mt76x02-usb-y := mt76x02_usb_mcu.o mt76x02_usb_core.o
-mt76x2-common-y := \
- mt76x2_eeprom.o mt76x2_tx_common.o mt76x2_mac_common.o \
- mt76x2_init_common.o mt76x2_common.o mt76x2_phy_common.o \
- mt76x2_debugfs.o mt76x2_mcu_common.o
-
-mt76x2e-y := \
- mt76x2_pci.o mt76x2_dma.o \
- mt76x2_main.o mt76x2_init.o mt76x2_tx.o \
- mt76x2_core.o mt76x2_mac.o mt76x2_mcu.o mt76x2_phy.o \
- mt76x2_dfs.o mt76x2_trace.o
-
-mt76x2u-y := \
- mt76x2_usb.o mt76x2u_init.o mt76x2u_main.o mt76x2u_mac.o \
- mt76x2u_mcu.o mt76x2u_phy.o mt76x2u_core.o
-
-CFLAGS_mt76x2_trace.o := -I$(src)
+obj-$(CONFIG_MT76x0_COMMON) += mt76x0/
+obj-$(CONFIG_MT76x2_COMMON) += mt76x2/
diff --git a/drivers/net/wireless/mediatek/mt76/mmio.c b/drivers/net/wireless/mediatek/mt76/mmio.c
index 30a5d928e655..1d6bbce76041 100644
--- a/drivers/net/wireless/mediatek/mt76/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mmio.c
@@ -79,6 +79,7 @@ void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs)
.copy = mt76_mmio_copy,
.wr_rp = mt76_mmio_wr_rp,
.rd_rp = mt76_mmio_rd_rp,
+ .type = MT76_BUS_MMIO,
};
dev->bus = &mt76_mmio_ops;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index f2dd4d87e355..3bfa7f5e3513 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -38,6 +38,11 @@ struct mt76_reg_pair {
u32 value;
};
+enum mt76_bus_type {
+ MT76_BUS_MMIO,
+ MT76_BUS_USB,
+};
+
struct mt76_bus_ops {
u32 (*rr)(struct mt76_dev *dev, u32 offset);
void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
@@ -48,8 +53,12 @@ struct mt76_bus_ops {
const struct mt76_reg_pair *rp, int len);
int (*rd_rp)(struct mt76_dev *dev, u32 base,
struct mt76_reg_pair *rp, int len);
+ enum mt76_bus_type type;
};
+#define mt76_is_usb(dev) ((dev)->mt76.bus->type == MT76_BUS_USB)
+#define mt76_is_mmio(dev) ((dev)->mt76.bus->type == MT76_BUS_MMIO)
+
enum mt76_txq_id {
MT_TXQ_VO = IEEE80211_AC_VO,
MT_TXQ_VI = IEEE80211_AC_VI,
@@ -262,8 +271,6 @@ struct mt76_driver_ops {
void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
bool ps);
- s8 (*get_max_txpwr_adj)(struct mt76_dev *dev,
- const struct ieee80211_tx_rate *rate);
};
struct mt76_channel_state {
@@ -519,8 +526,8 @@ static inline u16 mt76_rev(struct mt76_dev *dev)
#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))
-#define __mt76_init_queues(dev) (dev)->queue_ops->init((dev))
-#define __mt76_queue_alloc(dev, ...) (dev)->queue_ops->alloc((dev), __VA_ARGS__)
+#define mt76_init_queues(dev) (dev)->mt76.queue_ops->init(&((dev)->mt76))
+#define mt76_queue_alloc(dev, ...) (dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_add_buf(dev, ...) (dev)->mt76.queue_ops->add_buf(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...) (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
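
Here mt76_bus_ops gains a type field (MT76_BUS_MMIO/MT76_BUS_USB) plus the mt76_is_usb()/mt76_is_mmio() helpers, which later hunks use for bus-specific decisions at runtime (for example the extra_tx_headroom adjustment in mt76x0_register_device() further down). A stripped-down sketch of the idea, with simplified, hypothetical structures and a placeholder headroom value:

	enum bus_type { BUS_MMIO, BUS_USB };

	struct bus_ops {
		/* register accessors elided */
		enum bus_type type;		/* identifies which backend registered the ops */
	};

	struct device_ctx {
		const struct bus_ops *bus;
	};

	#define ctx_is_usb(ctx)  ((ctx)->bus->type == BUS_USB)
	#define ctx_is_mmio(ctx) ((ctx)->bus->type == BUS_MMIO)

	static unsigned int tx_headroom(const struct device_ctx *ctx)
	{
		unsigned int headroom = 2;

		/* USB framing needs the TXWI plus a DMA header in front of each frame */
		if (ctx_is_usb(ctx))
			headroom += 16;		/* placeholder, not the real sizeof(txwi) */

		return headroom;
	}
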
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig b/drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig
new file mode 100644
index 000000000000..9a6157db3893
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig
@@ -0,0 +1,20 @@
+config MT76x0_COMMON
+ tristate
+ select MT76x02_LIB
+
+config MT76x0U
+ tristate "MediaTek MT76x0U (USB) support"
+ select MT76x0_COMMON
+ select MT76x02_USB
+ depends on MAC80211
+ depends on USB
+ help
+ This adds support for MT7610U-based wireless USB dongles.
+
+config MT76x0E
+ tristate "MediaTek MT76x0E (PCIe) support"
+ select MT76x0_COMMON
+ depends on MAC80211
+ depends on PCI
+ help
+ This adds support for MT7610/MT7630-based wireless PCIe devices.
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile b/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile
index 254d94efd24d..20672978dceb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile
@@ -4,7 +4,7 @@ obj-$(CONFIG_MT76x0_COMMON) += mt76x0-common.o
mt76x0-common-y := \
init.o main.o trace.o eeprom.o phy.o \
- mac.o debugfs.o tx.o
+ mac.o debugfs.o
mt76x0u-y := usb.o usb_mcu.o
mt76x0e-y := pci.o pci_mcu.o
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c
index ddc1af626b3b..3224e5b1a1e5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c
@@ -21,7 +21,7 @@
static int
mt76x0_ampdu_stat_read(struct seq_file *file, void *data)
{
- struct mt76x0_dev *dev = file->private;
+ struct mt76x02_dev *dev = file->private;
int i, j;
#define stat_printf(grp, off, name) \
@@ -75,7 +75,7 @@ static const struct file_operations fops_ampdu_stat = {
.release = single_release,
};
-void mt76x0_init_debugfs(struct mt76x0_dev *dev)
+void mt76x0_init_debugfs(struct mt76x02_dev *dev)
{
struct dentry *dir;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/dma.h b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.h
deleted file mode 100644
index 891ce1c3461f..000000000000
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/dma.h
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __MT76X0U_DMA_H
-#define __MT76X0U_DMA_H
-
-#include <asm/unaligned.h>
-#include <linux/skbuff.h>
-
-#define MT_DMA_HDR_LEN 4
-#define MT_RX_INFO_LEN 4
-#define MT_FCE_INFO_LEN 4
-#define MT_DMA_HDRS (MT_DMA_HDR_LEN + MT_RX_INFO_LEN)
-
-/* Common Tx DMA descriptor fields */
-#define MT_TXD_INFO_LEN GENMASK(15, 0)
-#define MT_TXD_INFO_D_PORT GENMASK(29, 27)
-#define MT_TXD_INFO_TYPE GENMASK(31, 30)
-
-/* Tx DMA MCU command specific flags */
-#define MT_TXD_CMD_SEQ GENMASK(19, 16)
-#define MT_TXD_CMD_TYPE GENMASK(26, 20)
-
-enum mt76_msg_port {
- WLAN_PORT,
- CPU_RX_PORT,
- CPU_TX_PORT,
- HOST_PORT,
- VIRTUAL_CPU_RX_PORT,
- VIRTUAL_CPU_TX_PORT,
- DISCARD,
-};
-
-enum mt76_info_type {
- DMA_PACKET,
- DMA_COMMAND,
-};
-
-/* Tx DMA packet specific flags */
-#define MT_TXD_PKT_INFO_NEXT_VLD BIT(16)
-#define MT_TXD_PKT_INFO_TX_BURST BIT(17)
-#define MT_TXD_PKT_INFO_80211 BIT(19)
-#define MT_TXD_PKT_INFO_TSO BIT(20)
-#define MT_TXD_PKT_INFO_CSO BIT(21)
-#define MT_TXD_PKT_INFO_WIV BIT(24)
-#define MT_TXD_PKT_INFO_QSEL GENMASK(26, 25)
-
-enum mt76_qsel {
- MT_QSEL_MGMT,
- MT_QSEL_HCCA,
- MT_QSEL_EDCA,
- MT_QSEL_EDCA_2,
-};
-
-
-static inline int mt76x0_dma_skb_wrap(struct sk_buff *skb,
- enum mt76_msg_port d_port,
- enum mt76_info_type type, u32 flags)
-{
- u32 info;
-
- /* Buffer layout:
- * | 4B | xfer len | pad | 4B |
- * | TXINFO | pkt/cmd | zero pad to 4B | zero |
- *
- * length field of TXINFO should be set to 'xfer len'.
- */
-
- info = flags |
- FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
- FIELD_PREP(MT_TXD_INFO_D_PORT, d_port) |
- FIELD_PREP(MT_TXD_INFO_TYPE, type);
-
- put_unaligned_le32(info, skb_push(skb, sizeof(info)));
- return skb_put_padto(skb, round_up(skb->len, 4) + 4);
-}
-
-static inline int
-mt76x0_dma_skb_wrap_pkt(struct sk_buff *skb, enum mt76_qsel qsel, u32 flags)
-{
- flags |= FIELD_PREP(MT_TXD_PKT_INFO_QSEL, qsel);
- return mt76x0_dma_skb_wrap(skb, WLAN_PORT, DMA_PACKET, flags);
-}
-
-/* Common Rx DMA descriptor fields */
-#define MT_RXD_INFO_LEN GENMASK(13, 0)
-#define MT_RXD_INFO_PCIE_INTR BIT(24)
-#define MT_RXD_INFO_QSEL GENMASK(26, 25)
-#define MT_RXD_INFO_PORT GENMASK(29, 27)
-#define MT_RXD_INFO_TYPE GENMASK(31, 30)
-
-/* Rx DMA packet specific flags */
-#define MT_RXD_PKT_INFO_UDP_ERR BIT(16)
-#define MT_RXD_PKT_INFO_TCP_ERR BIT(17)
-#define MT_RXD_PKT_INFO_IP_ERR BIT(18)
-#define MT_RXD_PKT_INFO_PKT_80211 BIT(19)
-#define MT_RXD_PKT_INFO_L3L4_DONE BIT(20)
-#define MT_RXD_PKT_INFO_MAC_LEN GENMASK(23, 21)
-
-/* Rx DMA MCU command specific flags */
-#define MT_RXD_CMD_INFO_SELF_GEN BIT(15)
-#define MT_RXD_CMD_INFO_CMD_SEQ GENMASK(19, 16)
-#define MT_RXD_CMD_INFO_EVT_TYPE GENMASK(23, 20)
-
-enum mt76_evt_type {
- CMD_DONE,
- CMD_ERROR,
- CMD_RETRY,
- EVENT_PWR_RSP,
- EVENT_WOW_RSP,
- EVENT_CARRIER_DETECT_RSP,
- EVENT_DFS_DETECT_RSP,
-};
-
-#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
index 166a1fd8644e..ab4fd6e0f23a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
@@ -25,14 +25,14 @@
#define MT_MAP_READS DIV_ROUND_UP(MT_EFUSE_USAGE_MAP_SIZE, 16)
static int
-mt76x0_efuse_physical_size_check(struct mt76x0_dev *dev)
+mt76x0_efuse_physical_size_check(struct mt76x02_dev *dev)
{
u8 data[MT_MAP_READS * 16];
int ret, i;
u32 start = 0, end = 0, cnt_free;
- ret = mt76x02_get_efuse_data(&dev->mt76, MT_EE_USAGE_MAP_START,
- data, sizeof(data), MT_EE_PHYSICAL_READ);
+ ret = mt76x02_get_efuse_data(dev, MT_EE_USAGE_MAP_START, data,
+ sizeof(data), MT_EE_PHYSICAL_READ);
if (ret)
return ret;
@@ -53,12 +53,12 @@ mt76x0_efuse_physical_size_check(struct mt76x0_dev *dev)
return 0;
}
-static void mt76x0_set_chip_cap(struct mt76x0_dev *dev)
+static void mt76x0_set_chip_cap(struct mt76x02_dev *dev)
{
- u16 nic_conf0 = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_0);
- u16 nic_conf1 = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_1);
+ u16 nic_conf0 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0);
+ u16 nic_conf1 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
- mt76x02_eeprom_parse_hw_cap(&dev->mt76);
+ mt76x02_eeprom_parse_hw_cap(dev);
dev_dbg(dev->mt76.dev, "2GHz %d 5GHz %d\n",
dev->mt76.cap.has_2ghz, dev->mt76.cap.has_5ghz);
@@ -82,46 +82,44 @@ static void mt76x0_set_chip_cap(struct mt76x0_dev *dev)
dev_err(dev->mt76.dev, "invalid tx-rx stream\n");
}
-static void mt76x0_set_temp_offset(struct mt76x0_dev *dev)
+static void mt76x0_set_temp_offset(struct mt76x02_dev *dev)
{
u8 val;
- val = mt76x02_eeprom_get(&dev->mt76, MT_EE_2G_TARGET_POWER) >> 8;
+ val = mt76x02_eeprom_get(dev, MT_EE_2G_TARGET_POWER) >> 8;
if (mt76x02_field_valid(val))
- dev->caldata.temp_offset = mt76x02_sign_extend(val, 8);
+ dev->cal.rx.temp_offset = mt76x02_sign_extend(val, 8);
else
- dev->caldata.temp_offset = -10;
+ dev->cal.rx.temp_offset = -10;
}
-static void mt76x0_set_freq_offset(struct mt76x0_dev *dev)
+static void mt76x0_set_freq_offset(struct mt76x02_dev *dev)
{
- struct mt76x0_caldata *caldata = &dev->caldata;
+ struct mt76x02_rx_freq_cal *caldata = &dev->cal.rx;
u8 val;
- val = mt76x02_eeprom_get(&dev->mt76, MT_EE_FREQ_OFFSET);
+ val = mt76x02_eeprom_get(dev, MT_EE_FREQ_OFFSET);
if (!mt76x02_field_valid(val))
val = 0;
caldata->freq_offset = val;
- val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TSSI_BOUND4) >> 8;
+ val = mt76x02_eeprom_get(dev, MT_EE_TSSI_BOUND4) >> 8;
if (!mt76x02_field_valid(val))
val = 0;
caldata->freq_offset -= mt76x02_sign_extend(val, 8);
}
-void mt76x0_read_rx_gain(struct mt76x0_dev *dev)
+void mt76x0_read_rx_gain(struct mt76x02_dev *dev)
{
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
- struct mt76x0_caldata *caldata = &dev->caldata;
+ struct mt76x02_rx_freq_cal *caldata = &dev->cal.rx;
s8 val, lna_5g[3], lna_2g;
u16 rssi_offset;
int i;
- mt76x02_get_rx_gain(&dev->mt76, chan->band, &rssi_offset,
- &lna_2g, lna_5g);
- caldata->lna_gain = mt76x02_get_lna_gain(&dev->mt76, &lna_2g,
- lna_5g, chan);
+ mt76x02_get_rx_gain(dev, chan->band, &rssi_offset, &lna_2g, lna_5g);
+ caldata->lna_gain = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan);
for (i = 0; i < ARRAY_SIZE(caldata->rssi_offset); i++) {
val = rssi_offset >> (8 * i);
@@ -132,12 +130,12 @@ void mt76x0_read_rx_gain(struct mt76x0_dev *dev)
}
}
-static s8 mt76x0_get_delta(struct mt76_dev *dev)
+static s8 mt76x0_get_delta(struct mt76x02_dev *dev)
{
- struct cfg80211_chan_def *chandef = &dev->chandef;
+ struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
u8 val;
- if (mt76x02_tssi_enabled(dev))
+ if (mt76x0_tssi_enabled(dev))
return 0;
if (chandef->width == NL80211_CHAN_WIDTH_80) {
@@ -157,66 +155,66 @@ static s8 mt76x0_get_delta(struct mt76_dev *dev)
return mt76x02_rate_power_val(val);
}
-void mt76x0_get_tx_power_per_rate(struct mt76x0_dev *dev)
+void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev)
{
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
bool is_2ghz = chan->band == NL80211_BAND_2GHZ;
struct mt76_rate_power *t = &dev->mt76.rate_power;
- s8 delta = mt76x0_get_delta(&dev->mt76);
+ s8 delta = mt76x0_get_delta(dev);
u16 val, addr;
memset(t, 0, sizeof(*t));
/* cck 1M, 2M, 5.5M, 11M */
- val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_BYRATE_BASE);
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_BYRATE_BASE);
t->cck[0] = t->cck[1] = s6_to_s8(val);
t->cck[2] = t->cck[3] = s6_to_s8(val >> 8);
/* ofdm 6M, 9M, 12M, 18M */
addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 2 : 0x120;
- val = mt76x02_eeprom_get(&dev->mt76, addr);
+ val = mt76x02_eeprom_get(dev, addr);
t->ofdm[0] = t->ofdm[1] = s6_to_s8(val);
t->ofdm[2] = t->ofdm[3] = s6_to_s8(val >> 8);
/* ofdm 24M, 36M, 48M, 54M */
addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 4 : 0x122;
- val = mt76x02_eeprom_get(&dev->mt76, addr);
+ val = mt76x02_eeprom_get(dev, addr);
t->ofdm[4] = t->ofdm[5] = s6_to_s8(val);
t->ofdm[6] = t->ofdm[7] = s6_to_s8(val >> 8);
/* ht-vht mcs 1ss 0, 1, 2, 3 */
addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 6 : 0x124;
- val = mt76x02_eeprom_get(&dev->mt76, addr);
+ val = mt76x02_eeprom_get(dev, addr);
t->ht[0] = t->ht[1] = t->vht[0] = t->vht[1] = s6_to_s8(val);
t->ht[2] = t->ht[3] = t->vht[2] = t->vht[3] = s6_to_s8(val >> 8);
/* ht-vht mcs 1ss 4, 5, 6 */
addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 8 : 0x126;
- val = mt76x02_eeprom_get(&dev->mt76, addr);
+ val = mt76x02_eeprom_get(dev, addr);
t->ht[4] = t->ht[5] = t->vht[4] = t->vht[5] = s6_to_s8(val);
t->ht[6] = t->vht[6] = s6_to_s8(val >> 8);
/* ht-vht mcs 1ss 0, 1, 2, 3 stbc */
addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 14 : 0xec;
- val = mt76x02_eeprom_get(&dev->mt76, addr);
+ val = mt76x02_eeprom_get(dev, addr);
t->stbc[0] = t->stbc[1] = s6_to_s8(val);
t->stbc[2] = t->stbc[3] = s6_to_s8(val >> 8);
/* ht-vht mcs 1ss 4, 5, 6 stbc */
addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 16 : 0xee;
- val = mt76x02_eeprom_get(&dev->mt76, addr);
+ val = mt76x02_eeprom_get(dev, addr);
t->stbc[4] = t->stbc[5] = s6_to_s8(val);
t->stbc[6] = t->stbc[7] = s6_to_s8(val >> 8);
/* vht mcs 8, 9 5GHz */
- val = mt76x02_eeprom_get(&dev->mt76, 0x132);
+ val = mt76x02_eeprom_get(dev, 0x132);
t->vht[7] = s6_to_s8(val);
t->vht[8] = s6_to_s8(val >> 8);
mt76x02_add_rate_power_offset(t, delta);
}
-void mt76x0_get_power_info(struct mt76x0_dev *dev, u8 *info)
+void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info)
{
struct mt76x0_chan_map {
u8 chan;
@@ -266,7 +264,7 @@ void mt76x0_get_power_info(struct mt76x0_dev *dev, u8 *info)
addr = MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE + 2 + offset;
}
- data = mt76x02_eeprom_get(&dev->mt76, addr);
+ data = mt76x02_eeprom_get(dev, addr);
info[0] = data;
if (!info[0] || info[0] > 0x3f)
@@ -277,7 +275,7 @@ void mt76x0_get_power_info(struct mt76x0_dev *dev, u8 *info)
info[1] = 5;
}
-static int mt76x0_check_eeprom(struct mt76x0_dev *dev)
+static int mt76x0_check_eeprom(struct mt76x02_dev *dev)
{
u16 val;
@@ -297,7 +295,7 @@ static int mt76x0_check_eeprom(struct mt76x0_dev *dev)
}
}
-static int mt76x0_load_eeprom(struct mt76x0_dev *dev)
+static int mt76x0_load_eeprom(struct mt76x02_dev *dev)
{
int found;
@@ -312,11 +310,11 @@ static int mt76x0_load_eeprom(struct mt76x0_dev *dev)
if (found < 0)
return found;
- return mt76x02_get_efuse_data(&dev->mt76, 0, dev->mt76.eeprom.data,
+ return mt76x02_get_efuse_data(dev, 0, dev->mt76.eeprom.data,
MT76X0_EEPROM_SIZE, MT_EE_READ);
}
-int mt76x0_eeprom_init(struct mt76x0_dev *dev)
+int mt76x0_eeprom_init(struct mt76x02_dev *dev)
{
u8 version, fae;
u16 data;
@@ -326,7 +324,7 @@ int mt76x0_eeprom_init(struct mt76x0_dev *dev)
if (err < 0)
return err;
- data = mt76x02_eeprom_get(&dev->mt76, MT_EE_VERSION);
+ data = mt76x02_eeprom_get(dev, MT_EE_VERSION);
version = data >> 8;
fae = data;
@@ -337,8 +335,7 @@ int mt76x0_eeprom_init(struct mt76x0_dev *dev)
dev_info(dev->mt76.dev, "EEPROM ver:%02hhx fae:%02hhx\n",
version, fae);
- mt76x02_mac_setaddr(&dev->mt76,
- dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
+ mt76x02_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
mt76x0_set_chip_cap(dev);
mt76x0_set_freq_offset(dev);
mt76x0_set_temp_offset(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
index 4e1fafa5b8c3..ee9ade9f3c8b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
@@ -18,23 +18,15 @@
#include "../mt76x02_eeprom.h"
-struct mt76x0_dev;
+struct mt76x02_dev;
#define MT76X0U_EE_MAX_VER 0x0c
#define MT76X0_EEPROM_SIZE 512
-struct mt76x0_caldata {
- s8 rssi_offset[2];
- s8 lna_gain;
-
- s16 temp_offset;
- u8 freq_offset;
-};
-
-int mt76x0_eeprom_init(struct mt76x0_dev *dev);
-void mt76x0_read_rx_gain(struct mt76x0_dev *dev);
-void mt76x0_get_tx_power_per_rate(struct mt76x0_dev *dev);
-void mt76x0_get_power_info(struct mt76x0_dev *dev, u8 *info);
+int mt76x0_eeprom_init(struct mt76x02_dev *dev);
+void mt76x0_read_rx_gain(struct mt76x02_dev *dev);
+void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev);
+void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info);
static inline s8 s6_to_s8(u32 val)
{
@@ -45,4 +37,10 @@ static inline s8 s6_to_s8(u32 val)
return ret;
}
+static inline bool mt76x0_tssi_enabled(struct mt76x02_dev *dev)
+{
+ return (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) &
+ MT_EE_NIC_CONF_1_TX_ALC_EN);
+}
+
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
index edfd5d94d197..4a9408801260 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
@@ -18,9 +18,6 @@
#include "eeprom.h"
#include "trace.h"
#include "mcu.h"
-#include "../mt76x02_util.h"
-#include "../mt76x02_dma.h"
-
#include "initvals.h"
static void mt76x0_vht_cap_mask(struct ieee80211_supported_band *sband)
@@ -42,7 +39,7 @@ static void mt76x0_vht_cap_mask(struct ieee80211_supported_band *sband)
}
static void
-mt76x0_set_wlan_state(struct mt76x0_dev *dev, u32 val, bool enable)
+mt76x0_set_wlan_state(struct mt76x02_dev *dev, u32 val, bool enable)
{
u32 mask = MT_CMB_CTRL_XTAL_RDY | MT_CMB_CTRL_PLL_LD;
@@ -69,12 +66,10 @@ mt76x0_set_wlan_state(struct mt76x0_dev *dev, u32 val, bool enable)
dev_err(dev->mt76.dev, "PLL and XTAL check failed\n");
}
-void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset)
+void mt76x0_chip_onoff(struct mt76x02_dev *dev, bool enable, bool reset)
{
u32 val;
- mutex_lock(&dev->hw_atomic_mutex);
-
val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
if (reset) {
@@ -96,12 +91,10 @@ void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset)
udelay(20);
mt76x0_set_wlan_state(dev, val, enable);
-
- mutex_unlock(&dev->hw_atomic_mutex);
}
EXPORT_SYMBOL_GPL(mt76x0_chip_onoff);
-static void mt76x0_reset_csr_bbp(struct mt76x0_dev *dev)
+static void mt76x0_reset_csr_bbp(struct mt76x02_dev *dev)
{
mt76_wr(dev, MT_MAC_SYS_CTRL,
MT_MAC_SYS_CTRL_RESET_CSR |
@@ -116,7 +109,7 @@ static void mt76x0_reset_csr_bbp(struct mt76x0_dev *dev)
mt76_wr_rp(dev, MT_MCU_MEMMAP_WLAN, \
tab, ARRAY_SIZE(tab))
-static int mt76x0_init_bbp(struct mt76x0_dev *dev)
+static int mt76x0_init_bbp(struct mt76x02_dev *dev)
{
int ret, i;
@@ -139,13 +132,13 @@ static int mt76x0_init_bbp(struct mt76x0_dev *dev)
return 0;
}
-static void mt76x0_init_mac_registers(struct mt76x0_dev *dev)
+static void mt76x0_init_mac_registers(struct mt76x02_dev *dev)
{
u32 reg;
RANDOM_WRITE(dev, common_mac_reg_table);
- mt76x02_set_beacon_offsets(&dev->mt76);
+ mt76x02_set_beacon_offsets(dev);
/* Enable PBF and MAC clock SYS_CTRL[11:10] = 0x3 */
RANDOM_WRITE(dev, mt76x0_mac_reg_table);
@@ -172,15 +165,9 @@ static void mt76x0_init_mac_registers(struct mt76x0_dev *dev)
reg &= ~0x000003FF;
reg |= 0x00000201;
mt76_wr(dev, MT_WMM_CTRL, reg);
-
- /* TODO: Probably not needed */
- mt76_wr(dev, 0x7028, 0);
- mt76_wr(dev, 0x7010, 0);
- mt76_wr(dev, 0x7024, 0);
- msleep(10);
}
-static int mt76x0_init_wcid_mem(struct mt76x0_dev *dev)
+static int mt76x0_init_wcid_mem(struct mt76x02_dev *dev)
{
u32 *vals;
int i;
@@ -199,14 +186,14 @@ static int mt76x0_init_wcid_mem(struct mt76x0_dev *dev)
return 0;
}
-static void mt76x0_init_key_mem(struct mt76x0_dev *dev)
+static void mt76x0_init_key_mem(struct mt76x02_dev *dev)
{
u32 vals[4] = {};
mt76_wr_copy(dev, MT_SKEY_MODE_BASE_0, vals, ARRAY_SIZE(vals));
}
-static int mt76x0_init_wcid_attr_mem(struct mt76x0_dev *dev)
+static int mt76x0_init_wcid_attr_mem(struct mt76x02_dev *dev)
{
u32 *vals;
int i;
@@ -223,7 +210,7 @@ static int mt76x0_init_wcid_attr_mem(struct mt76x0_dev *dev)
return 0;
}
-static void mt76x0_reset_counters(struct mt76x0_dev *dev)
+static void mt76x0_reset_counters(struct mt76x02_dev *dev)
{
mt76_rr(dev, MT_RX_STAT_0);
mt76_rr(dev, MT_RX_STAT_1);
@@ -233,7 +220,7 @@ static void mt76x0_reset_counters(struct mt76x0_dev *dev)
mt76_rr(dev, MT_TX_STA_2);
}
-int mt76x0_mac_start(struct mt76x0_dev *dev)
+int mt76x0_mac_start(struct mt76x02_dev *dev)
{
mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
@@ -248,7 +235,7 @@ int mt76x0_mac_start(struct mt76x0_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76x0_mac_start);
-void mt76x0_mac_stop(struct mt76x0_dev *dev)
+void mt76x0_mac_stop(struct mt76x02_dev *dev)
{
int i = 200, ok = 0;
@@ -281,7 +268,7 @@ void mt76x0_mac_stop(struct mt76x0_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76x0_mac_stop);
-int mt76x0_init_hardware(struct mt76x0_dev *dev)
+int mt76x0_init_hardware(struct mt76x02_dev *dev)
{
int ret;
@@ -293,7 +280,7 @@ int mt76x0_init_hardware(struct mt76x0_dev *dev)
return -ETIMEDOUT;
mt76x0_reset_csr_bbp(dev);
- ret = mt76x02_mcu_function_select(&dev->mt76, Q_SELECT, 1, false);
+ ret = mt76x02_mcu_function_select(dev, Q_SELECT, 1, false);
if (ret)
return ret;
@@ -335,12 +322,12 @@ int mt76x0_init_hardware(struct mt76x0_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76x0_init_hardware);
-struct mt76x0_dev *
+struct mt76x02_dev *
mt76x0_alloc_device(struct device *pdev,
const struct mt76_driver_ops *drv_ops,
const struct ieee80211_ops *ops)
{
- struct mt76x0_dev *dev;
+ struct mt76x02_dev *dev;
struct mt76_dev *mdev;
mdev = mt76_alloc_device(sizeof(*dev), ops);
@@ -350,18 +337,15 @@ mt76x0_alloc_device(struct device *pdev,
mdev->dev = pdev;
mdev->drv = drv_ops;
- dev = container_of(mdev, struct mt76x0_dev, mt76);
- mutex_init(&dev->reg_atomic_mutex);
- mutex_init(&dev->hw_atomic_mutex);
- spin_lock_init(&dev->mac_lock);
- spin_lock_init(&dev->con_mon_lock);
+ dev = container_of(mdev, struct mt76x02_dev, mt76);
+ mutex_init(&dev->phy_mutex);
atomic_set(&dev->avg_ampdu_len, 1);
return dev;
}
EXPORT_SYMBOL_GPL(mt76x0_alloc_device);
-int mt76x0_register_device(struct mt76x0_dev *dev)
+int mt76x0_register_device(struct mt76x02_dev *dev)
{
struct mt76_dev *mdev = &dev->mt76;
struct ieee80211_hw *hw = mdev->hw;
@@ -384,7 +368,10 @@ int mt76x0_register_device(struct mt76x0_dev *dev)
hw->max_rates = 1;
hw->max_report_rates = 7;
hw->max_rate_tries = 1;
- hw->extra_tx_headroom = sizeof(struct mt76x02_txwi) + 4 + 2;
+ hw->extra_tx_headroom = 2;
+ if (mt76_is_usb(dev))
+ hw->extra_tx_headroom += sizeof(struct mt76x02_txwi) +
+ MT_DMA_HDR_LEN;
hw->sta_data_size = sizeof(struct mt76x02_sta);
hw->vif_data_size = sizeof(struct mt76x02_vif);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
index 6f26dc6dabde..236dce6860b4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
@@ -83,7 +83,8 @@ static const struct mt76_reg_pair mt76x0_mac_reg_table[] = {
{ MT_LDO_CTRL_1, 0x6B006464 },
{ MT_HT_BASIC_RATE, 0x00004003 },
{ MT_HT_CTRL_CFG, 0x000001FF },
- { MT_TXOP_HLDR_ET, 0x00000000 }
+ { MT_TXOP_HLDR_ET, 0x00000000 },
+ { MT_PN_PAD_MODE, 0x00000003 },
};
static const struct mt76_reg_pair mt76x0_bbp_init_tab[] = {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c
index f55734a922aa..7a422c590211 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c
@@ -13,13 +13,13 @@
* GNU General Public License for more details.
*/
+#include <linux/etherdevice.h>
+
#include "mt76x0.h"
#include "trace.h"
-#include "../mt76x02_util.h"
-#include <linux/etherdevice.h>
-void mt76x0_mac_set_protection(struct mt76x0_dev *dev, bool legacy_prot,
- int ht_mode)
+void mt76x0_mac_set_protection(struct mt76x02_dev *dev, bool legacy_prot,
+ int ht_mode)
{
int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION;
bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
@@ -77,7 +77,7 @@ void mt76x0_mac_set_protection(struct mt76x0_dev *dev, bool legacy_prot,
mt76_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]);
}
-void mt76x0_mac_set_short_preamble(struct mt76x0_dev *dev, bool short_preamb)
+void mt76x0_mac_set_short_preamble(struct mt76x02_dev *dev, bool short_preamb)
{
if (short_preamb)
mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
@@ -85,7 +85,7 @@ void mt76x0_mac_set_short_preamble(struct mt76x0_dev *dev, bool short_preamb)
mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
}
-void mt76x0_mac_config_tsf(struct mt76x0_dev *dev, bool enable, int interval)
+void mt76x0_mac_config_tsf(struct mt76x02_dev *dev, bool enable, int interval)
{
u32 val = mt76_rr(dev, MT_BEACON_TIME_CFG);
@@ -105,7 +105,7 @@ void mt76x0_mac_config_tsf(struct mt76x0_dev *dev, bool enable, int interval)
MT_BEACON_TIME_CFG_TBTT_EN;
}
-static void mt76x0_check_mac_err(struct mt76x0_dev *dev)
+static void mt76x0_check_mac_err(struct mt76x02_dev *dev)
{
u32 val = mt76_rr(dev, 0x10f4);
@@ -120,7 +120,7 @@ static void mt76x0_check_mac_err(struct mt76x0_dev *dev)
}
void mt76x0_mac_work(struct work_struct *work)
{
- struct mt76x0_dev *dev = container_of(work, struct mt76x0_dev,
+ struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
mac_work.work);
struct {
u32 addr_base;
@@ -171,7 +171,7 @@ void mt76x0_mac_work(struct work_struct *work)
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work, 10 * HZ);
}
-void mt76x0_mac_set_ampdu_factor(struct mt76x0_dev *dev)
+void mt76x0_mac_set_ampdu_factor(struct mt76x02_dev *dev)
{
struct ieee80211_sta *sta;
struct mt76_wcid *wcid;
@@ -195,67 +195,3 @@ void mt76x0_mac_set_ampdu_factor(struct mt76x0_dev *dev)
mt76_wr(dev, MT_MAX_LEN_CFG, 0xa0fff |
FIELD_PREP(MT_MAX_LEN_CFG_AMPDU, min_factor));
}
-
-static void
-mt76x0_rx_monitor_beacon(struct mt76x0_dev *dev, struct mt76x02_rxwi *rxwi,
- u16 rate, int rssi)
-{
- dev->bcn_phy_mode = FIELD_GET(MT_RXWI_RATE_PHY, rate);
- dev->avg_rssi = ((dev->avg_rssi * 15) / 16 + (rssi << 8)) / 256;
-}
-
-static int
-mt76x0_rx_is_our_beacon(struct mt76x0_dev *dev, u8 *data)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
-
- return ieee80211_is_beacon(hdr->frame_control) &&
- ether_addr_equal(hdr->addr2, dev->ap_bssid);
-}
-
-u32 mt76x0_mac_process_rx(struct mt76x0_dev *dev, struct sk_buff *skb,
- void *rxi)
-{
- struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
- struct mt76x02_rxwi *rxwi = rxi;
- u32 len, ctl = le32_to_cpu(rxwi->ctl);
- u16 rate = le16_to_cpu(rxwi->rate);
- int rssi, pad_len = 0;
-
- len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
- if (WARN_ON(len < 10))
- return 0;
-
- if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_DECRYPT)) {
- status->flag |= RX_FLAG_DECRYPTED;
- status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
- }
-
- if (rxwi->rxinfo & MT_RXINFO_L2PAD)
- pad_len += 2;
-
- mt76x02_remove_hdr_pad(skb, pad_len);
-
- pskb_trim(skb, len);
- status->chains = BIT(0);
- rssi = mt76x0_phy_get_rssi(dev, rxwi);
- status->chain_signal[0] = status->signal = rssi;
- status->freq = dev->mt76.chandef.chan->center_freq;
- status->band = dev->mt76.chandef.chan->band;
-
- mt76x02_mac_process_rate(status, rate);
-
- spin_lock_bh(&dev->con_mon_lock);
- if (mt76x0_rx_is_our_beacon(dev, skb->data)) {
- mt76x0_rx_monitor_beacon(dev, rxwi, rate, rssi);
- } else if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST)) {
- if (dev->avg_rssi == 0)
- dev->avg_rssi = rssi;
- else
- dev->avg_rssi = (dev->avg_rssi * 15) / 16 + rssi / 16;
-
- }
- spin_unlock_bh(&dev->con_mon_lock);
-
- return len;
-}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h
deleted file mode 100644
index b887693a56b6..000000000000
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __MT76_MAC_H
-#define __MT76_MAC_H
-
-u32 mt76x0_mac_process_rx(struct mt76x0_dev *dev, struct sk_buff *skb,
- void *rxi);
-#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
index c3cea52ec0dc..9273d2d2764a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
@@ -13,21 +13,33 @@
* GNU General Public License for more details.
*/
-#include "mt76x0.h"
-#include "mac.h"
-#include "../mt76x02_util.h"
#include <linux/etherdevice.h>
+#include "mt76x0.h"
+
+static int
+mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
+{
+ int ret;
+
+ cancel_delayed_work_sync(&dev->cal_work);
+
+ mt76_set_channel(&dev->mt76);
+ ret = mt76x0_phy_set_channel(dev, chandef);
+ mt76_txq_schedule_all(&dev->mt76);
+
+ return ret;
+}
int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
{
- struct mt76x0_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
int ret = 0;
mutex_lock(&dev->mt76.mutex);
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
ieee80211_stop_queues(hw);
- ret = mt76x0_phy_set_channel(dev, &hw->conf.chandef);
+ ret = mt76x0_set_channel(dev, &hw->conf.chandef);
ieee80211_wake_queues(hw);
}
@@ -54,7 +66,7 @@ int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
EXPORT_SYMBOL_GPL(mt76x0_config);
static void
-mt76x0_addr_wr(struct mt76x0_dev *dev, const u32 offset, const u8 *addr)
+mt76x0_addr_wr(struct mt76x02_dev *dev, const u32 offset, const u8 *addr)
{
mt76_wr(dev, offset, get_unaligned_le32(addr));
mt76_wr(dev, offset + 4, addr[4] | addr[5] << 8);
@@ -64,13 +76,10 @@ void mt76x0_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info, u32 changed)
{
- struct mt76x0_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
mutex_lock(&dev->mt76.mutex);
- if (changed & BSS_CHANGED_ASSOC)
- mt76x0_phy_con_cal_onoff(dev, info);
-
if (changed & BSS_CHANGED_BSSID) {
mt76x0_addr_wr(dev, MT_MAC_BSSID_DW0, info->bssid);
@@ -117,10 +126,8 @@ EXPORT_SYMBOL_GPL(mt76x0_bss_info_changed);
void mt76x0_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const u8 *mac_addr)
{
- struct mt76x0_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
- cancel_delayed_work_sync(&dev->cal_work);
- mt76x0_agc_save(dev);
set_bit(MT76_SCANNING, &dev->mt76.state);
}
EXPORT_SYMBOL_GPL(mt76x0_sw_scan);
@@ -128,19 +135,15 @@ EXPORT_SYMBOL_GPL(mt76x0_sw_scan);
void mt76x0_sw_scan_complete(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
- struct mt76x0_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
- mt76x0_agc_restore(dev);
clear_bit(MT76_SCANNING, &dev->mt76.state);
-
- ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
- MT_CALIBRATE_INTERVAL);
}
EXPORT_SYMBOL_GPL(mt76x0_sw_scan_complete);
int mt76x0_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
- struct mt76x0_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, value);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h
index 297bf6b94d8c..3b34e1d2769f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h
@@ -17,7 +17,7 @@
#include "../mt76x02_mcu.h"
-struct mt76x0_dev;
+struct mt76x02_dev;
#define MT_MCU_IVB_SIZE 0x40
#define MT_MCU_DLM_OFFSET 0x80000
@@ -39,11 +39,14 @@ enum mcu_calibrate {
MCU_CAL_TXDCOC,
MCU_CAL_RX_GROUP_DELAY,
MCU_CAL_TX_GROUP_DELAY,
+ MCU_CAL_VCO,
+ MCU_CAL_NO_SIGNAL = 0xfe,
+ MCU_CAL_FULL = 0xff,
};
-int mt76x0e_mcu_init(struct mt76x0_dev *dev);
-int mt76x0u_mcu_init(struct mt76x0_dev *dev);
-static inline int mt76x0_firmware_running(struct mt76x0_dev *dev)
+int mt76x0e_mcu_init(struct mt76x02_dev *dev);
+int mt76x0u_mcu_init(struct mt76x02_dev *dev);
+static inline int mt76x0_firmware_running(struct mt76x02_dev *dev)
{
return mt76_rr(dev, MT_MCU_COM_REG0) == 1;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
index a37dbf944b15..2187bafaf2e9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
@@ -25,114 +25,33 @@
#include <net/mac80211.h>
#include <linux/debugfs.h>
-#include "../mt76.h"
-#include "../mt76x02_regs.h"
-#include "../mt76x02_mac.h"
+#include "../mt76x02.h"
#include "eeprom.h"
#define MT_CALIBRATE_INTERVAL (4 * HZ)
-#define MT_FREQ_CAL_INIT_DELAY (30 * HZ)
-#define MT_FREQ_CAL_CHECK_INTERVAL (10 * HZ)
-#define MT_FREQ_CAL_ADJ_INTERVAL (HZ / 2)
-
-#define MT_BBP_REG_VERSION 0x00
-
#define MT_USB_AGGR_SIZE_LIMIT 21 /* * 1024B */
#define MT_USB_AGGR_TIMEOUT 0x80 /* * 33ns */
-struct mac_stats {
- u64 rx_stat[6];
- u64 tx_stat[6];
- u64 aggr_stat[2];
- u64 aggr_n[32];
- u64 zero_len_del[2];
-};
-
-struct mt76x0_eeprom_params;
-
-#define MT_EE_TEMPERATURE_SLOPE 39
-#define MT_FREQ_OFFSET_INVALID -128
-
-/* addr req mask */
-#define MT_VEND_TYPE_EEPROM BIT(31)
-#define MT_VEND_TYPE_CFG BIT(30)
-#define MT_VEND_TYPE_MASK (MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)
-
-#define MT_VEND_ADDR(type, n) (MT_VEND_TYPE_##type | (n))
-
-enum mt_bw {
- MT_BW_20,
- MT_BW_40,
-};
-
-/**
- * struct mt76x0_dev - adapter structure
- * @lock: protects @wcid->tx_rate.
- * @mac_lock: locks out mac80211's tx status and rx paths.
- * @con_mon_lock: protects @ap_bssid, @bcn_*, @avg_rssi.
- * @mutex: ensures exclusive access from mac80211 callbacks.
- * @reg_atomic_mutex: ensures atomicity of indirect register accesses
- * (accesses to RF and BBP).
- * @hw_atomic_mutex: ensures exclusive access to HW during critical
- * operations (power management, channel switch).
- */
-struct mt76x0_dev {
- struct mt76_dev mt76; /* must be first */
-
- u8 data[32];
-
- struct delayed_work cal_work;
- struct delayed_work mac_work;
-
- spinlock_t mac_lock;
-
- struct mt76x0_caldata caldata;
-
- struct mutex reg_atomic_mutex;
- struct mutex hw_atomic_mutex;
-
- atomic_t avg_ampdu_len;
-
- /* Connection monitoring things */
- spinlock_t con_mon_lock;
- u8 ap_bssid[ETH_ALEN];
-
- s8 bcn_freq_off;
- u8 bcn_phy_mode;
-
- int avg_rssi; /* starts at 0 and converges */
-
- u8 agc_save;
-
- bool no_2ghz;
-
- struct mac_stats stats;
-};
-
-static inline bool is_mt7610e(struct mt76x0_dev *dev)
+static inline bool is_mt7610e(struct mt76x02_dev *dev)
{
/* TODO */
return false;
}
-void mt76x0_init_debugfs(struct mt76x0_dev *dev);
-
-/* Compatibility with mt76 */
-#define mt76_rmw_field(_dev, _reg, _field, _val) \
- mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
+void mt76x0_init_debugfs(struct mt76x02_dev *dev);
/* Init */
-struct mt76x0_dev *
+struct mt76x02_dev *
mt76x0_alloc_device(struct device *pdev,
const struct mt76_driver_ops *drv_ops,
const struct ieee80211_ops *ops);
-int mt76x0_init_hardware(struct mt76x0_dev *dev);
-int mt76x0_register_device(struct mt76x0_dev *dev);
-void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset);
+int mt76x0_init_hardware(struct mt76x02_dev *dev);
+int mt76x0_register_device(struct mt76x02_dev *dev);
+void mt76x0_chip_onoff(struct mt76x02_dev *dev, bool enable, bool reset);
-int mt76x0_mac_start(struct mt76x0_dev *dev);
-void mt76x0_mac_stop(struct mt76x0_dev *dev);
+int mt76x0_mac_start(struct mt76x02_dev *dev);
+void mt76x0_mac_stop(struct mt76x02_dev *dev);
int mt76x0_config(struct ieee80211_hw *hw, u32 changed);
void mt76x0_bss_info_changed(struct ieee80211_hw *hw,
@@ -145,35 +64,20 @@ void mt76x0_sw_scan_complete(struct ieee80211_hw *hw,
int mt76x0_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
/* PHY */
-void mt76x0_phy_init(struct mt76x0_dev *dev);
-int mt76x0_wait_bbp_ready(struct mt76x0_dev *dev);
-void mt76x0_agc_save(struct mt76x0_dev *dev);
-void mt76x0_agc_restore(struct mt76x0_dev *dev);
-int mt76x0_phy_set_channel(struct mt76x0_dev *dev,
+void mt76x0_phy_init(struct mt76x02_dev *dev);
+int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev);
+int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
struct cfg80211_chan_def *chandef);
-void mt76x0_phy_recalibrate_after_assoc(struct mt76x0_dev *dev);
-int mt76x0_phy_get_rssi(struct mt76x0_dev *dev, struct mt76x02_rxwi *rxwi);
-void mt76x0_phy_con_cal_onoff(struct mt76x0_dev *dev,
- struct ieee80211_bss_conf *info);
-void mt76x0_phy_set_txpower(struct mt76x0_dev *dev);
+void mt76x0_phy_recalibrate_after_assoc(struct mt76x02_dev *dev);
+void mt76x0_phy_set_txpower(struct mt76x02_dev *dev);
+void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on);
/* MAC */
void mt76x0_mac_work(struct work_struct *work);
-void mt76x0_mac_set_protection(struct mt76x0_dev *dev, bool legacy_prot,
+void mt76x0_mac_set_protection(struct mt76x02_dev *dev, bool legacy_prot,
int ht_mode);
-void mt76x0_mac_set_short_preamble(struct mt76x0_dev *dev, bool short_preamb);
-void mt76x0_mac_config_tsf(struct mt76x0_dev *dev, bool enable, int interval);
-void mt76x0_mac_set_ampdu_factor(struct mt76x0_dev *dev);
-
-/* TX */
-void mt76x0_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
- struct sk_buff *skb);
-struct mt76x02_txwi *
-mt76x0_push_txwi(struct mt76x0_dev *dev, struct sk_buff *skb,
- struct ieee80211_sta *sta, struct mt76_wcid *wcid,
- int pkt_len);
-
-void mt76x0_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
- struct sk_buff *skb);
+void mt76x0_mac_set_short_preamble(struct mt76x02_dev *dev, bool short_preamb);
+void mt76x0_mac_config_tsf(struct mt76x02_dev *dev, bool enable, int interval);
+void mt76x0_mac_set_ampdu_factor(struct mt76x02_dev *dev);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index 876291dd3c1e..522c86059bcb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -20,16 +20,15 @@
#include "mt76x0.h"
#include "mcu.h"
-#include "../mt76x02_dma.h"
-#include "../mt76x02_util.h"
static int mt76x0e_start(struct ieee80211_hw *hw)
{
- struct mt76x0_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
mutex_lock(&dev->mt76.mutex);
- mt76x02_mac_start(&dev->mt76);
+ mt76x02_mac_start(dev);
+ mt76x0_phy_calibrate(dev, true);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work,
MT_CALIBRATE_INTERVAL);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
@@ -41,13 +40,8 @@ static int mt76x0e_start(struct ieee80211_hw *hw)
return 0;
}
-static void mt76x0e_stop(struct ieee80211_hw *hw)
+static void mt76x0e_stop_hw(struct mt76x02_dev *dev)
{
- struct mt76x0_dev *dev = hw->priv;
-
- mutex_lock(&dev->mt76.mutex);
-
- clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
cancel_delayed_work_sync(&dev->cal_work);
cancel_delayed_work_sync(&dev->mac_work);
@@ -62,21 +56,38 @@ static void mt76x0e_stop(struct ieee80211_hw *hw)
0, 1000))
dev_warn(dev->mt76.dev, "TX DMA did not stop\n");
mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_RX_DMA_EN);
+}
+static void mt76x0e_stop(struct ieee80211_hw *hw)
+{
+ struct mt76x02_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mt76.mutex);
+ clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ mt76x0e_stop_hw(dev);
mutex_unlock(&dev->mt76.mutex);
}
static const struct ieee80211_ops mt76x0e_ops = {
- .tx = mt76x0_tx,
+ .tx = mt76x02_tx,
.start = mt76x0e_start,
.stop = mt76x0e_stop,
- .config = mt76x0_config,
.add_interface = mt76x02_add_interface,
.remove_interface = mt76x02_remove_interface,
+ .config = mt76x0_config,
.configure_filter = mt76x02_configure_filter,
+ .sta_add = mt76x02_sta_add,
+ .sta_remove = mt76x02_sta_remove,
+ .set_key = mt76x02_set_key,
+ .conf_tx = mt76x02_conf_tx,
+ .sw_scan_start = mt76x0_sw_scan,
+ .sw_scan_complete = mt76x0_sw_scan_complete,
+ .ampdu_action = mt76x02_ampdu_action,
+ .sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
+ .wake_tx_queue = mt76_wake_tx_queue,
};
-static int mt76x0e_register_device(struct mt76x0_dev *dev)
+static int mt76x0e_register_device(struct mt76x02_dev *dev)
{
int err;
@@ -84,12 +95,12 @@ static int mt76x0e_register_device(struct mt76x0_dev *dev)
if (!mt76x02_wait_for_mac(&dev->mt76))
return -ETIMEDOUT;
- mt76x02_dma_disable(&dev->mt76);
+ mt76x02_dma_disable(dev);
err = mt76x0e_mcu_init(dev);
if (err < 0)
return err;
- err = mt76x02_dma_init(&dev->mt76);
+ err = mt76x02_dma_init(dev);
if (err < 0)
return err;
@@ -101,30 +112,36 @@ static int mt76x0e_register_device(struct mt76x0_dev *dev)
u16 val;
mt76_clear(dev, MT_COEXCFG0, BIT(0));
- val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_0);
- if (val & MT_EE_NIC_CONF_0_PA_IO_CURRENT) {
- u32 data;
-
- /* set external external PA I/O
- * current to 16mA
- */
- data = mt76_rr(dev, 0x11c);
- val |= 0xc03;
- mt76_wr(dev, 0x11c, val);
- }
+
+ val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0);
+ if (!(val & MT_EE_NIC_CONF_0_PA_IO_CURRENT))
+ mt76_set(dev, MT_XO_CTRL7, 0xc03);
}
mt76_clear(dev, 0x110, BIT(9));
mt76_set(dev, MT_MAX_LEN_CFG, BIT(13));
+ err = mt76x0_register_device(dev);
+ if (err < 0)
+ return err;
+
+ set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+
return 0;
}
static int
mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- struct mt76x0_dev *dev;
- int ret = -ENODEV;
+ static const struct mt76_driver_ops drv_ops = {
+ .txwi_size = sizeof(struct mt76x02_txwi),
+ .tx_prepare_skb = mt76x02_tx_prepare_skb,
+ .tx_complete_skb = mt76x02_tx_complete_skb,
+ .rx_skb = mt76x02_queue_rx_skb,
+ .rx_poll_complete = mt76x02_rx_poll_complete,
+ };
+ struct mt76x02_dev *dev;
+ int ret;
ret = pcim_enable_device(pdev);
if (ret)
@@ -140,7 +157,7 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
return ret;
- dev = mt76x0_alloc_device(&pdev->dev, NULL, &mt76x0e_ops);
+ dev = mt76x0_alloc_device(&pdev->dev, &drv_ops, &mt76x0e_ops);
if (!dev)
return -ENOMEM;
@@ -149,6 +166,11 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION);
dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev);
+ ret = devm_request_irq(dev->mt76.dev, pdev->irq, mt76x02_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+ if (ret)
+ goto error;
+
ret = mt76x0e_register_device(dev);
if (ret < 0)
goto error;
@@ -160,12 +182,23 @@ error:
return ret;
}
+static void mt76x0e_cleanup(struct mt76x02_dev *dev)
+{
+ clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+ mt76x0_chip_onoff(dev, false, false);
+ mt76x0e_stop_hw(dev);
+ mt76x02_dma_cleanup(dev);
+ mt76x02_mcu_cleanup(dev);
+}
+
static void
mt76x0e_remove(struct pci_dev *pdev)
{
struct mt76_dev *mdev = pci_get_drvdata(pdev);
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
mt76_unregister_device(mdev);
+ mt76x0e_cleanup(dev);
ieee80211_free_hw(mdev->hw);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c
index e3cf049314bb..569861289aa5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c
@@ -24,7 +24,7 @@
#define MT_MCU_IVB_ADDR (MT_MCU_ILM_ADDR + 0x54000 - MT_MCU_IVB_SIZE)
-static int mt76x0e_load_firmware(struct mt76x0_dev *dev)
+static int mt76x0e_load_firmware(struct mt76x02_dev *dev)
{
bool is_combo_chip = mt76_chip(&dev->mt76) != 0x7610;
u32 val, ilm_len, dlm_len, offset = 0;
@@ -116,6 +116,7 @@ static int mt76x0e_load_firmware(struct mt76x0_dev *dev)
goto out;
}
+ mt76x02_set_ethtool_fwver(dev, hdr);
dev_dbg(dev->mt76.dev, "Firmware running!\n");
out:
@@ -126,7 +127,7 @@ out:
return err;
}
-int mt76x0e_mcu_init(struct mt76x0_dev *dev)
+int mt76x0e_mcu_init(struct mt76x02_dev *dev)
{
static const struct mt76_mcu_ops mt76x0e_mcu_ops = {
.mcu_msg_alloc = mt76x02_mcu_msg_alloc,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
index 4fd2c65e196a..cf024950e0ed 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
@@ -14,6 +14,9 @@
* GNU General Public License for more details.
*/
+#include <linux/kernel.h>
+#include <linux/etherdevice.h>
+
#include "mt76x0.h"
#include "mcu.h"
#include "eeprom.h"
@@ -23,10 +26,8 @@
#include "initvals_phy.h"
#include "../mt76x02_phy.h"
-#include <linux/etherdevice.h>
-
static int
-mt76x0_rf_csr_wr(struct mt76x0_dev *dev, u32 offset, u8 value)
+mt76x0_rf_csr_wr(struct mt76x02_dev *dev, u32 offset, u8 value)
{
int ret = 0;
u8 bank, reg;
@@ -37,10 +38,10 @@ mt76x0_rf_csr_wr(struct mt76x0_dev *dev, u32 offset, u8 value)
bank = MT_RF_BANK(offset);
reg = MT_RF_REG(offset);
- if (WARN_ON_ONCE(reg > 64) || WARN_ON_ONCE(bank) > 8)
+ if (WARN_ON_ONCE(reg > 127) || WARN_ON_ONCE(bank > 8))
return -EINVAL;
- mutex_lock(&dev->reg_atomic_mutex);
+ mutex_lock(&dev->phy_mutex);
if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100)) {
ret = -ETIMEDOUT;
@@ -55,7 +56,7 @@ mt76x0_rf_csr_wr(struct mt76x0_dev *dev, u32 offset, u8 value)
MT_RF_CSR_CFG_KICK);
trace_mt76x0_rf_write(&dev->mt76, bank, offset, value);
out:
- mutex_unlock(&dev->reg_atomic_mutex);
+ mutex_unlock(&dev->phy_mutex);
if (ret < 0)
dev_err(dev->mt76.dev, "Error: RF write %d:%d failed:%d!!\n",
@@ -64,8 +65,7 @@ out:
return ret;
}
-static int
-mt76x0_rf_csr_rr(struct mt76x0_dev *dev, u32 offset)
+static int mt76x0_rf_csr_rr(struct mt76x02_dev *dev, u32 offset)
{
int ret = -ETIMEDOUT;
u32 val;
@@ -77,10 +77,10 @@ mt76x0_rf_csr_rr(struct mt76x0_dev *dev, u32 offset)
bank = MT_RF_BANK(offset);
reg = MT_RF_REG(offset);
- if (WARN_ON_ONCE(reg > 64) || WARN_ON_ONCE(bank) > 8)
+ if (WARN_ON_ONCE(reg > 127) || WARN_ON_ONCE(bank > 8))
return -EINVAL;
- mutex_lock(&dev->reg_atomic_mutex);
+ mutex_lock(&dev->phy_mutex);
if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
goto out;
@@ -100,7 +100,7 @@ mt76x0_rf_csr_rr(struct mt76x0_dev *dev, u32 offset)
trace_mt76x0_rf_read(&dev->mt76, bank, offset, ret);
}
out:
- mutex_unlock(&dev->reg_atomic_mutex);
+ mutex_unlock(&dev->phy_mutex);
if (ret < 0)
dev_err(dev->mt76.dev, "Error: RF read %d:%d failed:%d!!\n",
@@ -110,36 +110,38 @@ out:
}
static int
-rf_wr(struct mt76x0_dev *dev, u32 offset, u8 val)
+rf_wr(struct mt76x02_dev *dev, u32 offset, u8 val)
{
- if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state)) {
+ if (mt76_is_usb(dev)) {
struct mt76_reg_pair pair = {
.reg = offset,
.value = val,
};
+ WARN_ON_ONCE(!test_bit(MT76_STATE_MCU_RUNNING,
+ &dev->mt76.state));
return mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, &pair, 1);
} else {
- WARN_ON_ONCE(1);
return mt76x0_rf_csr_wr(dev, offset, val);
}
}
static int
-rf_rr(struct mt76x0_dev *dev, u32 offset)
+rf_rr(struct mt76x02_dev *dev, u32 offset)
{
int ret;
u32 val;
- if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state)) {
+ if (mt76_is_usb(dev)) {
struct mt76_reg_pair pair = {
.reg = offset,
};
+ WARN_ON_ONCE(!test_bit(MT76_STATE_MCU_RUNNING,
+ &dev->mt76.state));
ret = mt76_rd_rp(dev, MT_MCU_MEMMAP_RF, &pair, 1);
val = pair.value;
} else {
- WARN_ON_ONCE(1);
ret = val = mt76x0_rf_csr_rr(dev, offset);
}
@@ -147,7 +149,7 @@ rf_rr(struct mt76x0_dev *dev, u32 offset)
}
static int
-rf_rmw(struct mt76x0_dev *dev, u32 offset, u8 mask, u8 val)
+rf_rmw(struct mt76x02_dev *dev, u32 offset, u8 mask, u8 val)
{
int ret;
@@ -163,31 +165,43 @@ rf_rmw(struct mt76x0_dev *dev, u32 offset, u8 mask, u8 val)
}
static int
-rf_set(struct mt76x0_dev *dev, u32 offset, u8 val)
+rf_set(struct mt76x02_dev *dev, u32 offset, u8 val)
{
return rf_rmw(dev, offset, 0, val);
}
#if 0
static int
-rf_clear(struct mt76x0_dev *dev, u32 offset, u8 mask)
+rf_clear(struct mt76x02_dev *dev, u32 offset, u8 mask)
{
return rf_rmw(dev, offset, mask, 0);
}
#endif
-#define RF_RANDOM_WRITE(dev, tab) \
- mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, \
- tab, ARRAY_SIZE(tab))
+static void
+mt76x0_rf_csr_wr_rp(struct mt76x02_dev *dev, const struct mt76_reg_pair *data,
+ int n)
+{
+ while (n-- > 0) {
+ mt76x0_rf_csr_wr(dev, data->reg, data->value);
+ data++;
+ }
+}
+
+#define RF_RANDOM_WRITE(dev, tab) do { \
+ if (mt76_is_mmio(dev)) \
+ mt76x0_rf_csr_wr_rp(dev, tab, ARRAY_SIZE(tab)); \
+ else \
+ mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, tab, ARRAY_SIZE(tab));\
+} while (0)
-int mt76x0_wait_bbp_ready(struct mt76x0_dev *dev)
+int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev)
{
int i = 20;
u32 val;
do {
val = mt76_rr(dev, MT_BBP(CORE, 0));
- printk("BBP version %08x\n", val);
if (val && ~val)
break;
} while (--i);
@@ -197,44 +211,11 @@ int mt76x0_wait_bbp_ready(struct mt76x0_dev *dev)
return -EIO;
}
+ dev_dbg(dev->mt76.dev, "BBP version %08x\n", val);
return 0;
}
-static void
-mt76x0_bbp_set_ctrlch(struct mt76x0_dev *dev, enum nl80211_chan_width width,
- u8 ctrl)
-{
- int core_val, agc_val;
-
- switch (width) {
- case NL80211_CHAN_WIDTH_80:
- core_val = 3;
- agc_val = 7;
- break;
- case NL80211_CHAN_WIDTH_40:
- core_val = 2;
- agc_val = 3;
- break;
- default:
- core_val = 0;
- agc_val = 1;
- break;
- }
-
- mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
- mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
- mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
- mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
-}
-
-int mt76x0_phy_get_rssi(struct mt76x0_dev *dev, struct mt76x02_rxwi *rxwi)
-{
- struct mt76x0_caldata *caldata = &dev->caldata;
-
- return rxwi->rssi[0] + caldata->rssi_offset[0] - caldata->lna_gain;
-}
-
-static void mt76x0_vco_cal(struct mt76x0_dev *dev, u8 channel)
+static void mt76x0_vco_cal(struct mt76x02_dev *dev, u8 channel)
{
u8 val;
@@ -291,14 +272,7 @@ static void mt76x0_vco_cal(struct mt76x0_dev *dev, u8 channel)
}
static void
-mt76x0_mac_set_ctrlch(struct mt76x0_dev *dev, bool primary_upper)
-{
- mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
- primary_upper);
-}
-
-static void
-mt76x0_phy_set_band(struct mt76x0_dev *dev, enum nl80211_band band)
+mt76x0_phy_set_band(struct mt76x02_dev *dev, enum nl80211_band band)
{
switch (band) {
case NL80211_BAND_2GHZ:
@@ -307,9 +281,6 @@ mt76x0_phy_set_band(struct mt76x0_dev *dev, enum nl80211_band band)
rf_wr(dev, MT_RF(5, 0), 0x45);
rf_wr(dev, MT_RF(6, 0), 0x44);
- mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
- mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
-
mt76_wr(dev, MT_TX_ALC_VGA3, 0x00050007);
mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x003E0002);
break;
@@ -319,9 +290,6 @@ mt76x0_phy_set_band(struct mt76x0_dev *dev, enum nl80211_band band)
rf_wr(dev, MT_RF(5, 0), 0x44);
rf_wr(dev, MT_RF(6, 0), 0x45);
- mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
- mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
-
mt76_wr(dev, MT_TX_ALC_VGA3, 0x00000005);
mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x01010102);
break;
@@ -331,7 +299,7 @@ mt76x0_phy_set_band(struct mt76x0_dev *dev, enum nl80211_band band)
}
static void
-mt76x0_phy_set_chan_rf_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_band)
+mt76x0_phy_set_chan_rf_params(struct mt76x02_dev *dev, u8 channel, u16 rf_bw_band)
{
u16 rf_band = rf_bw_band & 0xff00;
u16 rf_bw = rf_bw_band & 0x00ff;
@@ -483,7 +451,7 @@ mt76x0_phy_set_chan_rf_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_band
mt76_wr(dev, MT_RF_MISC, mac_reg);
band = (rf_band & RF_G_BAND) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
- if (mt76x02_ext_pa_enabled(&dev->mt76, band)) {
+ if (mt76x02_ext_pa_enabled(dev, band)) {
/*
MT_RF_MISC (offset: 0x0518)
[2]1'b1: enable external A band PA, 1'b0: disable external A band PA
@@ -522,7 +490,7 @@ mt76x0_phy_set_chan_rf_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_band
}
static void
-mt76x0_phy_set_chan_bbp_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_band)
+mt76x0_phy_set_chan_bbp_params(struct mt76x02_dev *dev, u16 rf_bw_band)
{
int i;
@@ -538,7 +506,7 @@ mt76x0_phy_set_chan_bbp_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_ban
u8 gain;
gain = FIELD_GET(MT_BBP_AGC_GAIN, val);
- gain -= dev->caldata.lna_gain * 2;
+ gain -= dev->cal.rx.lna_gain * 2;
val &= ~MT_BBP_AGC_GAIN;
val |= FIELD_PREP(MT_BBP_AGC_GAIN, gain);
mt76_wr(dev, pair->reg, val);
@@ -548,7 +516,7 @@ mt76x0_phy_set_chan_bbp_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_ban
}
}
-static void mt76x0_ant_select(struct mt76x0_dev *dev)
+static void mt76x0_ant_select(struct mt76x02_dev *dev)
{
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
@@ -568,7 +536,7 @@ static void mt76x0_ant_select(struct mt76x0_dev *dev)
}
static void
-mt76x0_bbp_set_bw(struct mt76x0_dev *dev, enum nl80211_chan_width width)
+mt76x0_bbp_set_bw(struct mt76x02_dev *dev, enum nl80211_chan_width width)
{
enum { BW_20 = 0, BW_40 = 1, BW_80 = 2, BW_10 = 4};
int bw;
@@ -595,10 +563,10 @@ mt76x0_bbp_set_bw(struct mt76x0_dev *dev, enum nl80211_chan_width width)
return ;
}
- mt76x02_mcu_function_select(&dev->mt76, BW_SETTING, bw, false);
+ mt76x02_mcu_function_select(dev, BW_SETTING, bw, false);
}
-void mt76x0_phy_set_txpower(struct mt76x0_dev *dev)
+void mt76x0_phy_set_txpower(struct mt76x02_dev *dev)
{
struct mt76_rate_power *t = &dev->mt76.rate_power;
u8 info[2];
@@ -611,12 +579,53 @@ void mt76x0_phy_set_txpower(struct mt76x0_dev *dev)
dev->mt76.txpower_cur = mt76x02_get_max_rate_power(t);
mt76x02_add_rate_power_offset(t, -info[0]);
- mt76x02_phy_set_txpower(&dev->mt76, info[0], info[1]);
+ mt76x02_phy_set_txpower(dev, info[0], info[1]);
}
-static int
-__mt76x0_phy_set_channel(struct mt76x0_dev *dev,
- struct cfg80211_chan_def *chandef)
+void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on)
+{
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ u32 val, tx_alc, reg_val;
+
+ if (power_on) {
+ mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_VCO, chan->hw_value,
+ false);
+ usleep_range(10, 20);
+ /* XXX: tssi */
+ }
+
+ tx_alc = mt76_rr(dev, MT_TX_ALC_CFG_0);
+ mt76_wr(dev, MT_TX_ALC_CFG_0, 0);
+ usleep_range(500, 700);
+
+ reg_val = mt76_rr(dev, MT_BBP(IBI, 9));
+ mt76_wr(dev, MT_BBP(IBI, 9), 0xffffff7e);
+
+ if (chan->band == NL80211_BAND_5GHZ) {
+ if (chan->hw_value < 100)
+ val = 0x701;
+ else if (chan->hw_value < 140)
+ val = 0x801;
+ else
+ val = 0x901;
+ } else {
+ val = 0x600;
+ }
+
+ mt76x02_mcu_calibrate(dev, MCU_CAL_FULL, val, false);
+ msleep(350);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 1, false);
+ usleep_range(15000, 20000);
+
+ mt76_wr(dev, MT_BBP(IBI, 9), reg_val);
+ mt76_wr(dev, MT_TX_ALC_CFG_0, tx_alc);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1, false);
+}
+EXPORT_SYMBOL_GPL(mt76x0_phy_calibrate);
+
+int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
+ struct cfg80211_chan_def *chandef)
{
u32 ext_cca_chan[4] = {
[0] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 0) |
@@ -674,9 +683,19 @@ __mt76x0_phy_set_channel(struct mt76x0_dev *dev,
break;
}
- mt76x0_bbp_set_bw(dev, chandef->width);
- mt76x0_bbp_set_ctrlch(dev, chandef->width, ch_group_index);
- mt76x0_mac_set_ctrlch(dev, ch_group_index & 1);
+ if (mt76_is_usb(dev)) {
+ mt76x0_bbp_set_bw(dev, chandef->width);
+ } else {
+ if (chandef->width == NL80211_CHAN_WIDTH_80 ||
+ chandef->width == NL80211_CHAN_WIDTH_40)
+ val = 0x201;
+ else
+ val = 0x601;
+ mt76_wr(dev, MT_TX_SW_CFG0, val);
+ }
+ mt76x02_phy_set_bw(dev, chandef->width, ch_group_index);
+ mt76x02_phy_set_band(dev, chandef->chan->band,
+ ch_group_index & 1);
mt76x0_ant_select(dev);
mt76_rmw(dev, MT_EXT_CCA_CFG,
@@ -689,7 +708,6 @@ __mt76x0_phy_set_channel(struct mt76x0_dev *dev,
mt76x0_phy_set_band(dev, chandef->chan->band);
mt76x0_phy_set_chan_rf_params(dev, channel, rf_bw_band);
- mt76x0_read_rx_gain(dev);
/* set Japan Tx filter at channel 14 */
val = mt76_rr(dev, MT_BBP(CORE, 1));
@@ -699,39 +717,37 @@ __mt76x0_phy_set_channel(struct mt76x0_dev *dev,
val &= ~0x20;
mt76_wr(dev, MT_BBP(CORE, 1), val);
- mt76x0_phy_set_chan_bbp_params(dev, channel, rf_bw_band);
+ mt76x0_read_rx_gain(dev);
+ mt76x0_phy_set_chan_bbp_params(dev, rf_bw_band);
+ mt76x02_init_agc_gain(dev);
- /* Vendor driver don't do it */
- /* mt76x0_phy_set_tx_power(dev, channel, rf_bw_band); */
+ if (mt76_is_usb(dev)) {
+ mt76x0_vco_cal(dev, channel);
+ } else {
+ /* enable vco */
+ rf_set(dev, MT_RF(0, 4), BIT(7));
+ }
- mt76x0_vco_cal(dev, channel);
if (scan)
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, 1, false);
+ return 0;
+ if (mt76_is_mmio(dev))
+ mt76x0_phy_calibrate(dev, false);
mt76x0_phy_set_txpower(dev);
- return 0;
-}
-
-int mt76x0_phy_set_channel(struct mt76x0_dev *dev,
- struct cfg80211_chan_def *chandef)
-{
- int ret;
-
- mutex_lock(&dev->hw_atomic_mutex);
- ret = __mt76x0_phy_set_channel(dev, chandef);
- mutex_unlock(&dev->hw_atomic_mutex);
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
- return ret;
+ return 0;
}
-void mt76x0_phy_recalibrate_after_assoc(struct mt76x0_dev *dev)
+void mt76x0_phy_recalibrate_after_assoc(struct mt76x02_dev *dev)
{
u32 tx_alc, reg_val;
u8 channel = dev->mt76.chandef.chan->hw_value;
int is_5ghz = (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) ? 1 : 0;
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_R, 0, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, false);
mt76x0_vco_cal(dev, channel);
@@ -739,124 +755,119 @@ void mt76x0_phy_recalibrate_after_assoc(struct mt76x0_dev *dev)
mt76_wr(dev, MT_TX_ALC_CFG_0, 0);
usleep_range(500, 700);
- reg_val = mt76_rr(dev, 0x2124);
- reg_val &= 0xffffff7e;
- mt76_wr(dev, 0x2124, reg_val);
+ reg_val = mt76_rr(dev, MT_BBP(IBI, 9));
+ mt76_wr(dev, MT_BBP(IBI, 9), 0xffffff7e);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, 0, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 0, false);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_LC, is_5ghz, false);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_LOFT, is_5ghz, false);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TXIQ, is_5ghz, false);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TX_GROUP_DELAY,
- is_5ghz, false);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXIQ, is_5ghz, false);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RX_GROUP_DELAY,
- is_5ghz, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_LC, is_5ghz, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_LOFT, is_5ghz, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TX_GROUP_DELAY, is_5ghz, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQ, is_5ghz, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RX_GROUP_DELAY, is_5ghz, false);
- mt76_wr(dev, 0x2124, reg_val);
+ mt76_wr(dev, MT_BBP(IBI, 9), reg_val);
mt76_wr(dev, MT_TX_ALC_CFG_0, tx_alc);
msleep(100);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, 1, false);
-}
-
-void mt76x0_agc_save(struct mt76x0_dev *dev)
-{
- /* Only one RX path */
- dev->agc_save = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, 8)));
-}
-
-void mt76x0_agc_restore(struct mt76x0_dev *dev)
-{
- mt76_rmw_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN, dev->agc_save);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1, false);
}
-static void mt76x0_temp_sensor(struct mt76x0_dev *dev)
+static void mt76x0_temp_sensor(struct mt76x02_dev *dev)
{
u8 rf_b7_73, rf_b0_66, rf_b0_67;
- int cycle, temp;
- u32 val;
- s32 sval;
+ s8 val;
rf_b7_73 = rf_rr(dev, MT_RF(7, 73));
rf_b0_66 = rf_rr(dev, MT_RF(0, 66));
- rf_b0_67 = rf_rr(dev, MT_RF(0, 73));
+ rf_b0_67 = rf_rr(dev, MT_RF(0, 67));
rf_wr(dev, MT_RF(7, 73), 0x02);
rf_wr(dev, MT_RF(0, 66), 0x23);
- rf_wr(dev, MT_RF(0, 73), 0x01);
+ rf_wr(dev, MT_RF(0, 67), 0x01);
mt76_wr(dev, MT_BBP(CORE, 34), 0x00080055);
- for (cycle = 0; cycle < 2000; cycle++) {
- val = mt76_rr(dev, MT_BBP(CORE, 34));
- if (!(val & 0x10))
- break;
- udelay(3);
- }
-
- if (cycle >= 2000) {
- val &= 0x10;
- mt76_wr(dev, MT_BBP(CORE, 34), val);
+ if (!mt76_poll(dev, MT_BBP(CORE, 34), BIT(4), 0, 2000)) {
+ mt76_clear(dev, MT_BBP(CORE, 34), BIT(4));
goto done;
}
- sval = mt76_rr(dev, MT_BBP(CORE, 35)) & 0xff;
- if (!(sval & 0x80))
- sval &= 0x7f; /* Positive */
- else
- sval |= 0xffffff00; /* Negative */
+ val = mt76_rr(dev, MT_BBP(CORE, 35));
+ val = (35 * (val - dev->cal.rx.temp_offset)) / 10 + 25;
- temp = (35 * (sval - dev->caldata.temp_offset)) / 10 + 25;
+ if (abs(val - dev->cal.temp_vco) > 20) {
+ mt76x02_mcu_calibrate(dev, MCU_CAL_VCO,
+ dev->mt76.chandef.chan->hw_value,
+ false);
+ dev->cal.temp_vco = val;
+ }
+ if (abs(val - dev->cal.temp) > 30) {
+ mt76x0_phy_calibrate(dev, false);
+ dev->cal.temp = val;
+ }
done:
rf_wr(dev, MT_RF(7, 73), rf_b7_73);
rf_wr(dev, MT_RF(0, 66), rf_b0_66);
- rf_wr(dev, MT_RF(0, 73), rf_b0_67);
+ rf_wr(dev, MT_RF(0, 67), rf_b0_67);
}
-static void mt76x0_dynamic_vga_tuning(struct mt76x0_dev *dev)
+static void mt76x0_phy_set_gain_val(struct mt76x02_dev *dev)
{
- u32 val, init_vga;
-
- init_vga = (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) ? 0x54 : 0x4E;
- if (dev->avg_rssi > -60)
- init_vga -= 0x20;
- else if (dev->avg_rssi > -70)
- init_vga -= 0x10;
-
- val = mt76_rr(dev, MT_BBP(AGC, 8));
- val &= 0xFFFF80FF;
- val |= init_vga << 8;
- mt76_wr(dev, MT_BBP(AGC,8), val);
+ u8 gain = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust;
+ u32 val = 0x122c << 16 | 0xf2;
+
+ mt76_wr(dev, MT_BBP(AGC, 8),
+ val | FIELD_PREP(MT_BBP_AGC_GAIN, gain));
}
-static void mt76x0_phy_calibrate(struct work_struct *work)
+static void
+mt76x0_phy_update_channel_gain(struct mt76x02_dev *dev)
{
- struct mt76x0_dev *dev = container_of(work, struct mt76x0_dev,
- cal_work.work);
+ bool gain_change;
+ u8 gain_delta;
+ int low_gain;
- mt76x0_dynamic_vga_tuning(dev);
- mt76x0_temp_sensor(dev);
+ dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev);
- ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
- MT_CALIBRATE_INTERVAL);
+ low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) +
+ (dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev));
+
+ gain_change = (dev->cal.low_gain & 2) ^ (low_gain & 2);
+ dev->cal.low_gain = low_gain;
+
+ if (!gain_change) {
+ if (mt76x02_phy_adjust_vga_gain(dev))
+ mt76x0_phy_set_gain_val(dev);
+ return;
+ }
+
+ dev->cal.agc_gain_adjust = (low_gain == 2) ? 0 : 10;
+ gain_delta = (low_gain == 2) ? 10 : 0;
+
+ dev->cal.agc_gain_cur[0] = dev->cal.agc_gain_init[0] - gain_delta;
+ mt76x0_phy_set_gain_val(dev);
+
+ /* clear false CCA counters */
+ mt76_rr(dev, MT_RX_STAT_1);
}
-void mt76x0_phy_con_cal_onoff(struct mt76x0_dev *dev,
- struct ieee80211_bss_conf *info)
+static void mt76x0_phy_calibration_work(struct work_struct *work)
{
- /* Start/stop collecting beacon data */
- spin_lock_bh(&dev->con_mon_lock);
- ether_addr_copy(dev->ap_bssid, info->bssid);
- dev->avg_rssi = 0;
- dev->bcn_freq_off = MT_FREQ_OFFSET_INVALID;
- spin_unlock_bh(&dev->con_mon_lock);
+ struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
+ cal_work.work);
+
+ mt76x0_phy_update_channel_gain(dev);
+ if (!mt76x0_tssi_enabled(dev))
+ mt76x0_temp_sensor(dev);
+
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
}
-static void
-mt76x0_rf_init(struct mt76x0_dev *dev)
+static void mt76x0_rf_init(struct mt76x02_dev *dev)
{
int i;
u8 val;
@@ -889,7 +900,7 @@ mt76x0_rf_init(struct mt76x0_dev *dev)
E2: B0.R21<0>: xo_cxo<0>, B0.R22<7:0>: xo_cxo<8:1>
*/
rf_wr(dev, MT_RF(0, 22),
- min_t(u8, dev->caldata.freq_offset, 0xbf));
+ min_t(u8, dev->cal.rx.freq_offset, 0xbf));
val = rf_rr(dev, MT_RF(0, 22));
/*
@@ -909,11 +920,11 @@ mt76x0_rf_init(struct mt76x0_dev *dev)
rf_set(dev, MT_RF(0, 4), 0x80);
}
-void mt76x0_phy_init(struct mt76x0_dev *dev)
+void mt76x0_phy_init(struct mt76x02_dev *dev)
{
- INIT_DELAYED_WORK(&dev->cal_work, mt76x0_phy_calibrate);
+ INIT_DELAYED_WORK(&dev->cal_work, mt76x0_phy_calibration_work);
mt76x0_rf_init(dev);
- mt76x02_phy_set_rxpath(&dev->mt76);
- mt76x02_phy_set_txdac(&dev->mt76);
+ mt76x02_phy_set_rxpath(dev);
+ mt76x02_phy_set_txdac(dev);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h
index 36bbdd585163..75d1d6738c34 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h
@@ -17,7 +17,6 @@
#include <linux/tracepoint.h>
#include "mt76x0.h"
-#include "mac.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM mt76x0
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c b/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c
deleted file mode 100644
index b3c5dc2ffeb1..000000000000
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include "mt76x0.h"
-#include "trace.h"
-#include "../mt76x02_util.h"
-#include "../mt76x02_usb.h"
-
-struct mt76x02_txwi *
-mt76x0_push_txwi(struct mt76x0_dev *dev, struct sk_buff *skb,
- struct ieee80211_sta *sta, struct mt76_wcid *wcid,
- int pkt_len)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_tx_rate *rate = &info->control.rates[0];
- struct mt76x02_txwi *txwi;
- unsigned long flags;
- u16 rate_ctl;
- u8 nss;
-
- txwi = (struct mt76x02_txwi *)skb_push(skb, sizeof(struct mt76x02_txwi));
- memset(txwi, 0, sizeof(*txwi));
-
- if (!wcid->tx_rate_set)
- ieee80211_get_tx_rates(info->control.vif, sta, skb,
- info->control.rates, 1);
-
- spin_lock_irqsave(&dev->mt76.lock, flags);
- if (rate->idx < 0 || !rate->count) {
- rate_ctl = wcid->tx_rate;
- nss = wcid->tx_rate_nss;
- } else {
- rate_ctl = mt76x02_mac_tx_rate_val(&dev->mt76, rate, &nss);
- }
- spin_unlock_irqrestore(&dev->mt76.lock, flags);
-
- txwi->wcid = wcid->idx;
- txwi->rate = cpu_to_le16(rate_ctl);
- txwi->pktid = (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) ? 1 : 0;
-
- mt76x02_mac_fill_txwi(txwi, skb, sta, pkt_len, nss);
-
- return txwi;
-}
-EXPORT_SYMBOL_GPL(mt76x0_push_txwi);
-
-void mt76x0_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
- struct sk_buff *skb)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct mt76x0_dev *dev = hw->priv;
- struct ieee80211_vif *vif = info->control.vif;
- struct mt76_wcid *wcid = &dev->mt76.global_wcid;
-
- if (control->sta) {
- struct mt76x02_sta *msta;
-
- msta = (struct mt76x02_sta *)control->sta->drv_priv;
- wcid = &msta->wcid;
- /* sw encrypted frames */
- if (!info->control.hw_key && wcid->hw_key_idx != 0xff)
- control->sta = NULL;
- }
-
- if (vif && !control->sta) {
- struct mt76x02_vif *mvif;
-
- mvif = (struct mt76x02_vif *)vif->drv_priv;
- wcid = &mvif->group_wcid;
- }
-
- mt76_tx(&dev->mt76, control->sta, wcid, skb);
-}
-EXPORT_SYMBOL_GPL(mt76x0_tx);
-
-void mt76x0_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
- struct sk_buff *skb)
-{
- struct mt76x0_dev *dev = container_of(mdev, struct mt76x0_dev, mt76);
- void *rxwi = skb->data;
-
- skb_pull(skb, sizeof(struct mt76x02_rxwi));
- if (!mt76x0_mac_process_rx(dev, skb, rxwi)) {
- dev_kfree_skb(skb);
- return;
- }
-
- mt76_rx(&dev->mt76, q, skb);
-}
-EXPORT_SYMBOL_GPL(mt76x0_queue_rx_skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index a76043213f55..a7fd36c2f633 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -18,7 +18,6 @@
#include "mt76x0.h"
#include "mcu.h"
#include "trace.h"
-#include "../mt76x02_util.h"
#include "../mt76x02_usb.h"
static struct usb_device_id mt76x0_device_table[] = {
@@ -49,7 +48,7 @@ static struct usb_device_id mt76x0_device_table[] = {
{ 0, }
};
-static void mt76x0_init_usb_dma(struct mt76x0_dev *dev)
+static void mt76x0_init_usb_dma(struct mt76x02_dev *dev)
{
u32 val;
@@ -76,7 +75,7 @@ static void mt76x0_init_usb_dma(struct mt76x0_dev *dev)
mt76_wr(dev, MT_USB_DMA_CFG, val);
}
-static void mt76x0u_cleanup(struct mt76x0_dev *dev)
+static void mt76x0u_cleanup(struct mt76x02_dev *dev)
{
clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
mt76x0_chip_onoff(dev, false, false);
@@ -84,16 +83,16 @@ static void mt76x0u_cleanup(struct mt76x0_dev *dev)
mt76u_mcu_deinit(&dev->mt76);
}
-static void mt76x0u_mac_stop(struct mt76x0_dev *dev)
+static void mt76x0u_mac_stop(struct mt76x02_dev *dev)
{
- if (test_bit(MT76_REMOVED, &dev->mt76.state))
- return;
-
clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
cancel_delayed_work_sync(&dev->cal_work);
cancel_delayed_work_sync(&dev->mac_work);
mt76u_stop_stat_wk(&dev->mt76);
+ if (test_bit(MT76_REMOVED, &dev->mt76.state))
+ return;
+
mt76_clear(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_TIMER_EN |
MT_BEACON_TIME_CFG_SYNC_MODE | MT_BEACON_TIME_CFG_TBTT_EN |
MT_BEACON_TIME_CFG_BEACON_TX);
@@ -109,7 +108,7 @@ static void mt76x0u_mac_stop(struct mt76x0_dev *dev)
static int mt76x0u_start(struct ieee80211_hw *hw)
{
- struct mt76x0_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
int ret;
mutex_lock(&dev->mt76.mutex);
@@ -131,7 +130,7 @@ out:
static void mt76x0u_stop(struct ieee80211_hw *hw)
{
- struct mt76x0_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
mutex_lock(&dev->mt76.mutex);
mt76x0u_mac_stop(dev);
@@ -139,7 +138,7 @@ static void mt76x0u_stop(struct ieee80211_hw *hw)
}
static const struct ieee80211_ops mt76x0u_ops = {
- .tx = mt76x0_tx,
+ .tx = mt76x02_tx,
.start = mt76x0u_start,
.stop = mt76x0u_stop,
.add_interface = mt76x02_add_interface,
@@ -159,48 +158,33 @@ static const struct ieee80211_ops mt76x0u_ops = {
.wake_tx_queue = mt76_wake_tx_queue,
};
-static int mt76x0u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
- struct sk_buff *skb, struct mt76_queue *q,
- struct mt76_wcid *wcid, struct ieee80211_sta *sta,
- u32 *tx_info)
-{
- struct mt76x0_dev *dev = container_of(mdev, struct mt76x0_dev, mt76);
- struct mt76x02_txwi *txwi;
- int len = skb->len;
-
- mt76x02_insert_hdr_pad(skb);
- txwi = mt76x0_push_txwi(dev, skb, sta, wcid, len);
-
- return mt76x02u_set_txinfo(skb, wcid, q2ep(q->hw_idx));
-}
-
-static int mt76x0u_register_device(struct mt76x0_dev *dev)
+static int mt76x0u_register_device(struct mt76x02_dev *dev)
{
struct ieee80211_hw *hw = dev->mt76.hw;
int err;
- err = mt76u_mcu_init_rx(&dev->mt76);
+ err = mt76u_alloc_queues(&dev->mt76);
if (err < 0)
- return err;
+ goto out_err;
- err = mt76u_alloc_queues(&dev->mt76);
+ err = mt76u_mcu_init_rx(&dev->mt76);
if (err < 0)
- return err;
+ goto out_err;
mt76x0_chip_onoff(dev, true, true);
if (!mt76x02_wait_for_mac(&dev->mt76)) {
err = -ETIMEDOUT;
- goto err;
+ goto out_err;
}
err = mt76x0u_mcu_init(dev);
if (err < 0)
- goto err;
+ goto out_err;
mt76x0_init_usb_dma(dev);
err = mt76x0_init_hardware(dev);
if (err < 0)
- goto err;
+ goto out_err;
mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
mt76_wr(dev, MT_TXOP_CTRL_CFG,
@@ -209,7 +193,7 @@ static int mt76x0u_register_device(struct mt76x0_dev *dev)
err = mt76x0_register_device(dev);
if (err < 0)
- goto err;
+ goto out_err;
/* check hw sg support in order to enable AMSDU */
if (mt76u_check_sg(&dev->mt76))
@@ -221,7 +205,7 @@ static int mt76x0u_register_device(struct mt76x0_dev *dev)
return 0;
-err:
+out_err:
mt76x0u_cleanup(dev);
return err;
}
@@ -230,13 +214,13 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
const struct usb_device_id *id)
{
static const struct mt76_driver_ops drv_ops = {
- .tx_prepare_skb = mt76x0u_tx_prepare_skb,
- .tx_complete_skb = mt76x02_tx_complete_skb,
+ .tx_prepare_skb = mt76x02u_tx_prepare_skb,
+ .tx_complete_skb = mt76x02u_tx_complete_skb,
.tx_status_data = mt76x02_tx_status_data,
- .rx_skb = mt76x0_queue_rx_skb,
+ .rx_skb = mt76x02_queue_rx_skb,
};
struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
- struct mt76x0_dev *dev;
+ struct mt76x02_dev *dev;
u32 asic_rev, mac_rev;
int ret;
@@ -292,7 +276,7 @@ err:
static void mt76x0_disconnect(struct usb_interface *usb_intf)
{
- struct mt76x0_dev *dev = usb_get_intfdata(usb_intf);
+ struct mt76x02_dev *dev = usb_get_intfdata(usb_intf);
bool initalized = test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
if (!initalized)
@@ -310,7 +294,7 @@ static void mt76x0_disconnect(struct usb_interface *usb_intf)
static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf,
pm_message_t state)
{
- struct mt76x0_dev *dev = usb_get_intfdata(usb_intf);
+ struct mt76x02_dev *dev = usb_get_intfdata(usb_intf);
struct mt76_usb *usb = &dev->mt76.usb;
mt76u_stop_queues(&dev->mt76);
@@ -322,7 +306,7 @@ static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf,
static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf)
{
- struct mt76x0_dev *dev = usb_get_intfdata(usb_intf);
+ struct mt76x02_dev *dev = usb_get_intfdata(usb_intf);
struct mt76_usb *usb = &dev->mt76.usb;
int ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
index 4c5b7a6f15ce..a9f14d5149d1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
@@ -25,7 +25,7 @@
#define MT7610U_FIRMWARE "mediatek/mt7610u.bin"
static int
-mt76x0u_upload_firmware(struct mt76x0_dev *dev,
+mt76x0u_upload_firmware(struct mt76x02_dev *dev,
const struct mt76x02_fw_header *hdr)
{
u8 *fw_payload = (u8 *)(hdr + 1);
@@ -40,8 +40,7 @@ mt76x0u_upload_firmware(struct mt76x0_dev *dev,
ilm_len = le32_to_cpu(hdr->ilm_len) - MT_MCU_IVB_SIZE;
dev_dbg(dev->mt76.dev, "loading FW - ILM %u + IVB %u\n",
ilm_len, MT_MCU_IVB_SIZE);
- err = mt76x02u_mcu_fw_send_data(&dev->mt76,
- fw_payload + MT_MCU_IVB_SIZE,
+ err = mt76x02u_mcu_fw_send_data(dev, fw_payload + MT_MCU_IVB_SIZE,
ilm_len, MCU_FW_URB_MAX_PAYLOAD,
MT_MCU_IVB_SIZE);
if (err)
@@ -49,7 +48,7 @@ mt76x0u_upload_firmware(struct mt76x0_dev *dev,
dlm_len = le32_to_cpu(hdr->dlm_len);
dev_dbg(dev->mt76.dev, "loading FW - DLM %u\n", dlm_len);
- err = mt76x02u_mcu_fw_send_data(&dev->mt76,
+ err = mt76x02u_mcu_fw_send_data(dev,
fw_payload + le32_to_cpu(hdr->ilm_len),
dlm_len, MCU_FW_URB_MAX_PAYLOAD,
MT_MCU_DLM_OFFSET);
@@ -76,7 +75,7 @@ out:
return err;
}
-static int mt76x0u_load_firmware(struct mt76x0_dev *dev)
+static int mt76x0u_load_firmware(struct mt76x02_dev *dev)
{
const struct firmware *fw;
const struct mt76x02_fw_header *hdr;
@@ -121,7 +120,7 @@ static int mt76x0u_load_firmware(struct mt76x0_dev *dev)
mt76_set(dev, MT_USB_DMA_CFG,
(MT_USB_DMA_CFG_RX_BULK_EN | MT_USB_DMA_CFG_TX_BULK_EN) |
FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20));
- mt76x02u_mcu_fw_reset(&dev->mt76);
+ mt76x02u_mcu_fw_reset(dev);
usleep_range(5000, 6000);
/*
mt76x0_rmw(dev, MT_PBF_CFG, 0, (MT_PBF_CFG_TX0Q_EN |
@@ -160,7 +159,7 @@ err_inv_fw:
return -ENOENT;
}
-int mt76x0u_mcu_init(struct mt76x0_dev *dev)
+int mt76x0u_mcu_init(struct mt76x02_dev *dev)
{
int ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
new file mode 100644
index 000000000000..47c42c607964
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
@@ -0,0 +1,215 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76X02_UTIL_H
+#define __MT76X02_UTIL_H
+
+#include <linux/kfifo.h>
+
+#include "mt76.h"
+#include "mt76x02_regs.h"
+#include "mt76x02_mac.h"
+#include "mt76x02_dfs.h"
+#include "mt76x02_dma.h"
+
+struct mt76x02_mac_stats {
+ u64 rx_stat[6];
+ u64 tx_stat[6];
+ u64 aggr_stat[2];
+ u64 aggr_n[32];
+ u64 zero_len_del[2];
+};
+
+#define MT_MAX_CHAINS 2
+struct mt76x02_rx_freq_cal {
+ s8 high_gain[MT_MAX_CHAINS];
+ s8 rssi_offset[MT_MAX_CHAINS];
+ s8 lna_gain;
+ u32 mcu_gain;
+ s16 temp_offset;
+ u8 freq_offset;
+};
+
+struct mt76x02_calibration {
+ struct mt76x02_rx_freq_cal rx;
+
+ u8 agc_gain_init[MT_MAX_CHAINS];
+ u8 agc_gain_cur[MT_MAX_CHAINS];
+
+ u16 false_cca;
+ s8 avg_rssi_all;
+ s8 agc_gain_adjust;
+ s8 low_gain;
+
+ s8 temp_vco;
+ s8 temp;
+
+ bool init_cal_done;
+ bool tssi_cal_done;
+ bool tssi_comp_pending;
+ bool dpd_cal_done;
+ bool channel_cal_done;
+};
+
+struct mt76x02_dev {
+ struct mt76_dev mt76; /* must be first */
+
+ struct mac_address macaddr_list[8];
+
+ struct mutex phy_mutex;
+ struct mutex mutex;
+
+ u8 txdone_seq;
+ DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x02_tx_status);
+
+ struct sk_buff *rx_head;
+
+ struct tasklet_struct tx_tasklet;
+ struct tasklet_struct pre_tbtt_tasklet;
+ struct delayed_work cal_work;
+ struct delayed_work mac_work;
+
+ struct mt76x02_mac_stats stats;
+ atomic_t avg_ampdu_len;
+ u32 aggr_stats[32];
+
+ struct sk_buff *beacons[8];
+ u8 beacon_mask;
+ u8 beacon_data_mask;
+
+ u8 tbtt_count;
+ u16 beacon_int;
+
+ struct mt76x02_calibration cal;
+
+ s8 target_power;
+ s8 target_power_delta[2];
+ bool enable_tpc;
+
+ bool no_2ghz;
+
+ u8 coverage_class;
+ u8 slottime;
+
+ struct mt76x02_dfs_pattern_detector dfs_pd;
+};
+
+extern struct ieee80211_rate mt76x02_rates[12];
+
+void mt76x02_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast);
+int mt76x02_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int mt76x02_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+
+void mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
+ unsigned int idx);
+int mt76x02_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+void mt76x02_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+
+int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params);
+int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key);
+int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params);
+void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev,
+ const struct ieee80211_tx_rate *rate);
+s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr,
+ s8 max_txpwr_adj);
+void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr);
+int mt76x02_insert_hdr_pad(struct sk_buff *skb);
+void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len);
+void mt76x02_tx_complete(struct mt76_dev *dev, struct sk_buff *skb);
+bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update);
+void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb);
+void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
+irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance);
+void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb);
+int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
+ struct sk_buff *skb, struct mt76_queue *q,
+ struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+ u32 *tx_info);
+
+extern const u16 mt76x02_beacon_offsets[16];
+void mt76x02_set_beacon_offsets(struct mt76x02_dev *dev);
+void mt76x02_set_irq_mask(struct mt76x02_dev *dev, u32 clear, u32 set);
+void mt76x02_mac_start(struct mt76x02_dev *dev);
+
+static inline bool is_mt76x2(struct mt76x02_dev *dev)
+{
+ return mt76_chip(&dev->mt76) == 0x7612 ||
+ mt76_chip(&dev->mt76) == 0x7662 ||
+ mt76_chip(&dev->mt76) == 0x7602;
+}
+
+static inline void mt76x02_irq_enable(struct mt76x02_dev *dev, u32 mask)
+{
+ mt76x02_set_irq_mask(dev, 0, mask);
+}
+
+static inline void mt76x02_irq_disable(struct mt76x02_dev *dev, u32 mask)
+{
+ mt76x02_set_irq_mask(dev, mask, 0);
+}
+
+static inline bool
+mt76x02_wait_for_txrx_idle(struct mt76_dev *dev)
+{
+ return __mt76_poll_msec(dev, MT_MAC_STATUS,
+ MT_MAC_STATUS_TX | MT_MAC_STATUS_RX,
+ 0, 100);
+}
+
+static inline struct mt76x02_sta *
+mt76x02_rx_get_sta(struct mt76_dev *dev, u8 idx)
+{
+ struct mt76_wcid *wcid;
+
+ if (idx >= ARRAY_SIZE(dev->wcid))
+ return NULL;
+
+ wcid = rcu_dereference(dev->wcid[idx]);
+ if (!wcid)
+ return NULL;
+
+ return container_of(wcid, struct mt76x02_sta, wcid);
+}
+
+static inline struct mt76_wcid *
+mt76x02_rx_get_sta_wcid(struct mt76x02_sta *sta, bool unicast)
+{
+ if (!sta)
+ return NULL;
+
+ if (unicast)
+ return &sta->wcid;
+ else
+ return &sta->vif->group_wcid;
+}
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h
index 693f421bf096..7e177c934592 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h
@@ -14,8 +14,8 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef __MT76x2_DFS_H
-#define __MT76x2_DFS_H
+#ifndef __MT76x02_DFS_H
+#define __MT76x02_DFS_H
#include <linux/types.h>
#include <linux/nl80211.h>
@@ -49,7 +49,7 @@
#define MT_DFS_ETSI_MAX_PRI (133333 + 125000 + 117647 + 1000)
#define MT_DFS_ETSI_MIN_PRI (4500 - 20)
-struct mt76x2_radar_specs {
+struct mt76x02_radar_specs {
u8 mode;
u16 avg_len;
u16 e_low;
@@ -70,7 +70,7 @@ struct mt76x2_radar_specs {
#define MT_DFS_EVENT_ENGINE(x) (((x) & BIT(31)) ? 2 : 0)
#define MT_DFS_EVENT_TIMESTAMP(x) ((x) & GENMASK(21, 0))
#define MT_DFS_EVENT_WIDTH(x) ((x) & GENMASK(11, 0))
-struct mt76x2_dfs_event {
+struct mt76x02_dfs_event {
unsigned long fetch_ts;
u32 ts;
u16 width;
@@ -78,12 +78,12 @@ struct mt76x2_dfs_event {
};
#define MT_DFS_EVENT_BUFLEN 256
-struct mt76x2_dfs_event_rb {
- struct mt76x2_dfs_event data[MT_DFS_EVENT_BUFLEN];
+struct mt76x02_dfs_event_rb {
+ struct mt76x02_dfs_event data[MT_DFS_EVENT_BUFLEN];
int h_rb, t_rb;
};
-struct mt76x2_dfs_sequence {
+struct mt76x02_dfs_sequence {
struct list_head head;
u32 first_ts;
u32 last_ts;
@@ -92,7 +92,7 @@ struct mt76x2_dfs_sequence {
u8 engine;
};
-struct mt76x2_dfs_hw_pulse {
+struct mt76x02_dfs_hw_pulse {
u8 engine;
u32 period;
u32 w1;
@@ -100,47 +100,41 @@ struct mt76x2_dfs_hw_pulse {
u32 burst;
};
-struct mt76x2_dfs_sw_detector_params {
+struct mt76x02_dfs_sw_detector_params {
u32 min_pri;
u32 max_pri;
u32 pri_margin;
};
-struct mt76x2_dfs_engine_stats {
+struct mt76x02_dfs_engine_stats {
u32 hw_pattern;
u32 hw_pulse_discarded;
u32 sw_pattern;
};
-struct mt76x2_dfs_seq_stats {
+struct mt76x02_dfs_seq_stats {
u32 seq_pool_len;
u32 seq_len;
};
-struct mt76x2_dfs_pattern_detector {
+struct mt76x02_dfs_pattern_detector {
enum nl80211_dfs_regions region;
u8 chirp_pulse_cnt;
u32 chirp_pulse_ts;
- struct mt76x2_dfs_sw_detector_params sw_dpd_params;
- struct mt76x2_dfs_event_rb event_rb[2];
+ struct mt76x02_dfs_sw_detector_params sw_dpd_params;
+ struct mt76x02_dfs_event_rb event_rb[2];
struct list_head sequences;
struct list_head seq_pool;
- struct mt76x2_dfs_seq_stats seq_stats;
+ struct mt76x02_dfs_seq_stats seq_stats;
unsigned long last_sw_check;
u32 last_event_ts;
- struct mt76x2_dfs_engine_stats stats[MT_DFS_NUM_ENGINES];
+ struct mt76x02_dfs_engine_stats stats[MT_DFS_NUM_ENGINES];
struct tasklet_struct dfs_tasklet;
};
-void mt76x2_dfs_init_params(struct mt76x2_dev *dev);
-void mt76x2_dfs_init_detector(struct mt76x2_dev *dev);
-void mt76x2_dfs_adjust_agc(struct mt76x2_dev *dev);
-void mt76x2_dfs_set_domain(struct mt76x2_dev *dev,
- enum nl80211_dfs_regions region);
-
-#endif /* __MT76x2_DFS_H */
+#endif /* __MT76x02_DFS_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dma.h b/drivers/net/wireless/mediatek/mt76/mt76x02_dma.h
index 65b97f5713d3..6394010a565f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_dma.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dma.h
@@ -17,8 +17,8 @@
#ifndef __MT76x02_DMA_H
#define __MT76x02_DMA_H
+#include "mt76x02.h"
#include "dma.h"
-#include "mt76x02_regs.h"
#define MT_TXD_INFO_LEN GENMASK(15, 0)
#define MT_TXD_INFO_NEXT_VLD BIT(16)
@@ -70,8 +70,8 @@ mt76x02_wait_for_wpdma(struct mt76_dev *dev, int timeout)
0, timeout);
}
-int mt76x02_dma_init(struct mt76_dev *dev);
-void mt76x02_dma_enable(struct mt76_dev *dev);
-void mt76x02_dma_disable(struct mt76_dev *dev);
+int mt76x02_dma_init(struct mt76x02_dev *dev);
+void mt76x02_dma_disable(struct mt76x02_dev *dev);
+void mt76x02_dma_cleanup(struct mt76x02_dev *dev);
#endif /* __MT76x02_DMA_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
index d3efeb8a72b7..9390de2a323e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
@@ -17,46 +17,43 @@
#include <asm/unaligned.h>
-#include "mt76.h"
#include "mt76x02_eeprom.h"
-#include "mt76x02_regs.h"
static int
-mt76x02_efuse_read(struct mt76_dev *dev, u16 addr, u8 *data,
+mt76x02_efuse_read(struct mt76x02_dev *dev, u16 addr, u8 *data,
enum mt76x02_eeprom_modes mode)
{
u32 val;
int i;
- val = __mt76_rr(dev, MT_EFUSE_CTRL);
+ val = mt76_rr(dev, MT_EFUSE_CTRL);
val &= ~(MT_EFUSE_CTRL_AIN |
MT_EFUSE_CTRL_MODE);
val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf);
val |= FIELD_PREP(MT_EFUSE_CTRL_MODE, mode);
val |= MT_EFUSE_CTRL_KICK;
- __mt76_wr(dev, MT_EFUSE_CTRL, val);
+ mt76_wr(dev, MT_EFUSE_CTRL, val);
- if (!__mt76_poll_msec(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK,
- 0, 1000))
+ if (!mt76_poll_msec(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
return -ETIMEDOUT;
udelay(2);
- val = __mt76_rr(dev, MT_EFUSE_CTRL);
+ val = mt76_rr(dev, MT_EFUSE_CTRL);
if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) {
memset(data, 0xff, 16);
return 0;
}
for (i = 0; i < 4; i++) {
- val = __mt76_rr(dev, MT_EFUSE_DATA(i));
+ val = mt76_rr(dev, MT_EFUSE_DATA(i));
put_unaligned_le32(val, data + 4 * i);
}
return 0;
}
-int mt76x02_get_efuse_data(struct mt76_dev *dev, u16 base, void *buf,
+int mt76x02_get_efuse_data(struct mt76x02_dev *dev, u16 base, void *buf,
int len, enum mt76x02_eeprom_modes mode)
{
int ret, i;
@@ -71,26 +68,26 @@ int mt76x02_get_efuse_data(struct mt76_dev *dev, u16 base, void *buf,
}
EXPORT_SYMBOL_GPL(mt76x02_get_efuse_data);
-void mt76x02_eeprom_parse_hw_cap(struct mt76_dev *dev)
+void mt76x02_eeprom_parse_hw_cap(struct mt76x02_dev *dev)
{
u16 val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0);
switch (FIELD_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, val)) {
case BOARD_TYPE_5GHZ:
- dev->cap.has_5ghz = true;
+ dev->mt76.cap.has_5ghz = true;
break;
case BOARD_TYPE_2GHZ:
- dev->cap.has_2ghz = true;
+ dev->mt76.cap.has_2ghz = true;
break;
default:
- dev->cap.has_2ghz = true;
- dev->cap.has_5ghz = true;
+ dev->mt76.cap.has_2ghz = true;
+ dev->mt76.cap.has_5ghz = true;
break;
}
}
EXPORT_SYMBOL_GPL(mt76x02_eeprom_parse_hw_cap);
-bool mt76x02_ext_pa_enabled(struct mt76_dev *dev, enum nl80211_band band)
+bool mt76x02_ext_pa_enabled(struct mt76x02_dev *dev, enum nl80211_band band)
{
u16 conf0 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0);
@@ -101,7 +98,7 @@ bool mt76x02_ext_pa_enabled(struct mt76_dev *dev, enum nl80211_band band)
}
EXPORT_SYMBOL_GPL(mt76x02_ext_pa_enabled);
-void mt76x02_get_rx_gain(struct mt76_dev *dev, enum nl80211_band band,
+void mt76x02_get_rx_gain(struct mt76x02_dev *dev, enum nl80211_band band,
u16 *rssi_offset, s8 *lna_2g, s8 *lna_5g)
{
u16 val;
@@ -129,7 +126,7 @@ void mt76x02_get_rx_gain(struct mt76_dev *dev, enum nl80211_band band,
}
EXPORT_SYMBOL_GPL(mt76x02_get_rx_gain);
-u8 mt76x02_get_lna_gain(struct mt76_dev *dev,
+u8 mt76x02_get_lna_gain(struct mt76x02_dev *dev,
s8 *lna_2g, s8 *lna_5g,
struct ieee80211_channel *chan)
{
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h
index bcd05f7c5f45..b3ec74835d10 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h
@@ -18,6 +18,8 @@
#ifndef __MT76x02_EEPROM_H
#define __MT76x02_EEPROM_H
+#include "mt76x02.h"
+
enum mt76x02_eeprom_field {
MT_EE_CHIP_ID = 0x000,
MT_EE_VERSION = 0x002,
@@ -168,44 +170,23 @@ static inline s8 mt76x02_rate_power_val(u8 val)
}
static inline int
-mt76x02_eeprom_get(struct mt76_dev *dev,
+mt76x02_eeprom_get(struct mt76x02_dev *dev,
enum mt76x02_eeprom_field field)
{
if ((field & 1) || field >= __MT_EE_MAX)
return -1;
- return get_unaligned_le16(dev->eeprom.data + field);
-}
-
-static inline bool
-mt76x02_temp_tx_alc_enabled(struct mt76_dev *dev)
-{
- u16 val;
-
- val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G);
- if (!(val & BIT(15)))
- return false;
-
- return mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) &
- MT_EE_NIC_CONF_1_TEMP_TX_ALC;
-}
-
-static inline bool
-mt76x02_tssi_enabled(struct mt76_dev *dev)
-{
- return !mt76x02_temp_tx_alc_enabled(dev) &&
- (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) &
- MT_EE_NIC_CONF_1_TX_ALC_EN);
+ return get_unaligned_le16(dev->mt76.eeprom.data + field);
}
-bool mt76x02_ext_pa_enabled(struct mt76_dev *dev, enum nl80211_band band);
-int mt76x02_get_efuse_data(struct mt76_dev *dev, u16 base, void *buf,
+bool mt76x02_ext_pa_enabled(struct mt76x02_dev *dev, enum nl80211_band band);
+int mt76x02_get_efuse_data(struct mt76x02_dev *dev, u16 base, void *buf,
int len, enum mt76x02_eeprom_modes mode);
-void mt76x02_get_rx_gain(struct mt76_dev *dev, enum nl80211_band band,
+void mt76x02_get_rx_gain(struct mt76x02_dev *dev, enum nl80211_band band,
u16 *rssi_offset, s8 *lna_2g, s8 *lna_5g);
-u8 mt76x02_get_lna_gain(struct mt76_dev *dev,
+u8 mt76x02_get_lna_gain(struct mt76x02_dev *dev,
s8 *lna_2g, s8 *lna_5g,
struct ieee80211_channel *chan);
-void mt76x02_eeprom_parse_hw_cap(struct mt76_dev *dev);
+void mt76x02_eeprom_parse_hw_cap(struct mt76x02_dev *dev);
#endif /* __MT76x02_EEPROM_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
index df4366a702c9..10578e4cb269 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -15,9 +15,8 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include "mt76.h"
-#include "mt76x02_regs.h"
-#include "mt76x02_mac.h"
+#include "mt76x02.h"
+#include "mt76x02_trace.h"
enum mt76x02_cipher_type
mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
@@ -46,8 +45,8 @@ mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
}
EXPORT_SYMBOL_GPL(mt76x02_mac_get_key_info);
-int mt76x02_mac_shared_key_setup(struct mt76_dev *dev, u8 vif_idx, u8 key_idx,
- struct ieee80211_key_conf *key)
+int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
+ u8 key_idx, struct ieee80211_key_conf *key)
{
enum mt76x02_cipher_type cipher;
u8 key_data[32];
@@ -57,20 +56,20 @@ int mt76x02_mac_shared_key_setup(struct mt76_dev *dev, u8 vif_idx, u8 key_idx,
if (cipher == MT_CIPHER_NONE && key)
return -EOPNOTSUPP;
- val = __mt76_rr(dev, MT_SKEY_MODE(vif_idx));
+ val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
- __mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
+ mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
- __mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
- sizeof(key_data));
+ mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
+ sizeof(key_data));
return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_mac_shared_key_setup);
-int mt76x02_mac_wcid_set_key(struct mt76_dev *dev, u8 idx,
- struct ieee80211_key_conf *key)
+int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
+ struct ieee80211_key_conf *key)
{
enum mt76x02_cipher_type cipher;
u8 key_data[32];
@@ -80,25 +79,26 @@ int mt76x02_mac_wcid_set_key(struct mt76_dev *dev, u8 idx,
if (cipher == MT_CIPHER_NONE && key)
return -EOPNOTSUPP;
- __mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
- __mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);
+ mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
+ mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);
memset(iv_data, 0, sizeof(iv_data));
if (key) {
- __mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
- !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
+ mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
+ !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
iv_data[3] = key->keyidx << 6;
if (cipher >= MT_CIPHER_TKIP)
iv_data[3] |= 0x20;
}
- __mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
+ mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_set_key);
-void mt76x02_mac_wcid_setup(struct mt76_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
+void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx,
+ u8 vif_idx, u8 *mac)
{
struct mt76_wcid_addr addr = {};
u32 attr;
@@ -106,10 +106,10 @@ void mt76x02_mac_wcid_setup(struct mt76_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
- __mt76_wr(dev, MT_WCID_ATTR(idx), attr);
+ mt76_wr(dev, MT_WCID_ATTR(idx), attr);
- __mt76_wr(dev, MT_WCID_TX_RATE(idx), 0);
- __mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0);
+ mt76_wr(dev, MT_WCID_TX_RATE(idx), 0);
+ mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0);
if (idx >= 128)
return;
@@ -117,22 +117,22 @@ void mt76x02_mac_wcid_setup(struct mt76_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
if (mac)
memcpy(addr.macaddr, mac, ETH_ALEN);
- __mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
+ mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
}
EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_setup);
-void mt76x02_mac_wcid_set_drop(struct mt76_dev *dev, u8 idx, bool drop)
+void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop)
{
- u32 val = __mt76_rr(dev, MT_WCID_DROP(idx));
+ u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
u32 bit = MT_WCID_DROP_MASK(idx);
/* prevent unnecessary writes */
if ((val & bit) != (bit * drop))
- __mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
+ mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
}
EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_set_drop);
-void mt76x02_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
+void mt76x02_txq_init(struct mt76x02_dev *dev, struct ieee80211_txq *txq)
{
struct mt76_txq *mtxq;
@@ -152,55 +152,13 @@ void mt76x02_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
mtxq->wcid = &mvif->group_wcid;
}
- mt76_txq_init(dev, txq);
+ mt76_txq_init(&dev->mt76, txq);
}
EXPORT_SYMBOL_GPL(mt76x02_txq_init);
-void mt76x02_mac_fill_txwi(struct mt76x02_txwi *txwi, struct sk_buff *skb,
- struct ieee80211_sta *sta, int len, u8 nss)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- u16 txwi_flags = 0;
-
- if (info->flags & IEEE80211_TX_CTL_LDPC)
- txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
- if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
- txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
- if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
- txwi_flags |= MT_TXWI_FLAGS_MMPS;
- if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
- txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
- if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
- txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
- if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
- txwi->pktid |= MT_TXWI_PKTID_PROBE;
- if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
- u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
-
- ba_size <<= sta->ht_cap.ampdu_factor;
- ba_size = min_t(int, 63, ba_size - 1);
- if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
- ba_size = 0;
- txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
-
- txwi_flags |= MT_TXWI_FLAGS_AMPDU |
- FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
- sta->ht_cap.ampdu_density);
- }
-
- if (ieee80211_is_probe_resp(hdr->frame_control) ||
- ieee80211_is_beacon(hdr->frame_control))
- txwi_flags |= MT_TXWI_FLAGS_TS;
-
- txwi->flags |= cpu_to_le16(txwi_flags);
- txwi->len_ctl = cpu_to_le16(len);
-}
-EXPORT_SYMBOL_GPL(mt76x02_mac_fill_txwi);
-
-__le16
-mt76x02_mac_tx_rate_val(struct mt76_dev *dev,
- const struct ieee80211_tx_rate *rate, u8 *nss_val)
+static __le16
+mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev,
+ const struct ieee80211_tx_rate *rate, u8 *nss_val)
{
u16 rateval;
u8 phy, rate_idx;
@@ -225,10 +183,10 @@ mt76x02_mac_tx_rate_val(struct mt76_dev *dev,
bw = 1;
} else {
const struct ieee80211_rate *r;
- int band = dev->chandef.chan->band;
+ int band = dev->mt76.chandef.chan->band;
u16 val;
- r = &dev->hw->wiphy->bands[band]->bitrates[rate->idx];
+ r = &dev->mt76.hw->wiphy->bands[band]->bitrates[rate->idx];
if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
val = r->hw_value_short;
else
@@ -248,24 +206,23 @@ mt76x02_mac_tx_rate_val(struct mt76_dev *dev,
*nss_val = nss;
return cpu_to_le16(rateval);
}
-EXPORT_SYMBOL_GPL(mt76x02_mac_tx_rate_val);
-void mt76x02_mac_wcid_set_rate(struct mt76_dev *dev, struct mt76_wcid *wcid,
- const struct ieee80211_tx_rate *rate)
+void mt76x02_mac_wcid_set_rate(struct mt76x02_dev *dev, struct mt76_wcid *wcid,
+ const struct ieee80211_tx_rate *rate)
{
- spin_lock_bh(&dev->lock);
+ spin_lock_bh(&dev->mt76.lock);
wcid->tx_rate = mt76x02_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
wcid->tx_rate_set = true;
- spin_unlock_bh(&dev->lock);
+ spin_unlock_bh(&dev->mt76.lock);
}
-bool mt76x02_mac_load_tx_status(struct mt76_dev *dev,
- struct mt76x02_tx_status *stat)
+bool mt76x02_mac_load_tx_status(struct mt76x02_dev *dev,
+ struct mt76x02_tx_status *stat)
{
u32 stat1, stat2;
- stat2 = __mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
- stat1 = __mt76_rr(dev, MT_TX_STAT_FIFO);
+ stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
+ stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
stat->valid = !!(stat1 & MT_TX_STAT_FIFO_VALID);
if (!stat->valid)
@@ -341,10 +298,103 @@ mt76x02_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
return 0;
}
+void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta, int len)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_rate *rate = &info->control.rates[0];
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
+ u16 txwi_flags = 0;
+ u8 nss;
+ s8 txpwr_adj, max_txpwr_adj;
+ u8 ccmp_pn[8], nstreams = dev->mt76.chainmask & 0xf;
+
+ memset(txwi, 0, sizeof(*txwi));
+
+ if (wcid)
+ txwi->wcid = wcid->idx;
+ else
+ txwi->wcid = 0xff;
+
+ txwi->pktid = 1;
+
+ if (wcid && wcid->sw_iv && key) {
+ u64 pn = atomic64_inc_return(&key->tx_pn);
+ ccmp_pn[0] = pn;
+ ccmp_pn[1] = pn >> 8;
+ ccmp_pn[2] = 0;
+ ccmp_pn[3] = 0x20 | (key->keyidx << 6);
+ ccmp_pn[4] = pn >> 16;
+ ccmp_pn[5] = pn >> 24;
+ ccmp_pn[6] = pn >> 32;
+ ccmp_pn[7] = pn >> 40;
+ txwi->iv = *((__le32 *)&ccmp_pn[0]);
+ txwi->eiv = *((__le32 *)&ccmp_pn[1]);
+ }
+
+ spin_lock_bh(&dev->mt76.lock);
+ if (wcid && (rate->idx < 0 || !rate->count)) {
+ txwi->rate = wcid->tx_rate;
+ max_txpwr_adj = wcid->max_txpwr_adj;
+ nss = wcid->tx_rate_nss;
+ } else {
+ txwi->rate = mt76x02_mac_tx_rate_val(dev, rate, &nss);
+ max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
+ }
+ spin_unlock_bh(&dev->mt76.lock);
+
+ txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, dev->mt76.txpower_conf,
+ max_txpwr_adj);
+ txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);
+
+ if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E4)
+ txwi->txstream = 0x13;
+ else if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E3 &&
+ !(txwi->rate & cpu_to_le16(rate_ht_mask)))
+ txwi->txstream = 0x93;
+
+ if (is_mt76x2(dev) && (info->flags & IEEE80211_TX_CTL_LDPC))
+ txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
+ if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
+ txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
+ if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
+ txwi_flags |= MT_TXWI_FLAGS_MMPS;
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+ txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+ txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+ txwi->pktid |= MT_TXWI_PKTID_PROBE;
+ if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
+ u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
+
+ ba_size <<= sta->ht_cap.ampdu_factor;
+ ba_size = min_t(int, 63, ba_size - 1);
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+ ba_size = 0;
+ txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
+
+ txwi_flags |= MT_TXWI_FLAGS_AMPDU |
+ FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
+ sta->ht_cap.ampdu_density);
+ }
+
+ if (ieee80211_is_probe_resp(hdr->frame_control) ||
+ ieee80211_is_beacon(hdr->frame_control))
+ txwi_flags |= MT_TXWI_FLAGS_TS;
+
+ txwi->flags |= cpu_to_le16(txwi_flags);
+ txwi->len_ctl = cpu_to_le16(len);
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_write_txwi);
+
static void
-mt76x02_mac_fill_tx_status(struct mt76_dev *dev,
- struct ieee80211_tx_info *info,
- struct mt76x02_tx_status *st, int n_frames)
+mt76x02_mac_fill_tx_status(struct mt76x02_dev *dev,
+ struct ieee80211_tx_info *info,
+ struct mt76x02_tx_status *st, int n_frames)
{
struct ieee80211_tx_rate *rate = info->status.rates;
int cur_idx, last_rate;
@@ -355,7 +405,7 @@ mt76x02_mac_fill_tx_status(struct mt76_dev *dev,
last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
mt76x02_mac_process_tx_rate(&rate[last_rate], st->rate,
- dev->chandef.chan->band);
+ dev->mt76.chandef.chan->band);
if (last_rate < IEEE80211_TX_MAX_RATES - 1)
rate[last_rate + 1].idx = -1;
@@ -383,8 +433,8 @@ mt76x02_mac_fill_tx_status(struct mt76_dev *dev,
info->flags |= IEEE80211_TX_STAT_ACK;
}
-void mt76x02_send_tx_status(struct mt76_dev *dev,
- struct mt76x02_tx_status *stat, u8 *update)
+void mt76x02_send_tx_status(struct mt76x02_dev *dev,
+ struct mt76x02_tx_status *stat, u8 *update)
{
struct ieee80211_tx_info info = {};
struct ieee80211_sta *sta = NULL;
@@ -392,8 +442,8 @@ void mt76x02_send_tx_status(struct mt76_dev *dev,
struct mt76x02_sta *msta = NULL;
rcu_read_lock();
- if (stat->wcid < ARRAY_SIZE(dev->wcid))
- wcid = rcu_dereference(dev->wcid[stat->wcid]);
+ if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid))
+ wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]);
if (wcid) {
void *priv;
@@ -418,7 +468,7 @@ void mt76x02_send_tx_status(struct mt76_dev *dev,
}
mt76x02_mac_fill_tx_status(dev, &info, &msta->status,
- msta->n_frames);
+ msta->n_frames);
msta->status = *stat;
msta->n_frames = 1;
@@ -428,7 +478,7 @@ void mt76x02_send_tx_status(struct mt76_dev *dev,
*update = 1;
}
- ieee80211_tx_status_noskb(dev->hw, sta, &info);
+ ieee80211_tx_status_noskb(dev->mt76.hw, sta, &info);
out:
rcu_read_unlock();
@@ -503,20 +553,185 @@ mt76x02_mac_process_rate(struct mt76_rx_status *status, u16 rate)
}
EXPORT_SYMBOL_GPL(mt76x02_mac_process_rate);
-void mt76x02_mac_setaddr(struct mt76_dev *dev, u8 *addr)
+void mt76x02_mac_setaddr(struct mt76x02_dev *dev, u8 *addr)
{
- ether_addr_copy(dev->macaddr, addr);
+ ether_addr_copy(dev->mt76.macaddr, addr);
- if (!is_valid_ether_addr(dev->macaddr)) {
- eth_random_addr(dev->macaddr);
- dev_info(dev->dev,
+ if (!is_valid_ether_addr(dev->mt76.macaddr)) {
+ eth_random_addr(dev->mt76.macaddr);
+ dev_info(dev->mt76.dev,
"Invalid MAC address, using random address %pM\n",
- dev->macaddr);
+ dev->mt76.macaddr);
}
- __mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->macaddr));
- __mt76_wr(dev, MT_MAC_ADDR_DW1,
- get_unaligned_le16(dev->macaddr + 4) |
- FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
+ mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->mt76.macaddr));
+ mt76_wr(dev, MT_MAC_ADDR_DW1,
+ get_unaligned_le16(dev->mt76.macaddr + 4) |
+ FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
}
EXPORT_SYMBOL_GPL(mt76x02_mac_setaddr);
+
+static int
+mt76x02_mac_get_rssi(struct mt76x02_dev *dev, s8 rssi, int chain)
+{
+ struct mt76x02_rx_freq_cal *cal = &dev->cal.rx;
+
+ rssi += cal->rssi_offset[chain];
+ rssi -= cal->lna_gain;
+
+ return rssi;
+}
+
+int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
+ void *rxi)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
+ struct mt76x02_rxwi *rxwi = rxi;
+ struct mt76x02_sta *sta;
+ u32 rxinfo = le32_to_cpu(rxwi->rxinfo);
+ u32 ctl = le32_to_cpu(rxwi->ctl);
+ u16 rate = le16_to_cpu(rxwi->rate);
+ u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
+ bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
+ int i, pad_len = 0, nstreams = dev->mt76.chainmask & 0xf;
+ s8 signal;
+ u8 pn_len;
+ u8 wcid;
+ int len;
+
+ if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+ return -EINVAL;
+
+ if (rxinfo & MT_RXINFO_L2PAD)
+ pad_len += 2;
+
+ if (rxinfo & MT_RXINFO_DECRYPT) {
+ status->flag |= RX_FLAG_DECRYPTED;
+ status->flag |= RX_FLAG_MMIC_STRIPPED;
+ status->flag |= RX_FLAG_MIC_STRIPPED;
+ status->flag |= RX_FLAG_IV_STRIPPED;
+ }
+
+ wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl);
+ sta = mt76x02_rx_get_sta(&dev->mt76, wcid);
+ status->wcid = mt76x02_rx_get_sta_wcid(sta, unicast);
+
+ len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
+ pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo);
+ if (pn_len) {
+ int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len;
+ u8 *data = skb->data + offset;
+
+ status->iv[0] = data[7];
+ status->iv[1] = data[6];
+ status->iv[2] = data[5];
+ status->iv[3] = data[4];
+ status->iv[4] = data[1];
+ status->iv[5] = data[0];
+
+ /*
+ * Driver CCMP validation can't deal with fragments.
+ * Let mac80211 take care of it.
+ */
+ if (rxinfo & MT_RXINFO_FRAG) {
+ status->flag &= ~RX_FLAG_IV_STRIPPED;
+ } else {
+ pad_len += pn_len << 2;
+ len -= pn_len << 2;
+ }
+ }
+
+ mt76x02_remove_hdr_pad(skb, pad_len);
+
+ if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL))
+ status->aggr = true;
+
+ if (WARN_ON_ONCE(len > skb->len))
+ return -EINVAL;
+
+ pskb_trim(skb, len);
+
+ status->chains = BIT(0);
+ signal = mt76x02_mac_get_rssi(dev, rxwi->rssi[0], 0);
+ for (i = 1; i < nstreams; i++) {
+ status->chains |= BIT(i);
+ status->chain_signal[i] = mt76x02_mac_get_rssi(dev,
+ rxwi->rssi[i],
+ i);
+ signal = max_t(s8, signal, status->chain_signal[i]);
+ }
+ status->signal = signal;
+ status->freq = dev->mt76.chandef.chan->center_freq;
+ status->band = dev->mt76.chandef.chan->band;
+
+ status->tid = FIELD_GET(MT_RXWI_TID, tid_sn);
+ status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn);
+
+ if (sta) {
+ ewma_signal_add(&sta->rssi, status->signal);
+ sta->inactive_count = 0;
+ }
+
+ return mt76x02_mac_process_rate(status, rate);
+}
+
+void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
+{
+ struct mt76x02_tx_status stat = {};
+ unsigned long flags;
+ u8 update = 1;
+ bool ret;
+
+ if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+ return;
+
+ trace_mac_txstat_poll(dev);
+
+ while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
+ spin_lock_irqsave(&dev->mt76.mmio.irq_lock, flags);
+ ret = mt76x02_mac_load_tx_status(dev, &stat);
+ spin_unlock_irqrestore(&dev->mt76.mmio.irq_lock, flags);
+
+ if (!ret)
+ break;
+
+ trace_mac_txstat_fetch(dev, &stat);
+
+ if (!irq) {
+ mt76x02_send_tx_status(dev, &stat, &update);
+ continue;
+ }
+
+ kfifo_put(&dev->txstatus_fifo, stat);
+ }
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_poll_tx_status);
+
+static void
+mt76x02_mac_queue_txdone(struct mt76x02_dev *dev, struct sk_buff *skb,
+ void *txwi_ptr)
+{
+ struct mt76x02_tx_info *txi = mt76x02_skb_tx_info(skb);
+ struct mt76x02_txwi *txwi = txwi_ptr;
+
+ mt76x02_mac_poll_tx_status(dev, false);
+
+ txi->tries = 0;
+ txi->jiffies = jiffies;
+ txi->wcid = txwi->wcid;
+ txi->pktid = txwi->pktid;
+ trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid);
+ mt76x02_tx_complete(&dev->mt76, skb);
+}
+
+void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
+ struct mt76_queue_entry *e, bool flush)
+{
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+
+ if (e->txwi)
+ mt76x02_mac_queue_txdone(dev, e->skb, &e->txwi->txwi);
+ else
+ dev_kfree_skb_any(e->skb);
+}
+EXPORT_SYMBOL_GPL(mt76x02_tx_complete_skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
index 62072291e416..d99c18743969 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
@@ -20,6 +20,8 @@
#include <linux/average.h>
+struct mt76x02_dev;
+
struct mt76x02_tx_status {
u8 valid:1;
u8 success:1;
@@ -40,6 +42,15 @@ struct mt76x02_vif {
struct mt76_wcid group_wcid;
};
+struct mt76x02_tx_info {
+ unsigned long jiffies;
+ u8 tries;
+
+ u8 wcid;
+ u8 pktid;
+ u8 retry;
+};
+
DECLARE_EWMA(signal, 10, 8);
struct mt76x02_sta {
@@ -179,28 +190,40 @@ static inline bool mt76x02_wait_for_mac(struct mt76_dev *dev)
return false;
}
-void mt76x02_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
-void mt76x02_mac_fill_txwi(struct mt76x02_txwi *txwi, struct sk_buff *skb,
- struct ieee80211_sta *sta, int len, u8 nss);
+static inline struct mt76x02_tx_info *
+mt76x02_skb_tx_info(struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ return (void *)info->status.status_driver_data;
+}
+
+void mt76x02_txq_init(struct mt76x02_dev *dev, struct ieee80211_txq *txq);
enum mt76x02_cipher_type
mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data);
-int mt76x02_mac_shared_key_setup(struct mt76_dev *dev, u8 vif_idx, u8 key_idx,
- struct ieee80211_key_conf *key);
-int mt76x02_mac_wcid_set_key(struct mt76_dev *dev, u8 idx,
- struct ieee80211_key_conf *key);
-void mt76x02_mac_wcid_setup(struct mt76_dev *dev, u8 idx, u8 vif_idx, u8 *mac);
-void mt76x02_mac_wcid_set_drop(struct mt76_dev *dev, u8 idx, bool drop);
-void mt76x02_mac_wcid_set_rate(struct mt76_dev *dev, struct mt76_wcid *wcid,
- const struct ieee80211_tx_rate *rate);
-__le16
-mt76x02_mac_tx_rate_val(struct mt76_dev *dev,
- const struct ieee80211_tx_rate *rate, u8 *nss_val);
-bool mt76x02_mac_load_tx_status(struct mt76_dev *dev,
- struct mt76x02_tx_status *stat);
-void mt76x02_send_tx_status(struct mt76_dev *dev,
- struct mt76x02_tx_status *stat, u8 *update);
+int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
+ u8 key_idx, struct ieee80211_key_conf *key);
+int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
+ struct ieee80211_key_conf *key);
+void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx, u8 vif_idx,
+ u8 *mac);
+void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop);
+void mt76x02_mac_wcid_set_rate(struct mt76x02_dev *dev, struct mt76_wcid *wcid,
+ const struct ieee80211_tx_rate *rate);
+bool mt76x02_mac_load_tx_status(struct mt76x02_dev *dev,
+ struct mt76x02_tx_status *stat);
+void mt76x02_send_tx_status(struct mt76x02_dev *dev,
+ struct mt76x02_tx_status *stat, u8 *update);
+int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
+ void *rxi);
int
mt76x02_mac_process_rate(struct mt76_rx_status *status, u16 rate);
-void mt76x02_mac_setaddr(struct mt76_dev *dev, u8 *addr);
+void mt76x02_mac_setaddr(struct mt76x02_dev *dev, u8 *addr);
+void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta, int len);
+void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq);
+void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
+ struct mt76_queue_entry *e, bool flush);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
index 6d565133b7af..1b853bb723fb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
@@ -19,9 +19,7 @@
#include <linux/firmware.h>
#include <linux/delay.h>
-#include "mt76.h"
#include "mt76x02_mcu.h"
-#include "mt76x02_dma.h"
struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len)
{
@@ -37,7 +35,7 @@ struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len)
EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_alloc);
static struct sk_buff *
-mt76x02_mcu_get_response(struct mt76_dev *dev, unsigned long expires)
+mt76x02_mcu_get_response(struct mt76x02_dev *dev, unsigned long expires)
{
unsigned long timeout;
@@ -45,17 +43,17 @@ mt76x02_mcu_get_response(struct mt76_dev *dev, unsigned long expires)
return NULL;
timeout = expires - jiffies;
- wait_event_timeout(dev->mmio.mcu.wait,
- !skb_queue_empty(&dev->mmio.mcu.res_q),
+ wait_event_timeout(dev->mt76.mmio.mcu.wait,
+ !skb_queue_empty(&dev->mt76.mmio.mcu.res_q),
timeout);
- return skb_dequeue(&dev->mmio.mcu.res_q);
+ return skb_dequeue(&dev->mt76.mmio.mcu.res_q);
}
static int
-mt76x02_tx_queue_mcu(struct mt76_dev *dev, enum mt76_txq_id qid,
+mt76x02_tx_queue_mcu(struct mt76x02_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, int cmd, int seq)
{
- struct mt76_queue *q = &dev->q_tx[qid];
+ struct mt76_queue *q = &dev->mt76.q_tx[qid];
struct mt76_queue_buf buf;
dma_addr_t addr;
u32 tx_info;
@@ -66,24 +64,26 @@ mt76x02_tx_queue_mcu(struct mt76_dev *dev, enum mt76_txq_id qid,
FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
FIELD_PREP(MT_MCU_MSG_LEN, skb->len);
- addr = dma_map_single(dev->dev, skb->data, skb->len,
+ addr = dma_map_single(dev->mt76.dev, skb->data, skb->len,
DMA_TO_DEVICE);
- if (dma_mapping_error(dev->dev, addr))
+ if (dma_mapping_error(dev->mt76.dev, addr))
return -ENOMEM;
buf.addr = addr;
buf.len = skb->len;
+
spin_lock_bh(&q->lock);
- dev->queue_ops->add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
- dev->queue_ops->kick(dev, q);
+ mt76_queue_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
+ mt76_queue_kick(dev, q);
spin_unlock_bh(&q->lock);
return 0;
}
-int mt76x02_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb,
+int mt76x02_mcu_msg_send(struct mt76_dev *mdev, struct sk_buff *skb,
int cmd, bool wait_resp)
{
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
unsigned long expires = jiffies + HZ;
int ret;
u8 seq;
@@ -91,11 +91,11 @@ int mt76x02_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb,
if (!skb)
return -EINVAL;
- mutex_lock(&dev->mmio.mcu.mutex);
+ mutex_lock(&mdev->mmio.mcu.mutex);
- seq = ++dev->mmio.mcu.msg_seq & 0xf;
+ seq = ++mdev->mmio.mcu.msg_seq & 0xf;
if (!seq)
- seq = ++dev->mmio.mcu.msg_seq & 0xf;
+ seq = ++mdev->mmio.mcu.msg_seq & 0xf;
ret = mt76x02_tx_queue_mcu(dev, MT_TXQ_MCU, skb, cmd, seq);
if (ret)
@@ -107,7 +107,7 @@ int mt76x02_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb,
skb = mt76x02_mcu_get_response(dev, expires);
if (!skb) {
- dev_err(dev->dev,
+ dev_err(mdev->dev,
"MCU message %d (seq %d) timed out\n", cmd,
seq);
ret = -ETIMEDOUT;
@@ -125,13 +125,13 @@ int mt76x02_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb,
}
out:
- mutex_unlock(&dev->mmio.mcu.mutex);
+ mutex_unlock(&mdev->mmio.mcu.mutex);
return ret;
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_send);
-int mt76x02_mcu_function_select(struct mt76_dev *dev,
+int mt76x02_mcu_function_select(struct mt76x02_dev *dev,
enum mcu_function func,
u32 val, bool wait_resp)
{
@@ -144,13 +144,12 @@ int mt76x02_mcu_function_select(struct mt76_dev *dev,
.value = cpu_to_le32(val),
};
- skb = dev->mcu_ops->mcu_msg_alloc(&msg, sizeof(msg));
- return dev->mcu_ops->mcu_send_msg(dev, skb, CMD_FUN_SET_OP,
- wait_resp);
+ skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
+ return mt76_mcu_send_msg(dev, skb, CMD_FUN_SET_OP, wait_resp);
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_function_select);
-int mt76x02_mcu_set_radio_state(struct mt76_dev *dev, bool on,
+int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on,
bool wait_resp)
{
struct sk_buff *skb;
@@ -162,13 +161,12 @@ int mt76x02_mcu_set_radio_state(struct mt76_dev *dev, bool on,
.level = cpu_to_le32(0),
};
- skb = dev->mcu_ops->mcu_msg_alloc(&msg, sizeof(msg));
- return dev->mcu_ops->mcu_send_msg(dev, skb, CMD_POWER_SAVING_OP,
- wait_resp);
+ skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
+ return mt76_mcu_send_msg(dev, skb, CMD_POWER_SAVING_OP, wait_resp);
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_set_radio_state);
-int mt76x02_mcu_calibrate(struct mt76_dev *dev, int type,
+int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type,
u32 param, bool wait)
{
struct sk_buff *skb;
@@ -182,44 +180,44 @@ int mt76x02_mcu_calibrate(struct mt76_dev *dev, int type,
int ret;
if (wait)
- dev->bus->rmw(dev, MT_MCU_COM_REG0, BIT(31), 0);
+ mt76_rmw(dev, MT_MCU_COM_REG0, BIT(31), 0);
- skb = dev->mcu_ops->mcu_msg_alloc(&msg, sizeof(msg));
- ret = dev->mcu_ops->mcu_send_msg(dev, skb, CMD_CALIBRATION_OP, true);
+ skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
+ ret = mt76_mcu_send_msg(dev, skb, CMD_CALIBRATION_OP, true);
if (ret)
return ret;
if (wait &&
- WARN_ON(!__mt76_poll_msec(dev, MT_MCU_COM_REG0,
- BIT(31), BIT(31), 100)))
+ WARN_ON(!mt76_poll_msec(dev, MT_MCU_COM_REG0,
+ BIT(31), BIT(31), 100)))
return -ETIMEDOUT;
return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_calibrate);
-int mt76x02_mcu_cleanup(struct mt76_dev *dev)
+int mt76x02_mcu_cleanup(struct mt76x02_dev *dev)
{
struct sk_buff *skb;
- dev->bus->wr(dev, MT_MCU_INT_LEVEL, 1);
+ mt76_wr(dev, MT_MCU_INT_LEVEL, 1);
usleep_range(20000, 30000);
- while ((skb = skb_dequeue(&dev->mmio.mcu.res_q)) != NULL)
+ while ((skb = skb_dequeue(&dev->mt76.mmio.mcu.res_q)) != NULL)
dev_kfree_skb(skb);
return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_cleanup);
-void mt76x02_set_ethtool_fwver(struct mt76_dev *dev,
+void mt76x02_set_ethtool_fwver(struct mt76x02_dev *dev,
const struct mt76x02_fw_header *h)
{
u16 bld = le16_to_cpu(h->build_ver);
u16 ver = le16_to_cpu(h->fw_ver);
- snprintf(dev->hw->wiphy->fw_version,
- sizeof(dev->hw->wiphy->fw_version),
+ snprintf(dev->mt76.hw->wiphy->fw_version,
+ sizeof(dev->mt76.hw->wiphy->fw_version),
"%d.%d.%02d-b%x",
(ver >> 12) & 0xf, (ver >> 8) & 0xf, ver & 0xf, bld);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h
index d30a58b5df29..2d8fd2514570 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h
@@ -15,7 +15,9 @@
*/
#ifndef __MT76x02_MCU_H
-#define __MT76x0x_MCU_H
+#define __MT76x02_MCU_H
+
+#include "mt76x02.h"
#define MT_MCU_RESET_CTL 0x070C
#define MT_MCU_INT_LEVEL 0x0718
@@ -94,18 +96,18 @@ struct mt76x02_patch_header {
u8 pad[2];
};
-int mt76x02_mcu_cleanup(struct mt76_dev *dev);
-int mt76x02_mcu_calibrate(struct mt76_dev *dev, int type,
+int mt76x02_mcu_cleanup(struct mt76x02_dev *dev);
+int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type,
u32 param, bool wait);
struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len);
-int mt76x02_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb,
+int mt76x02_mcu_msg_send(struct mt76_dev *mdev, struct sk_buff *skb,
int cmd, bool wait_resp);
-int mt76x02_mcu_function_select(struct mt76_dev *dev,
+int mt76x02_mcu_function_select(struct mt76x02_dev *dev,
enum mcu_function func,
u32 val, bool wait_resp);
-int mt76x02_mcu_set_radio_state(struct mt76_dev *dev, bool on,
+int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on,
bool wait_resp);
-void mt76x02_set_ethtool_fwver(struct mt76_dev *dev,
+void mt76x02_set_ethtool_fwver(struct mt76x02_dev *dev,
const struct mt76x02_fw_header *h);
#endif /* __MT76x02_MCU_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index 1146fbfd8df5..39f092034240 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -16,23 +16,22 @@
*/
#include <linux/kernel.h>
+#include <linux/irq.h>
-#include "mt76.h"
-#include "mt76x02_dma.h"
-#include "mt76x02_util.h"
-#include "mt76x02_mac.h"
+#include "mt76x02.h"
+#include "mt76x02_trace.h"
static int
-mt76x02_init_tx_queue(struct mt76_dev *dev, struct mt76_queue *q,
+mt76x02_init_tx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
int idx, int n_desc)
{
int ret;
- q->regs = dev->mmio.regs + MT_TX_RING_BASE + idx * MT_RING_SIZE;
+ q->regs = dev->mt76.mmio.regs + MT_TX_RING_BASE + idx * MT_RING_SIZE;
q->ndesc = n_desc;
q->hw_idx = idx;
- ret = __mt76_queue_alloc(dev, q);
+ ret = mt76_queue_alloc(dev, q);
if (ret)
return ret;
@@ -42,16 +41,16 @@ mt76x02_init_tx_queue(struct mt76_dev *dev, struct mt76_queue *q,
}
static int
-mt76x02_init_rx_queue(struct mt76_dev *dev, struct mt76_queue *q,
+mt76x02_init_rx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
int idx, int n_desc, int bufsize)
{
int ret;
- q->regs = dev->mmio.regs + MT_RX_RING_BASE + idx * MT_RING_SIZE;
+ q->regs = dev->mt76.mmio.regs + MT_RX_RING_BASE + idx * MT_RING_SIZE;
q->ndesc = n_desc;
q->buf_size = bufsize;
- ret = __mt76_queue_alloc(dev, q);
+ ret = mt76_queue_alloc(dev, q);
if (ret)
return ret;
@@ -60,100 +59,200 @@ mt76x02_init_rx_queue(struct mt76_dev *dev, struct mt76_queue *q,
return 0;
}
-int mt76x02_dma_init(struct mt76_dev *dev)
+static void mt76x02_process_tx_status_fifo(struct mt76x02_dev *dev)
+{
+ struct mt76x02_tx_status stat;
+ u8 update = 1;
+
+ while (kfifo_get(&dev->txstatus_fifo, &stat))
+ mt76x02_send_tx_status(dev, &stat, &update);
+}
+
+static void mt76x02_tx_tasklet(unsigned long data)
+{
+ struct mt76x02_dev *dev = (struct mt76x02_dev *)data;
+ int i;
+
+ mt76x02_process_tx_status_fifo(dev);
+
+ for (i = MT_TXQ_MCU; i >= 0; i--)
+ mt76_queue_tx_cleanup(dev, i, false);
+
+ mt76x02_mac_poll_tx_status(dev, false);
+ mt76x02_irq_enable(dev, MT_INT_TX_DONE_ALL);
+}
+
+int mt76x02_dma_init(struct mt76x02_dev *dev)
{
struct mt76_txwi_cache __maybe_unused *t;
+ int i, ret, fifo_size;
struct mt76_queue *q;
- int i, ret;
+ void *status_fifo;
BUILD_BUG_ON(sizeof(t->txwi) < sizeof(struct mt76x02_txwi));
BUILD_BUG_ON(sizeof(struct mt76x02_rxwi) > MT_RX_HEADROOM);
- mt76_dma_attach(dev);
- __mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
+ fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x02_tx_status));
+ status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL);
+ if (!status_fifo)
+ return -ENOMEM;
+
+ tasklet_init(&dev->tx_tasklet, mt76x02_tx_tasklet, (unsigned long) dev);
+ kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);
+
+ mt76_dma_attach(&dev->mt76);
+
+ mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- ret = mt76x02_init_tx_queue(dev, &dev->q_tx[i],
+ ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[i],
mt76_ac_to_hwq(i),
MT_TX_RING_SIZE);
if (ret)
return ret;
}
- ret = mt76x02_init_tx_queue(dev, &dev->q_tx[MT_TXQ_PSD],
+ ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE);
if (ret)
return ret;
- ret = mt76x02_init_tx_queue(dev, &dev->q_tx[MT_TXQ_MCU],
+ ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
if (ret)
return ret;
- ret = mt76x02_init_rx_queue(dev, &dev->q_rx[MT_RXQ_MCU], 1,
+ ret = mt76x02_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
MT_MCU_RING_SIZE, MT_RX_BUF_SIZE);
if (ret)
return ret;
- q = &dev->q_rx[MT_RXQ_MAIN];
+ q = &dev->mt76.q_rx[MT_RXQ_MAIN];
q->buf_offset = MT_RX_HEADROOM - sizeof(struct mt76x02_rxwi);
ret = mt76x02_init_rx_queue(dev, q, 0, MT76X02_RX_RING_SIZE,
MT_RX_BUF_SIZE);
if (ret)
return ret;
- return __mt76_init_queues(dev);
+ return mt76_init_queues(dev);
}
EXPORT_SYMBOL_GPL(mt76x02_dma_init);
-void mt76x02_set_irq_mask(struct mt76_dev *dev, u32 clear, u32 set)
+void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
+{
+ struct mt76x02_dev *dev;
+
+ dev = container_of(mdev, struct mt76x02_dev, mt76);
+ mt76x02_irq_enable(dev, MT_INT_RX_DONE(q));
+}
+EXPORT_SYMBOL_GPL(mt76x02_rx_poll_complete);
+
+irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
+{
+ struct mt76x02_dev *dev = dev_instance;
+ u32 intr;
+
+ intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+ mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
+ return IRQ_NONE;
+
+ trace_dev_irq(dev, intr, dev->mt76.mmio.irqmask);
+
+ intr &= dev->mt76.mmio.irqmask;
+
+ if (intr & MT_INT_TX_DONE_ALL) {
+ mt76x02_irq_disable(dev, MT_INT_TX_DONE_ALL);
+ tasklet_schedule(&dev->tx_tasklet);
+ }
+
+ if (intr & MT_INT_RX_DONE(0)) {
+ mt76x02_irq_disable(dev, MT_INT_RX_DONE(0));
+ napi_schedule(&dev->mt76.napi[0]);
+ }
+
+ if (intr & MT_INT_RX_DONE(1)) {
+ mt76x02_irq_disable(dev, MT_INT_RX_DONE(1));
+ napi_schedule(&dev->mt76.napi[1]);
+ }
+
+ if (intr & MT_INT_PRE_TBTT)
+ tasklet_schedule(&dev->pre_tbtt_tasklet);
+
+ /* send buffered multicast frames now */
+ if (intr & MT_INT_TBTT)
+ mt76_queue_kick(dev, &dev->mt76.q_tx[MT_TXQ_PSD]);
+
+ if (intr & MT_INT_TX_STAT) {
+ mt76x02_mac_poll_tx_status(dev, true);
+ tasklet_schedule(&dev->tx_tasklet);
+ }
+
+ if (intr & MT_INT_GPTIMER) {
+ mt76x02_irq_disable(dev, MT_INT_GPTIMER);
+ tasklet_schedule(&dev->dfs_pd.dfs_tasklet);
+ }
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(mt76x02_irq_handler);
+
+void mt76x02_set_irq_mask(struct mt76x02_dev *dev, u32 clear, u32 set)
{
unsigned long flags;
- spin_lock_irqsave(&dev->mmio.irq_lock, flags);
- dev->mmio.irqmask &= ~clear;
- dev->mmio.irqmask |= set;
- __mt76_wr(dev, MT_INT_MASK_CSR, dev->mmio.irqmask);
- spin_unlock_irqrestore(&dev->mmio.irq_lock, flags);
+ spin_lock_irqsave(&dev->mt76.mmio.irq_lock, flags);
+ dev->mt76.mmio.irqmask &= ~clear;
+ dev->mt76.mmio.irqmask |= set;
+ mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
+ spin_unlock_irqrestore(&dev->mt76.mmio.irq_lock, flags);
}
EXPORT_SYMBOL_GPL(mt76x02_set_irq_mask);
-void mt76x02_dma_enable(struct mt76_dev *dev)
+static void mt76x02_dma_enable(struct mt76x02_dev *dev)
{
u32 val;
- __mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
- mt76x02_wait_for_wpdma(dev, 1000);
+ mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+ mt76x02_wait_for_wpdma(&dev->mt76, 1000);
usleep_range(50, 100);
val = FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3) |
MT_WPDMA_GLO_CFG_TX_DMA_EN |
MT_WPDMA_GLO_CFG_RX_DMA_EN;
- __mt76_set(dev, MT_WPDMA_GLO_CFG, val);
- __mt76_clear(dev, MT_WPDMA_GLO_CFG,
- MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
+ mt76_set(dev, MT_WPDMA_GLO_CFG, val);
+ mt76_clear(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
}
EXPORT_SYMBOL_GPL(mt76x02_dma_enable);
-void mt76x02_dma_disable(struct mt76_dev *dev)
+void mt76x02_dma_cleanup(struct mt76x02_dev *dev)
+{
+ tasklet_kill(&dev->tx_tasklet);
+ mt76_dma_cleanup(&dev->mt76);
+}
+EXPORT_SYMBOL_GPL(mt76x02_dma_cleanup);
+
+void mt76x02_dma_disable(struct mt76x02_dev *dev)
{
- u32 val = __mt76_rr(dev, MT_WPDMA_GLO_CFG);
+ u32 val = mt76_rr(dev, MT_WPDMA_GLO_CFG);
val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
MT_WPDMA_GLO_CFG_BIG_ENDIAN |
MT_WPDMA_GLO_CFG_HDR_SEG_LEN;
val |= MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE;
- __mt76_wr(dev, MT_WPDMA_GLO_CFG, val);
+ mt76_wr(dev, MT_WPDMA_GLO_CFG, val);
}
EXPORT_SYMBOL_GPL(mt76x02_dma_disable);
-void mt76x02_mac_start(struct mt76_dev *dev)
+void mt76x02_mac_start(struct mt76x02_dev *dev)
{
mt76x02_dma_enable(dev);
- __mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
- __mt76_wr(dev, MT_MAC_SYS_CTRL,
- MT_MAC_SYS_CTRL_ENABLE_TX |
- MT_MAC_SYS_CTRL_ENABLE_RX);
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
+ mt76_wr(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_TX |
+ MT_MAC_SYS_CTRL_ENABLE_RX);
mt76x02_irq_enable(dev,
MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
MT_INT_TX_STAT);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
index e29914d78b72..0f1d7b5c9f68 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
@@ -17,17 +17,17 @@
#include <linux/kernel.h>
-#include "mt76.h"
+#include "mt76x02.h"
#include "mt76x02_phy.h"
-void mt76x02_phy_set_rxpath(struct mt76_dev *dev)
+void mt76x02_phy_set_rxpath(struct mt76x02_dev *dev)
{
u32 val;
- val = __mt76_rr(dev, MT_BBP(AGC, 0));
+ val = mt76_rr(dev, MT_BBP(AGC, 0));
val &= ~BIT(4);
- switch (dev->chainmask & 0xf) {
+ switch (dev->mt76.chainmask & 0xf) {
case 2:
val |= BIT(3);
break;
@@ -36,23 +36,23 @@ void mt76x02_phy_set_rxpath(struct mt76_dev *dev)
break;
}
- __mt76_wr(dev, MT_BBP(AGC, 0), val);
+ mt76_wr(dev, MT_BBP(AGC, 0), val);
mb();
- val = __mt76_rr(dev, MT_BBP(AGC, 0));
+ val = mt76_rr(dev, MT_BBP(AGC, 0));
}
EXPORT_SYMBOL_GPL(mt76x02_phy_set_rxpath);
-void mt76x02_phy_set_txdac(struct mt76_dev *dev)
+void mt76x02_phy_set_txdac(struct mt76x02_dev *dev)
{
int txpath;
- txpath = (dev->chainmask >> 8) & 0xf;
+ txpath = (dev->mt76.chainmask >> 8) & 0xf;
switch (txpath) {
case 2:
- __mt76_set(dev, MT_BBP(TXBE, 5), 0x3);
+ mt76_set(dev, MT_BBP(TXBE, 5), 0x3);
break;
default:
- __mt76_clear(dev, MT_BBP(TXBE, 5), 0x3);
+ mt76_clear(dev, MT_BBP(TXBE, 5), 0x3);
break;
}
}
@@ -101,35 +101,158 @@ void mt76x02_add_rate_power_offset(struct mt76_rate_power *r, int offset)
}
EXPORT_SYMBOL_GPL(mt76x02_add_rate_power_offset);
-void mt76x02_phy_set_txpower(struct mt76_dev *dev, int txp_0, int txp_1)
+void mt76x02_phy_set_txpower(struct mt76x02_dev *dev, int txp_0, int txp_1)
{
- struct mt76_rate_power *t = &dev->rate_power;
-
- __mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0,
- txp_0);
- __mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1,
- txp_1);
-
- __mt76_wr(dev, MT_TX_PWR_CFG_0,
- mt76x02_tx_power_mask(t->cck[0], t->cck[2], t->ofdm[0],
- t->ofdm[2]));
- __mt76_wr(dev, MT_TX_PWR_CFG_1,
- mt76x02_tx_power_mask(t->ofdm[4], t->ofdm[6], t->ht[0],
- t->ht[2]));
- __mt76_wr(dev, MT_TX_PWR_CFG_2,
- mt76x02_tx_power_mask(t->ht[4], t->ht[6], t->ht[8],
- t->ht[10]));
- __mt76_wr(dev, MT_TX_PWR_CFG_3,
- mt76x02_tx_power_mask(t->ht[12], t->ht[14], t->stbc[0],
- t->stbc[2]));
- __mt76_wr(dev, MT_TX_PWR_CFG_4,
- mt76x02_tx_power_mask(t->stbc[4], t->stbc[6], 0, 0));
- __mt76_wr(dev, MT_TX_PWR_CFG_7,
- mt76x02_tx_power_mask(t->ofdm[7], t->vht[8], t->ht[7],
- t->vht[9]));
- __mt76_wr(dev, MT_TX_PWR_CFG_8,
- mt76x02_tx_power_mask(t->ht[14], 0, t->vht[8], t->vht[9]));
- __mt76_wr(dev, MT_TX_PWR_CFG_9,
- mt76x02_tx_power_mask(t->ht[7], 0, t->stbc[8], t->stbc[9]));
+ struct mt76_rate_power *t = &dev->mt76.rate_power;
+
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0);
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1);
+
+ mt76_wr(dev, MT_TX_PWR_CFG_0,
+ mt76x02_tx_power_mask(t->cck[0], t->cck[2], t->ofdm[0],
+ t->ofdm[2]));
+ mt76_wr(dev, MT_TX_PWR_CFG_1,
+ mt76x02_tx_power_mask(t->ofdm[4], t->ofdm[6], t->ht[0],
+ t->ht[2]));
+ mt76_wr(dev, MT_TX_PWR_CFG_2,
+ mt76x02_tx_power_mask(t->ht[4], t->ht[6], t->ht[8],
+ t->ht[10]));
+ mt76_wr(dev, MT_TX_PWR_CFG_3,
+ mt76x02_tx_power_mask(t->ht[12], t->ht[14], t->stbc[0],
+ t->stbc[2]));
+ mt76_wr(dev, MT_TX_PWR_CFG_4,
+ mt76x02_tx_power_mask(t->stbc[4], t->stbc[6], 0, 0));
+ mt76_wr(dev, MT_TX_PWR_CFG_7,
+ mt76x02_tx_power_mask(t->ofdm[7], t->vht[8], t->ht[7],
+ t->vht[9]));
+ mt76_wr(dev, MT_TX_PWR_CFG_8,
+ mt76x02_tx_power_mask(t->ht[14], 0, t->vht[8], t->vht[9]));
+ mt76_wr(dev, MT_TX_PWR_CFG_9,
+ mt76x02_tx_power_mask(t->ht[7], 0, t->stbc[8], t->stbc[9]));
}
EXPORT_SYMBOL_GPL(mt76x02_phy_set_txpower);
+
+int mt76x02_phy_get_min_avg_rssi(struct mt76x02_dev *dev)
+{
+ struct mt76x02_sta *sta;
+ struct mt76_wcid *wcid;
+ int i, j, min_rssi = 0;
+ s8 cur_rssi;
+
+ local_bh_disable();
+ rcu_read_lock();
+
+ for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid_mask); i++) {
+ unsigned long mask = dev->mt76.wcid_mask[i];
+
+ if (!mask)
+ continue;
+
+ for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) {
+ if (!(mask & 1))
+ continue;
+
+ wcid = rcu_dereference(dev->mt76.wcid[j]);
+ if (!wcid)
+ continue;
+
+ sta = container_of(wcid, struct mt76x02_sta, wcid);
+ spin_lock(&dev->mt76.rx_lock);
+ if (sta->inactive_count++ < 5)
+ cur_rssi = ewma_signal_read(&sta->rssi);
+ else
+ cur_rssi = 0;
+ spin_unlock(&dev->mt76.rx_lock);
+
+ if (cur_rssi < min_rssi)
+ min_rssi = cur_rssi;
+ }
+ }
+
+ rcu_read_unlock();
+ local_bh_enable();
+
+ if (!min_rssi)
+ return -75;
+
+ return min_rssi;
+}
+EXPORT_SYMBOL_GPL(mt76x02_phy_get_min_avg_rssi);
+
+void mt76x02_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl)
+{
+ int core_val, agc_val;
+
+ switch (width) {
+ case NL80211_CHAN_WIDTH_80:
+ core_val = 3;
+ agc_val = 7;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ core_val = 2;
+ agc_val = 3;
+ break;
+ default:
+ core_val = 0;
+ agc_val = 1;
+ break;
+ }
+
+ mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
+ mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
+ mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
+ mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
+}
+EXPORT_SYMBOL_GPL(mt76x02_phy_set_bw);
+
+void mt76x02_phy_set_band(struct mt76x02_dev *dev, int band,
+ bool primary_upper)
+{
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+ mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+ break;
+ case NL80211_BAND_5GHZ:
+ mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+ mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+ break;
+ }
+
+ mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
+ primary_upper);
+}
+EXPORT_SYMBOL_GPL(mt76x02_phy_set_band);
+
+bool mt76x02_phy_adjust_vga_gain(struct mt76x02_dev *dev)
+{
+ u8 limit = dev->cal.low_gain > 0 ? 16 : 4;
+ bool ret = false;
+ u32 false_cca;
+
+ false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS, mt76_rr(dev, MT_RX_STAT_1));
+ dev->cal.false_cca = false_cca;
+ if (false_cca > 800 && dev->cal.agc_gain_adjust < limit) {
+ dev->cal.agc_gain_adjust += 2;
+ ret = true;
+ } else if ((false_cca < 10 && dev->cal.agc_gain_adjust > 0) ||
+ (dev->cal.agc_gain_adjust >= limit && false_cca < 500)) {
+ dev->cal.agc_gain_adjust -= 2;
+ ret = true;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76x02_phy_adjust_vga_gain);
+
+void mt76x02_init_agc_gain(struct mt76x02_dev *dev)
+{
+ dev->cal.agc_gain_init[0] = mt76_get_field(dev, MT_BBP(AGC, 8),
+ MT_BBP_AGC_GAIN);
+ dev->cal.agc_gain_init[1] = mt76_get_field(dev, MT_BBP(AGC, 9),
+ MT_BBP_AGC_GAIN);
+ memcpy(dev->cal.agc_gain_cur, dev->cal.agc_gain_init,
+ sizeof(dev->cal.agc_gain_cur));
+ dev->cal.low_gain = -1;
+}
+EXPORT_SYMBOL_GPL(mt76x02_init_agc_gain);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h
index df69f8fade75..2b316cf7c70c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h
@@ -19,11 +19,43 @@
#include "mt76x02_regs.h"
+static inline int
+mt76x02_get_rssi_gain_thresh(struct mt76x02_dev *dev)
+{
+ switch (dev->mt76.chandef.width) {
+ case NL80211_CHAN_WIDTH_80:
+ return -62;
+ case NL80211_CHAN_WIDTH_40:
+ return -65;
+ default:
+ return -68;
+ }
+}
+
+static inline int
+mt76x02_get_low_rssi_gain_thresh(struct mt76x02_dev *dev)
+{
+ switch (dev->mt76.chandef.width) {
+ case NL80211_CHAN_WIDTH_80:
+ return -76;
+ case NL80211_CHAN_WIDTH_40:
+ return -79;
+ default:
+ return -82;
+ }
+}
+
void mt76x02_add_rate_power_offset(struct mt76_rate_power *r, int offset);
-void mt76x02_phy_set_txpower(struct mt76_dev *dev, int txp_0, int txp_2);
+void mt76x02_phy_set_txpower(struct mt76x02_dev *dev, int txp_0, int txp_2);
void mt76x02_limit_rate_power(struct mt76_rate_power *r, int limit);
int mt76x02_get_max_rate_power(struct mt76_rate_power *r);
-void mt76x02_phy_set_rxpath(struct mt76_dev *dev);
-void mt76x02_phy_set_txdac(struct mt76_dev *dev);
+void mt76x02_phy_set_rxpath(struct mt76x02_dev *dev);
+void mt76x02_phy_set_txdac(struct mt76x02_dev *dev);
+int mt76x02_phy_get_min_avg_rssi(struct mt76x02_dev *dev);
+void mt76x02_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl);
+void mt76x02_phy_set_band(struct mt76x02_dev *dev, int band,
+ bool primary_upper);
+bool mt76x02_phy_adjust_vga_gain(struct mt76x02_dev *dev);
+void mt76x02_init_agc_gain(struct mt76x02_dev *dev);
#endif /* __MT76x02_PHY_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
index 24d1e6d747dd..f7de77d09d28 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
@@ -205,8 +205,8 @@
#define MT_TXQ_STA 0x0434
#define MT_RF_CSR_CFG 0x0500
#define MT_RF_CSR_CFG_DATA GENMASK(7, 0)
-#define MT_RF_CSR_CFG_REG_ID GENMASK(13, 8)
-#define MT_RF_CSR_CFG_REG_BANK GENMASK(17, 14)
+#define MT_RF_CSR_CFG_REG_ID GENMASK(14, 8)
+#define MT_RF_CSR_CFG_REG_BANK GENMASK(17, 15)
#define MT_RF_CSR_CFG_WR BIT(30)
#define MT_RF_CSR_CFG_KICK BIT(31)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_trace.c b/drivers/net/wireless/mediatek/mt76/mt76x02_trace.c
index a09f117848d6..5b42d2c87937 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_trace.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_trace.c
@@ -18,6 +18,6 @@
#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
-#include "mt76x2_trace.h"
+#include "mt76x02_trace.h"
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_trace.h b/drivers/net/wireless/mediatek/mt76/mt76x02_trace.h
index eb5afeaefa44..713f12d3c8de 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_trace.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_trace.h
@@ -14,14 +14,14 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#if !defined(__MT76x2_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
-#define __MT76x2_TRACE_H
+#if !defined(__MT76x02_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT76x02_TRACE_H
#include <linux/tracepoint.h>
-#include "mt76x2.h"
+#include "mt76x02.h"
#undef TRACE_SYSTEM
-#define TRACE_SYSTEM mt76x2
+#define TRACE_SYSTEM mt76x02
#define MAXNAME 32
#define DEV_ENTRY __array(char, wiphy_name, 32)
@@ -35,7 +35,7 @@
#define TXID_PR_ARG __entry->wcid, __entry->pktid
DECLARE_EVENT_CLASS(dev_evt,
- TP_PROTO(struct mt76x2_dev *dev),
+ TP_PROTO(struct mt76x02_dev *dev),
TP_ARGS(dev),
TP_STRUCT__entry(
DEV_ENTRY
@@ -47,7 +47,7 @@ DECLARE_EVENT_CLASS(dev_evt,
);
DECLARE_EVENT_CLASS(dev_txid_evt,
- TP_PROTO(struct mt76x2_dev *dev, u8 wcid, u8 pktid),
+ TP_PROTO(struct mt76x02_dev *dev, u8 wcid, u8 pktid),
TP_ARGS(dev, wcid, pktid),
TP_STRUCT__entry(
DEV_ENTRY
@@ -63,18 +63,18 @@ DECLARE_EVENT_CLASS(dev_txid_evt,
)
);
-DEFINE_EVENT(dev_evt, mac_txstat_poll,
- TP_PROTO(struct mt76x2_dev *dev),
- TP_ARGS(dev)
-);
-
DEFINE_EVENT(dev_txid_evt, mac_txdone_add,
- TP_PROTO(struct mt76x2_dev *dev, u8 wcid, u8 pktid),
+ TP_PROTO(struct mt76x02_dev *dev, u8 wcid, u8 pktid),
TP_ARGS(dev, wcid, pktid)
);
+DEFINE_EVENT(dev_evt, mac_txstat_poll,
+ TP_PROTO(struct mt76x02_dev *dev),
+ TP_ARGS(dev)
+);
+
TRACE_EVENT(mac_txstat_fetch,
- TP_PROTO(struct mt76x2_dev *dev,
+ TP_PROTO(struct mt76x02_dev *dev,
struct mt76x02_tx_status *stat),
TP_ARGS(dev, stat),
@@ -110,9 +110,8 @@ TRACE_EVENT(mac_txstat_fetch,
)
);
-
TRACE_EVENT(dev_irq,
- TP_PROTO(struct mt76x2_dev *dev, u32 val, u32 mask),
+ TP_PROTO(struct mt76x02_dev *dev, u32 val, u32 mask),
TP_ARGS(dev, val, mask),
@@ -139,6 +138,6 @@ TRACE_EVENT(dev_irq,
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE mt76x2_trace
+#define TRACE_INCLUDE_FILE mt76x02_trace
#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
new file mode 100644
index 000000000000..d3de08872d6e
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+
+#include "mt76x02.h"
+
+void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct mt76x02_dev *dev = hw->priv;
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+
+ if (control->sta) {
+ struct mt76x02_sta *msta;
+
+ msta = (struct mt76x02_sta *)control->sta->drv_priv;
+ wcid = &msta->wcid;
+ /* sw encrypted frames */
+ if (!info->control.hw_key && wcid->hw_key_idx != 0xff)
+ control->sta = NULL;
+ }
+
+ if (vif && !control->sta) {
+ struct mt76x02_vif *mvif;
+
+ mvif = (struct mt76x02_vif *)vif->drv_priv;
+ wcid = &mvif->group_wcid;
+ }
+
+ mt76_tx(&dev->mt76, control->sta, wcid, skb);
+}
+EXPORT_SYMBOL_GPL(mt76x02_tx);
+
+void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb)
+{
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+ void *rxwi = skb->data;
+
+ if (q == MT_RXQ_MCU) {
+ /* this is used just by mmio code */
+ skb_queue_tail(&mdev->mmio.mcu.res_q, skb);
+ wake_up(&mdev->mmio.mcu.wait);
+ return;
+ }
+
+ skb_pull(skb, sizeof(struct mt76x02_rxwi));
+ if (mt76x02_mac_process_rx(dev, skb, rxwi)) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ mt76_rx(mdev, q, skb);
+}
+EXPORT_SYMBOL_GPL(mt76x02_queue_rx_skb);
+
+s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev,
+ const struct ieee80211_tx_rate *rate)
+{
+ s8 max_txpwr;
+
+ if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+ u8 mcs = ieee80211_rate_get_vht_mcs(rate);
+
+ if (mcs == 8 || mcs == 9) {
+ max_txpwr = dev->mt76.rate_power.vht[8];
+ } else {
+ u8 nss, idx;
+
+ nss = ieee80211_rate_get_vht_nss(rate);
+ idx = ((nss - 1) << 3) + mcs;
+ max_txpwr = dev->mt76.rate_power.ht[idx & 0xf];
+ }
+ } else if (rate->flags & IEEE80211_TX_RC_MCS) {
+ max_txpwr = dev->mt76.rate_power.ht[rate->idx & 0xf];
+ } else {
+ enum nl80211_band band = dev->mt76.chandef.chan->band;
+
+ if (band == NL80211_BAND_2GHZ) {
+ const struct ieee80211_rate *r;
+ struct wiphy *wiphy = dev->mt76.hw->wiphy;
+ struct mt76_rate_power *rp = &dev->mt76.rate_power;
+
+ r = &wiphy->bands[band]->bitrates[rate->idx];
+ if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE)
+ max_txpwr = rp->cck[r->hw_value & 0x3];
+ else
+ max_txpwr = rp->ofdm[r->hw_value & 0x7];
+ } else {
+ max_txpwr = dev->mt76.rate_power.ofdm[rate->idx & 0x7];
+ }
+ }
+
+ return max_txpwr;
+}
+EXPORT_SYMBOL_GPL(mt76x02_tx_get_max_txpwr_adj);
+
+s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr, s8 max_txpwr_adj)
+{
+ txpwr = min_t(s8, txpwr, dev->mt76.txpower_conf);
+ txpwr -= (dev->target_power + dev->target_power_delta[0]);
+ txpwr = min_t(s8, txpwr, max_txpwr_adj);
+
+ if (!dev->enable_tpc)
+ return 0;
+ else if (txpwr >= 0)
+ return min_t(s8, txpwr, 7);
+ else
+ return (txpwr < -16) ? 8 : (txpwr + 32) / 2;
+}
+EXPORT_SYMBOL_GPL(mt76x02_tx_get_txpwr_adj);
+
+void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr)
+{
+ s8 txpwr_adj;
+
+ txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, txpwr,
+ dev->mt76.rate_power.ofdm[4]);
+ mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
+ MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj);
+ mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
+ MT_PROT_AUTO_TX_CFG_AUTO_PADJ, txpwr_adj);
+}
+EXPORT_SYMBOL_GPL(mt76x02_tx_set_txpwr_auto);
+
+void mt76x02_tx_complete(struct mt76_dev *dev, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+ ieee80211_free_txskb(dev->hw, skb);
+ } else {
+ ieee80211_tx_info_clear_status(info);
+ info->status.rates[0].idx = -1;
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ ieee80211_tx_status(dev->hw, skb);
+ }
+}
+EXPORT_SYMBOL_GPL(mt76x02_tx_complete);
+
+bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update)
+{
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+ struct mt76x02_tx_status stat;
+
+ if (!mt76x02_mac_load_tx_status(dev, &stat))
+ return false;
+
+ mt76x02_send_tx_status(dev, &stat, update);
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(mt76x02_tx_status_data);
+
+int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
+ struct sk_buff *skb, struct mt76_queue *q,
+ struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+ u32 *tx_info)
+{
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ int qsel = MT_QSEL_EDCA;
+ int ret;
+
+ if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128)
+ mt76x02_mac_wcid_set_drop(dev, wcid->idx, false);
+
+ mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, skb->len);
+
+ ret = mt76x02_insert_hdr_pad(skb);
+ if (ret < 0)
+ return ret;
+
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+ qsel = MT_QSEL_MGMT;
+
+ *tx_info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
+ MT_TXD_INFO_80211;
+
+ if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
+ *tx_info |= MT_TXD_INFO_WIV;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_tx_prepare_skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h b/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
index 2482f9761fcd..0126e51d77ed 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
@@ -15,15 +15,20 @@
*/
#ifndef __MT76x02_USB_H
-#define __MT76x0x_USB_H
+#define __MT76x02_USB_H
-#include "mt76.h"
+#include "mt76x02.h"
void mt76x02u_init_mcu(struct mt76_dev *dev);
-void mt76x02u_mcu_fw_reset(struct mt76_dev *dev);
-int mt76x02u_mcu_fw_send_data(struct mt76_dev *dev, const void *data,
+void mt76x02u_mcu_fw_reset(struct mt76x02_dev *dev);
+int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data,
int data_len, u32 max_payload, u32 offset);
int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags);
-int mt76x02u_set_txinfo(struct sk_buff *skb, struct mt76_wcid *wcid, u8 ep);
+int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
+ struct sk_buff *skb, struct mt76_queue *q,
+ struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+ u32 *tx_info);
+void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
+ struct mt76_queue_entry *e, bool flush);
#endif /* __MT76x02_USB_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
index aecbe0c429ea..dc2226c722dd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
@@ -14,8 +14,25 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include "mt76.h"
-#include "mt76x02_dma.h"
+#include "mt76x02.h"
+
+static void mt76x02u_remove_dma_hdr(struct sk_buff *skb)
+{
+ int hdr_len;
+
+ skb_pull(skb, sizeof(struct mt76x02_txwi) + MT_DMA_HDR_LEN);
+ hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+ if (hdr_len % 4)
+ mt76x02_remove_hdr_pad(skb, 2);
+}
+
+void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
+ struct mt76_queue_entry *e, bool flush)
+{
+ mt76x02u_remove_dma_hdr(e->skb);
+ mt76x02_tx_complete(mdev, e->skb);
+}
+EXPORT_SYMBOL_GPL(mt76x02u_tx_complete_skb);
int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
{
@@ -50,7 +67,8 @@ int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
return 0;
}
-int mt76x02u_set_txinfo(struct sk_buff *skb, struct mt76_wcid *wcid, u8 ep)
+static int
+mt76x02u_set_txinfo(struct sk_buff *skb, struct mt76_wcid *wcid, u8 ep)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
enum mt76_qsel qsel;
@@ -69,4 +87,21 @@ int mt76x02u_set_txinfo(struct sk_buff *skb, struct mt76_wcid *wcid, u8 ep)
return mt76x02u_skb_dma_info(skb, WLAN_PORT, flags);
}
-EXPORT_SYMBOL_GPL(mt76x02u_set_txinfo);
+
+int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
+ struct sk_buff *skb, struct mt76_queue *q,
+ struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+ u32 *tx_info)
+{
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+ struct mt76x02_txwi *txwi;
+ int len = skb->len;
+
+ mt76x02_insert_hdr_pad(skb);
+
+ txwi = skb_push(skb, sizeof(struct mt76x02_txwi));
+ mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, len);
+
+ return mt76x02u_set_txinfo(skb, wcid, q2ep(q->hw_idx));
+}
+EXPORT_SYMBOL_GPL(mt76x02u_tx_prepare_skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
index cb5f073f08af..da299b8a1334 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
@@ -17,8 +17,7 @@
#include <linux/module.h>
#include <linux/firmware.h>
-#include "mt76.h"
-#include "mt76x02_dma.h"
+#include "mt76x02.h"
#include "mt76x02_mcu.h"
#include "mt76x02_usb.h"
@@ -255,16 +254,16 @@ mt76x02u_mcu_rd_rp(struct mt76_dev *dev, u32 base,
return ret;
}
-void mt76x02u_mcu_fw_reset(struct mt76_dev *dev)
+void mt76x02u_mcu_fw_reset(struct mt76x02_dev *dev)
{
- mt76u_vendor_request(dev, MT_VEND_DEV_MODE,
+ mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
USB_DIR_OUT | USB_TYPE_VENDOR,
0x1, 0, NULL, 0);
}
EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_reset);
static int
-__mt76x02u_mcu_fw_send_data(struct mt76_dev *dev, struct mt76u_buf *buf,
+__mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, struct mt76u_buf *buf,
const void *fw_data, int len, u32 dst_addr)
{
u8 *data = sg_virt(&buf->urb->sg[0]);
@@ -281,14 +280,14 @@ __mt76x02u_mcu_fw_send_data(struct mt76_dev *dev, struct mt76u_buf *buf,
memcpy(data + sizeof(info), fw_data, len);
memset(data + sizeof(info) + len, 0, 4);
- mt76u_single_wr(dev, MT_VEND_WRITE_FCE,
+ mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE,
MT_FCE_DMA_ADDR, dst_addr);
len = roundup(len, 4);
- mt76u_single_wr(dev, MT_VEND_WRITE_FCE,
+ mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE,
MT_FCE_DMA_LEN, len << 16);
buf->len = MT_CMD_HDR_LEN + len + sizeof(info);
- err = mt76u_submit_buf(dev, USB_DIR_OUT,
+ err = mt76u_submit_buf(&dev->mt76, USB_DIR_OUT,
MT_EP_OUT_INBAND_CMD,
buf, GFP_KERNEL,
mt76u_mcu_complete_urb, &cmpl);
@@ -297,31 +296,31 @@ __mt76x02u_mcu_fw_send_data(struct mt76_dev *dev, struct mt76u_buf *buf,
if (!wait_for_completion_timeout(&cmpl,
msecs_to_jiffies(1000))) {
- dev_err(dev->dev, "firmware upload timed out\n");
+ dev_err(dev->mt76.dev, "firmware upload timed out\n");
usb_kill_urb(buf->urb);
return -ETIMEDOUT;
}
if (mt76u_urb_error(buf->urb)) {
- dev_err(dev->dev, "firmware upload failed: %d\n",
+ dev_err(dev->mt76.dev, "firmware upload failed: %d\n",
buf->urb->status);
return buf->urb->status;
}
- val = mt76u_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
+ val = mt76_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
val++;
- mt76u_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);
return 0;
}
-int mt76x02u_mcu_fw_send_data(struct mt76_dev *dev, const void *data,
+int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data,
int data_len, u32 max_payload, u32 offset)
{
int err, len, pos = 0, max_len = max_payload - 8;
struct mt76u_buf buf;
- err = mt76u_buf_alloc(dev, &buf, 1, max_payload, max_payload,
+ err = mt76u_buf_alloc(&dev->mt76, &buf, 1, max_payload, max_payload,
GFP_KERNEL);
if (err < 0)
return err;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index ec422c3980e8..ca05332f81fc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -16,10 +16,7 @@
*/
#include <linux/module.h>
-#include "mt76.h"
-#include "mt76x02_dma.h"
-#include "mt76x02_regs.h"
-#include "mt76x02_mac.h"
+#include "mt76x02.h"
#define CCK_RATE(_idx, _rate) { \
.bitrate = _rate, \
@@ -51,21 +48,21 @@ struct ieee80211_rate mt76x02_rates[] = {
EXPORT_SYMBOL_GPL(mt76x02_rates);
void mt76x02_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags, u64 multicast)
+ unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast)
{
- struct mt76_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
u32 flags = 0;
#define MT76_FILTER(_flag, _hw) do { \
flags |= *total_flags & FIF_##_flag; \
- dev->rxfilter &= ~(_hw); \
- dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
+ dev->mt76.rxfilter &= ~(_hw); \
+ dev->mt76.rxfilter |= !(flags & FIF_##_flag) * (_hw); \
} while (0)
- mutex_lock(&dev->mutex);
+ mutex_lock(&dev->mt76.mutex);
- dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
+ dev->mt76.rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
@@ -78,25 +75,25 @@ void mt76x02_configure_filter(struct ieee80211_hw *hw,
MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
*total_flags = flags;
- dev->bus->wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
- mutex_unlock(&dev->mutex);
+ mutex_unlock(&dev->mt76.mutex);
}
EXPORT_SYMBOL_GPL(mt76x02_configure_filter);
int mt76x02_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+ struct ieee80211_sta *sta)
{
- struct mt76_dev *dev = hw->priv;
- struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv;
- struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv;
+ struct mt76x02_dev *dev = hw->priv;
+ struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv;
+ struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
int ret = 0;
int idx = 0;
int i;
- mutex_lock(&dev->mutex);
+ mutex_lock(&dev->mt76.mutex);
- idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid));
+ idx = mt76_wcid_alloc(dev->mt76.wcid_mask, ARRAY_SIZE(dev->mt76.wcid));
if (idx < 0) {
ret = -ENOSPC;
goto out;
@@ -116,40 +113,40 @@ int mt76x02_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
ewma_signal_init(&msta->rssi);
- rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
+ rcu_assign_pointer(dev->mt76.wcid[idx], &msta->wcid);
out:
- mutex_unlock(&dev->mutex);
+ mutex_unlock(&dev->mt76.mutex);
return ret;
}
EXPORT_SYMBOL_GPL(mt76x02_sta_add);
int mt76x02_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+ struct ieee80211_sta *sta)
{
- struct mt76_dev *dev = hw->priv;
- struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv;
+ struct mt76x02_dev *dev = hw->priv;
+ struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv;
int idx = msta->wcid.idx;
int i;
- mutex_lock(&dev->mutex);
- rcu_assign_pointer(dev->wcid[idx], NULL);
+ mutex_lock(&dev->mt76.mutex);
+ rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
- mt76_txq_remove(dev, sta->txq[i]);
+ mt76_txq_remove(&dev->mt76, sta->txq[i]);
mt76x02_mac_wcid_set_drop(dev, idx, true);
- mt76_wcid_free(dev->wcid_mask, idx);
+ mt76_wcid_free(dev->mt76.wcid_mask, idx);
mt76x02_mac_wcid_setup(dev, idx, 0, NULL);
- mutex_unlock(&dev->mutex);
+ mutex_unlock(&dev->mt76.mutex);
return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_sta_remove);
-void mt76x02_vif_init(struct mt76_dev *dev, struct ieee80211_vif *vif,
- unsigned int idx)
+void mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
+ unsigned int idx)
{
- struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv;
+ struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
mvif->idx = idx;
mvif->group_wcid.idx = MT_VIF_WCID(idx);
@@ -161,11 +158,11 @@ EXPORT_SYMBOL_GPL(mt76x02_vif_init);
int
mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
- struct mt76_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
unsigned int idx = 0;
if (vif->addr[0] & BIT(1))
- idx = 1 + (((dev->macaddr[0] ^ vif->addr[0]) >> 2) & 7);
+ idx = 1 + (((dev->mt76.macaddr[0] ^ vif->addr[0]) >> 2) & 7);
/*
* Client mode typically only has one configurable BSSID register,
@@ -189,20 +186,20 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
EXPORT_SYMBOL_GPL(mt76x02_add_interface);
void mt76x02_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif)
{
- struct mt76_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
- mt76_txq_remove(dev, vif->txq);
+ mt76_txq_remove(&dev->mt76, vif->txq);
}
EXPORT_SYMBOL_GPL(mt76x02_remove_interface);
int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_ampdu_params *params)
+ struct ieee80211_ampdu_params *params)
{
enum ieee80211_ampdu_mlme_action action = params->action;
struct ieee80211_sta *sta = params->sta;
- struct mt76_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv;
struct ieee80211_txq *txq = sta->txq[params->tid];
u16 tid = params->tid;
@@ -216,12 +213,14 @@ int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
switch (action) {
case IEEE80211_AMPDU_RX_START:
- mt76_rx_aggr_start(dev, &msta->wcid, tid, *ssn, params->buf_size);
- __mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
+ mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid,
+ *ssn, params->buf_size);
+ mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
break;
case IEEE80211_AMPDU_RX_STOP:
- mt76_rx_aggr_stop(dev, &msta->wcid, tid);
- __mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
+ mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
+ mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4,
+ BIT(16 + tid));
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
mtxq->aggr = true;
@@ -248,11 +247,11 @@ int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
EXPORT_SYMBOL_GPL(mt76x02_ampdu_action);
int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
{
- struct mt76_dev *dev = hw->priv;
- struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv;
+ struct mt76x02_dev *dev = hw->priv;
+ struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
struct mt76x02_sta *msta;
struct mt76_wcid *wcid;
int idx = key->keyidx;
@@ -298,7 +297,7 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
key = NULL;
}
- mt76_wcid_key_setup(dev, wcid, key);
+ mt76_wcid_key_setup(&dev->mt76, wcid, key);
if (!msta) {
if (key || wcid->hw_key_idx == idx) {
@@ -315,13 +314,13 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
EXPORT_SYMBOL_GPL(mt76x02_set_key);
int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- u16 queue, const struct ieee80211_tx_queue_params *params)
+ u16 queue, const struct ieee80211_tx_queue_params *params)
{
- struct mt76_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
u8 cw_min = 5, cw_max = 10, qid;
u32 val;
- qid = dev->q_tx[queue].hw_idx;
+ qid = dev->mt76.q_tx[queue].hw_idx;
if (params->cw_min)
cw_min = fls(params->cw_min);
@@ -332,27 +331,27 @@ int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
- __mt76_wr(dev, MT_EDCA_CFG_AC(qid), val);
+ mt76_wr(dev, MT_EDCA_CFG_AC(qid), val);
- val = __mt76_rr(dev, MT_WMM_TXOP(qid));
+ val = mt76_rr(dev, MT_WMM_TXOP(qid));
val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(qid));
val |= params->txop << MT_WMM_TXOP_SHIFT(qid);
- __mt76_wr(dev, MT_WMM_TXOP(qid), val);
+ mt76_wr(dev, MT_WMM_TXOP(qid), val);
- val = __mt76_rr(dev, MT_WMM_AIFSN);
+ val = mt76_rr(dev, MT_WMM_AIFSN);
val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(qid));
val |= params->aifs << MT_WMM_AIFSN_SHIFT(qid);
- __mt76_wr(dev, MT_WMM_AIFSN, val);
+ mt76_wr(dev, MT_WMM_AIFSN, val);
- val = __mt76_rr(dev, MT_WMM_CWMIN);
+ val = mt76_rr(dev, MT_WMM_CWMIN);
val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(qid));
val |= cw_min << MT_WMM_CWMIN_SHIFT(qid);
- __mt76_wr(dev, MT_WMM_CWMIN, val);
+ mt76_wr(dev, MT_WMM_CWMIN, val);
- val = __mt76_rr(dev, MT_WMM_CWMAX);
+ val = mt76_rr(dev, MT_WMM_CWMAX);
val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(qid));
val |= cw_max << MT_WMM_CWMAX_SHIFT(qid);
- __mt76_wr(dev, MT_WMM_CWMAX, val);
+ mt76_wr(dev, MT_WMM_CWMAX, val);
return 0;
}
@@ -362,7 +361,7 @@ void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- struct mt76_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv;
struct ieee80211_sta_rates *rates = rcu_dereference(sta->rates);
struct ieee80211_tx_rate rate = {};
@@ -373,9 +372,7 @@ void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw,
rate.idx = rates->rate[0].idx;
rate.flags = rates->rate[0].flags;
mt76x02_mac_wcid_set_rate(dev, &msta->wcid, &rate);
-
- if (dev->drv && dev->drv->get_max_txpwr_adj)
- msta->wcid.max_txpwr_adj = dev->drv->get_max_txpwr_adj(dev, &rate);
+ msta->wcid.max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, &rate);
}
EXPORT_SYMBOL_GPL(mt76x02_sta_rate_tbl_update);
@@ -408,52 +405,6 @@ void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len)
}
EXPORT_SYMBOL_GPL(mt76x02_remove_hdr_pad);
-static void mt76x02_remove_dma_hdr(struct sk_buff *skb)
-{
- int hdr_len;
-
- skb_pull(skb, sizeof(struct mt76x02_txwi) + MT_DMA_HDR_LEN);
- hdr_len = ieee80211_get_hdrlen_from_skb(skb);
- if (hdr_len % 4)
- mt76x02_remove_hdr_pad(skb, 2);
-}
-
-void mt76x02_tx_complete(struct mt76_dev *dev, struct sk_buff *skb)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
- if (info->flags & IEEE80211_TX_CTL_AMPDU) {
- ieee80211_free_txskb(dev->hw, skb);
- } else {
- ieee80211_tx_info_clear_status(info);
- info->status.rates[0].idx = -1;
- info->flags |= IEEE80211_TX_STAT_ACK;
- ieee80211_tx_status(dev->hw, skb);
- }
-}
-EXPORT_SYMBOL_GPL(mt76x02_tx_complete);
-
-void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
- struct mt76_queue_entry *e, bool flush)
-{
- mt76x02_remove_dma_hdr(e->skb);
- mt76x02_tx_complete(mdev, e->skb);
-}
-EXPORT_SYMBOL_GPL(mt76x02_tx_complete_skb);
-
-bool mt76x02_tx_status_data(struct mt76_dev *dev, u8 *update)
-{
- struct mt76x02_tx_status stat;
-
- if (!mt76x02_mac_load_tx_status(dev, &stat))
- return false;
-
- mt76x02_send_tx_status(dev, &stat, update);
-
- return true;
-}
-EXPORT_SYMBOL_GPL(mt76x02_tx_status_data);
-
const u16 mt76x02_beacon_offsets[16] = {
/* 1024 byte per beacon */
0xc000,
@@ -476,7 +427,7 @@ const u16 mt76x02_beacon_offsets[16] = {
};
EXPORT_SYMBOL_GPL(mt76x02_beacon_offsets);
-void mt76x02_set_beacon_offsets(struct mt76_dev *dev)
+void mt76x02_set_beacon_offsets(struct mt76x02_dev *dev)
{
u16 val, base = MT_BEACON_BASE;
u32 regs[4] = {};
@@ -488,7 +439,7 @@ void mt76x02_set_beacon_offsets(struct mt76_dev *dev)
}
for (i = 0; i < 4; i++)
- __mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]);
+ mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]);
}
EXPORT_SYMBOL_GPL(mt76x02_set_beacon_offsets);
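A note on the MT76_FILTER macro in the hunks above: the expression !(flags & FIF_x) * mask is a branchless way to either set or clear a hardware rx-filter bit, since the boolean is 0 or 1 and the multiply therefore keeps or drops the whole mask. Below is a minimal standalone sketch of the same idiom; the flag and register bit values are made up for illustration and are not taken from the driver.

/*
 * Sketch of the MT76_FILTER idiom: when the mac80211-style flag is
 * requested (pass FCS-failed frames through), the hardware "drop on
 * CRC error" bit is cleared; when it is absent, the bit is set, all
 * without a branch.  FIF_FCSFAIL and HW_FILTER_CRC_ERR here are
 * hypothetical values, not the driver's real definitions.
 */
#include <stdio.h>
#include <stdint.h>

#define FIF_FCSFAIL		0x01		/* hypothetical flag */
#define HW_FILTER_CRC_ERR	0x00000100u	/* hypothetical rxfilter bit */

static uint32_t apply_filter(uint32_t rxfilter, unsigned int total_flags)
{
	rxfilter &= ~HW_FILTER_CRC_ERR;
	rxfilter |= !(total_flags & FIF_FCSFAIL) * HW_FILTER_CRC_ERR;
	return rxfilter;
}

int main(void)
{
	/* flag requested: bit stays clear, hardware keeps bad-FCS frames */
	printf("flag set:   0x%08x\n", (unsigned int)apply_filter(0, FIF_FCSFAIL));
	/* flag absent: bit gets set, hardware drops bad-FCS frames */
	printf("flag clear: 0x%08x\n", (unsigned int)apply_filter(0, 0));
	return 0;
}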
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.h b/drivers/net/wireless/mediatek/mt76/mt76x02_util.h
deleted file mode 100644
index ff4cab5ca038..000000000000
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef __MT76X02_UTIL_H
-#define __MT76X02_UTIL_H
-
-extern struct ieee80211_rate mt76x02_rates[12];
-
-void mt76x02_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags, u64 multicast);
-int mt76x02_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
-int mt76x02_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
-
-void mt76x02_vif_init(struct mt76_dev *dev, struct ieee80211_vif *vif,
- unsigned int idx);
-int mt76x02_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
-void mt76x02_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
-
-int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_ampdu_params *params);
-int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key);
-int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- u16 queue, const struct ieee80211_tx_queue_params *params);
-void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
-int mt76x02_insert_hdr_pad(struct sk_buff *skb);
-void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len);
-void mt76x02_tx_complete(struct mt76_dev *dev, struct sk_buff *skb);
-void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
- struct mt76_queue_entry *e, bool flush);
-bool mt76x02_tx_status_data(struct mt76_dev *dev, u8 *update);
-
-extern const u16 mt76x02_beacon_offsets[16];
-void mt76x02_set_beacon_offsets(struct mt76_dev *dev);
-void mt76x02_set_irq_mask(struct mt76_dev *dev, u32 clear, u32 set);
-void mt76x02_mac_start(struct mt76_dev *dev);
-
-static inline void mt76x02_irq_enable(struct mt76_dev *dev, u32 mask)
-{
- mt76x02_set_irq_mask(dev, 0, mask);
-}
-
-static inline void mt76x02_irq_disable(struct mt76_dev *dev, u32 mask)
-{
- mt76x02_set_irq_mask(dev, mask, 0);
-}
-
-static inline bool
-mt76x02_wait_for_txrx_idle(struct mt76_dev *dev)
-{
- return __mt76_poll_msec(dev, MT_MAC_STATUS,
- MT_MAC_STATUS_TX | MT_MAC_STATUS_RX,
- 0, 100);
-}
-
-#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2.h
deleted file mode 100644
index d6ccab06a594..000000000000
--- a/drivers/net/wireless/mediatek/mt76/mt76x2.h
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef __MT76x2_H
-#define __MT76x2_H
-
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/spinlock.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/mutex.h>
-#include <linux/bitops.h>
-#include <linux/kfifo.h>
-
-#define MT7662_FIRMWARE "mt7662.bin"
-#define MT7662_ROM_PATCH "mt7662_rom_patch.bin"
-#define MT7662_EEPROM_SIZE 512
-
-#define MT7662U_FIRMWARE "mediatek/mt7662u.bin"
-#define MT7662U_ROM_PATCH "mediatek/mt7662u_rom_patch.bin"
-
-#define MT_MAX_CHAINS 2
-
-#define MT_CALIBRATE_INTERVAL HZ
-
-#include "mt76.h"
-#include "mt76x02_regs.h"
-#include "mt76x2_mac.h"
-#include "mt76x2_dfs.h"
-
-struct mt76x2_rx_freq_cal {
- s8 high_gain[MT_MAX_CHAINS];
- s8 rssi_offset[MT_MAX_CHAINS];
- s8 lna_gain;
- u32 mcu_gain;
-};
-
-struct mt76x2_calibration {
- struct mt76x2_rx_freq_cal rx;
-
- u8 agc_gain_init[MT_MAX_CHAINS];
- u8 agc_gain_cur[MT_MAX_CHAINS];
-
- u16 false_cca;
- s8 avg_rssi_all;
- s8 agc_gain_adjust;
- s8 low_gain;
-
- u8 temp;
-
- bool init_cal_done;
- bool tssi_cal_done;
- bool tssi_comp_pending;
- bool dpd_cal_done;
- bool channel_cal_done;
-};
-
-struct mt76x2_dev {
- struct mt76_dev mt76; /* must be first */
-
- struct mac_address macaddr_list[8];
-
- struct mutex mutex;
-
- u8 txdone_seq;
- DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x02_tx_status);
-
- struct sk_buff *rx_head;
-
- struct tasklet_struct tx_tasklet;
- struct tasklet_struct pre_tbtt_tasklet;
- struct delayed_work cal_work;
- struct delayed_work mac_work;
-
- u32 aggr_stats[32];
-
- struct sk_buff *beacons[8];
- u8 beacon_mask;
- u8 beacon_data_mask;
-
- u8 tbtt_count;
- u16 beacon_int;
-
- struct mt76x2_calibration cal;
-
- s8 target_power;
- s8 target_power_delta[2];
- bool enable_tpc;
-
- u8 coverage_class;
- u8 slottime;
-
- struct mt76x2_dfs_pattern_detector dfs_pd;
-};
-
-static inline bool is_mt7612(struct mt76x2_dev *dev)
-{
- return mt76_chip(&dev->mt76) == 0x7612;
-}
-
-static inline bool mt76x2_channel_silent(struct mt76x2_dev *dev)
-{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
-
- return ((chan->flags & IEEE80211_CHAN_RADAR) &&
- chan->dfs_state != NL80211_DFS_AVAILABLE);
-}
-
-extern const struct ieee80211_ops mt76x2_ops;
-
-struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev);
-int mt76x2_register_device(struct mt76x2_dev *dev);
-void mt76x2_init_debugfs(struct mt76x2_dev *dev);
-void mt76x2_init_device(struct mt76x2_dev *dev);
-
-irqreturn_t mt76x2_irq_handler(int irq, void *dev_instance);
-void mt76x2_phy_power_on(struct mt76x2_dev *dev);
-int mt76x2_init_hardware(struct mt76x2_dev *dev);
-void mt76x2_stop_hardware(struct mt76x2_dev *dev);
-int mt76x2_eeprom_init(struct mt76x2_dev *dev);
-int mt76x2_apply_calibration_data(struct mt76x2_dev *dev, int channel);
-void mt76x2_set_tx_ackto(struct mt76x2_dev *dev);
-
-void mt76x2_phy_set_antenna(struct mt76x2_dev *dev);
-int mt76x2_phy_start(struct mt76x2_dev *dev);
-int mt76x2_phy_set_channel(struct mt76x2_dev *dev,
- struct cfg80211_chan_def *chandef);
-int mt76x2_mac_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain);
-void mt76x2_phy_calibrate(struct work_struct *work);
-void mt76x2_phy_set_txpower(struct mt76x2_dev *dev);
-
-int mt76x2_mcu_init(struct mt76x2_dev *dev);
-int mt76x2_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
- u8 bw_index, bool scan);
-int mt76x2_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
- u8 channel);
-
-void mt76x2_tx_tasklet(unsigned long data);
-void mt76x2_dma_cleanup(struct mt76x2_dev *dev);
-
-void mt76x2_cleanup(struct mt76x2_dev *dev);
-
-void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
- struct sk_buff *skb);
-int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
- struct sk_buff *skb, struct mt76_queue *q,
- struct mt76_wcid *wcid, struct ieee80211_sta *sta,
- u32 *tx_info);
-void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
- struct mt76_queue_entry *e, bool flush);
-void mt76x2_mac_set_tx_protection(struct mt76x2_dev *dev, u32 val);
-
-void mt76x2_pre_tbtt_tasklet(unsigned long arg);
-
-void mt76x2_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
-void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
- struct sk_buff *skb);
-
-void mt76x2_sta_ps(struct mt76_dev *dev, struct ieee80211_sta *sta, bool ps);
-
-void mt76x2_update_channel(struct mt76_dev *mdev);
-
-s8 mt76x2_tx_get_max_txpwr_adj(struct mt76_dev *dev,
- const struct ieee80211_tx_rate *rate);
-s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj);
-void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr);
-
-
-void mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable);
-void mt76x2_init_txpower(struct mt76x2_dev *dev,
- struct ieee80211_supported_band *sband);
-void mt76_write_mac_initvals(struct mt76x2_dev *dev);
-
-int mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
-int mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
-void mt76x2_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
-int mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- u16 queue, const struct ieee80211_tx_queue_params *params);
-void mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq);
-
-void mt76x2_phy_tssi_compensate(struct mt76x2_dev *dev, bool wait);
-void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev,
- enum nl80211_band band);
-void mt76x2_configure_tx_delay(struct mt76x2_dev *dev,
- enum nl80211_band band, u8 bw);
-void mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl);
-void mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper);
-int mt76x2_phy_get_min_avg_rssi(struct mt76x2_dev *dev);
-void mt76x2_apply_gain_adj(struct mt76x2_dev *dev);
-
-#endif
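The header deleted above embedded struct mt76_dev as the first member of the chip-specific device struct (the "must be first" comment), so a pointer to the embedded member and a pointer to the wrapper share the same address: the core can hand out the embedded pointer while chip code recovers the wrapper with container_of(). A reduced sketch of that embedding pattern follows, with illustrative struct and field names rather than the driver's real layout.

/*
 * Sketch of the "common struct embedded first" pattern: because
 * common is the first member, &wrapper and &wrapper->common alias,
 * and container_of() on the common pointer is a zero-offset cast.
 * common_dev/chip_dev are hypothetical stand-ins for mt76_dev and
 * the per-chip device struct.
 */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct common_dev {
	int rxfilter;
};

struct chip_dev {
	struct common_dev common;	/* must be first */
	int chip_specific_state;
};

int main(void)
{
	struct chip_dev dev = { .chip_specific_state = 42 };
	struct common_dev *mdev = &dev.common;	/* what the core code sees */

	/* chip code recovers its own device struct from the common pointer */
	struct chip_dev *back = container_of(mdev, struct chip_dev, common);
	printf("%d\n", back->chip_specific_state);	/* prints 42 */
	return 0;
}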
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig b/drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig
new file mode 100644
index 000000000000..2b414a0e9088
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig
@@ -0,0 +1,20 @@
+config MT76x2_COMMON
+ tristate
+ select MT76x02_LIB
+
+config MT76x2E
+ tristate "MediaTek MT76x2E (PCIe) support"
+ select MT76x2_COMMON
+ depends on MAC80211
+ depends on PCI
+ help
+ This adds support for MT7612/MT7602/MT7662-based wireless PCIe devices.
+
+config MT76x2U
+ tristate "MediaTek MT76x2U (USB) support"
+ select MT76x2_COMMON
+ select MT76x02_USB
+ depends on MAC80211
+ depends on USB
+ help
+ This adds support for MT7612U-based wireless USB dongles.
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile b/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile
new file mode 100644
index 000000000000..b71bb1049170
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile
@@ -0,0 +1,16 @@
+obj-$(CONFIG_MT76x2_COMMON) += mt76x2-common.o
+obj-$(CONFIG_MT76x2E) += mt76x2e.o
+obj-$(CONFIG_MT76x2U) += mt76x2u.o
+
+mt76x2-common-y := \
+ eeprom.o mac.o init.o phy.o debugfs.o mcu.o
+
+mt76x2e-y := \
+ pci.o pci_main.o pci_init.o pci_tx.o \
+ pci_mac.o pci_mcu.o pci_phy.o pci_dfs.o
+
+mt76x2u-y := \
+ usb.o usb_init.o usb_main.o usb_mac.o usb_mcu.o \
+ usb_phy.o
+
+CFLAGS_pci_trace.o := -I$(src)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2/debugfs.c
index ea373bae1522..e8f8ccc0a5ed 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/debugfs.c
@@ -20,7 +20,7 @@
static int
mt76x2_ampdu_stat_read(struct seq_file *file, void *data)
{
- struct mt76x2_dev *dev = file->private;
+ struct mt76x02_dev *dev = file->private;
int i, j;
for (i = 0; i < 4; i++) {
@@ -49,7 +49,7 @@ mt76x2_ampdu_stat_open(struct inode *inode, struct file *f)
static int read_txpower(struct seq_file *file, void *data)
{
- struct mt76x2_dev *dev = dev_get_drvdata(file->private);
+ struct mt76x02_dev *dev = dev_get_drvdata(file->private);
seq_printf(file, "Target power: %d\n", dev->target_power);
@@ -68,9 +68,9 @@ static const struct file_operations fops_ampdu_stat = {
static int
mt76x2_dfs_stat_read(struct seq_file *file, void *data)
{
+ struct mt76x02_dev *dev = file->private;
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
int i;
- struct mt76x2_dev *dev = file->private;
- struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
seq_printf(file, "allocated sequences:\t%d\n",
dfs_pd->seq_stats.seq_pool_len);
@@ -106,7 +106,7 @@ static const struct file_operations fops_dfs_stat = {
static int read_agc(struct seq_file *file, void *data)
{
- struct mt76x2_dev *dev = dev_get_drvdata(file->private);
+ struct mt76x02_dev *dev = dev_get_drvdata(file->private);
seq_printf(file, "avg_rssi: %d\n", dev->cal.avg_rssi_all);
seq_printf(file, "low_gain: %d\n", dev->cal.low_gain);
@@ -116,7 +116,7 @@ static int read_agc(struct seq_file *file, void *data)
return 0;
}
-void mt76x2_init_debugfs(struct mt76x2_dev *dev)
+void mt76x2_init_debugfs(struct mt76x02_dev *dev)
{
struct dentry *dir;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/dfs.h b/drivers/net/wireless/mediatek/mt76/mt76x2/dfs.h
new file mode 100644
index 000000000000..3cb9d1864286
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/dfs.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2016 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __DFS_H
+#define __DFS_H
+
+void mt76x2_dfs_init_params(struct mt76x02_dev *dev);
+void mt76x2_dfs_init_detector(struct mt76x02_dev *dev);
+void mt76x2_dfs_adjust_agc(struct mt76x02_dev *dev);
+void mt76x2_dfs_set_domain(struct mt76x02_dev *dev,
+ enum nl80211_dfs_regions region);
+
+#endif /* __DFS_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
index 136faa4066a5..f39b622d03f4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
@@ -17,12 +17,12 @@
#include <linux/module.h>
#include <asm/unaligned.h>
#include "mt76x2.h"
-#include "mt76x2_eeprom.h"
+#include "eeprom.h"
#define EE_FIELD(_name, _value) [MT_EE_##_name] = (_value) | 1
static int
-mt76x2_eeprom_copy(struct mt76x2_dev *dev, enum mt76x02_eeprom_field field,
+mt76x2_eeprom_copy(struct mt76x02_dev *dev, enum mt76x02_eeprom_field field,
void *dest, int len)
{
if (field + len > dev->mt76.eeprom.size)
@@ -33,7 +33,7 @@ mt76x2_eeprom_copy(struct mt76x2_dev *dev, enum mt76x02_eeprom_field field,
}
static int
-mt76x2_eeprom_get_macaddr(struct mt76x2_dev *dev)
+mt76x2_eeprom_get_macaddr(struct mt76x02_dev *dev)
{
void *src = dev->mt76.eeprom.data + MT_EE_MAC_ADDR;
@@ -42,7 +42,7 @@ mt76x2_eeprom_get_macaddr(struct mt76x2_dev *dev)
}
static bool
-mt76x2_has_cal_free_data(struct mt76x2_dev *dev, u8 *efuse)
+mt76x2_has_cal_free_data(struct mt76x02_dev *dev, u8 *efuse)
{
u16 *efuse_w = (u16 *) efuse;
@@ -68,7 +68,7 @@ mt76x2_has_cal_free_data(struct mt76x2_dev *dev, u8 *efuse)
}
static void
-mt76x2_apply_cal_free_data(struct mt76x2_dev *dev, u8 *efuse)
+mt76x2_apply_cal_free_data(struct mt76x02_dev *dev, u8 *efuse)
{
#define GROUP_5G(_id) \
MT_EE_TX_POWER_0_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id), \
@@ -137,7 +137,7 @@ mt76x2_apply_cal_free_data(struct mt76x2_dev *dev, u8 *efuse)
eeprom[MT_EE_BT_PMUCFG] = val & 0xff;
}
-static int mt76x2_check_eeprom(struct mt76x2_dev *dev)
+static int mt76x2_check_eeprom(struct mt76x02_dev *dev)
{
u16 val = get_unaligned_le16(dev->mt76.eeprom.data);
@@ -155,7 +155,7 @@ static int mt76x2_check_eeprom(struct mt76x2_dev *dev)
}
static int
-mt76x2_eeprom_load(struct mt76x2_dev *dev)
+mt76x2_eeprom_load(struct mt76x02_dev *dev)
{
void *efuse;
bool found;
@@ -177,8 +177,8 @@ mt76x2_eeprom_load(struct mt76x2_dev *dev)
efuse = dev->mt76.otp.data;
- if (mt76x02_get_efuse_data(&dev->mt76, 0, efuse,
- MT7662_EEPROM_SIZE, MT_EE_READ))
+ if (mt76x02_get_efuse_data(dev, 0, efuse, MT7662_EEPROM_SIZE,
+ MT_EE_READ))
goto out;
if (found) {
@@ -197,7 +197,7 @@ out:
}
static void
-mt76x2_set_rx_gain_group(struct mt76x2_dev *dev, u8 val)
+mt76x2_set_rx_gain_group(struct mt76x02_dev *dev, u8 val)
{
s8 *dest = dev->cal.rx.high_gain;
@@ -212,7 +212,7 @@ mt76x2_set_rx_gain_group(struct mt76x2_dev *dev, u8 val)
}
static void
-mt76x2_set_rssi_offset(struct mt76x2_dev *dev, int chain, u8 val)
+mt76x2_set_rssi_offset(struct mt76x02_dev *dev, int chain, u8 val)
{
s8 *dest = dev->cal.rx.rssi_offset;
@@ -241,34 +241,34 @@ mt76x2_get_cal_channel_group(int channel)
}
static u8
-mt76x2_get_5g_rx_gain(struct mt76x2_dev *dev, u8 channel)
+mt76x2_get_5g_rx_gain(struct mt76x02_dev *dev, u8 channel)
{
enum mt76x2_cal_channel_group group;
group = mt76x2_get_cal_channel_group(channel);
switch (group) {
case MT_CH_5G_JAPAN:
- return mt76x02_eeprom_get(&dev->mt76,
+ return mt76x02_eeprom_get(dev,
MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN);
case MT_CH_5G_UNII_1:
- return mt76x02_eeprom_get(&dev->mt76,
+ return mt76x02_eeprom_get(dev,
MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN) >> 8;
case MT_CH_5G_UNII_2:
- return mt76x02_eeprom_get(&dev->mt76,
+ return mt76x02_eeprom_get(dev,
MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN);
case MT_CH_5G_UNII_2E_1:
- return mt76x02_eeprom_get(&dev->mt76,
+ return mt76x02_eeprom_get(dev,
MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN) >> 8;
case MT_CH_5G_UNII_2E_2:
- return mt76x02_eeprom_get(&dev->mt76,
+ return mt76x02_eeprom_get(dev,
MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN);
default:
- return mt76x02_eeprom_get(&dev->mt76,
+ return mt76x02_eeprom_get(dev,
MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN) >> 8;
}
}
-void mt76x2_read_rx_gain(struct mt76x2_dev *dev)
+void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
{
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
int channel = chan->hw_value;
@@ -277,14 +277,13 @@ void mt76x2_read_rx_gain(struct mt76x2_dev *dev)
u16 val;
if (chan->band == NL80211_BAND_2GHZ)
- val = mt76x02_eeprom_get(&dev->mt76,
- MT_EE_RF_2G_RX_HIGH_GAIN) >> 8;
+ val = mt76x02_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN) >> 8;
else
val = mt76x2_get_5g_rx_gain(dev, channel);
mt76x2_set_rx_gain_group(dev, val);
- mt76x02_get_rx_gain(&dev->mt76, chan->band, &val, &lna_2g, lna_5g);
+ mt76x02_get_rx_gain(dev, chan->band, &val, &lna_2g, lna_5g);
mt76x2_set_rssi_offset(dev, 0, val);
mt76x2_set_rssi_offset(dev, 1, val >> 8);
@@ -293,12 +292,12 @@ void mt76x2_read_rx_gain(struct mt76x2_dev *dev)
dev->cal.rx.mcu_gain |= (lna_5g[1] & 0xff) << 16;
dev->cal.rx.mcu_gain |= (lna_5g[2] & 0xff) << 24;
- lna = mt76x02_get_lna_gain(&dev->mt76, &lna_2g, lna_5g, chan);
+ lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan);
dev->cal.rx.lna_gain = mt76x02_sign_extend(lna, 8);
}
EXPORT_SYMBOL_GPL(mt76x2_read_rx_gain);
-void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t,
+void mt76x2_get_rate_power(struct mt76x02_dev *dev, struct mt76_rate_power *t,
struct ieee80211_channel *chan)
{
bool is_5ghz;
@@ -308,53 +307,49 @@ void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t,
memset(t, 0, sizeof(*t));
- val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_CCK);
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_CCK);
t->cck[0] = t->cck[1] = mt76x02_rate_power_val(val);
t->cck[2] = t->cck[3] = mt76x02_rate_power_val(val >> 8);
if (is_5ghz)
- val = mt76x02_eeprom_get(&dev->mt76,
- MT_EE_TX_POWER_OFDM_5G_6M);
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_OFDM_5G_6M);
else
- val = mt76x02_eeprom_get(&dev->mt76,
- MT_EE_TX_POWER_OFDM_2G_6M);
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_OFDM_2G_6M);
t->ofdm[0] = t->ofdm[1] = mt76x02_rate_power_val(val);
t->ofdm[2] = t->ofdm[3] = mt76x02_rate_power_val(val >> 8);
if (is_5ghz)
- val = mt76x02_eeprom_get(&dev->mt76,
- MT_EE_TX_POWER_OFDM_5G_24M);
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_OFDM_5G_24M);
else
- val = mt76x02_eeprom_get(&dev->mt76,
- MT_EE_TX_POWER_OFDM_2G_24M);
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_OFDM_2G_24M);
t->ofdm[4] = t->ofdm[5] = mt76x02_rate_power_val(val);
t->ofdm[6] = t->ofdm[7] = mt76x02_rate_power_val(val >> 8);
- val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_HT_MCS0);
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS0);
t->ht[0] = t->ht[1] = mt76x02_rate_power_val(val);
t->ht[2] = t->ht[3] = mt76x02_rate_power_val(val >> 8);
- val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_HT_MCS4);
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS4);
t->ht[4] = t->ht[5] = mt76x02_rate_power_val(val);
t->ht[6] = t->ht[7] = mt76x02_rate_power_val(val >> 8);
- val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_HT_MCS8);
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS8);
t->ht[8] = t->ht[9] = mt76x02_rate_power_val(val);
t->ht[10] = t->ht[11] = mt76x02_rate_power_val(val >> 8);
- val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_HT_MCS12);
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS12);
t->ht[12] = t->ht[13] = mt76x02_rate_power_val(val);
t->ht[14] = t->ht[15] = mt76x02_rate_power_val(val >> 8);
- val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_VHT_MCS0);
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS0);
t->vht[0] = t->vht[1] = mt76x02_rate_power_val(val);
t->vht[2] = t->vht[3] = mt76x02_rate_power_val(val >> 8);
- val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_VHT_MCS4);
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS4);
t->vht[4] = t->vht[5] = mt76x02_rate_power_val(val);
t->vht[6] = t->vht[7] = mt76x02_rate_power_val(val >> 8);
- val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_VHT_MCS8);
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS8);
if (!is_5ghz)
val >>= 8;
t->vht[8] = t->vht[9] = mt76x02_rate_power_val(val >> 8);
@@ -366,8 +361,10 @@ void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t,
EXPORT_SYMBOL_GPL(mt76x2_get_rate_power);
static void
-mt76x2_get_power_info_2g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t,
- struct ieee80211_channel *chan, int chain, int offset)
+mt76x2_get_power_info_2g(struct mt76x02_dev *dev,
+ struct mt76x2_tx_power_info *t,
+ struct ieee80211_channel *chan,
+ int chain, int offset)
{
int channel = chan->hw_value;
int delta_idx;
@@ -388,13 +385,15 @@ mt76x2_get_power_info_2g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t,
t->chain[chain].target_power = data[2];
t->chain[chain].delta = mt76x02_sign_extend_optional(data[delta_idx], 7);
- val = mt76x02_eeprom_get(&dev->mt76, MT_EE_RF_2G_TSSI_OFF_TXPOWER);
+ val = mt76x02_eeprom_get(dev, MT_EE_RF_2G_TSSI_OFF_TXPOWER);
t->target_power = val >> 8;
}
static void
-mt76x2_get_power_info_5g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t,
- struct ieee80211_channel *chan, int chain, int offset)
+mt76x2_get_power_info_5g(struct mt76x02_dev *dev,
+ struct mt76x2_tx_power_info *t,
+ struct ieee80211_channel *chan,
+ int chain, int offset)
{
int channel = chan->hw_value;
enum mt76x2_cal_channel_group group;
@@ -437,11 +436,11 @@ mt76x2_get_power_info_5g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t,
t->chain[chain].target_power = data[2];
t->chain[chain].delta = mt76x02_sign_extend_optional(data[delta_idx], 7);
- val = mt76x02_eeprom_get(&dev->mt76, MT_EE_RF_2G_RX_HIGH_GAIN);
+ val = mt76x02_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN);
t->target_power = val & 0xff;
}
-void mt76x2_get_power_info(struct mt76x2_dev *dev,
+void mt76x2_get_power_info(struct mt76x02_dev *dev,
struct mt76x2_tx_power_info *t,
struct ieee80211_channel *chan)
{
@@ -449,8 +448,8 @@ void mt76x2_get_power_info(struct mt76x2_dev *dev,
memset(t, 0, sizeof(*t));
- bw40 = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_DELTA_BW40);
- bw80 = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_DELTA_BW80);
+ bw40 = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW40);
+ bw80 = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW80);
if (chan->band == NL80211_BAND_5GHZ) {
bw40 >>= 8;
@@ -465,7 +464,7 @@ void mt76x2_get_power_info(struct mt76x2_dev *dev,
MT_EE_TX_POWER_1_START_2G);
}
- if (mt76x02_tssi_enabled(&dev->mt76) ||
+ if (mt76x2_tssi_enabled(dev) ||
!mt76x02_field_valid(t->target_power))
t->target_power = t->chain[0].target_power;
@@ -474,7 +473,7 @@ void mt76x2_get_power_info(struct mt76x2_dev *dev,
}
EXPORT_SYMBOL_GPL(mt76x2_get_power_info);
-int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t)
+int mt76x2_get_temp_comp(struct mt76x02_dev *dev, struct mt76x2_temp_comp *t)
{
enum nl80211_band band = dev->mt76.chandef.chan->band;
u16 val, slope;
@@ -482,23 +481,20 @@ int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t)
memset(t, 0, sizeof(*t));
- if (!mt76x02_temp_tx_alc_enabled(&dev->mt76))
+ if (!mt76x2_temp_tx_alc_enabled(dev))
return -EINVAL;
- if (!mt76x02_ext_pa_enabled(&dev->mt76, band))
+ if (!mt76x02_ext_pa_enabled(dev, band))
return -EINVAL;
- val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_EXT_PA_5G) >> 8;
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G) >> 8;
t->temp_25_ref = val & 0x7f;
if (band == NL80211_BAND_5GHZ) {
- slope = mt76x02_eeprom_get(&dev->mt76,
- MT_EE_RF_TEMP_COMP_SLOPE_5G);
- bounds = mt76x02_eeprom_get(&dev->mt76,
- MT_EE_TX_POWER_EXT_PA_5G);
+ slope = mt76x02_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_5G);
+ bounds = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G);
} else {
- slope = mt76x02_eeprom_get(&dev->mt76,
- MT_EE_RF_TEMP_COMP_SLOPE_2G);
- bounds = mt76x02_eeprom_get(&dev->mt76,
+ slope = mt76x02_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_2G);
+ bounds = mt76x02_eeprom_get(dev,
MT_EE_TX_POWER_DELTA_BW80) >> 8;
}
@@ -511,7 +507,7 @@ int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t)
}
EXPORT_SYMBOL_GPL(mt76x2_get_temp_comp);
-int mt76x2_eeprom_init(struct mt76x2_dev *dev)
+int mt76x2_eeprom_init(struct mt76x02_dev *dev)
{
int ret;
@@ -519,7 +515,7 @@ int mt76x2_eeprom_init(struct mt76x2_dev *dev)
if (ret)
return ret;
- mt76x02_eeprom_parse_hw_cap(&dev->mt76);
+ mt76x02_eeprom_parse_hw_cap(dev);
mt76x2_eeprom_get_macaddr(dev);
mt76_eeprom_override(&dev->mt76);
dev->mt76.macaddr[0] &= ~BIT(1);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h
index c2e99bbeac3b..9e735524d367 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h
@@ -17,7 +17,7 @@
#ifndef __MT76x2_EEPROM_H
#define __MT76x2_EEPROM_H
-#include "mt76x02_eeprom.h"
+#include "../mt76x02_eeprom.h"
enum mt76x2_cal_channel_group {
MT_CH_5G_JAPAN,
@@ -51,18 +51,18 @@ struct mt76x2_temp_comp {
unsigned int low_slope; /* J / dB */
};
-void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t,
+void mt76x2_get_rate_power(struct mt76x02_dev *dev, struct mt76_rate_power *t,
struct ieee80211_channel *chan);
-void mt76x2_get_power_info(struct mt76x2_dev *dev,
+void mt76x2_get_power_info(struct mt76x02_dev *dev,
struct mt76x2_tx_power_info *t,
struct ieee80211_channel *chan);
-int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t);
-void mt76x2_read_rx_gain(struct mt76x2_dev *dev);
+int mt76x2_get_temp_comp(struct mt76x02_dev *dev, struct mt76x2_temp_comp *t);
+void mt76x2_read_rx_gain(struct mt76x02_dev *dev);
static inline bool
-mt76x2_has_ext_lna(struct mt76x2_dev *dev)
+mt76x2_has_ext_lna(struct mt76x02_dev *dev)
{
- u32 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_1);
+ u32 val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ)
return val & MT_EE_NIC_CONF_1_LNA_EXT_2G;
@@ -70,4 +70,25 @@ mt76x2_has_ext_lna(struct mt76x2_dev *dev)
return val & MT_EE_NIC_CONF_1_LNA_EXT_5G;
}
+static inline bool
+mt76x2_temp_tx_alc_enabled(struct mt76x02_dev *dev)
+{
+ u16 val;
+
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G);
+ if (!(val & BIT(15)))
+ return false;
+
+ return mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) &
+ MT_EE_NIC_CONF_1_TEMP_TX_ALC;
+}
+
+static inline bool
+mt76x2_tssi_enabled(struct mt76x02_dev *dev)
+{
+ return !mt76x2_temp_tx_alc_enabled(dev) &&
+ (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) &
+ MT_EE_NIC_CONF_1_TX_ALC_EN);
+}
+
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
index f4c4cde9301e..3c73fdeaf30f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
@@ -16,11 +16,11 @@
*/
#include "mt76x2.h"
-#include "mt76x2_eeprom.h"
-#include "mt76x02_phy.h"
+#include "eeprom.h"
+#include "../mt76x02_phy.h"
static void
-mt76x2_set_wlan_state(struct mt76x2_dev *dev, bool enable)
+mt76x2_set_wlan_state(struct mt76x02_dev *dev, bool enable)
{
u32 val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
@@ -35,7 +35,7 @@ mt76x2_set_wlan_state(struct mt76x2_dev *dev, bool enable)
udelay(20);
}
-void mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable)
+void mt76x2_reset_wlan(struct mt76x02_dev *dev, bool enable)
{
u32 val;
@@ -62,7 +62,7 @@ out:
}
EXPORT_SYMBOL_GPL(mt76x2_reset_wlan);
-void mt76_write_mac_initvals(struct mt76x2_dev *dev)
+void mt76_write_mac_initvals(struct mt76x02_dev *dev)
{
#define DEFAULT_PROT_CFG_CCK \
(FIELD_PREP(MT_PROT_CFG_RATE, 0x3) | \
@@ -158,7 +158,7 @@ void mt76_write_mac_initvals(struct mt76x2_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76_write_mac_initvals);
-void mt76x2_init_device(struct mt76x2_dev *dev)
+void mt76x2_init_device(struct mt76x02_dev *dev)
{
struct ieee80211_hw *hw = mt76_hw(dev);
@@ -167,6 +167,9 @@ void mt76x2_init_device(struct mt76x2_dev *dev)
hw->max_report_rates = 7;
hw->max_rate_tries = 1;
hw->extra_tx_headroom = 2;
+ if (mt76_is_usb(dev))
+ hw->extra_tx_headroom += sizeof(struct mt76x02_txwi) +
+ MT_DMA_HDR_LEN;
hw->sta_data_size = sizeof(struct mt76x02_sta);
hw->vif_data_size = sizeof(struct mt76x02_vif);
@@ -187,7 +190,7 @@ void mt76x2_init_device(struct mt76x2_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76x2_init_device);
-void mt76x2_init_txpower(struct mt76x2_dev *dev,
+void mt76x2_init_txpower(struct mt76x02_dev *dev,
struct ieee80211_supported_band *sband)
{
struct ieee80211_channel *chan;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.c
index 3e667d8c0ee7..e25905c91ee2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_common.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.c
@@ -16,27 +16,39 @@
*/
#include "mt76x2.h"
-#include "mt76x02_mac.h"
-void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
- struct sk_buff *skb)
+void mt76x2_mac_stop(struct mt76x02_dev *dev, bool force)
{
- struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
- void *rxwi = skb->data;
-
- if (q == MT_RXQ_MCU) {
- /* this is used just by mmio code */
- skb_queue_tail(&mdev->mmio.mcu.res_q, skb);
- wake_up(&mdev->mmio.mcu.wait);
- return;
+ bool stopped = false;
+ u32 rts_cfg;
+ int i;
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
+
+ rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
+ mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT);
+
+ /* Wait for MAC to become idle */
+ for (i = 0; i < 300; i++) {
+ if ((mt76_rr(dev, MT_MAC_STATUS) &
+ (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX)) ||
+ mt76_rr(dev, MT_BBP(IBI, 12))) {
+ udelay(1);
+ continue;
+ }
+
+ stopped = true;
+ break;
}
- skb_pull(skb, sizeof(struct mt76x02_rxwi));
- if (mt76x2_mac_process_rx(dev, skb, rxwi)) {
- dev_kfree_skb(skb);
- return;
+ if (force && !stopped) {
+ mt76_set(dev, MT_BBP(CORE, 4), BIT(1));
+ mt76_clear(dev, MT_BBP(CORE, 4), BIT(1));
+
+ mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
+ mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
}
- mt76_rx(&dev->mt76, q, skb);
+ mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg);
}
-EXPORT_SYMBOL_GPL(mt76x2_queue_rx_skb);
+EXPORT_SYMBOL_GPL(mt76x2_mac_stop);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h
index 7e5eccda47f8..a31bd49ae6cb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h
@@ -14,26 +14,24 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#ifndef __MT76x2_MAC_H
+#define __MT76x2_MAC_H
+
#include "mt76x2.h"
-#include "mt76x02_dma.h"
-#include "mt76x02_util.h"
-void mt76x2_tx_tasklet(unsigned long data)
-{
- struct mt76x2_dev *dev = (struct mt76x2_dev *) data;
- int i;
+struct mt76x02_dev;
+struct mt76x2_sta;
+struct mt76x02_vif;
- mt76x2_mac_process_tx_status_fifo(dev);
+int mt76x2_mac_start(struct mt76x02_dev *dev);
+void mt76x2_mac_stop(struct mt76x02_dev *dev, bool force);
+void mt76x2_mac_resume(struct mt76x02_dev *dev);
+void mt76x2_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr);
- for (i = MT_TXQ_MCU; i >= 0; i--)
- mt76_queue_tx_cleanup(dev, i, false);
+int mt76x2_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
+ struct sk_buff *skb);
+void mt76x2_mac_set_beacon_enable(struct mt76x02_dev *dev, u8 vif_idx, bool val);
- mt76x2_mac_poll_tx_status(dev, false);
- mt76x02_irq_enable(&dev->mt76, MT_INT_TX_DONE_ALL);
-}
+void mt76x2_mac_work(struct work_struct *work);
-void mt76x2_dma_cleanup(struct mt76x2_dev *dev)
-{
- tasklet_kill(&dev->tx_tasklet);
- mt76_dma_cleanup(&dev->mt76);
-}
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c
index eff483333183..88bd62cfbdf9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu_common.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c
@@ -20,11 +20,10 @@
#include <linux/delay.h>
#include "mt76x2.h"
-#include "mt76x2_mcu.h"
-#include "mt76x2_eeprom.h"
-#include "mt76x02_dma.h"
+#include "mcu.h"
+#include "eeprom.h"
-int mt76x2_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
+int mt76x2_mcu_set_channel(struct mt76x02_dev *dev, u8 channel, u8 bw,
u8 bw_index, bool scan)
{
struct sk_buff *skb;
@@ -57,10 +56,9 @@ int mt76x2_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
}
EXPORT_SYMBOL_GPL(mt76x2_mcu_set_channel);
-int mt76x2_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
+int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level,
u8 channel)
{
- struct mt76_dev *mdev = &dev->mt76;
struct sk_buff *skb;
struct {
u8 cr_mode;
@@ -77,8 +75,8 @@ int mt76x2_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
u32 val;
val = BIT(31);
- val |= (mt76x02_eeprom_get(mdev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff;
- val |= (mt76x02_eeprom_get(mdev, MT_EE_NIC_CONF_1) << 8) & 0xff00;
+ val |= (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff;
+ val |= (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) << 8) & 0xff00;
msg.cfg = cpu_to_le32(val);
/* first set the channel without the extension channel info */
@@ -87,7 +85,7 @@ int mt76x2_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
}
EXPORT_SYMBOL_GPL(mt76x2_mcu_load_cr);
-int mt76x2_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
+int mt76x2_mcu_init_gain(struct mt76x02_dev *dev, u8 channel, u32 gain,
bool force)
{
struct sk_buff *skb;
@@ -107,7 +105,7 @@ int mt76x2_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
}
EXPORT_SYMBOL_GPL(mt76x2_mcu_init_gain);
-int mt76x2_mcu_tssi_comp(struct mt76x2_dev *dev,
+int mt76x2_mcu_tssi_comp(struct mt76x02_dev *dev,
struct mt76x2_tssi_comp *tssi_data)
{
struct sk_buff *skb;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h
index fa72d5a5ecad..acfa2b570c7c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h
@@ -17,7 +17,7 @@
#ifndef __MT76x2_MCU_H
#define __MT76x2_MCU_H
-#include "mt76x02_mcu.h"
+#include "../mt76x02_mcu.h"
/* Register definitions */
#define MT_MCU_CPU_CTL 0x0704
@@ -94,8 +94,8 @@ struct mt76x2_tssi_comp {
u8 offset1;
} __packed __aligned(4);
-int mt76x2_mcu_tssi_comp(struct mt76x2_dev *dev, struct mt76x2_tssi_comp *tssi_data);
-int mt76x2_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
+int mt76x2_mcu_tssi_comp(struct mt76x02_dev *dev, struct mt76x2_tssi_comp *tssi_data);
+int mt76x2_mcu_init_gain(struct mt76x02_dev *dev, u8 channel, u32 gain,
bool force);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
new file mode 100644
index 000000000000..ab93125f46de
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_H
+#define __MT76x2_H
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/bitops.h>
+
+#define MT7662_FIRMWARE "mt7662.bin"
+#define MT7662_ROM_PATCH "mt7662_rom_patch.bin"
+#define MT7662_EEPROM_SIZE 512
+
+#define MT7662U_FIRMWARE "mediatek/mt7662u.bin"
+#define MT7662U_ROM_PATCH "mediatek/mt7662u_rom_patch.bin"
+
+#define MT_CALIBRATE_INTERVAL HZ
+
+#include "../mt76x02.h"
+#include "mac.h"
+#include "dfs.h"
+
+static inline bool is_mt7612(struct mt76x02_dev *dev)
+{
+ return mt76_chip(&dev->mt76) == 0x7612;
+}
+
+static inline bool mt76x2_channel_silent(struct mt76x02_dev *dev)
+{
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+
+ return ((chan->flags & IEEE80211_CHAN_RADAR) &&
+ chan->dfs_state != NL80211_DFS_AVAILABLE);
+}
+
+extern const struct ieee80211_ops mt76x2_ops;
+
+struct mt76x02_dev *mt76x2_alloc_device(struct device *pdev);
+int mt76x2_register_device(struct mt76x02_dev *dev);
+void mt76x2_init_debugfs(struct mt76x02_dev *dev);
+void mt76x2_init_device(struct mt76x02_dev *dev);
+
+void mt76x2_phy_power_on(struct mt76x02_dev *dev);
+int mt76x2_init_hardware(struct mt76x02_dev *dev);
+void mt76x2_stop_hardware(struct mt76x02_dev *dev);
+int mt76x2_eeprom_init(struct mt76x02_dev *dev);
+int mt76x2_apply_calibration_data(struct mt76x02_dev *dev, int channel);
+void mt76x2_set_tx_ackto(struct mt76x02_dev *dev);
+
+void mt76x2_phy_set_antenna(struct mt76x02_dev *dev);
+int mt76x2_phy_start(struct mt76x02_dev *dev);
+int mt76x2_phy_set_channel(struct mt76x02_dev *dev,
+ struct cfg80211_chan_def *chandef);
+void mt76x2_phy_calibrate(struct work_struct *work);
+void mt76x2_phy_set_txpower(struct mt76x02_dev *dev);
+
+int mt76x2_mcu_init(struct mt76x02_dev *dev);
+int mt76x2_mcu_set_channel(struct mt76x02_dev *dev, u8 channel, u8 bw,
+ u8 bw_index, bool scan);
+int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level,
+ u8 channel);
+
+void mt76x2_cleanup(struct mt76x02_dev *dev);
+
+void mt76x2_mac_set_tx_protection(struct mt76x02_dev *dev, u32 val);
+
+void mt76x2_pre_tbtt_tasklet(unsigned long arg);
+
+void mt76x2_sta_ps(struct mt76_dev *dev, struct ieee80211_sta *sta, bool ps);
+
+void mt76x2_update_channel(struct mt76_dev *mdev);
+
+void mt76x2_reset_wlan(struct mt76x02_dev *dev, bool enable);
+void mt76x2_init_txpower(struct mt76x02_dev *dev,
+ struct ieee80211_supported_band *sband);
+void mt76_write_mac_initvals(struct mt76x02_dev *dev);
+
+void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev, bool wait);
+void mt76x2_phy_set_txpower_regs(struct mt76x02_dev *dev,
+ enum nl80211_band band);
+void mt76x2_configure_tx_delay(struct mt76x02_dev *dev,
+ enum nl80211_band band, u8 bw);
+void mt76x2_apply_gain_adj(struct mt76x02_dev *dev);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h
index 5d2ebdf42c63..6e932b5010ef 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2u.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h
@@ -20,8 +20,7 @@
#include <linux/device.h>
#include "mt76x2.h"
-#include "mt76x2_mcu.h"
-#include "mt76x02_dma.h"
+#include "mcu.h"
#define MT7612U_EEPROM_SIZE 512
@@ -30,35 +29,31 @@
extern const struct ieee80211_ops mt76x2u_ops;
-struct mt76x2_dev *mt76x2u_alloc_device(struct device *pdev);
-int mt76x2u_register_device(struct mt76x2_dev *dev);
-int mt76x2u_init_hardware(struct mt76x2_dev *dev);
-void mt76x2u_cleanup(struct mt76x2_dev *dev);
-void mt76x2u_stop_hw(struct mt76x2_dev *dev);
+struct mt76x02_dev *mt76x2u_alloc_device(struct device *pdev);
+int mt76x2u_register_device(struct mt76x02_dev *dev);
+int mt76x2u_init_hardware(struct mt76x02_dev *dev);
+void mt76x2u_cleanup(struct mt76x02_dev *dev);
+void mt76x2u_stop_hw(struct mt76x02_dev *dev);
-int mt76x2u_mac_reset(struct mt76x2_dev *dev);
-void mt76x2u_mac_resume(struct mt76x2_dev *dev);
-int mt76x2u_mac_start(struct mt76x2_dev *dev);
-int mt76x2u_mac_stop(struct mt76x2_dev *dev);
+int mt76x2u_mac_reset(struct mt76x02_dev *dev);
+void mt76x2u_mac_resume(struct mt76x02_dev *dev);
+int mt76x2u_mac_start(struct mt76x02_dev *dev);
+int mt76x2u_mac_stop(struct mt76x02_dev *dev);
-int mt76x2u_phy_set_channel(struct mt76x2_dev *dev,
+int mt76x2u_phy_set_channel(struct mt76x02_dev *dev,
struct cfg80211_chan_def *chandef);
void mt76x2u_phy_calibrate(struct work_struct *work);
-void mt76x2u_phy_channel_calibrate(struct mt76x2_dev *dev);
+void mt76x2u_phy_channel_calibrate(struct mt76x02_dev *dev);
void mt76x2u_mcu_complete_urb(struct urb *urb);
-int mt76x2u_mcu_set_dynamic_vga(struct mt76x2_dev *dev, u8 channel, bool ap,
+int mt76x2u_mcu_set_dynamic_vga(struct mt76x02_dev *dev, u8 channel, bool ap,
bool ext, int rssi, u32 false_cca);
-int mt76x2u_mcu_init(struct mt76x2_dev *dev);
-int mt76x2u_mcu_fw_init(struct mt76x2_dev *dev);
+int mt76x2u_mcu_init(struct mt76x02_dev *dev);
+int mt76x2u_mcu_fw_init(struct mt76x02_dev *dev);
-int mt76x2u_alloc_queues(struct mt76x2_dev *dev);
-void mt76x2u_queues_deinit(struct mt76x2_dev *dev);
-void mt76x2u_stop_queues(struct mt76x2_dev *dev);
-int mt76x2u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
- struct sk_buff *skb, struct mt76_queue *q,
- struct mt76_wcid *wcid, struct ieee80211_sta *sta,
- u32 *tx_info);
+int mt76x2u_alloc_queues(struct mt76x02_dev *dev);
+void mt76x2u_queues_deinit(struct mt76x02_dev *dev);
+void mt76x2u_stop_queues(struct mt76x02_dev *dev);
int mt76x2u_skb_dma_info(struct sk_buff *skb, enum dma_msg_port port,
u32 flags);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
index 26cfda24ce08..92432fe97312 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
@@ -19,7 +19,6 @@
#include <linux/pci.h>
#include "mt76x2.h"
-#include "mt76x2_trace.h"
static const struct pci_device_id mt76pci_device_table[] = {
{ PCI_DEVICE(0x14c3, 0x7662) },
@@ -31,7 +30,7 @@ static const struct pci_device_id mt76pci_device_table[] = {
static int
mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- struct mt76x2_dev *dev;
+ struct mt76x02_dev *dev;
int ret;
ret = pcim_enable_device(pdev);
@@ -58,7 +57,7 @@ mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION);
dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev);
- ret = devm_request_irq(dev->mt76.dev, pdev->irq, mt76x2_irq_handler,
+ ret = devm_request_irq(dev->mt76.dev, pdev->irq, mt76x02_irq_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (ret)
goto error;
@@ -89,7 +88,7 @@ static void
mt76pci_remove(struct pci_dev *pdev)
{
struct mt76_dev *mdev = pci_get_drvdata(pdev);
- struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
mt76_unregister_device(mdev);
mt76x2_cleanup(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_dfs.c
index 8cfa3a063bda..b56febae8945 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_dfs.c
@@ -15,7 +15,6 @@
*/
#include "mt76x2.h"
-#include "mt76x02_util.h"
#define RADAR_SPEC(m, len, el, eh, wl, wh, \
w_tolerance, tl, th, t_tolerance, \
@@ -37,7 +36,7 @@
.pwr_jmp = power_jmp \
}
-static const struct mt76x2_radar_specs etsi_radar_specs[] = {
+static const struct mt76x02_radar_specs etsi_radar_specs[] = {
/* 20MHz */
RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0,
0x7fffffff, 0x155cc0, 0x19cc),
@@ -67,7 +66,7 @@ static const struct mt76x2_radar_specs etsi_radar_specs[] = {
0x7fffffff, 0x2191c0, 0x15cc)
};
-static const struct mt76x2_radar_specs fcc_radar_specs[] = {
+static const struct mt76x02_radar_specs fcc_radar_specs[] = {
/* 20MHz */
RADAR_SPEC(0, 8, 2, 12, 106, 150, 5, 2900, 80100, 5, 0,
0x7fffffff, 0xfe808, 0x13dc),
@@ -97,7 +96,7 @@ static const struct mt76x2_radar_specs fcc_radar_specs[] = {
0x3938700, 0x57bcf00, 0x1289)
};
-static const struct mt76x2_radar_specs jp_w56_radar_specs[] = {
+static const struct mt76x02_radar_specs jp_w56_radar_specs[] = {
/* 20MHz */
RADAR_SPEC(0, 8, 2, 7, 106, 150, 5, 2900, 80100, 5, 0,
0x7fffffff, 0x14c080, 0x13dc),
@@ -127,7 +126,7 @@ static const struct mt76x2_radar_specs jp_w56_radar_specs[] = {
0x3938700, 0X57bcf00, 0x1289)
};
-static const struct mt76x2_radar_specs jp_w53_radar_specs[] = {
+static const struct mt76x02_radar_specs jp_w53_radar_specs[] = {
/* 20MHz */
RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0,
0x7fffffff, 0x14c080, 0x16cc),
@@ -151,8 +150,9 @@ static const struct mt76x2_radar_specs jp_w53_radar_specs[] = {
{ 0 }
};
-static void mt76x2_dfs_set_capture_mode_ctrl(struct mt76x2_dev *dev,
- u8 enable)
+static void
+mt76x2_dfs_set_capture_mode_ctrl(struct mt76x02_dev *dev,
+ u8 enable)
{
u32 data;
@@ -160,10 +160,10 @@ static void mt76x2_dfs_set_capture_mode_ctrl(struct mt76x2_dev *dev,
mt76_wr(dev, MT_BBP(DFS, 36), data);
}
-static void mt76x2_dfs_seq_pool_put(struct mt76x2_dev *dev,
- struct mt76x2_dfs_sequence *seq)
+static void mt76x2_dfs_seq_pool_put(struct mt76x02_dev *dev,
+ struct mt76x02_dfs_sequence *seq)
{
- struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
list_add(&seq->head, &dfs_pd->seq_pool);
@@ -171,17 +171,17 @@ static void mt76x2_dfs_seq_pool_put(struct mt76x2_dev *dev,
dfs_pd->seq_stats.seq_len--;
}
-static
-struct mt76x2_dfs_sequence *mt76x2_dfs_seq_pool_get(struct mt76x2_dev *dev)
+static struct mt76x02_dfs_sequence *
+mt76x2_dfs_seq_pool_get(struct mt76x02_dev *dev)
{
- struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
- struct mt76x2_dfs_sequence *seq;
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dfs_sequence *seq;
if (list_empty(&dfs_pd->seq_pool)) {
seq = devm_kzalloc(dev->mt76.dev, sizeof(*seq), GFP_ATOMIC);
} else {
seq = list_first_entry(&dfs_pd->seq_pool,
- struct mt76x2_dfs_sequence,
+ struct mt76x02_dfs_sequence,
head);
list_del(&seq->head);
dfs_pd->seq_stats.seq_pool_len--;
@@ -214,10 +214,10 @@ static int mt76x2_dfs_get_multiple(int val, int frac, int margin)
return factor;
}
-static void mt76x2_dfs_detector_reset(struct mt76x2_dev *dev)
+static void mt76x2_dfs_detector_reset(struct mt76x02_dev *dev)
{
- struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
- struct mt76x2_dfs_sequence *seq, *tmp_seq;
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dfs_sequence *seq, *tmp_seq;
int i;
/* reset hw detector */
@@ -235,11 +235,11 @@ static void mt76x2_dfs_detector_reset(struct mt76x2_dev *dev)
}
}
-static bool mt76x2_dfs_check_chirp(struct mt76x2_dev *dev)
+static bool mt76x2_dfs_check_chirp(struct mt76x02_dev *dev)
{
bool ret = false;
u32 current_ts, delta_ts;
- struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
current_ts = mt76_rr(dev, MT_PBF_LIFE_TIMER);
delta_ts = current_ts - dfs_pd->chirp_pulse_ts;
@@ -256,8 +256,8 @@ static bool mt76x2_dfs_check_chirp(struct mt76x2_dev *dev)
return ret;
}
-static void mt76x2_dfs_get_hw_pulse(struct mt76x2_dev *dev,
- struct mt76x2_dfs_hw_pulse *pulse)
+static void mt76x2_dfs_get_hw_pulse(struct mt76x02_dev *dev,
+ struct mt76x02_dfs_hw_pulse *pulse)
{
u32 data;
@@ -276,8 +276,8 @@ static void mt76x2_dfs_get_hw_pulse(struct mt76x2_dev *dev,
pulse->burst = mt76_rr(dev, MT_BBP(DFS, 22));
}
-static bool mt76x2_dfs_check_hw_pulse(struct mt76x2_dev *dev,
- struct mt76x2_dfs_hw_pulse *pulse)
+static bool mt76x2_dfs_check_hw_pulse(struct mt76x02_dev *dev,
+ struct mt76x02_dfs_hw_pulse *pulse)
{
bool ret = false;
@@ -371,8 +371,8 @@ static bool mt76x2_dfs_check_hw_pulse(struct mt76x2_dev *dev,
return ret;
}
-static bool mt76x2_dfs_fetch_event(struct mt76x2_dev *dev,
- struct mt76x2_dfs_event *event)
+static bool mt76x2_dfs_fetch_event(struct mt76x02_dev *dev,
+ struct mt76x02_dfs_event *event)
{
u32 data;
@@ -398,12 +398,12 @@ static bool mt76x2_dfs_fetch_event(struct mt76x2_dev *dev,
return true;
}
-static bool mt76x2_dfs_check_event(struct mt76x2_dev *dev,
- struct mt76x2_dfs_event *event)
+static bool mt76x2_dfs_check_event(struct mt76x02_dev *dev,
+ struct mt76x02_dfs_event *event)
{
if (event->engine == 2) {
- struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
- struct mt76x2_dfs_event_rb *event_buff = &dfs_pd->event_rb[1];
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dfs_event_rb *event_buff = &dfs_pd->event_rb[1];
u16 last_event_idx;
u32 delta_ts;
@@ -417,11 +417,11 @@ static bool mt76x2_dfs_check_event(struct mt76x2_dev *dev,
return true;
}
-static void mt76x2_dfs_queue_event(struct mt76x2_dev *dev,
- struct mt76x2_dfs_event *event)
+static void mt76x2_dfs_queue_event(struct mt76x02_dev *dev,
+ struct mt76x02_dfs_event *event)
{
- struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
- struct mt76x2_dfs_event_rb *event_buff;
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dfs_event_rb *event_buff;
/* add radar event to ring buffer */
event_buff = event->engine == 2 ? &dfs_pd->event_rb[1]
@@ -435,16 +435,16 @@ static void mt76x2_dfs_queue_event(struct mt76x2_dev *dev,
MT_DFS_EVENT_BUFLEN);
}
-static int mt76x2_dfs_create_sequence(struct mt76x2_dev *dev,
- struct mt76x2_dfs_event *event,
+static int mt76x2_dfs_create_sequence(struct mt76x02_dev *dev,
+ struct mt76x02_dfs_event *event,
u16 cur_len)
{
- struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
- struct mt76x2_dfs_sw_detector_params *sw_params;
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dfs_sw_detector_params *sw_params;
u32 width_delta, with_sum, factor, cur_pri;
- struct mt76x2_dfs_sequence seq, *seq_p;
- struct mt76x2_dfs_event_rb *event_rb;
- struct mt76x2_dfs_event *cur_event;
+ struct mt76x02_dfs_sequence seq, *seq_p;
+ struct mt76x02_dfs_event_rb *event_rb;
+ struct mt76x02_dfs_event *cur_event;
int i, j, end, pri;
event_rb = event->engine == 2 ? &dfs_pd->event_rb[1]
@@ -522,12 +522,12 @@ next:
return 0;
}
-static u16 mt76x2_dfs_add_event_to_sequence(struct mt76x2_dev *dev,
- struct mt76x2_dfs_event *event)
+static u16 mt76x2_dfs_add_event_to_sequence(struct mt76x02_dev *dev,
+ struct mt76x02_dfs_event *event)
{
- struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
- struct mt76x2_dfs_sw_detector_params *sw_params;
- struct mt76x2_dfs_sequence *seq, *tmp_seq;
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dfs_sw_detector_params *sw_params;
+ struct mt76x02_dfs_sequence *seq, *tmp_seq;
u16 max_seq_len = 0;
u32 factor, pri;
@@ -554,10 +554,10 @@ static u16 mt76x2_dfs_add_event_to_sequence(struct mt76x2_dev *dev,
return max_seq_len;
}
-static bool mt76x2_dfs_check_detection(struct mt76x2_dev *dev)
+static bool mt76x2_dfs_check_detection(struct mt76x02_dev *dev)
{
- struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
- struct mt76x2_dfs_sequence *seq;
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dfs_sequence *seq;
if (list_empty(&dfs_pd->sequences))
return false;
@@ -571,10 +571,10 @@ static bool mt76x2_dfs_check_detection(struct mt76x2_dev *dev)
return false;
}
-static void mt76x2_dfs_add_events(struct mt76x2_dev *dev)
+static void mt76x2_dfs_add_events(struct mt76x02_dev *dev)
{
- struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
- struct mt76x2_dfs_event event;
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dfs_event event;
int i, seq_len;
/* disable debug mode */
@@ -598,11 +598,11 @@ static void mt76x2_dfs_add_events(struct mt76x2_dev *dev)
mt76x2_dfs_set_capture_mode_ctrl(dev, true);
}
-static void mt76x2_dfs_check_event_window(struct mt76x2_dev *dev)
+static void mt76x2_dfs_check_event_window(struct mt76x02_dev *dev)
{
- struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
- struct mt76x2_dfs_event_rb *event_buff;
- struct mt76x2_dfs_event *event;
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dfs_event_rb *event_buff;
+ struct mt76x02_dfs_event *event;
int i;
for (i = 0; i < ARRAY_SIZE(dfs_pd->event_rb); i++) {
@@ -623,8 +623,8 @@ static void mt76x2_dfs_check_event_window(struct mt76x2_dev *dev)
static void mt76x2_dfs_tasklet(unsigned long arg)
{
- struct mt76x2_dev *dev = (struct mt76x2_dev *)arg;
- struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dev *dev = (struct mt76x02_dev *)arg;
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
u32 engine_mask;
int i;
@@ -654,7 +654,7 @@ static void mt76x2_dfs_tasklet(unsigned long arg)
goto out;
for (i = 0; i < MT_DFS_NUM_ENGINES; i++) {
- struct mt76x2_dfs_hw_pulse pulse;
+ struct mt76x02_dfs_hw_pulse pulse;
if (!(engine_mask & (1 << i)))
continue;
@@ -679,12 +679,12 @@ static void mt76x2_dfs_tasklet(unsigned long arg)
mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
out:
- mt76x02_irq_enable(&dev->mt76, MT_INT_GPTIMER);
+ mt76x02_irq_enable(dev, MT_INT_GPTIMER);
}
-static void mt76x2_dfs_init_sw_detector(struct mt76x2_dev *dev)
+static void mt76x2_dfs_init_sw_detector(struct mt76x02_dev *dev)
{
- struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
switch (dev->dfs_pd.region) {
case NL80211_DFS_FCC:
@@ -708,11 +708,11 @@ static void mt76x2_dfs_init_sw_detector(struct mt76x2_dev *dev)
}
}
-static void mt76x2_dfs_set_bbp_params(struct mt76x2_dev *dev)
+static void mt76x2_dfs_set_bbp_params(struct mt76x02_dev *dev)
{
- u32 data;
+ const struct mt76x02_radar_specs *radar_specs;
u8 i, shift;
- const struct mt76x2_radar_specs *radar_specs;
+ u32 data;
switch (dev->mt76.chandef.width) {
case NL80211_CHAN_WIDTH_40:
@@ -803,7 +803,7 @@ static void mt76x2_dfs_set_bbp_params(struct mt76x2_dev *dev)
mt76_wr(dev, 0x212c, 0x0c350001);
}
-void mt76x2_dfs_adjust_agc(struct mt76x2_dev *dev)
+void mt76x2_dfs_adjust_agc(struct mt76x02_dev *dev)
{
u32 agc_r8, agc_r4, val_r8, val_r4, dfs_r31;
@@ -824,7 +824,7 @@ void mt76x2_dfs_adjust_agc(struct mt76x2_dev *dev)
mt76_wr(dev, MT_BBP(DFS, 32), 0x00040071);
}
-void mt76x2_dfs_init_params(struct mt76x2_dev *dev)
+void mt76x2_dfs_init_params(struct mt76x02_dev *dev)
{
struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
@@ -835,7 +835,7 @@ void mt76x2_dfs_init_params(struct mt76x2_dev *dev)
/* enable debug mode */
mt76x2_dfs_set_capture_mode_ctrl(dev, true);
- mt76x02_irq_enable(&dev->mt76, MT_INT_GPTIMER);
+ mt76x02_irq_enable(dev, MT_INT_GPTIMER);
mt76_rmw_field(dev, MT_INT_TIMER_EN,
MT_INT_TIMER_EN_GP_TIMER_EN, 1);
} else {
@@ -845,15 +845,15 @@ void mt76x2_dfs_init_params(struct mt76x2_dev *dev)
mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
mt76_wr(dev, 0x212c, 0);
- mt76x02_irq_disable(&dev->mt76, MT_INT_GPTIMER);
+ mt76x02_irq_disable(dev, MT_INT_GPTIMER);
mt76_rmw_field(dev, MT_INT_TIMER_EN,
MT_INT_TIMER_EN_GP_TIMER_EN, 0);
}
}
-void mt76x2_dfs_init_detector(struct mt76x2_dev *dev)
+void mt76x2_dfs_init_detector(struct mt76x02_dev *dev)
{
- struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
INIT_LIST_HEAD(&dfs_pd->sequences);
INIT_LIST_HEAD(&dfs_pd->seq_pool);
@@ -863,10 +863,10 @@ void mt76x2_dfs_init_detector(struct mt76x2_dev *dev)
(unsigned long)dev);
}
-void mt76x2_dfs_set_domain(struct mt76x2_dev *dev,
+void mt76x2_dfs_set_domain(struct mt76x02_dev *dev,
enum nl80211_dfs_regions region)
{
- struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
if (dfs_pd->region != region) {
tasklet_disable(&dfs_pd->dfs_tasklet);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
index 3f77c13a6d54..3824290b219d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
@@ -16,13 +16,11 @@
#include <linux/delay.h>
#include "mt76x2.h"
-#include "mt76x2_eeprom.h"
-#include "mt76x2_mcu.h"
-#include "mt76x02_util.h"
-#include "mt76x02_dma.h"
+#include "eeprom.h"
+#include "mcu.h"
static void
-mt76x2_mac_pbf_init(struct mt76x2_dev *dev)
+mt76x2_mac_pbf_init(struct mt76x02_dev *dev)
{
u32 val;
@@ -40,12 +38,12 @@ mt76x2_mac_pbf_init(struct mt76x2_dev *dev)
}
static void
-mt76x2_fixup_xtal(struct mt76x2_dev *dev)
+mt76x2_fixup_xtal(struct mt76x02_dev *dev)
{
u16 eep_val;
s8 offset = 0;
- eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_XTAL_TRIM_2);
+ eep_val = mt76x02_eeprom_get(dev, MT_EE_XTAL_TRIM_2);
offset = eep_val & 0x7f;
if ((eep_val & 0xff) == 0xff)
@@ -55,7 +53,7 @@ mt76x2_fixup_xtal(struct mt76x2_dev *dev)
eep_val >>= 8;
if (eep_val == 0x00 || eep_val == 0xff) {
- eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_XTAL_TRIM_1);
+ eep_val = mt76x02_eeprom_get(dev, MT_EE_XTAL_TRIM_1);
eep_val &= 0xff;
if (eep_val == 0x00 || eep_val == 0xff)
@@ -66,7 +64,7 @@ mt76x2_fixup_xtal(struct mt76x2_dev *dev)
mt76_rmw_field(dev, MT_XO_CTRL5, MT_XO_CTRL5_C2_VAL, eep_val + offset);
mt76_set(dev, MT_XO_CTRL6, MT_XO_CTRL6_C2_CTRL);
- eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_2);
+ eep_val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_2);
switch (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) {
case 0:
mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80);
@@ -79,7 +77,7 @@ mt76x2_fixup_xtal(struct mt76x2_dev *dev)
}
}
-static int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard)
+static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
{
static const u8 null_addr[ETH_ALEN] = {};
const u8 *macaddr = dev->mt76.macaddr;
@@ -145,14 +143,14 @@ static int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard)
mt76_wr(dev, MT_WCID_DROP_BASE + i * 4, 0);
for (i = 0; i < 256; i++)
- mt76x02_mac_wcid_setup(&dev->mt76, i, 0, NULL);
+ mt76x02_mac_wcid_setup(dev, i, 0, NULL);
for (i = 0; i < MT_MAX_VIFS; i++)
- mt76x02_mac_wcid_setup(&dev->mt76, MT_VIF_WCID(i), i, NULL);
+ mt76x02_mac_wcid_setup(dev, MT_VIF_WCID(i), i, NULL);
for (i = 0; i < 16; i++)
for (k = 0; k < 4; k++)
- mt76x02_mac_shared_key_setup(&dev->mt76, i, k, NULL);
+ mt76x02_mac_shared_key_setup(dev, i, k, NULL);
for (i = 0; i < 8; i++) {
mt76x2_mac_set_bssid(dev, i, null_addr);
@@ -170,14 +168,14 @@ static int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard)
MT_CH_TIME_CFG_EIFS_AS_BUSY |
FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));
- mt76x02_set_beacon_offsets(&dev->mt76);
+ mt76x02_set_beacon_offsets(dev);
mt76x2_set_tx_ackto(dev);
return 0;
}
-int mt76x2_mac_start(struct mt76x2_dev *dev)
+int mt76x2_mac_start(struct mt76x02_dev *dev)
{
int i;
@@ -188,12 +186,12 @@ int mt76x2_mac_start(struct mt76x2_dev *dev)
mt76_rr(dev, MT_TX_STAT_FIFO);
memset(dev->aggr_stats, 0, sizeof(dev->aggr_stats));
- mt76x02_mac_start(&dev->mt76);
+ mt76x02_mac_start(dev);
return 0;
}
-void mt76x2_mac_resume(struct mt76x2_dev *dev)
+void mt76x2_mac_resume(struct mt76x02_dev *dev)
{
mt76_wr(dev, MT_MAC_SYS_CTRL,
MT_MAC_SYS_CTRL_ENABLE_TX |
@@ -201,7 +199,7 @@ void mt76x2_mac_resume(struct mt76x2_dev *dev)
}
static void
-mt76x2_power_on_rf_patch(struct mt76x2_dev *dev)
+mt76x2_power_on_rf_patch(struct mt76x02_dev *dev)
{
mt76_set(dev, 0x10130, BIT(0) | BIT(16));
udelay(1);
@@ -222,7 +220,7 @@ mt76x2_power_on_rf_patch(struct mt76x2_dev *dev)
}
static void
-mt76x2_power_on_rf(struct mt76x2_dev *dev, int unit)
+mt76x2_power_on_rf(struct mt76x02_dev *dev, int unit)
{
int shift = unit ? 8 : 0;
@@ -244,7 +242,7 @@ mt76x2_power_on_rf(struct mt76x2_dev *dev, int unit)
}
static void
-mt76x2_power_on(struct mt76x2_dev *dev)
+mt76x2_power_on(struct mt76x02_dev *dev)
{
u32 val;
@@ -279,7 +277,7 @@ mt76x2_power_on(struct mt76x2_dev *dev)
mt76x2_power_on_rf(dev, 1);
}
-void mt76x2_set_tx_ackto(struct mt76x2_dev *dev)
+void mt76x2_set_tx_ackto(struct mt76x02_dev *dev)
{
u8 ackto, sifs, slottime = dev->slottime;
@@ -296,14 +294,14 @@ void mt76x2_set_tx_ackto(struct mt76x2_dev *dev)
MT_TX_TIMEOUT_CFG_ACKTO, ackto);
}
-int mt76x2_init_hardware(struct mt76x2_dev *dev)
+int mt76x2_init_hardware(struct mt76x02_dev *dev)
{
int ret;
tasklet_init(&dev->pre_tbtt_tasklet, mt76x2_pre_tbtt_tasklet,
(unsigned long) dev);
- mt76x02_dma_disable(&dev->mt76);
+ mt76x02_dma_disable(dev);
mt76x2_reset_wlan(dev, true);
mt76x2_power_on(dev);
@@ -317,7 +315,7 @@ int mt76x2_init_hardware(struct mt76x2_dev *dev)
dev->mt76.rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);
- ret = mt76x02_dma_init(&dev->mt76);
+ ret = mt76x02_dma_init(dev);
if (ret)
return ret;
@@ -335,43 +333,42 @@ int mt76x2_init_hardware(struct mt76x2_dev *dev)
return 0;
}
-void mt76x2_stop_hardware(struct mt76x2_dev *dev)
+void mt76x2_stop_hardware(struct mt76x02_dev *dev)
{
cancel_delayed_work_sync(&dev->cal_work);
cancel_delayed_work_sync(&dev->mac_work);
- mt76x02_mcu_set_radio_state(&dev->mt76, false, true);
+ mt76x02_mcu_set_radio_state(dev, false, true);
mt76x2_mac_stop(dev, false);
}
-void mt76x2_cleanup(struct mt76x2_dev *dev)
+void mt76x2_cleanup(struct mt76x02_dev *dev)
{
tasklet_disable(&dev->dfs_pd.dfs_tasklet);
tasklet_disable(&dev->pre_tbtt_tasklet);
mt76x2_stop_hardware(dev);
- mt76x2_dma_cleanup(dev);
- mt76x02_mcu_cleanup(&dev->mt76);
+ mt76x02_dma_cleanup(dev);
+ mt76x02_mcu_cleanup(dev);
}
-struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev)
+struct mt76x02_dev *mt76x2_alloc_device(struct device *pdev)
{
static const struct mt76_driver_ops drv_ops = {
.txwi_size = sizeof(struct mt76x02_txwi),
.update_survey = mt76x2_update_channel,
- .tx_prepare_skb = mt76x2_tx_prepare_skb,
- .tx_complete_skb = mt76x2_tx_complete_skb,
- .rx_skb = mt76x2_queue_rx_skb,
- .rx_poll_complete = mt76x2_rx_poll_complete,
+ .tx_prepare_skb = mt76x02_tx_prepare_skb,
+ .tx_complete_skb = mt76x02_tx_complete_skb,
+ .rx_skb = mt76x02_queue_rx_skb,
+ .rx_poll_complete = mt76x02_rx_poll_complete,
.sta_ps = mt76x2_sta_ps,
- .get_max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj,
};
- struct mt76x2_dev *dev;
+ struct mt76x02_dev *dev;
struct mt76_dev *mdev;
mdev = mt76_alloc_device(sizeof(*dev), &mt76x2_ops);
if (!mdev)
return NULL;
- dev = container_of(mdev, struct mt76x2_dev, mt76);
+ dev = container_of(mdev, struct mt76x02_dev, mt76);
mdev->dev = pdev;
mdev->drv = &drv_ops;
@@ -382,7 +379,7 @@ static void mt76x2_regd_notifier(struct wiphy *wiphy,
struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
- struct mt76x2_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
mt76x2_dfs_set_domain(dev, request->dfs_region);
}
@@ -418,8 +415,8 @@ static const struct ieee80211_iface_combination if_comb[] = {
static void mt76x2_led_set_config(struct mt76_dev *mt76, u8 delay_on,
u8 delay_off)
{
- struct mt76x2_dev *dev = container_of(mt76, struct mt76x2_dev,
- mt76);
+ struct mt76x02_dev *dev = container_of(mt76, struct mt76x02_dev,
+ mt76);
u32 val;
val = MT_LED_STATUS_DURATION(0xff) |
@@ -463,21 +460,12 @@ static void mt76x2_led_set_brightness(struct led_classdev *led_cdev,
mt76x2_led_set_config(mt76, 0xff, 0);
}
-int mt76x2_register_device(struct mt76x2_dev *dev)
+int mt76x2_register_device(struct mt76x02_dev *dev)
{
struct ieee80211_hw *hw = mt76_hw(dev);
struct wiphy *wiphy = hw->wiphy;
- void *status_fifo;
- int fifo_size;
int i, ret;
- fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x02_tx_status));
- status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL);
- if (!status_fifo)
- return -ENOMEM;
-
- tasklet_init(&dev->tx_tasklet, mt76x2_tx_tasklet, (unsigned long)dev);
- kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);
INIT_DELAYED_WORK(&dev->cal_work, mt76x2_phy_calibrate);
INIT_DELAYED_WORK(&dev->mac_work, mt76x2_mac_work);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mac.c
index bb9c0a059a6e..4b331ed14bb2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mac.c
@@ -16,12 +16,10 @@
#include <linux/delay.h>
#include "mt76x2.h"
-#include "mt76x2_mcu.h"
-#include "mt76x2_eeprom.h"
-#include "mt76x2_trace.h"
-#include "mt76x02_util.h"
+#include "mcu.h"
+#include "eeprom.h"
-void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr)
+void mt76x2_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr)
{
idx &= 7;
mt76_wr(dev, MT_MAC_APC_BSSID_L(idx), get_unaligned_le32(addr));
@@ -29,76 +27,8 @@ void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr)
get_unaligned_le16(addr + 4));
}
-void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq)
-{
- struct mt76x02_tx_status stat = {};
- unsigned long flags;
- u8 update = 1;
- bool ret;
-
- if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
- return;
-
- trace_mac_txstat_poll(dev);
-
- while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
- spin_lock_irqsave(&dev->mt76.mmio.irq_lock, flags);
- ret = mt76x02_mac_load_tx_status(&dev->mt76, &stat);
- spin_unlock_irqrestore(&dev->mt76.mmio.irq_lock, flags);
-
- if (!ret)
- break;
-
- trace_mac_txstat_fetch(dev, &stat);
-
- if (!irq) {
- mt76x02_send_tx_status(&dev->mt76, &stat, &update);
- continue;
- }
-
- kfifo_put(&dev->txstatus_fifo, stat);
- }
-}
-
-static void
-mt76x2_mac_queue_txdone(struct mt76x2_dev *dev, struct sk_buff *skb,
- void *txwi_ptr)
-{
- struct mt76x2_tx_info *txi = mt76x2_skb_tx_info(skb);
- struct mt76x02_txwi *txwi = txwi_ptr;
-
- mt76x2_mac_poll_tx_status(dev, false);
-
- txi->tries = 0;
- txi->jiffies = jiffies;
- txi->wcid = txwi->wcid;
- txi->pktid = txwi->pktid;
- trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid);
- mt76x02_tx_complete(&dev->mt76, skb);
-}
-
-void mt76x2_mac_process_tx_status_fifo(struct mt76x2_dev *dev)
-{
- struct mt76x02_tx_status stat;
- u8 update = 1;
-
- while (kfifo_get(&dev->txstatus_fifo, &stat))
- mt76x02_send_tx_status(&dev->mt76, &stat, &update);
-}
-
-void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
- struct mt76_queue_entry *e, bool flush)
-{
- struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
-
- if (e->txwi)
- mt76x2_mac_queue_txdone(dev, e->skb, &e->txwi->txwi);
- else
- dev_kfree_skb_any(e->skb);
-}
-
static int
-mt76_write_beacon(struct mt76x2_dev *dev, int offset, struct sk_buff *skb)
+mt76_write_beacon(struct mt76x02_dev *dev, int offset, struct sk_buff *skb)
{
int beacon_len = mt76x02_beacon_offsets[1] - mt76x02_beacon_offsets[0];
struct mt76x02_txwi txwi;
@@ -106,7 +36,7 @@ mt76_write_beacon(struct mt76x2_dev *dev, int offset, struct sk_buff *skb)
if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x02_txwi)))
return -ENOSPC;
- mt76x2_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len);
+ mt76x02_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len);
mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
offset += sizeof(txwi);
@@ -116,7 +46,7 @@ mt76_write_beacon(struct mt76x2_dev *dev, int offset, struct sk_buff *skb)
}
static int
-__mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 bcn_idx, struct sk_buff *skb)
+__mt76x2_mac_set_beacon(struct mt76x02_dev *dev, u8 bcn_idx, struct sk_buff *skb)
{
int beacon_len = mt76x02_beacon_offsets[1] - mt76x02_beacon_offsets[0];
int beacon_addr = mt76x02_beacon_offsets[bcn_idx];
@@ -141,7 +71,7 @@ __mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 bcn_idx, struct sk_buff *skb)
return ret;
}
-int mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 vif_idx,
+int mt76x2_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
struct sk_buff *skb)
{
bool force_update = false;
@@ -176,7 +106,8 @@ int mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 vif_idx,
return 0;
}
-void mt76x2_mac_set_beacon_enable(struct mt76x2_dev *dev, u8 vif_idx, bool val)
+void mt76x2_mac_set_beacon_enable(struct mt76x02_dev *dev,
+ u8 vif_idx, bool val)
{
u8 old_mask = dev->beacon_mask;
bool en;
@@ -201,14 +132,14 @@ void mt76x2_mac_set_beacon_enable(struct mt76x2_dev *dev, u8 vif_idx, bool val)
mt76_rmw(dev, MT_BEACON_TIME_CFG, reg, reg * en);
if (en)
- mt76x02_irq_enable(&dev->mt76, MT_INT_PRE_TBTT | MT_INT_TBTT);
+ mt76x02_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
else
- mt76x02_irq_disable(&dev->mt76, MT_INT_PRE_TBTT | MT_INT_TBTT);
+ mt76x02_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
}
void mt76x2_update_channel(struct mt76_dev *mdev)
{
- struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
struct mt76_channel_state *state;
u32 active, busy;
@@ -225,8 +156,8 @@ void mt76x2_update_channel(struct mt76_dev *mdev)
void mt76x2_mac_work(struct work_struct *work)
{
- struct mt76x2_dev *dev = container_of(work, struct mt76x2_dev,
- mac_work.work);
+ struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
+ mac_work.work);
int i, idx;
mt76x2_update_channel(&dev->mt76);
@@ -241,7 +172,7 @@ void mt76x2_mac_work(struct work_struct *work)
MT_CALIBRATE_INTERVAL);
}
-void mt76x2_mac_set_tx_protection(struct mt76x2_dev *dev, u32 val)
+void mt76x2_mac_set_tx_protection(struct mt76x02_dev *dev, u32 val)
{
u32 data = 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
index 63691b68a436..034a06295668 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
@@ -15,12 +15,11 @@
*/
#include "mt76x2.h"
-#include "mt76x02_util.h"
static int
mt76x2_start(struct ieee80211_hw *hw)
{
- struct mt76x2_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
int ret;
mutex_lock(&dev->mt76.mutex);
@@ -46,7 +45,7 @@ out:
static void
mt76x2_stop(struct ieee80211_hw *hw)
{
- struct mt76x2_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
mutex_lock(&dev->mt76.mutex);
clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
@@ -55,7 +54,7 @@ mt76x2_stop(struct ieee80211_hw *hw)
}
static int
-mt76x2_set_channel(struct mt76x2_dev *dev, struct cfg80211_chan_def *chandef)
+mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
{
int ret;
@@ -91,7 +90,7 @@ mt76x2_set_channel(struct mt76x2_dev *dev, struct cfg80211_chan_def *chandef)
static int
mt76x2_config(struct ieee80211_hw *hw, u32 changed)
{
- struct mt76x2_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
int ret = 0;
mutex_lock(&dev->mt76.mutex);
@@ -113,7 +112,7 @@ mt76x2_config(struct ieee80211_hw *hw, u32 changed)
if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state)) {
mt76x2_phy_set_txpower(dev);
- mt76x2_tx_set_txpwr_auto(dev, dev->mt76.txpower_conf);
+ mt76x02_tx_set_txpwr_auto(dev, dev->mt76.txpower_conf);
}
}
@@ -132,7 +131,7 @@ static void
mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info, u32 changed)
{
- struct mt76x2_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv;
mutex_lock(&dev->mt76.mutex);
@@ -169,18 +168,18 @@ void
mt76x2_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
{
struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv;
- struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
int idx = msta->wcid.idx;
mt76_stop_tx_queues(&dev->mt76, sta, true);
- mt76x02_mac_wcid_set_drop(&dev->mt76, idx, ps);
+ mt76x02_mac_wcid_set_drop(dev, idx, ps);
}
static void
mt76x2_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const u8 *mac)
{
- struct mt76x2_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
tasklet_disable(&dev->pre_tbtt_tasklet);
set_bit(MT76_SCANNING, &dev->mt76.state);
@@ -189,7 +188,7 @@ mt76x2_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
static void
mt76x2_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
- struct mt76x2_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
clear_bit(MT76_SCANNING, &dev->mt76.state);
tasklet_enable(&dev->pre_tbtt_tasklet);
@@ -204,7 +203,7 @@ mt76x2_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
static int
mt76x2_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int *dbm)
{
- struct mt76x2_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
*dbm = dev->mt76.txpower_cur / 2;
@@ -217,7 +216,7 @@ mt76x2_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int *dbm)
static void mt76x2_set_coverage_class(struct ieee80211_hw *hw,
s16 coverage_class)
{
- struct mt76x2_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
mutex_lock(&dev->mt76.mutex);
dev->coverage_class = coverage_class;
@@ -234,7 +233,7 @@ mt76x2_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
static int mt76x2_set_antenna(struct ieee80211_hw *hw, u32 tx_ant,
u32 rx_ant)
{
- struct mt76x2_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
if (!tx_ant || tx_ant > 3 || tx_ant != rx_ant)
return -EINVAL;
@@ -255,7 +254,7 @@ static int mt76x2_set_antenna(struct ieee80211_hw *hw, u32 tx_ant,
static int mt76x2_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant,
u32 *rx_ant)
{
- struct mt76x2_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
mutex_lock(&dev->mt76.mutex);
*tx_ant = dev->mt76.antenna_mask;
@@ -268,7 +267,7 @@ static int mt76x2_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant,
static int
mt76x2_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
{
- struct mt76x2_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
if (val != ~0 && val > 0xffff)
return -EINVAL;
@@ -281,7 +280,7 @@ mt76x2_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
}
const struct ieee80211_ops mt76x2_ops = {
- .tx = mt76x2_tx,
+ .tx = mt76x02_tx,
.start = mt76x2_start,
.stop = mt76x2_stop,
.add_interface = mt76x02_add_interface,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
index 55716fd7e01d..d8fa9ba56437 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
@@ -19,12 +19,11 @@
#include <linux/delay.h>
#include "mt76x2.h"
-#include "mt76x2_mcu.h"
-#include "mt76x2_eeprom.h"
-#include "mt76x02_dma.h"
+#include "mcu.h"
+#include "eeprom.h"
static int
-mt76pci_load_rom_patch(struct mt76x2_dev *dev)
+mt76pci_load_rom_patch(struct mt76x02_dev *dev)
{
const struct firmware *fw = NULL;
struct mt76x02_patch_header *hdr;
@@ -90,7 +89,7 @@ out:
}
static int
-mt76pci_load_firmware(struct mt76x2_dev *dev)
+mt76pci_load_firmware(struct mt76x02_dev *dev)
{
const struct firmware *fw;
const struct mt76x02_fw_header *hdr;
@@ -141,7 +140,7 @@ mt76pci_load_firmware(struct mt76x2_dev *dev)
mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
- val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_2);
+ val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_2);
if (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, val) == 1)
mt76_set(dev, MT_MCU_COM_REG0, BIT(30));
@@ -153,8 +152,8 @@ mt76pci_load_firmware(struct mt76x2_dev *dev)
return -ETIMEDOUT;
}
+ mt76x02_set_ethtool_fwver(dev, hdr);
dev_info(dev->mt76.dev, "Firmware running!\n");
- mt76x02_set_ethtool_fwver(&dev->mt76, hdr);
release_firmware(fw);
@@ -166,7 +165,7 @@ error:
return -ENOENT;
}
-int mt76x2_mcu_init(struct mt76x2_dev *dev)
+int mt76x2_mcu_init(struct mt76x02_dev *dev)
{
static const struct mt76_mcu_ops mt76x2_mcu_ops = {
.mcu_msg_alloc = mt76x02_mcu_msg_alloc,
@@ -184,6 +183,6 @@ int mt76x2_mcu_init(struct mt76x2_dev *dev)
if (ret)
return ret;
- mt76x02_mcu_function_select(&dev->mt76, Q_SELECT, 1, true);
+ mt76x02_mcu_function_select(dev, Q_SELECT, 1, true);
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
index 22e66006a5f8..5bda44540225 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
@@ -16,16 +16,17 @@
#include <linux/delay.h>
#include "mt76x2.h"
-#include "mt76x2_mcu.h"
-#include "mt76x2_eeprom.h"
+#include "mcu.h"
+#include "eeprom.h"
+#include "../mt76x02_phy.h"
static bool
-mt76x2_phy_tssi_init_cal(struct mt76x2_dev *dev)
+mt76x2_phy_tssi_init_cal(struct mt76x02_dev *dev)
{
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
u32 flag = 0;
- if (!mt76x02_tssi_enabled(&dev->mt76))
+ if (!mt76x2_tssi_enabled(dev))
return false;
if (mt76x2_channel_silent(dev))
@@ -34,16 +35,16 @@ mt76x2_phy_tssi_init_cal(struct mt76x2_dev *dev)
if (chan->band == NL80211_BAND_5GHZ)
flag |= BIT(0);
- if (mt76x02_ext_pa_enabled(&dev->mt76, chan->band))
+ if (mt76x02_ext_pa_enabled(dev, chan->band))
flag |= BIT(8);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TSSI, flag, true);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TSSI, flag, true);
dev->cal.tssi_cal_done = true;
return true;
}
static void
-mt76x2_phy_channel_calibrate(struct mt76x2_dev *dev, bool mac_stopped)
+mt76x2_phy_channel_calibrate(struct mt76x02_dev *dev, bool mac_stopped)
{
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
bool is_5ghz = chan->band == NL80211_BAND_5GHZ;
@@ -61,13 +62,13 @@ mt76x2_phy_channel_calibrate(struct mt76x2_dev *dev, bool mac_stopped)
mt76x2_mac_stop(dev, false);
if (is_5ghz)
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_LC, 0, true);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 0, true);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TX_LOFT, is_5ghz, true);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TXIQ, is_5ghz, true);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXIQC_FI, is_5ghz, true);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TEMP_SENSOR, 0, true);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TX_SHAPING, 0, true);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz, true);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz, true);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz, true);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0, true);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TX_SHAPING, 0, true);
if (!mac_stopped)
mt76x2_mac_resume(dev);
@@ -77,7 +78,7 @@ mt76x2_phy_channel_calibrate(struct mt76x2_dev *dev, bool mac_stopped)
dev->cal.channel_cal_done = true;
}
-void mt76x2_phy_set_antenna(struct mt76x2_dev *dev)
+void mt76x2_phy_set_antenna(struct mt76x02_dev *dev)
{
u32 val;
@@ -124,40 +125,7 @@ void mt76x2_phy_set_antenna(struct mt76x2_dev *dev)
}
static void
-mt76x2_get_agc_gain(struct mt76x2_dev *dev, u8 *dest)
-{
- dest[0] = mt76_get_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN);
- dest[1] = mt76_get_field(dev, MT_BBP(AGC, 9), MT_BBP_AGC_GAIN);
-}
-
-static int
-mt76x2_get_rssi_gain_thresh(struct mt76x2_dev *dev)
-{
- switch (dev->mt76.chandef.width) {
- case NL80211_CHAN_WIDTH_80:
- return -62;
- case NL80211_CHAN_WIDTH_40:
- return -65;
- default:
- return -68;
- }
-}
-
-static int
-mt76x2_get_low_rssi_gain_thresh(struct mt76x2_dev *dev)
-{
- switch (dev->mt76.chandef.width) {
- case NL80211_CHAN_WIDTH_80:
- return -76;
- case NL80211_CHAN_WIDTH_40:
- return -79;
- default:
- return -82;
- }
-}
-
-static void
-mt76x2_phy_set_gain_val(struct mt76x2_dev *dev)
+mt76x2_phy_set_gain_val(struct mt76x02_dev *dev)
{
u32 val;
u8 gain_val[2];
@@ -182,26 +150,7 @@ mt76x2_phy_set_gain_val(struct mt76x2_dev *dev)
}
static void
-mt76x2_phy_adjust_vga_gain(struct mt76x2_dev *dev)
-{
- u32 false_cca;
- u8 limit = dev->cal.low_gain > 0 ? 16 : 4;
-
- false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS, mt76_rr(dev, MT_RX_STAT_1));
- dev->cal.false_cca = false_cca;
- if (false_cca > 800 && dev->cal.agc_gain_adjust < limit)
- dev->cal.agc_gain_adjust += 2;
- else if ((false_cca < 10 && dev->cal.agc_gain_adjust > 0) ||
- (dev->cal.agc_gain_adjust >= limit && false_cca < 500))
- dev->cal.agc_gain_adjust -= 2;
- else
- return;
-
- mt76x2_phy_set_gain_val(dev);
-}
-
-static void
-mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev)
+mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
{
u8 *gain = dev->cal.agc_gain_init;
u8 low_gain_delta, gain_delta;
@@ -209,16 +158,17 @@ mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev)
int low_gain;
u32 val;
- dev->cal.avg_rssi_all = mt76x2_phy_get_min_avg_rssi(dev);
+ dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev);
- low_gain = (dev->cal.avg_rssi_all > mt76x2_get_rssi_gain_thresh(dev)) +
- (dev->cal.avg_rssi_all > mt76x2_get_low_rssi_gain_thresh(dev));
+ low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) +
+ (dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev));
gain_change = (dev->cal.low_gain & 2) ^ (low_gain & 2);
dev->cal.low_gain = low_gain;
if (!gain_change) {
- mt76x2_phy_adjust_vga_gain(dev);
+ if (mt76x02_phy_adjust_vga_gain(dev))
+ mt76x2_phy_set_gain_val(dev);
return;
}
@@ -264,7 +214,7 @@ mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev)
mt76_rr(dev, MT_RX_STAT_1);
}
-int mt76x2_phy_set_channel(struct mt76x2_dev *dev,
+int mt76x2_phy_set_channel(struct mt76x02_dev *dev,
struct cfg80211_chan_def *chandef)
{
struct ieee80211_channel *chan = chandef->chan;
@@ -336,8 +286,8 @@ int mt76x2_phy_set_channel(struct mt76x2_dev *dev,
mt76x2_configure_tx_delay(dev, band, bw);
mt76x2_phy_set_txpower(dev);
- mt76x2_phy_set_band(dev, chan->band, ch_group_index & 1);
- mt76x2_phy_set_bw(dev, chandef->width, ch_group_index);
+ mt76x02_phy_set_band(dev, chan->band, ch_group_index & 1);
+ mt76x02_phy_set_bw(dev, chandef->width, ch_group_index);
mt76_rmw(dev, MT_EXT_CCA_CFG,
(MT_EXT_CCA_CFG_CCA0 |
@@ -360,17 +310,17 @@ int mt76x2_phy_set_channel(struct mt76x2_dev *dev,
mt76_set(dev, MT_BBP(RXO, 13), BIT(10));
if (!dev->cal.init_cal_done) {
- u8 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_BT_RCAL_RESULT);
+ u8 val = mt76x02_eeprom_get(dev, MT_EE_BT_RCAL_RESULT);
if (val != 0xff)
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_R, 0, true);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, true);
}
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, channel, true);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel, true);
/* Rx LPF calibration */
if (!dev->cal.init_cal_done)
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RC, 0, true);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RC, 0, true);
dev->cal.init_cal_done = true;
@@ -383,14 +333,11 @@ int mt76x2_phy_set_channel(struct mt76x2_dev *dev,
if (scan)
return 0;
- dev->cal.low_gain = -1;
mt76x2_phy_channel_calibrate(dev, true);
- mt76x2_get_agc_gain(dev, dev->cal.agc_gain_init);
- memcpy(dev->cal.agc_gain_cur, dev->cal.agc_gain_init,
- sizeof(dev->cal.agc_gain_cur));
+ mt76x02_init_agc_gain(dev);
/* init default values for temp compensation */
- if (mt76x02_tssi_enabled(&dev->mt76)) {
+ if (mt76x2_tssi_enabled(dev)) {
mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
0x38);
mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP,
@@ -404,7 +351,7 @@ int mt76x2_phy_set_channel(struct mt76x2_dev *dev,
}
static void
-mt76x2_phy_temp_compensate(struct mt76x2_dev *dev)
+mt76x2_phy_temp_compensate(struct mt76x02_dev *dev)
{
struct mt76x2_temp_comp t;
int temp, db_diff;
@@ -433,9 +380,9 @@ mt76x2_phy_temp_compensate(struct mt76x2_dev *dev)
void mt76x2_phy_calibrate(struct work_struct *work)
{
- struct mt76x2_dev *dev;
+ struct mt76x02_dev *dev;
- dev = container_of(work, struct mt76x2_dev, cal_work.work);
+ dev = container_of(work, struct mt76x02_dev, cal_work.work);
mt76x2_phy_channel_calibrate(dev, false);
mt76x2_phy_tssi_compensate(dev, true);
mt76x2_phy_temp_compensate(dev);
@@ -444,11 +391,11 @@ void mt76x2_phy_calibrate(struct work_struct *work)
MT_CALIBRATE_INTERVAL);
}
-int mt76x2_phy_start(struct mt76x2_dev *dev)
+int mt76x2_phy_start(struct mt76x02_dev *dev)
{
int ret;
- ret = mt76x02_mcu_set_radio_state(&dev->mt76, true, true);
+ ret = mt76x02_mcu_set_radio_state(dev, true, true);
if (ret)
return ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_tx.c
index fcdf1879162e..3a2ec86d3e88 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_tx.c
@@ -15,50 +15,17 @@
*/
#include "mt76x2.h"
-#include "mt76x02_util.h"
-#include "mt76x02_dma.h"
struct beacon_bc_data {
- struct mt76x2_dev *dev;
+ struct mt76x02_dev *dev;
struct sk_buff_head q;
struct sk_buff *tail[8];
};
-int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
- struct sk_buff *skb, struct mt76_queue *q,
- struct mt76_wcid *wcid, struct ieee80211_sta *sta,
- u32 *tx_info)
-{
- struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- int qsel = MT_QSEL_EDCA;
- int ret;
-
- if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128)
- mt76x02_mac_wcid_set_drop(&dev->mt76, wcid->idx, false);
-
- mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta, skb->len);
-
- ret = mt76x02_insert_hdr_pad(skb);
- if (ret < 0)
- return ret;
-
- if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
- qsel = MT_QSEL_MGMT;
-
- *tx_info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
- MT_TXD_INFO_80211;
-
- if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
- *tx_info |= MT_TXD_INFO_WIV;
-
- return 0;
-}
-
static void
mt76x2_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
- struct mt76x2_dev *dev = (struct mt76x2_dev *) priv;
+ struct mt76x02_dev *dev = (struct mt76x02_dev *) priv;
struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv;
struct sk_buff *skb = NULL;
@@ -76,7 +43,7 @@ static void
mt76x2_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
struct beacon_bc_data *data = priv;
- struct mt76x2_dev *dev = data->dev;
+ struct mt76x02_dev *dev = data->dev;
struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv;
struct ieee80211_tx_info *info;
struct sk_buff *skb;
@@ -97,7 +64,7 @@ mt76x2_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
}
static void
-mt76x2_resync_beacon_timer(struct mt76x2_dev *dev)
+mt76x2_resync_beacon_timer(struct mt76x02_dev *dev)
{
u32 timer_val = dev->beacon_int << 4;
@@ -129,7 +96,7 @@ mt76x2_resync_beacon_timer(struct mt76x2_dev *dev)
void mt76x2_pre_tbtt_tasklet(unsigned long arg)
{
- struct mt76x2_dev *dev = (struct mt76x2_dev *) arg;
+ struct mt76x02_dev *dev = (struct mt76x02_dev *) arg;
struct mt76_queue *q = &dev->mt76.q_tx[MT_TXQ_PSD];
struct beacon_bc_data data = {};
struct sk_buff *skb;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
index dd32e756d8b7..e9fff5b7f125 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
@@ -16,12 +16,12 @@
*/
#include "mt76x2.h"
-#include "mt76x2_eeprom.h"
-#include "mt76x2_mcu.h"
-#include "mt76x02_phy.h"
+#include "eeprom.h"
+#include "mcu.h"
+#include "../mt76x02_phy.h"
static void
-mt76x2_adjust_high_lna_gain(struct mt76x2_dev *dev, int reg, s8 offset)
+mt76x2_adjust_high_lna_gain(struct mt76x02_dev *dev, int reg, s8 offset)
{
s8 gain;
@@ -31,7 +31,7 @@ mt76x2_adjust_high_lna_gain(struct mt76x2_dev *dev, int reg, s8 offset)
}
static void
-mt76x2_adjust_agc_gain(struct mt76x2_dev *dev, int reg, s8 offset)
+mt76x2_adjust_agc_gain(struct mt76x02_dev *dev, int reg, s8 offset)
{
s8 gain;
@@ -40,7 +40,7 @@ mt76x2_adjust_agc_gain(struct mt76x2_dev *dev, int reg, s8 offset)
mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_GAIN, gain);
}
-void mt76x2_apply_gain_adj(struct mt76x2_dev *dev)
+void mt76x2_apply_gain_adj(struct mt76x02_dev *dev)
{
s8 *gain_adj = dev->cal.rx.high_gain;
@@ -52,7 +52,7 @@ void mt76x2_apply_gain_adj(struct mt76x2_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76x2_apply_gain_adj);
-void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev,
+void mt76x2_phy_set_txpower_regs(struct mt76x02_dev *dev,
enum nl80211_band band)
{
u32 pa_mode[2];
@@ -65,7 +65,7 @@ void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev,
mt76_wr(dev, MT_TX_ALC_CFG_2, 0x35160a00);
mt76_wr(dev, MT_TX_ALC_CFG_3, 0x35160a06);
- if (mt76x02_ext_pa_enabled(&dev->mt76, band)) {
+ if (mt76x02_ext_pa_enabled(dev, band)) {
mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0x0000ec00);
mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0x0000ec00);
} else {
@@ -76,7 +76,7 @@ void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev,
pa_mode[0] = 0x0000ffff;
pa_mode[1] = 0x00ff00ff;
- if (mt76x02_ext_pa_enabled(&dev->mt76, band)) {
+ if (mt76x02_ext_pa_enabled(dev, band)) {
mt76_wr(dev, MT_TX_ALC_CFG_2, 0x2f0f0400);
mt76_wr(dev, MT_TX_ALC_CFG_3, 0x2f0f0476);
} else {
@@ -84,7 +84,7 @@ void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev,
mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476);
}
- if (mt76x02_ext_pa_enabled(&dev->mt76, band))
+ if (mt76x02_ext_pa_enabled(dev, band))
pa_mode_adj = 0x04000000;
else
pa_mode_adj = 0;
@@ -98,7 +98,7 @@ void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev,
mt76_wr(dev, MT_RF_PA_MODE_CFG0, pa_mode[0]);
mt76_wr(dev, MT_RF_PA_MODE_CFG1, pa_mode[1]);
- if (mt76x02_ext_pa_enabled(&dev->mt76, band)) {
+ if (mt76x02_ext_pa_enabled(dev, band)) {
u32 val;
if (band == NL80211_BAND_2GHZ)
@@ -144,7 +144,7 @@ mt76x2_get_min_rate_power(struct mt76_rate_power *r)
return ret;
}
-void mt76x2_phy_set_txpower(struct mt76x2_dev *dev)
+void mt76x2_phy_set_txpower(struct mt76x02_dev *dev)
{
enum nl80211_chan_width width = dev->mt76.chandef.width;
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
@@ -187,16 +187,16 @@ void mt76x2_phy_set_txpower(struct mt76x2_dev *dev)
dev->target_power_delta[1] = txp_1 - txp.chain[0].target_power;
dev->mt76.rate_power = t;
- mt76x02_phy_set_txpower(&dev->mt76, txp_0, txp_1);
+ mt76x02_phy_set_txpower(dev, txp_0, txp_1);
}
EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower);
-void mt76x2_configure_tx_delay(struct mt76x2_dev *dev,
+void mt76x2_configure_tx_delay(struct mt76x02_dev *dev,
enum nl80211_band band, u8 bw)
{
u32 cfg0, cfg1;
- if (mt76x02_ext_pa_enabled(&dev->mt76, band)) {
+ if (mt76x02_ext_pa_enabled(dev, band)) {
cfg0 = bw ? 0x000b0c01 : 0x00101101;
cfg1 = 0x00011414;
} else {
@@ -210,98 +210,7 @@ void mt76x2_configure_tx_delay(struct mt76x2_dev *dev,
}
EXPORT_SYMBOL_GPL(mt76x2_configure_tx_delay);
-void mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl)
-{
- int core_val, agc_val;
-
- switch (width) {
- case NL80211_CHAN_WIDTH_80:
- core_val = 3;
- agc_val = 7;
- break;
- case NL80211_CHAN_WIDTH_40:
- core_val = 2;
- agc_val = 3;
- break;
- default:
- core_val = 0;
- agc_val = 1;
- break;
- }
-
- mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
- mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
- mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
- mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
-}
-EXPORT_SYMBOL_GPL(mt76x2_phy_set_bw);
-
-void mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper)
-{
- switch (band) {
- case NL80211_BAND_2GHZ:
- mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
- mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
- break;
- case NL80211_BAND_5GHZ:
- mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
- mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
- break;
- }
-
- mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
- primary_upper);
-}
-EXPORT_SYMBOL_GPL(mt76x2_phy_set_band);
-
-int mt76x2_phy_get_min_avg_rssi(struct mt76x2_dev *dev)
-{
- struct mt76x02_sta *sta;
- struct mt76_wcid *wcid;
- int i, j, min_rssi = 0;
- s8 cur_rssi;
-
- local_bh_disable();
- rcu_read_lock();
-
- for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid_mask); i++) {
- unsigned long mask = dev->mt76.wcid_mask[i];
-
- if (!mask)
- continue;
-
- for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) {
- if (!(mask & 1))
- continue;
-
- wcid = rcu_dereference(dev->mt76.wcid[j]);
- if (!wcid)
- continue;
-
- sta = container_of(wcid, struct mt76x02_sta, wcid);
- spin_lock(&dev->mt76.rx_lock);
- if (sta->inactive_count++ < 5)
- cur_rssi = ewma_signal_read(&sta->rssi);
- else
- cur_rssi = 0;
- spin_unlock(&dev->mt76.rx_lock);
-
- if (cur_rssi < min_rssi)
- min_rssi = cur_rssi;
- }
- }
-
- rcu_read_unlock();
- local_bh_enable();
-
- if (!min_rssi)
- return -75;
-
- return min_rssi;
-}
-EXPORT_SYMBOL_GPL(mt76x2_phy_get_min_avg_rssi);
-
-void mt76x2_phy_tssi_compensate(struct mt76x2_dev *dev, bool wait)
+void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev, bool wait)
{
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
struct mt76x2_tx_power_info txp;
@@ -322,7 +231,7 @@ void mt76x2_phy_tssi_compensate(struct mt76x2_dev *dev, bool wait)
dev->cal.tssi_comp_pending = false;
mt76x2_get_power_info(dev, &txp, chan);
- if (mt76x02_ext_pa_enabled(&dev->mt76, chan->band))
+ if (mt76x02_ext_pa_enabled(dev, chan->band))
t.pa_mode = 1;
t.cal_mode = BIT(1);
@@ -336,8 +245,7 @@ void mt76x2_phy_tssi_compensate(struct mt76x2_dev *dev, bool wait)
return;
usleep_range(10000, 20000);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_DPD,
- chan->hw_value, wait);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_DPD, chan->hw_value, wait);
dev->cal.dpd_cal_done = true;
}
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
index feb5cec66c67..57baf8d1c830 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
@@ -17,7 +17,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include "mt76x02_usb.h"
+#include "../mt76x02_usb.h"
#include "mt76x2u.h"
static const struct usb_device_id mt76x2u_device_table[] = {
@@ -37,7 +37,7 @@ static int mt76x2u_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(intf);
- struct mt76x2_dev *dev;
+ struct mt76x02_dev *dev;
int err;
dev = mt76x2u_alloc_device(&intf->dev);
@@ -72,7 +72,7 @@ err:
static void mt76x2u_disconnect(struct usb_interface *intf)
{
struct usb_device *udev = interface_to_usbdev(intf);
- struct mt76x2_dev *dev = usb_get_intfdata(intf);
+ struct mt76x02_dev *dev = usb_get_intfdata(intf);
struct ieee80211_hw *hw = mt76_hw(dev);
set_bit(MT76_REMOVED, &dev->mt76.state);
@@ -87,7 +87,7 @@ static void mt76x2u_disconnect(struct usb_interface *intf)
static int __maybe_unused mt76x2u_suspend(struct usb_interface *intf,
pm_message_t state)
{
- struct mt76x2_dev *dev = usb_get_intfdata(intf);
+ struct mt76x02_dev *dev = usb_get_intfdata(intf);
struct mt76_usb *usb = &dev->mt76.usb;
mt76u_stop_queues(&dev->mt76);
@@ -99,7 +99,7 @@ static int __maybe_unused mt76x2u_suspend(struct usb_interface *intf,
static int __maybe_unused mt76x2u_resume(struct usb_interface *intf)
{
- struct mt76x2_dev *dev = usb_get_intfdata(intf);
+ struct mt76x02_dev *dev = usb_get_intfdata(intf);
struct mt76_usb *usb = &dev->mt76.usb;
int err;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
index 5759a72d7ef6..13cce2937573 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2u_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
@@ -17,11 +17,11 @@
#include <linux/delay.h>
#include "mt76x2u.h"
-#include "mt76x02_util.h"
-#include "mt76x02_phy.h"
-#include "mt76x2_eeprom.h"
+#include "eeprom.h"
+#include "../mt76x02_phy.h"
+#include "../mt76x02_usb.h"
-static void mt76x2u_init_dma(struct mt76x2_dev *dev)
+static void mt76x2u_init_dma(struct mt76x02_dev *dev)
{
u32 val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG));
@@ -36,7 +36,7 @@ static void mt76x2u_init_dma(struct mt76x2_dev *dev)
mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
}
-static void mt76x2u_power_on_rf_patch(struct mt76x2_dev *dev)
+static void mt76x2u_power_on_rf_patch(struct mt76x02_dev *dev)
{
mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(0) | BIT(16));
udelay(1);
@@ -56,7 +56,7 @@ static void mt76x2u_power_on_rf_patch(struct mt76x2_dev *dev)
mt76_set(dev, MT_VEND_ADDR(CFG, 0x14c), BIT(19) | BIT(20));
}
-static void mt76x2u_power_on_rf(struct mt76x2_dev *dev, int unit)
+static void mt76x2u_power_on_rf(struct mt76x02_dev *dev, int unit)
{
int shift = unit ? 8 : 0;
u32 val = (BIT(1) | BIT(3) | BIT(4) | BIT(5)) << shift;
@@ -78,7 +78,7 @@ static void mt76x2u_power_on_rf(struct mt76x2_dev *dev, int unit)
mt76_set(dev, 0x530, 0xf);
}
-static void mt76x2u_power_on(struct mt76x2_dev *dev)
+static void mt76x2u_power_on(struct mt76x02_dev *dev)
{
u32 val;
@@ -114,7 +114,7 @@ static void mt76x2u_power_on(struct mt76x2_dev *dev)
mt76x2u_power_on_rf(dev, 1);
}
-static int mt76x2u_init_eeprom(struct mt76x2_dev *dev)
+static int mt76x2u_init_eeprom(struct mt76x02_dev *dev)
{
u32 val, i;
@@ -130,33 +130,33 @@ static int mt76x2u_init_eeprom(struct mt76x2_dev *dev)
put_unaligned_le32(val, dev->mt76.eeprom.data + i);
}
- mt76x02_eeprom_parse_hw_cap(&dev->mt76);
+ mt76x02_eeprom_parse_hw_cap(dev);
return 0;
}
-struct mt76x2_dev *mt76x2u_alloc_device(struct device *pdev)
+struct mt76x02_dev *mt76x2u_alloc_device(struct device *pdev)
{
static const struct mt76_driver_ops drv_ops = {
- .tx_prepare_skb = mt76x2u_tx_prepare_skb,
- .tx_complete_skb = mt76x02_tx_complete_skb,
+ .tx_prepare_skb = mt76x02u_tx_prepare_skb,
+ .tx_complete_skb = mt76x02u_tx_complete_skb,
.tx_status_data = mt76x02_tx_status_data,
- .rx_skb = mt76x2_queue_rx_skb,
+ .rx_skb = mt76x02_queue_rx_skb,
};
- struct mt76x2_dev *dev;
+ struct mt76x02_dev *dev;
struct mt76_dev *mdev;
mdev = mt76_alloc_device(sizeof(*dev), &mt76x2u_ops);
if (!mdev)
return NULL;
- dev = container_of(mdev, struct mt76x2_dev, mt76);
+ dev = container_of(mdev, struct mt76x02_dev, mt76);
mdev->dev = pdev;
mdev->drv = &drv_ops;
return dev;
}
-static void mt76x2u_init_beacon_offsets(struct mt76x2_dev *dev)
+static void mt76x2u_init_beacon_offsets(struct mt76x02_dev *dev)
{
mt76_wr(dev, MT_BCN_OFFSET(0), 0x18100800);
mt76_wr(dev, MT_BCN_OFFSET(1), 0x38302820);
@@ -164,7 +164,7 @@ static void mt76x2u_init_beacon_offsets(struct mt76x2_dev *dev)
mt76_wr(dev, MT_BCN_OFFSET(3), 0x78706860);
}
-int mt76x2u_init_hardware(struct mt76x2_dev *dev)
+int mt76x2u_init_hardware(struct mt76x02_dev *dev)
{
const struct mt76_wcid_addr addr = {
.macaddr = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
@@ -204,8 +204,7 @@ int mt76x2u_init_hardware(struct mt76x2_dev *dev)
if (err < 0)
return err;
- mt76x02_mac_setaddr(&dev->mt76,
- dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
+ mt76x02_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
dev->mt76.rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);
mt76x2u_init_beacon_offsets(dev);
@@ -237,13 +236,13 @@ int mt76x2u_init_hardware(struct mt76x2_dev *dev)
if (err < 0)
return err;
- mt76x02_phy_set_rxpath(&dev->mt76);
- mt76x02_phy_set_txdac(&dev->mt76);
+ mt76x02_phy_set_rxpath(dev);
+ mt76x02_phy_set_txdac(dev);
return mt76x2u_mac_stop(dev);
}
-int mt76x2u_register_device(struct mt76x2_dev *dev)
+int mt76x2u_register_device(struct mt76x02_dev *dev)
{
struct ieee80211_hw *hw = mt76_hw(dev);
struct wiphy *wiphy = hw->wiphy;
@@ -262,7 +261,7 @@ int mt76x2u_register_device(struct mt76x2_dev *dev)
err = mt76u_mcu_init_rx(&dev->mt76);
if (err < 0)
- return err;
+ goto fail;
err = mt76x2u_init_hardware(dev);
if (err < 0)
@@ -294,16 +293,16 @@ fail:
return err;
}
-void mt76x2u_stop_hw(struct mt76x2_dev *dev)
+void mt76x2u_stop_hw(struct mt76x02_dev *dev)
{
mt76u_stop_stat_wk(&dev->mt76);
cancel_delayed_work_sync(&dev->cal_work);
mt76x2u_mac_stop(dev);
}
-void mt76x2u_cleanup(struct mt76x2_dev *dev)
+void mt76x2u_cleanup(struct mt76x02_dev *dev)
{
- mt76x02_mcu_set_radio_state(&dev->mt76, false, false);
+ mt76x02_mcu_set_radio_state(dev, false, false);
mt76x2u_stop_hw(dev);
mt76u_queues_deinit(&dev->mt76);
mt76u_mcu_deinit(&dev->mt76);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
index f28c6fbcc305..db2194a92e67 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
@@ -15,9 +15,9 @@
*/
#include "mt76x2u.h"
-#include "mt76x2_eeprom.h"
+#include "eeprom.h"
-static void mt76x2u_mac_reset_counters(struct mt76x2_dev *dev)
+static void mt76x2u_mac_reset_counters(struct mt76x02_dev *dev)
{
mt76_rr(dev, MT_RX_STAT_0);
mt76_rr(dev, MT_RX_STAT_1);
@@ -27,12 +27,12 @@ static void mt76x2u_mac_reset_counters(struct mt76x2_dev *dev)
mt76_rr(dev, MT_TX_STA_2);
}
-static void mt76x2u_mac_fixup_xtal(struct mt76x2_dev *dev)
+static void mt76x2u_mac_fixup_xtal(struct mt76x02_dev *dev)
{
s8 offset = 0;
u16 eep_val;
- eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_XTAL_TRIM_2);
+ eep_val = mt76x02_eeprom_get(dev, MT_EE_XTAL_TRIM_2);
offset = eep_val & 0x7f;
if ((eep_val & 0xff) == 0xff)
@@ -42,7 +42,7 @@ static void mt76x2u_mac_fixup_xtal(struct mt76x2_dev *dev)
eep_val >>= 8;
if (eep_val == 0x00 || eep_val == 0xff) {
- eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_XTAL_TRIM_1);
+ eep_val = mt76x02_eeprom_get(dev, MT_EE_XTAL_TRIM_1);
eep_val &= 0xff;
if (eep_val == 0x00 || eep_val == 0xff)
@@ -67,7 +67,7 @@ static void mt76x2u_mac_fixup_xtal(struct mt76x2_dev *dev)
/* init fce */
mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);
- eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_2);
+ eep_val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_2);
switch (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) {
case 0:
mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80);
@@ -80,7 +80,7 @@ static void mt76x2u_mac_fixup_xtal(struct mt76x2_dev *dev)
}
}
-int mt76x2u_mac_reset(struct mt76x2_dev *dev)
+int mt76x2u_mac_reset(struct mt76x02_dev *dev)
{
mt76_wr(dev, MT_WPDMA_GLO_CFG, BIT(4) | BIT(5));
@@ -114,7 +114,7 @@ int mt76x2u_mac_reset(struct mt76x2_dev *dev)
return 0;
}
-int mt76x2u_mac_start(struct mt76x2_dev *dev)
+int mt76x2u_mac_start(struct mt76x02_dev *dev)
{
mt76x2u_mac_reset_counters(dev);
@@ -131,7 +131,7 @@ int mt76x2u_mac_start(struct mt76x2_dev *dev)
return 0;
}
-int mt76x2u_mac_stop(struct mt76x2_dev *dev)
+int mt76x2u_mac_stop(struct mt76x02_dev *dev)
{
int i, count = 0, val;
bool stopped = false;
@@ -212,7 +212,7 @@ int mt76x2u_mac_stop(struct mt76x2_dev *dev)
return 0;
}
-void mt76x2u_mac_resume(struct mt76x2_dev *dev)
+void mt76x2u_mac_resume(struct mt76x02_dev *dev)
{
mt76_wr(dev, MT_MAC_SYS_CTRL,
MT_MAC_SYS_CTRL_ENABLE_TX |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
index a80704568780..1971a1b00038 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2u_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
@@ -15,11 +15,10 @@
*/
#include "mt76x2u.h"
-#include "mt76x02_util.h"
static int mt76x2u_start(struct ieee80211_hw *hw)
{
- struct mt76x2_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
int ret;
mutex_lock(&dev->mt76.mutex);
@@ -37,7 +36,7 @@ out:
static void mt76x2u_stop(struct ieee80211_hw *hw)
{
- struct mt76x2_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
mutex_lock(&dev->mt76.mutex);
clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
@@ -48,17 +47,17 @@ static void mt76x2u_stop(struct ieee80211_hw *hw)
static int mt76x2u_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
- struct mt76x2_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
if (!ether_addr_equal(dev->mt76.macaddr, vif->addr))
- mt76x02_mac_setaddr(&dev->mt76, vif->addr);
+ mt76x02_mac_setaddr(dev, vif->addr);
- mt76x02_vif_init(&dev->mt76, vif, 0);
+ mt76x02_vif_init(dev, vif, 0);
return 0;
}
static int
-mt76x2u_set_channel(struct mt76x2_dev *dev,
+mt76x2u_set_channel(struct mt76x02_dev *dev,
struct cfg80211_chan_def *chandef)
{
int err;
@@ -86,7 +85,7 @@ static void
mt76x2u_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info, u32 changed)
{
- struct mt76x2_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
mutex_lock(&dev->mt76.mutex);
@@ -108,7 +107,7 @@ mt76x2u_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
static int
mt76x2u_config(struct ieee80211_hw *hw, u32 changed)
{
- struct mt76x2_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
int err = 0;
mutex_lock(&dev->mt76.mutex);
@@ -146,7 +145,7 @@ static void
mt76x2u_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const u8 *mac)
{
- struct mt76x2_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
set_bit(MT76_SCANNING, &dev->mt76.state);
}
@@ -154,13 +153,13 @@ mt76x2u_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
static void
mt76x2u_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
- struct mt76x2_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
clear_bit(MT76_SCANNING, &dev->mt76.state);
}
const struct ieee80211_ops mt76x2u_ops = {
- .tx = mt76x2_tx,
+ .tx = mt76x02_tx,
.start = mt76x2u_start,
.stop = mt76x2u_stop,
.add_interface = mt76x2u_add_interface,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c
index fdd94cad7b66..3f1e558e5e6d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c
@@ -17,8 +17,8 @@
#include <linux/firmware.h>
#include "mt76x2u.h"
-#include "mt76x2_eeprom.h"
-#include "mt76x02_usb.h"
+#include "eeprom.h"
+#include "../mt76x02_usb.h"
#define MT_CMD_HDR_LEN 4
@@ -29,7 +29,7 @@
#define MT76U_MCU_DLM_OFFSET 0x110000
#define MT76U_MCU_ROM_PATCH_OFFSET 0x90000
-int mt76x2u_mcu_set_dynamic_vga(struct mt76x2_dev *dev, u8 channel, bool ap,
+int mt76x2u_mcu_set_dynamic_vga(struct mt76x02_dev *dev, u8 channel, bool ap,
bool ext, int rssi, u32 false_cca)
{
struct {
@@ -53,14 +53,14 @@ int mt76x2u_mcu_set_dynamic_vga(struct mt76x2_dev *dev, u8 channel, bool ap,
return mt76_mcu_send_msg(dev, skb, CMD_DYNC_VGA_OP, true);
}
-static void mt76x2u_mcu_load_ivb(struct mt76x2_dev *dev)
+static void mt76x2u_mcu_load_ivb(struct mt76x02_dev *dev)
{
mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
USB_DIR_OUT | USB_TYPE_VENDOR,
0x12, 0, NULL, 0);
}
-static void mt76x2u_mcu_enable_patch(struct mt76x2_dev *dev)
+static void mt76x2u_mcu_enable_patch(struct mt76x02_dev *dev)
{
struct mt76_usb *usb = &dev->mt76.usb;
const u8 data[] = {
@@ -75,7 +75,7 @@ static void mt76x2u_mcu_enable_patch(struct mt76x2_dev *dev)
0x12, 0, usb->data, sizeof(data));
}
-static void mt76x2u_mcu_reset_wmt(struct mt76x2_dev *dev)
+static void mt76x2u_mcu_reset_wmt(struct mt76x02_dev *dev)
{
struct mt76_usb *usb = &dev->mt76.usb;
u8 data[] = {
@@ -89,7 +89,7 @@ static void mt76x2u_mcu_reset_wmt(struct mt76x2_dev *dev)
0x12, 0, usb->data, sizeof(data));
}
-static int mt76x2u_mcu_load_rom_patch(struct mt76x2_dev *dev)
+static int mt76x2u_mcu_load_rom_patch(struct mt76x02_dev *dev)
{
bool rom_protect = !is_mt7612(dev);
struct mt76x02_patch_header *hdr;
@@ -137,7 +137,7 @@ static int mt76x2u_mcu_load_rom_patch(struct mt76x2_dev *dev)
mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
/* vendor reset */
- mt76x02u_mcu_fw_reset(&dev->mt76);
+ mt76x02u_mcu_fw_reset(dev);
usleep_range(5000, 10000);
/* enable FCE to send in-band cmd */
@@ -151,7 +151,7 @@ static int mt76x2u_mcu_load_rom_patch(struct mt76x2_dev *dev)
/* FCE skip_fs_en */
mt76_wr(dev, MT_FCE_SKIP_FS, 0x3);
- err = mt76x02u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr),
+ err = mt76x02u_mcu_fw_send_data(dev, fw->data + sizeof(*hdr),
fw->size - sizeof(*hdr),
MCU_ROM_PATCH_MAX_PAYLOAD,
MT76U_MCU_ROM_PATCH_OFFSET);
@@ -176,7 +176,7 @@ out:
return err;
}
-static int mt76x2u_mcu_load_firmware(struct mt76x2_dev *dev)
+static int mt76x2u_mcu_load_firmware(struct mt76x02_dev *dev)
{
u32 val, dlm_offset = MT76U_MCU_DLM_OFFSET;
const struct mt76x02_fw_header *hdr;
@@ -210,7 +210,7 @@ static int mt76x2u_mcu_load_firmware(struct mt76x2_dev *dev)
dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time);
/* vendor reset */
- mt76x02u_mcu_fw_reset(&dev->mt76);
+ mt76x02u_mcu_fw_reset(dev);
usleep_range(5000, 10000);
/* enable USB_DMA_CFG */
@@ -230,7 +230,7 @@ static int mt76x2u_mcu_load_firmware(struct mt76x2_dev *dev)
mt76_wr(dev, MT_FCE_SKIP_FS, 0x3);
/* load ILM */
- err = mt76x02u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr),
+ err = mt76x02u_mcu_fw_send_data(dev, fw->data + sizeof(*hdr),
ilm_len, MCU_FW_URB_MAX_PAYLOAD,
MT76U_MCU_ILM_OFFSET);
if (err < 0) {
@@ -241,8 +241,7 @@ static int mt76x2u_mcu_load_firmware(struct mt76x2_dev *dev)
/* load DLM */
if (mt76xx_rev(dev) >= MT76XX_REV_E3)
dlm_offset += 0x800;
- err = mt76x02u_mcu_fw_send_data(&dev->mt76,
- fw->data + sizeof(*hdr) + ilm_len,
+ err = mt76x02u_mcu_fw_send_data(dev, fw->data + sizeof(*hdr) + ilm_len,
dlm_len, MCU_FW_URB_MAX_PAYLOAD,
dlm_offset);
if (err < 0) {
@@ -260,15 +259,15 @@ static int mt76x2u_mcu_load_firmware(struct mt76x2_dev *dev)
mt76_set(dev, MT_MCU_COM_REG0, BIT(1));
/* enable FCE to send in-band cmd */
mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
+ mt76x02_set_ethtool_fwver(dev, hdr);
dev_dbg(dev->mt76.dev, "firmware running\n");
- mt76x02_set_ethtool_fwver(&dev->mt76, hdr);
out:
release_firmware(fw);
return err;
}
-int mt76x2u_mcu_fw_init(struct mt76x2_dev *dev)
+int mt76x2u_mcu_fw_init(struct mt76x02_dev *dev)
{
int err;
@@ -279,14 +278,13 @@ int mt76x2u_mcu_fw_init(struct mt76x2_dev *dev)
return mt76x2u_mcu_load_firmware(dev);
}
-int mt76x2u_mcu_init(struct mt76x2_dev *dev)
+int mt76x2u_mcu_init(struct mt76x02_dev *dev)
{
int err;
- err = mt76x02_mcu_function_select(&dev->mt76, Q_SELECT,
- 1, false);
+ err = mt76x02_mcu_function_select(dev, Q_SELECT, 1, false);
if (err < 0)
return err;
- return mt76x02_mcu_set_radio_state(&dev->mt76, true, false);
+ return mt76x02_mcu_set_radio_state(dev, true, false);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c
index 06362d3487be..ca96ba60510e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c
@@ -15,9 +15,10 @@
*/
#include "mt76x2u.h"
-#include "mt76x2_eeprom.h"
+#include "eeprom.h"
+#include "../mt76x02_phy.h"
-void mt76x2u_phy_channel_calibrate(struct mt76x2_dev *dev)
+void mt76x2u_phy_channel_calibrate(struct mt76x02_dev *dev)
{
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
bool is_5ghz = chan->band == NL80211_BAND_5GHZ;
@@ -28,18 +29,18 @@ void mt76x2u_phy_channel_calibrate(struct mt76x2_dev *dev)
mt76x2u_mac_stop(dev);
if (is_5ghz)
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_LC, 0, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 0, false);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TX_LOFT, is_5ghz, false);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TXIQ, is_5ghz, false);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXIQC_FI, is_5ghz, false);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TEMP_SENSOR, 0, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0, false);
mt76x2u_mac_resume(dev);
}
static void
-mt76x2u_phy_update_channel_gain(struct mt76x2_dev *dev)
+mt76x2u_phy_update_channel_gain(struct mt76x02_dev *dev)
{
u8 channel = dev->mt76.chandef.chan->hw_value;
int freq, freq1;
@@ -68,7 +69,7 @@ mt76x2u_phy_update_channel_gain(struct mt76x2_dev *dev)
break;
}
- dev->cal.avg_rssi_all = mt76x2_phy_get_min_avg_rssi(dev);
+ dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev);
false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS,
mt76_rr(dev, MT_RX_STAT_1));
@@ -78,9 +79,9 @@ mt76x2u_phy_update_channel_gain(struct mt76x2_dev *dev)
void mt76x2u_phy_calibrate(struct work_struct *work)
{
- struct mt76x2_dev *dev;
+ struct mt76x02_dev *dev;
- dev = container_of(work, struct mt76x2_dev, cal_work.work);
+ dev = container_of(work, struct mt76x02_dev, cal_work.work);
mt76x2_phy_tssi_compensate(dev, false);
mt76x2u_phy_update_channel_gain(dev);
@@ -88,7 +89,7 @@ void mt76x2u_phy_calibrate(struct work_struct *work)
MT_CALIBRATE_INTERVAL);
}
-int mt76x2u_phy_set_channel(struct mt76x2_dev *dev,
+int mt76x2u_phy_set_channel(struct mt76x02_dev *dev,
struct cfg80211_chan_def *chandef)
{
u32 ext_cca_chan[4] = {
@@ -154,8 +155,8 @@ int mt76x2u_phy_set_channel(struct mt76x2_dev *dev,
mt76x2_configure_tx_delay(dev, chan->band, bw);
mt76x2_phy_set_txpower(dev);
- mt76x2_phy_set_band(dev, chan->band, ch_group_index & 1);
- mt76x2_phy_set_bw(dev, chandef->width, ch_group_index);
+ mt76x02_phy_set_band(dev, chan->band, ch_group_index & 1);
+ mt76x02_phy_set_bw(dev, chandef->width, ch_group_index);
mt76_rmw(dev, MT_EXT_CCA_CFG,
(MT_EXT_CCA_CFG_CCA0 |
@@ -176,18 +177,17 @@ int mt76x2u_phy_set_channel(struct mt76x2_dev *dev,
mt76_set(dev, MT_BBP(RXO, 13), BIT(10));
if (!dev->cal.init_cal_done) {
- u8 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_BT_RCAL_RESULT);
+ u8 val = mt76x02_eeprom_get(dev, MT_EE_BT_RCAL_RESULT);
if (val != 0xff)
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_R,
- 0, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, false);
}
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, channel, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel, false);
/* Rx LPF calibration */
if (!dev->cal.init_cal_done)
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RC, 0, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RC, 0, false);
dev->cal.init_cal_done = true;
mt76_wr(dev, MT_BBP(AGC, 61), 0xff64a4e2);
@@ -202,7 +202,7 @@ int mt76x2u_phy_set_channel(struct mt76x2_dev *dev,
if (scan)
return 0;
- if (mt76x02_tssi_enabled(&dev->mt76)) {
+ if (mt76x2_tssi_enabled(dev)) {
/* init default values for temp compensation */
mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
0x38);
@@ -217,10 +217,9 @@ int mt76x2u_phy_set_channel(struct mt76x2_dev *dev,
chan = dev->mt76.chandef.chan;
if (chan->band == NL80211_BAND_5GHZ)
flag |= BIT(0);
- if (mt76x02_ext_pa_enabled(&dev->mt76, chan->band))
+ if (mt76x02_ext_pa_enabled(dev, chan->band))
flag |= BIT(8);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TSSI,
- flag, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TSSI, flag, false);
dev->cal.tssi_cal_done = true;
}
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_core.c b/drivers/net/wireless/mediatek/mt76/mt76x2_core.c
deleted file mode 100644
index 06e47f960f9a..000000000000
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_core.c
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/delay.h>
-#include "mt76x2.h"
-#include "mt76x2_trace.h"
-#include "mt76x02_util.h"
-
-void mt76x2_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
-{
- mt76x02_irq_enable(mdev, MT_INT_RX_DONE(q));
-}
-
-irqreturn_t mt76x2_irq_handler(int irq, void *dev_instance)
-{
- struct mt76x2_dev *dev = dev_instance;
- u32 intr;
-
- intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
- mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
-
- if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
- return IRQ_NONE;
-
- trace_dev_irq(dev, intr, dev->mt76.mmio.irqmask);
-
- intr &= dev->mt76.mmio.irqmask;
-
- if (intr & MT_INT_TX_DONE_ALL) {
- mt76x02_irq_disable(&dev->mt76, MT_INT_TX_DONE_ALL);
- tasklet_schedule(&dev->tx_tasklet);
- }
-
- if (intr & MT_INT_RX_DONE(0)) {
- mt76x02_irq_disable(&dev->mt76, MT_INT_RX_DONE(0));
- napi_schedule(&dev->mt76.napi[0]);
- }
-
- if (intr & MT_INT_RX_DONE(1)) {
- mt76x02_irq_disable(&dev->mt76, MT_INT_RX_DONE(1));
- napi_schedule(&dev->mt76.napi[1]);
- }
-
- if (intr & MT_INT_PRE_TBTT)
- tasklet_schedule(&dev->pre_tbtt_tasklet);
-
- /* send buffered multicast frames now */
- if (intr & MT_INT_TBTT)
- mt76_queue_kick(dev, &dev->mt76.q_tx[MT_TXQ_PSD]);
-
- if (intr & MT_INT_TX_STAT) {
- mt76x2_mac_poll_tx_status(dev, true);
- tasklet_schedule(&dev->tx_tasklet);
- }
-
- if (intr & MT_INT_GPTIMER) {
- mt76x02_irq_disable(&dev->mt76, MT_INT_GPTIMER);
- tasklet_schedule(&dev->dfs_pd.dfs_tasklet);
- }
-
- return IRQ_HANDLED;
-}
-
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h
deleted file mode 100644
index 66a57294fcfc..000000000000
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef __MT76x2_MAC_H
-#define __MT76x2_MAC_H
-
-#include "mt76.h"
-#include "mt76x02_mac.h"
-
-struct mt76x2_dev;
-struct mt76x2_sta;
-struct mt76x02_vif;
-
-struct mt76x2_tx_info {
- unsigned long jiffies;
- u8 tries;
-
- u8 wcid;
- u8 pktid;
- u8 retry;
-};
-
-static inline struct mt76x2_tx_info *
-mt76x2_skb_tx_info(struct sk_buff *skb)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
- return (void *) info->status.status_driver_data;
-}
-
-int mt76x2_mac_start(struct mt76x2_dev *dev);
-void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force);
-void mt76x2_mac_resume(struct mt76x2_dev *dev);
-void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr);
-
-int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
- void *rxi);
-void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x02_txwi *txwi,
- struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta, int len);
-
-int mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 vif_idx,
- struct sk_buff *skb);
-void mt76x2_mac_set_beacon_enable(struct mt76x2_dev *dev, u8 vif_idx, bool val);
-
-void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq);
-void mt76x2_mac_process_tx_status_fifo(struct mt76x2_dev *dev);
-
-void mt76x2_mac_work(struct work_struct *work);
-
-#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c
deleted file mode 100644
index ed4f56a3aae9..000000000000
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include "mt76x2.h"
-#include "mt76x02_util.h"
-
-void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force)
-{
- bool stopped = false;
- u32 rts_cfg;
- int i;
-
- mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
-
- rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
- mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT);
-
- /* Wait for MAC to become idle */
- for (i = 0; i < 300; i++) {
- if ((mt76_rr(dev, MT_MAC_STATUS) &
- (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX)) ||
- mt76_rr(dev, MT_BBP(IBI, 12))) {
- udelay(1);
- continue;
- }
-
- stopped = true;
- break;
- }
-
- if (force && !stopped) {
- mt76_set(dev, MT_BBP(CORE, 4), BIT(1));
- mt76_clear(dev, MT_BBP(CORE, 4), BIT(1));
-
- mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
- mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
- }
-
- mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg);
-}
-EXPORT_SYMBOL_GPL(mt76x2_mac_stop);
-
-void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x02_txwi *txwi,
- struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta, int len)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_tx_rate *rate = &info->control.rates[0];
- struct ieee80211_key_conf *key = info->control.hw_key;
- u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
- u8 nss;
- s8 txpwr_adj, max_txpwr_adj;
- u8 ccmp_pn[8];
-
- memset(txwi, 0, sizeof(*txwi));
-
- if (wcid)
- txwi->wcid = wcid->idx;
- else
- txwi->wcid = 0xff;
-
- txwi->pktid = 1;
-
- if (wcid && wcid->sw_iv && key) {
- u64 pn = atomic64_inc_return(&key->tx_pn);
- ccmp_pn[0] = pn;
- ccmp_pn[1] = pn >> 8;
- ccmp_pn[2] = 0;
- ccmp_pn[3] = 0x20 | (key->keyidx << 6);
- ccmp_pn[4] = pn >> 16;
- ccmp_pn[5] = pn >> 24;
- ccmp_pn[6] = pn >> 32;
- ccmp_pn[7] = pn >> 40;
- txwi->iv = *((__le32 *)&ccmp_pn[0]);
- txwi->eiv = *((__le32 *)&ccmp_pn[1]);
- }
-
- spin_lock_bh(&dev->mt76.lock);
- if (wcid && (rate->idx < 0 || !rate->count)) {
- txwi->rate = wcid->tx_rate;
- max_txpwr_adj = wcid->max_txpwr_adj;
- nss = wcid->tx_rate_nss;
- } else {
- txwi->rate = mt76x02_mac_tx_rate_val(&dev->mt76, rate, &nss);
- max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(&dev->mt76, rate);
- }
- spin_unlock_bh(&dev->mt76.lock);
-
- txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, dev->mt76.txpower_conf,
- max_txpwr_adj);
- txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);
-
- if (mt76xx_rev(dev) >= MT76XX_REV_E4)
- txwi->txstream = 0x13;
- else if (mt76xx_rev(dev) >= MT76XX_REV_E3 &&
- !(txwi->rate & cpu_to_le16(rate_ht_mask)))
- txwi->txstream = 0x93;
-
- mt76x02_mac_fill_txwi(txwi, skb, sta, len, nss);
-}
-EXPORT_SYMBOL_GPL(mt76x2_mac_write_txwi);
-
-int mt76x2_mac_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain)
-{
- struct mt76x2_rx_freq_cal *cal = &dev->cal.rx;
-
- rssi += cal->rssi_offset[chain];
- rssi -= cal->lna_gain;
-
- return rssi;
-}
-
-static struct mt76x02_sta *
-mt76x2_rx_get_sta(struct mt76x2_dev *dev, u8 idx)
-{
- struct mt76_wcid *wcid;
-
- if (idx >= ARRAY_SIZE(dev->mt76.wcid))
- return NULL;
-
- wcid = rcu_dereference(dev->mt76.wcid[idx]);
- if (!wcid)
- return NULL;
-
- return container_of(wcid, struct mt76x02_sta, wcid);
-}
-
-static struct mt76_wcid *
-mt76x2_rx_get_sta_wcid(struct mt76x2_dev *dev, struct mt76x02_sta *sta,
- bool unicast)
-{
- if (!sta)
- return NULL;
-
- if (unicast)
- return &sta->wcid;
- else
- return &sta->vif->group_wcid;
-}
-
-int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
- void *rxi)
-{
- struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
- struct mt76x02_rxwi *rxwi = rxi;
- struct mt76x02_sta *sta;
- u32 rxinfo = le32_to_cpu(rxwi->rxinfo);
- u32 ctl = le32_to_cpu(rxwi->ctl);
- u16 rate = le16_to_cpu(rxwi->rate);
- u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
- bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
- int pad_len = 0;
- u8 pn_len;
- u8 wcid;
- int len;
-
- if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
- return -EINVAL;
-
- if (rxinfo & MT_RXINFO_L2PAD)
- pad_len += 2;
-
- if (rxinfo & MT_RXINFO_DECRYPT) {
- status->flag |= RX_FLAG_DECRYPTED;
- status->flag |= RX_FLAG_MMIC_STRIPPED;
- status->flag |= RX_FLAG_MIC_STRIPPED;
- status->flag |= RX_FLAG_IV_STRIPPED;
- }
-
- wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl);
- sta = mt76x2_rx_get_sta(dev, wcid);
- status->wcid = mt76x2_rx_get_sta_wcid(dev, sta, unicast);
-
- len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
- pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo);
- if (pn_len) {
- int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len;
- u8 *data = skb->data + offset;
-
- status->iv[0] = data[7];
- status->iv[1] = data[6];
- status->iv[2] = data[5];
- status->iv[3] = data[4];
- status->iv[4] = data[1];
- status->iv[5] = data[0];
-
- /*
- * Driver CCMP validation can't deal with fragments.
- * Let mac80211 take care of it.
- */
- if (rxinfo & MT_RXINFO_FRAG) {
- status->flag &= ~RX_FLAG_IV_STRIPPED;
- } else {
- pad_len += pn_len << 2;
- len -= pn_len << 2;
- }
- }
-
- mt76x02_remove_hdr_pad(skb, pad_len);
-
- if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL))
- status->aggr = true;
-
- if (WARN_ON_ONCE(len > skb->len))
- return -EINVAL;
-
- pskb_trim(skb, len);
- status->chains = BIT(0) | BIT(1);
- status->chain_signal[0] = mt76x2_mac_get_rssi(dev, rxwi->rssi[0], 0);
- status->chain_signal[1] = mt76x2_mac_get_rssi(dev, rxwi->rssi[1], 1);
- status->signal = max(status->chain_signal[0], status->chain_signal[1]);
- status->freq = dev->mt76.chandef.chan->center_freq;
- status->band = dev->mt76.chandef.chan->band;
-
- status->tid = FIELD_GET(MT_RXWI_TID, tid_sn);
- status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn);
-
- if (sta) {
- ewma_signal_add(&sta->rssi, status->signal);
- sta->inactive_count = 0;
- }
-
- return mt76x02_mac_process_rate(status, rate);
-}
-EXPORT_SYMBOL_GPL(mt76x2_mac_process_rx);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c
deleted file mode 100644
index 1ec3c293e2c4..000000000000
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include "mt76x2.h"
-#include "dma.h"
-
-void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
- struct sk_buff *skb)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct mt76x2_dev *dev = hw->priv;
- struct ieee80211_vif *vif = info->control.vif;
- struct mt76_wcid *wcid = &dev->mt76.global_wcid;
-
- if (control->sta) {
- struct mt76x02_sta *msta;
-
- msta = (struct mt76x02_sta *)control->sta->drv_priv;
- wcid = &msta->wcid;
- /* sw encrypted frames */
- if (!info->control.hw_key && wcid->hw_key_idx != 0xff)
- control->sta = NULL;
- }
-
- if (vif && !control->sta) {
- struct mt76x02_vif *mvif;
-
- mvif = (struct mt76x02_vif *)vif->drv_priv;
- wcid = &mvif->group_wcid;
- }
-
- mt76_tx(&dev->mt76, control->sta, wcid, skb);
-}
-EXPORT_SYMBOL_GPL(mt76x2_tx);
-
-s8 mt76x2_tx_get_max_txpwr_adj(struct mt76_dev *mdev,
- const struct ieee80211_tx_rate *rate)
-{
- struct mt76x2_dev *dev = (struct mt76x2_dev *) mdev;
- s8 max_txpwr;
-
- if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
- u8 mcs = ieee80211_rate_get_vht_mcs(rate);
-
- if (mcs == 8 || mcs == 9) {
- max_txpwr = mdev->rate_power.vht[8];
- } else {
- u8 nss, idx;
-
- nss = ieee80211_rate_get_vht_nss(rate);
- idx = ((nss - 1) << 3) + mcs;
- max_txpwr = mdev->rate_power.ht[idx & 0xf];
- }
- } else if (rate->flags & IEEE80211_TX_RC_MCS) {
- max_txpwr = mdev->rate_power.ht[rate->idx & 0xf];
- } else {
- enum nl80211_band band = dev->mt76.chandef.chan->band;
-
- if (band == NL80211_BAND_2GHZ) {
- const struct ieee80211_rate *r;
- struct wiphy *wiphy = mt76_hw(dev)->wiphy;
- struct mt76_rate_power *rp = &mdev->rate_power;
-
- r = &wiphy->bands[band]->bitrates[rate->idx];
- if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE)
- max_txpwr = rp->cck[r->hw_value & 0x3];
- else
- max_txpwr = rp->ofdm[r->hw_value & 0x7];
- } else {
- max_txpwr = mdev->rate_power.ofdm[rate->idx & 0x7];
- }
- }
-
- return max_txpwr;
-}
-EXPORT_SYMBOL_GPL(mt76x2_tx_get_max_txpwr_adj);
-
-s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj)
-{
- txpwr = min_t(s8, txpwr, dev->mt76.txpower_conf);
- txpwr -= (dev->target_power + dev->target_power_delta[0]);
- txpwr = min_t(s8, txpwr, max_txpwr_adj);
-
- if (!dev->enable_tpc)
- return 0;
- else if (txpwr >= 0)
- return min_t(s8, txpwr, 7);
- else
- return (txpwr < -16) ? 8 : (txpwr + 32) / 2;
-}
-EXPORT_SYMBOL_GPL(mt76x2_tx_get_txpwr_adj);
-
-void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr)
-{
- s8 txpwr_adj;
-
- txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, txpwr,
- dev->mt76.rate_power.ofdm[4]);
- mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
- MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj);
- mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
- MT_PROT_AUTO_TX_CFG_AUTO_PADJ, txpwr_adj);
-}
-EXPORT_SYMBOL_GPL(mt76x2_tx_set_txpwr_auto);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_core.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_core.c
deleted file mode 100644
index c2ccdebca470..000000000000
--- a/drivers/net/wireless/mediatek/mt76/mt76x2u_core.c
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include "mt76x2u.h"
-#include "dma.h"
-#include "mt76x02_util.h"
-#include "mt76x02_usb.h"
-
-static int
-mt76x2u_check_skb_rooms(struct sk_buff *skb)
-{
- int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
- u32 need_head;
-
- need_head = sizeof(struct mt76x02_txwi) + MT_DMA_HDR_LEN;
- if (hdr_len % 4)
- need_head += 2;
- return skb_cow(skb, need_head);
-}
-
-int mt76x2u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
- struct sk_buff *skb, struct mt76_queue *q,
- struct mt76_wcid *wcid, struct ieee80211_sta *sta,
- u32 *tx_info)
-{
- struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
- struct mt76x02_txwi *txwi;
- int err, len = skb->len;
-
- err = mt76x2u_check_skb_rooms(skb);
- if (err < 0)
- return -ENOMEM;
-
- mt76x02_insert_hdr_pad(skb);
-
- txwi = skb_push(skb, sizeof(struct mt76x02_txwi));
- mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta, len);
-
- return mt76x02u_set_txinfo(skb, wcid, q2ep(q->hw_idx));
-}
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index bf0e9e666bc4..7cbce03aa65b 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -96,7 +96,8 @@ mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- if (!ieee80211_is_data_qos(hdr->frame_control))
+ if (!ieee80211_is_data_qos(hdr->frame_control) ||
+ !ieee80211_is_data_present(hdr->frame_control))
return;
mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index de7785c4f6af..5f0faf07c346 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -286,7 +286,7 @@ mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
void *data;
int offset;
- data = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
+ data = page_frag_alloc(&q->rx_page, len, GFP_ATOMIC);
if (!data)
break;
@@ -318,7 +318,7 @@ int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
if (!buf->urb)
return -ENOMEM;
- buf->urb->sg = devm_kzalloc(dev->dev, nsgs * sizeof(*buf->urb->sg),
+ buf->urb->sg = devm_kcalloc(dev->dev, nsgs, sizeof(*buf->urb->sg),
gfp);
if (!buf->urb->sg)
return -ENOMEM;
@@ -525,8 +525,8 @@ static int mt76u_alloc_rx(struct mt76_dev *dev)
spin_lock_init(&q->rx_page_lock);
spin_lock_init(&q->lock);
- q->entry = devm_kzalloc(dev->dev,
- MT_NUM_RX_ENTRIES * sizeof(*q->entry),
+ q->entry = devm_kcalloc(dev->dev,
+ MT_NUM_RX_ENTRIES, sizeof(*q->entry),
GFP_KERNEL);
if (!q->entry)
return -ENOMEM;
@@ -755,8 +755,8 @@ static int mt76u_alloc_tx(struct mt76_dev *dev)
INIT_LIST_HEAD(&q->swq);
q->hw_idx = mt76_ac_to_hwq(i);
- q->entry = devm_kzalloc(dev->dev,
- MT_NUM_TX_ENTRIES * sizeof(*q->entry),
+ q->entry = devm_kcalloc(dev->dev,
+ MT_NUM_TX_ENTRIES, sizeof(*q->entry),
GFP_KERNEL);
if (!q->entry)
return -ENOMEM;
@@ -862,6 +862,7 @@ int mt76u_init(struct mt76_dev *dev,
.copy = mt76u_copy,
.wr_rp = mt76u_wr_rp,
.rd_rp = mt76u_rd_rp,
+ .type = MT76_BUS_USB,
};
struct mt76_usb *usb = &dev->usb;
diff --git a/drivers/net/wireless/quantenna/Kconfig b/drivers/net/wireless/quantenna/Kconfig
index de84ce125c26..7628d9c1ea6a 100644
--- a/drivers/net/wireless/quantenna/Kconfig
+++ b/drivers/net/wireless/quantenna/Kconfig
@@ -1,7 +1,7 @@
config WLAN_VENDOR_QUANTENNA
bool "Quantenna wireless cards support"
default y
- ---help---
+ help
If you have a wireless card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
diff --git a/drivers/net/wireless/quantenna/qtnfmac/Kconfig b/drivers/net/wireless/quantenna/qtnfmac/Kconfig
index 8d1492a90bd1..6cf5202c3666 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/Kconfig
+++ b/drivers/net/wireless/quantenna/qtnfmac/Kconfig
@@ -1,19 +1,20 @@
config QTNFMAC
tristate
- depends on QTNFMAC_PEARL_PCIE
- default m if QTNFMAC_PEARL_PCIE=m
- default y if QTNFMAC_PEARL_PCIE=y
+ depends on QTNFMAC_PCIE
+ default m if QTNFMAC_PCIE=m
+ default y if QTNFMAC_PCIE=y
-config QTNFMAC_PEARL_PCIE
- tristate "Quantenna QSR10g PCIe support"
+config QTNFMAC_PCIE
+ tristate "Quantenna QSR1000/QSR2000/QSR10g PCIe support"
default n
depends on PCI && CFG80211
select QTNFMAC
select FW_LOADER
select CRC32
- ---help---
+ help
This option adds support for wireless adapters based on Quantenna
- 802.11ac QSR10g (aka Pearl) FullMAC chipset running over PCIe.
+ 802.11ac QSR10g (aka Pearl) and QSR1000/QSR2000 (aka Topaz)
+ FullMAC chipsets running over PCIe.
If you choose to build it as a module, two modules will be built:
- qtnfmac.ko and qtnfmac_pearl_pcie.ko.
+ qtnfmac.ko and qtnfmac_pcie.ko.
diff --git a/drivers/net/wireless/quantenna/qtnfmac/Makefile b/drivers/net/wireless/quantenna/qtnfmac/Makefile
index 17cd7adb4109..40dffbd2ea47 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/Makefile
+++ b/drivers/net/wireless/quantenna/qtnfmac/Makefile
@@ -19,11 +19,12 @@ qtnfmac-objs += \
#
-obj-$(CONFIG_QTNFMAC_PEARL_PCIE) += qtnfmac_pearl_pcie.o
+obj-$(CONFIG_QTNFMAC_PCIE) += qtnfmac_pcie.o
-qtnfmac_pearl_pcie-objs += \
+qtnfmac_pcie-objs += \
shm_ipc.o \
pcie/pcie.o \
- pcie/pearl_pcie.o
+ pcie/pearl_pcie.o \
+ pcie/topaz_pcie.o
-qtnfmac_pearl_pcie-$(CONFIG_DEBUG_FS) += debug.o
+qtnfmac_pcie-$(CONFIG_DEBUG_FS) += debug.o
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 452d4b7c832d..51b33ec78fac 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -141,8 +141,8 @@ qtnf_change_virtual_intf(struct wiphy *wiphy,
ret = qtnf_cmd_send_change_intf_type(vif, type, mac_addr);
if (ret) {
- pr_err("VIF%u.%u: failed to change VIF type: %d\n",
- vif->mac->macid, vif->vifid, ret);
+ pr_err("VIF%u.%u: failed to change type to %d\n",
+ vif->mac->macid, vif->vifid, type);
return ret;
}
@@ -216,7 +216,6 @@ static struct wireless_dev *qtnf_add_virtual_intf(struct wiphy *wiphy,
eth_zero_addr(vif->mac_addr);
eth_zero_addr(vif->bssid);
vif->bss_priority = QTNF_DEF_BSS_PRIORITY;
- vif->sta_state = QTNF_STA_DISCONNECTED;
memset(&vif->wdev, 0, sizeof(vif->wdev));
vif->wdev.wiphy = wiphy;
vif->wdev.iftype = type;
@@ -229,18 +228,22 @@ static struct wireless_dev *qtnf_add_virtual_intf(struct wiphy *wiphy,
if (params)
mac_addr = params->macaddr;
- if (qtnf_cmd_send_add_intf(vif, type, mac_addr)) {
- pr_err("VIF%u.%u: failed to add VIF\n", mac->macid, vif->vifid);
+ ret = qtnf_cmd_send_add_intf(vif, type, mac_addr);
+ if (ret) {
+ pr_err("VIF%u.%u: failed to add VIF %pM\n",
+ mac->macid, vif->vifid, mac_addr);
goto err_cmd;
}
if (!is_valid_ether_addr(vif->mac_addr)) {
pr_err("VIF%u.%u: FW reported bad MAC: %pM\n",
mac->macid, vif->vifid, vif->mac_addr);
+ ret = -EINVAL;
goto err_mac;
}
- if (qtnf_core_net_attach(mac, vif, name, name_assign_t)) {
+ ret = qtnf_core_net_attach(mac, vif, name, name_assign_t);
+ if (ret) {
pr_err("VIF%u.%u: failed to attach netdev\n", mac->macid,
vif->vifid);
goto err_net;
@@ -256,7 +259,7 @@ err_mac:
err_cmd:
vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
- return ERR_PTR(-EFAULT);
+ return ERR_PTR(ret);
}
static int qtnf_mgmt_set_appie(struct qtnf_vif *vif,
@@ -335,12 +338,11 @@ static int qtnf_stop_ap(struct wiphy *wiphy, struct net_device *dev)
qtnf_scan_done(vif->mac, true);
ret = qtnf_cmd_send_stop_ap(vif);
- if (ret) {
+ if (ret)
pr_err("VIF%u.%u: failed to stop AP operation in FW\n",
vif->mac->macid, vif->vifid);
- netif_carrier_off(vif->netdev);
- }
+ netif_carrier_off(vif->netdev);
return ret;
}
@@ -478,19 +480,31 @@ qtnf_dump_station(struct wiphy *wiphy, struct net_device *dev,
const struct qtnf_sta_node *sta_node;
int ret;
- sta_node = qtnf_sta_list_lookup_index(&vif->sta_list, idx);
+ switch (vif->wdev.iftype) {
+ case NL80211_IFTYPE_STATION:
+ if (idx != 0 || !vif->wdev.current_bss)
+ return -ENOENT;
- if (unlikely(!sta_node))
- return -ENOENT;
+ ether_addr_copy(mac, vif->bssid);
+ break;
+ case NL80211_IFTYPE_AP:
+ sta_node = qtnf_sta_list_lookup_index(&vif->sta_list, idx);
+ if (unlikely(!sta_node))
+ return -ENOENT;
- ether_addr_copy(mac, sta_node->mac_addr);
+ ether_addr_copy(mac, sta_node->mac_addr);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
- ret = qtnf_cmd_get_sta_info(vif, sta_node->mac_addr, sinfo);
+ ret = qtnf_cmd_get_sta_info(vif, mac, sinfo);
- if (unlikely(ret == -ENOENT)) {
- qtnf_sta_list_del(vif, mac);
- cfg80211_del_sta(vif->netdev, mac, GFP_KERNEL);
- sinfo->filled = 0;
+ if (vif->wdev.iftype == NL80211_IFTYPE_AP) {
+ if (ret == -ENOENT) {
+ cfg80211_del_sta(vif->netdev, mac, GFP_KERNEL);
+ sinfo->filled = 0;
+ }
}
sinfo->generation = vif->generation;
@@ -521,9 +535,16 @@ static int qtnf_del_key(struct wiphy *wiphy, struct net_device *dev,
int ret;
ret = qtnf_cmd_send_del_key(vif, key_index, pairwise, mac_addr);
- if (ret)
- pr_err("VIF%u.%u: failed to delete key: idx=%u pw=%u\n",
- vif->mac->macid, vif->vifid, key_index, pairwise);
+ if (ret) {
+ if (ret == -ENOENT) {
+ pr_debug("VIF%u.%u: key index %d out of bounds\n",
+ vif->mac->macid, vif->vifid, key_index);
+ } else {
+ pr_err("VIF%u.%u: failed to delete key: idx=%u pw=%u\n",
+ vif->mac->macid, vif->vifid,
+ key_index, pairwise);
+ }
+ }
return ret;
}
@@ -590,6 +611,7 @@ qtnf_del_station(struct wiphy *wiphy, struct net_device *dev,
if (ret)
pr_err("VIF%u.%u: failed to delete STA %pM\n",
vif->mac->macid, vif->vifid, params->mac);
+
return ret;
}
@@ -597,21 +619,25 @@ static int
qtnf_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
{
struct qtnf_wmac *mac = wiphy_priv(wiphy);
+ int ret;
cancel_delayed_work_sync(&mac->scan_timeout);
mac->scan_req = request;
- if (qtnf_cmd_send_scan(mac)) {
+ ret = qtnf_cmd_send_scan(mac);
+ if (ret) {
pr_err("MAC%u: failed to start scan\n", mac->macid);
mac->scan_req = NULL;
- return -EFAULT;
+ goto out;
}
+ pr_debug("MAC%u: scan started\n", mac->macid);
queue_delayed_work(mac->bus->workqueue, &mac->scan_timeout,
QTNF_SCAN_TIMEOUT_SEC * HZ);
- return 0;
+out:
+ return ret;
}
static int
@@ -624,9 +650,6 @@ qtnf_connect(struct wiphy *wiphy, struct net_device *dev,
if (vif->wdev.iftype != NL80211_IFTYPE_STATION)
return -EOPNOTSUPP;
- if (vif->sta_state != QTNF_STA_DISCONNECTED)
- return -EBUSY;
-
if (sme->bssid)
ether_addr_copy(vif->bssid, sme->bssid);
else
@@ -634,13 +657,13 @@ qtnf_connect(struct wiphy *wiphy, struct net_device *dev,
ret = qtnf_cmd_send_connect(vif, sme);
if (ret) {
- pr_err("VIF%u.%u: failed to connect\n", vif->mac->macid,
- vif->vifid);
- return ret;
+ pr_err("VIF%u.%u: failed to connect\n",
+ vif->mac->macid, vif->vifid);
+ goto out;
}
- vif->sta_state = QTNF_STA_CONNECTING;
- return 0;
+out:
+ return ret;
}
static int
@@ -662,22 +685,18 @@ qtnf_disconnect(struct wiphy *wiphy, struct net_device *dev,
goto out;
}
- qtnf_scan_done(mac, true);
-
- if (vif->sta_state == QTNF_STA_DISCONNECTED)
- goto out;
-
ret = qtnf_cmd_send_disconnect(vif, reason_code);
- if (ret) {
- pr_err("VIF%u.%u: failed to disconnect\n", mac->macid,
- vif->vifid);
- goto out;
+ if (ret)
+ pr_err("VIF%u.%u: failed to disconnect\n",
+ mac->macid, vif->vifid);
+
+ if (vif->wdev.current_bss) {
+ netif_carrier_off(vif->netdev);
+ cfg80211_disconnected(vif->netdev, reason_code,
+ NULL, 0, true, GFP_KERNEL);
}
out:
- if (vif->sta_state == QTNF_STA_CONNECTING)
- vif->sta_state = QTNF_STA_DISCONNECTED;
-
return ret;
}
@@ -747,7 +766,6 @@ qtnf_dump_survey(struct wiphy *wiphy, struct net_device *dev,
default:
pr_debug("failed to get chan(%d) stats from card\n",
chan->hw_value);
- ret = -EINVAL;
break;
}
@@ -770,6 +788,7 @@ qtnf_get_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
ret = qtnf_cmd_get_channel(vif, chandef);
if (ret) {
pr_err("%s: failed to get channel: %d\n", ndev->name, ret);
+ ret = -ENODATA;
goto out;
}
@@ -779,6 +798,7 @@ qtnf_get_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
chandef->center_freq1, chandef->center_freq2,
chandef->width);
ret = -ENODATA;
+ goto out;
}
out:
@@ -848,10 +868,8 @@ static int qtnf_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
ret = qtnf_cmd_send_pm_set(vif, enabled ? QLINK_PM_AUTO_STANDBY :
QLINK_PM_OFF, timeout);
- if (ret) {
+ if (ret)
pr_err("%s: failed to set PM mode ret=%d\n", dev->name, ret);
- return ret;
- }
return ret;
}
@@ -971,9 +989,16 @@ static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy_in,
ret = qtnf_cmd_reg_notify(bus, req);
if (ret) {
- if (ret != -EOPNOTSUPP && ret != -EALREADY)
+ if (ret == -EOPNOTSUPP) {
+ pr_warn("reg update not supported\n");
+ } else if (ret == -EALREADY) {
+ pr_info("regulatory domain is already set to %c%c",
+ req->alpha2[0], req->alpha2[1]);
+ } else {
pr_err("failed to update reg domain to %c%c\n",
req->alpha2[0], req->alpha2[1]);
+ }
+
return;
}
@@ -1088,6 +1113,10 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
if (hw_info->hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD)
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD);
+ if (hw_info->hw_capab & QLINK_HW_CAPAB_SCAN_DWELL)
+ wiphy_ext_feature_set(wiphy,
+ NL80211_EXT_FEATURE_SET_SCAN_DWELL);
+
wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2;
@@ -1106,6 +1135,9 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
if (hw_info->hw_capab & QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR)
wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
+ if (!(hw_info->hw_capab & QLINK_HW_CAPAB_OBSS_SCAN))
+ wiphy->features |= NL80211_FEATURE_NEED_OBSS_SCAN;
+
#ifdef CONFIG_PM
if (macinfo->wowlan)
wiphy->wowlan = macinfo->wowlan;
@@ -1120,6 +1152,15 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
}
+ if (mac->macinfo.extended_capabilities_len) {
+ wiphy->extended_capabilities =
+ mac->macinfo.extended_capabilities;
+ wiphy->extended_capabilities_mask =
+ mac->macinfo.extended_capabilities_mask;
+ wiphy->extended_capabilities_len =
+ mac->macinfo.extended_capabilities_len;
+ }
+
strlcpy(wiphy->fw_version, hw_info->fw_version,
sizeof(wiphy->fw_version));
wiphy->hw_version = hw_info->hw_version;
@@ -1143,7 +1184,8 @@ void qtnf_netdev_updown(struct net_device *ndev, bool up)
struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
if (qtnf_cmd_send_updown_intf(vif, up))
- pr_err("failed to send up/down command to FW\n");
+ pr_err("failed to send %s command to VIF%u.%u\n",
+ up ? "UP" : "DOWN", vif->mac->macid, vif->vifid);
}
void qtnf_virtual_intf_cleanup(struct net_device *ndev)
@@ -1151,57 +1193,20 @@ void qtnf_virtual_intf_cleanup(struct net_device *ndev)
struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
struct qtnf_wmac *mac = wiphy_priv(vif->wdev.wiphy);
- if (vif->wdev.iftype == NL80211_IFTYPE_STATION) {
- switch (vif->sta_state) {
- case QTNF_STA_DISCONNECTED:
- break;
- case QTNF_STA_CONNECTING:
- cfg80211_connect_result(vif->netdev,
- vif->bssid, NULL, 0,
- NULL, 0,
- WLAN_STATUS_UNSPECIFIED_FAILURE,
- GFP_KERNEL);
- qtnf_disconnect(vif->wdev.wiphy, ndev,
- WLAN_REASON_DEAUTH_LEAVING);
- break;
- case QTNF_STA_CONNECTED:
- cfg80211_disconnected(vif->netdev,
- WLAN_REASON_DEAUTH_LEAVING,
- NULL, 0, 1, GFP_KERNEL);
- qtnf_disconnect(vif->wdev.wiphy, ndev,
- WLAN_REASON_DEAUTH_LEAVING);
- break;
- }
-
- vif->sta_state = QTNF_STA_DISCONNECTED;
- }
+ if (vif->wdev.iftype == NL80211_IFTYPE_STATION)
+ qtnf_disconnect(vif->wdev.wiphy, ndev,
+ WLAN_REASON_DEAUTH_LEAVING);
qtnf_scan_done(mac, true);
}
void qtnf_cfg80211_vif_reset(struct qtnf_vif *vif)
{
- if (vif->wdev.iftype == NL80211_IFTYPE_STATION) {
- switch (vif->sta_state) {
- case QTNF_STA_CONNECTING:
- cfg80211_connect_result(vif->netdev,
- vif->bssid, NULL, 0,
- NULL, 0,
- WLAN_STATUS_UNSPECIFIED_FAILURE,
- GFP_KERNEL);
- break;
- case QTNF_STA_CONNECTED:
- cfg80211_disconnected(vif->netdev,
- WLAN_REASON_DEAUTH_LEAVING,
- NULL, 0, 1, GFP_KERNEL);
- break;
- case QTNF_STA_DISCONNECTED:
- break;
- }
- }
+ if (vif->wdev.iftype == NL80211_IFTYPE_STATION)
+ cfg80211_disconnected(vif->netdev, WLAN_REASON_DEAUTH_LEAVING,
+ NULL, 0, 1, GFP_KERNEL);
cfg80211_shutdown_all_interfaces(vif->wdev.wiphy);
- vif->sta_state = QTNF_STA_DISCONNECTED;
}
void qtnf_band_init_rates(struct ieee80211_supported_band *band)
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
index ae9e77300533..659e7649fe22 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
@@ -80,15 +80,15 @@ static int qtnf_cmd_resp_result_decode(enum qlink_cmd_result qcode)
static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus,
struct sk_buff *cmd_skb,
struct sk_buff **response_skb,
- u16 *result_code,
size_t const_resp_size,
size_t *var_resp_size)
{
struct qlink_cmd *cmd;
- const struct qlink_resp *resp;
+ struct qlink_resp *resp = NULL;
struct sk_buff *resp_skb = NULL;
u16 cmd_id;
- u8 mac_id, vif_id;
+ u8 mac_id;
+ u8 vif_id;
int ret;
cmd = (struct qlink_cmd *)cmd_skb->data;
@@ -97,8 +97,11 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus,
vif_id = cmd->vifid;
cmd->mhdr.len = cpu_to_le16(cmd_skb->len);
- if (unlikely(bus->fw_state != QTNF_FW_STATE_ACTIVE &&
- le16_to_cpu(cmd->cmd_id) != QLINK_CMD_FW_INIT)) {
+ pr_debug("VIF%u.%u cmd=0x%.4X\n", mac_id, vif_id,
+ le16_to_cpu(cmd->cmd_id));
+
+ if (bus->fw_state != QTNF_FW_STATE_ACTIVE &&
+ le16_to_cpu(cmd->cmd_id) != QLINK_CMD_FW_INIT) {
pr_warn("VIF%u.%u: drop cmd 0x%.4X in fw state %d\n",
mac_id, vif_id, le16_to_cpu(cmd->cmd_id),
bus->fw_state);
@@ -106,24 +109,21 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus,
return -ENODEV;
}
- pr_debug("VIF%u.%u cmd=0x%.4X\n", mac_id, vif_id,
- le16_to_cpu(cmd->cmd_id));
-
ret = qtnf_trans_send_cmd_with_resp(bus, cmd_skb, &resp_skb);
+ if (ret)
+ goto out;
- if (unlikely(ret))
+ if (WARN_ON(!resp_skb || !resp_skb->data)) {
+ ret = -EFAULT;
goto out;
+ }
- resp = (const struct qlink_resp *)resp_skb->data;
+ resp = (struct qlink_resp *)resp_skb->data;
ret = qtnf_cmd_check_reply_header(resp, cmd_id, mac_id, vif_id,
const_resp_size);
-
- if (unlikely(ret))
+ if (ret)
goto out;
- if (likely(result_code))
- *result_code = le16_to_cpu(resp->result);
-
/* Return length of variable part of response */
if (response_skb && var_resp_size)
*var_resp_size = le16_to_cpu(resp->mhdr.len) - const_resp_size;
@@ -134,14 +134,18 @@ out:
else
consume_skb(resp_skb);
+ if (!ret && resp)
+ return qtnf_cmd_resp_result_decode(le16_to_cpu(resp->result));
+
+ pr_warn("VIF%u.%u: cmd 0x%.4X failed: %d\n",
+ mac_id, vif_id, le16_to_cpu(cmd->cmd_id), ret);
+
return ret;
}
-static inline int qtnf_cmd_send(struct qtnf_bus *bus,
- struct sk_buff *cmd_skb,
- u16 *result_code)
+static inline int qtnf_cmd_send(struct qtnf_bus *bus, struct sk_buff *cmd_skb)
{
- return qtnf_cmd_send_with_reply(bus, cmd_skb, NULL, result_code,
+ return qtnf_cmd_send_with_reply(bus, cmd_skb, NULL,
sizeof(struct qlink_resp), NULL);
}
@@ -228,7 +232,6 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
struct sk_buff *cmd_skb;
struct qlink_cmd_start_ap *cmd;
struct qlink_auth_encr *aen;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret;
int i;
@@ -329,30 +332,21 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
}
qtnf_bus_lock(vif->mac->bus);
-
- ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
-
- if (unlikely(ret))
- goto out;
-
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
- vif->vifid, res_code);
- ret = -EFAULT;
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+ if (ret)
goto out;
- }
netif_carrier_on(vif->netdev);
out:
qtnf_bus_unlock(vif->mac->bus);
+
return ret;
}
int qtnf_cmd_send_stop_ap(struct qtnf_vif *vif)
{
struct sk_buff *cmd_skb;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -362,23 +356,13 @@ int qtnf_cmd_send_stop_ap(struct qtnf_vif *vif)
return -ENOMEM;
qtnf_bus_lock(vif->mac->bus);
-
- ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
-
- if (unlikely(ret))
- goto out;
-
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
- vif->vifid, res_code);
- ret = -EFAULT;
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+ if (ret)
goto out;
- }
-
- netif_carrier_off(vif->netdev);
out:
qtnf_bus_unlock(vif->mac->bus);
+
return ret;
}
@@ -386,7 +370,6 @@ int qtnf_cmd_send_register_mgmt(struct qtnf_vif *vif, u16 frame_type, bool reg)
{
struct sk_buff *cmd_skb;
struct qlink_cmd_mgmt_frame_register *cmd;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -401,20 +384,13 @@ int qtnf_cmd_send_register_mgmt(struct qtnf_vif *vif, u16 frame_type, bool reg)
cmd->frame_type = cpu_to_le16(frame_type);
cmd->do_register = reg;
- ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
-
- if (unlikely(ret))
- goto out;
-
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
- vif->vifid, res_code);
- ret = -EFAULT;
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+ if (ret)
goto out;
- }
out:
qtnf_bus_unlock(vif->mac->bus);
+
return ret;
}
@@ -423,7 +399,6 @@ int qtnf_cmd_send_mgmt_frame(struct qtnf_vif *vif, u32 cookie, u16 flags,
{
struct sk_buff *cmd_skb;
struct qlink_cmd_mgmt_frame_tx *cmd;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret;
if (sizeof(*cmd) + len > QTNF_MAX_CMD_BUF_SIZE) {
@@ -448,20 +423,13 @@ int qtnf_cmd_send_mgmt_frame(struct qtnf_vif *vif, u32 cookie, u16 flags,
if (len && buf)
qtnf_cmd_skb_put_buffer(cmd_skb, buf, len);
- ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
-
- if (unlikely(ret))
- goto out;
-
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
- vif->vifid, res_code);
- ret = -EFAULT;
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+ if (ret)
goto out;
- }
out:
qtnf_bus_unlock(vif->mac->bus);
+
return ret;
}
@@ -469,7 +437,6 @@ int qtnf_cmd_send_mgmt_set_appie(struct qtnf_vif *vif, u8 frame_type,
const u8 *buf, size_t len)
{
struct sk_buff *cmd_skb;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret;
if (len > QTNF_MAX_CMD_BUF_SIZE) {
@@ -487,21 +454,13 @@ int qtnf_cmd_send_mgmt_set_appie(struct qtnf_vif *vif, u8 frame_type,
qtnf_cmd_tlv_ie_set_add(cmd_skb, frame_type, buf, len);
qtnf_bus_lock(vif->mac->bus);
-
- ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
-
- if (unlikely(ret))
- goto out;
-
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("VIF%u.%u frame %u: CMD failed: %u\n", vif->mac->macid,
- vif->vifid, frame_type, res_code);
- ret = -EFAULT;
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+ if (ret)
goto out;
- }
out:
qtnf_bus_unlock(vif->mac->bus);
+
return ret;
}
@@ -544,6 +503,9 @@ qtnf_sta_info_parse_rate(struct rate_info *rate_dst,
rate_dst->flags |= RATE_INFO_FLAGS_MCS;
else if (rate_src->flags & QLINK_STA_INFO_RATE_FLAG_VHT_MCS)
rate_dst->flags |= RATE_INFO_FLAGS_VHT_MCS;
+
+ if (rate_src->flags & QLINK_STA_INFO_RATE_FLAG_SHORT_GI)
+ rate_dst->flags |= RATE_INFO_FLAGS_SHORT_GI;
}
static void
@@ -729,8 +691,7 @@ int qtnf_cmd_get_sta_info(struct qtnf_vif *vif, const u8 *sta_mac,
struct sk_buff *cmd_skb, *resp_skb = NULL;
struct qlink_cmd_get_sta_info *cmd;
const struct qlink_resp_get_sta_info *resp;
- size_t var_resp_len;
- u16 res_code = QLINK_CMD_RESULT_OK;
+ size_t var_resp_len = 0;
int ret = 0;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -745,31 +706,13 @@ int qtnf_cmd_get_sta_info(struct qtnf_vif *vif, const u8 *sta_mac,
ether_addr_copy(cmd->sta_addr, sta_mac);
ret = qtnf_cmd_send_with_reply(vif->mac->bus, cmd_skb, &resp_skb,
- &res_code, sizeof(*resp),
- &var_resp_len);
-
- if (unlikely(ret))
- goto out;
-
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- switch (res_code) {
- case QLINK_CMD_RESULT_ENOTFOUND:
- pr_warn("VIF%u.%u: %pM STA not found\n",
- vif->mac->macid, vif->vifid, sta_mac);
- ret = -ENOENT;
- break;
- default:
- pr_err("VIF%u.%u: can't get info for %pM: %u\n",
- vif->mac->macid, vif->vifid, sta_mac, res_code);
- ret = -EFAULT;
- break;
- }
+ sizeof(*resp), &var_resp_len);
+ if (ret)
goto out;
- }
resp = (const struct qlink_resp_get_sta_info *)resp_skb->data;
- if (unlikely(!ether_addr_equal(sta_mac, resp->sta_addr))) {
+ if (!ether_addr_equal(sta_mac, resp->sta_addr)) {
pr_err("VIF%u.%u: wrong mac in reply: %pM != %pM\n",
vif->mac->macid, vif->vifid, resp->sta_addr, sta_mac);
ret = -EINVAL;
@@ -795,7 +738,6 @@ static int qtnf_cmd_send_add_change_intf(struct qtnf_vif *vif,
struct sk_buff *cmd_skb, *resp_skb = NULL;
struct qlink_cmd_manage_intf *cmd;
const struct qlink_resp_manage_intf *resp;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret = 0;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -828,17 +770,9 @@ static int qtnf_cmd_send_add_change_intf(struct qtnf_vif *vif,
eth_zero_addr(cmd->intf_info.mac_addr);
ret = qtnf_cmd_send_with_reply(vif->mac->bus, cmd_skb, &resp_skb,
- &res_code, sizeof(*resp), NULL);
-
- if (unlikely(ret))
- goto out;
-
- ret = qtnf_cmd_resp_result_decode(res_code);
- if (ret) {
- pr_err("VIF%u.%u: CMD %d failed: %u\n", vif->mac->macid,
- vif->vifid, cmd_type, res_code);
+ sizeof(*resp), NULL);
+ if (ret)
goto out;
- }
resp = (const struct qlink_resp_manage_intf *)resp_skb->data;
ether_addr_copy(vif->mac_addr, resp->intf_info.mac_addr);
@@ -868,7 +802,6 @@ int qtnf_cmd_send_del_intf(struct qtnf_vif *vif)
{
struct sk_buff *cmd_skb;
struct qlink_cmd_manage_intf *cmd;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret = 0;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -897,17 +830,9 @@ int qtnf_cmd_send_del_intf(struct qtnf_vif *vif)
eth_zero_addr(cmd->intf_info.mac_addr);
- ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
-
- if (unlikely(ret))
- goto out;
-
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
- vif->vifid, res_code);
- ret = -EFAULT;
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+ if (ret)
goto out;
- }
out:
qtnf_bus_unlock(vif->mac->bus);
@@ -1353,8 +1278,7 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
ext_capa_mask = NULL;
}
- kfree(mac->macinfo.extended_capabilities);
- kfree(mac->macinfo.extended_capabilities_mask);
+ qtnf_mac_ext_caps_free(mac);
mac->macinfo.extended_capabilities = ext_capa;
mac->macinfo.extended_capabilities_mask = ext_capa_mask;
mac->macinfo.extended_capabilities_len = ext_capa_len;
@@ -1731,8 +1655,7 @@ int qtnf_cmd_get_mac_info(struct qtnf_wmac *mac)
{
struct sk_buff *cmd_skb, *resp_skb = NULL;
const struct qlink_resp_get_mac_info *resp;
- size_t var_data_len;
- u16 res_code = QLINK_CMD_RESULT_OK;
+ size_t var_data_len = 0;
int ret = 0;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD,
@@ -1742,17 +1665,10 @@ int qtnf_cmd_get_mac_info(struct qtnf_wmac *mac)
return -ENOMEM;
qtnf_bus_lock(mac->bus);
-
- ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, &res_code,
+ ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb,
sizeof(*resp), &var_data_len);
- if (unlikely(ret))
- goto out;
-
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("MAC%u: CMD failed: %u\n", mac->macid, res_code);
- ret = -EFAULT;
+ if (ret)
goto out;
- }
resp = (const struct qlink_resp_get_mac_info *)resp_skb->data;
qtnf_cmd_resp_proc_mac_info(mac, resp);
@@ -1769,9 +1685,8 @@ int qtnf_cmd_get_hw_info(struct qtnf_bus *bus)
{
struct sk_buff *cmd_skb, *resp_skb = NULL;
const struct qlink_resp_get_hw_info *resp;
- u16 res_code = QLINK_CMD_RESULT_OK;
+ size_t info_len = 0;
int ret = 0;
- size_t info_len;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD,
QLINK_CMD_GET_HW_INFO,
@@ -1780,18 +1695,10 @@ int qtnf_cmd_get_hw_info(struct qtnf_bus *bus)
return -ENOMEM;
qtnf_bus_lock(bus);
-
- ret = qtnf_cmd_send_with_reply(bus, cmd_skb, &resp_skb, &res_code,
+ ret = qtnf_cmd_send_with_reply(bus, cmd_skb, &resp_skb,
sizeof(*resp), &info_len);
-
- if (unlikely(ret))
- goto out;
-
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("cmd exec failed: 0x%.4X\n", res_code);
- ret = -EFAULT;
+ if (ret)
goto out;
- }
resp = (const struct qlink_resp_get_hw_info *)resp_skb->data;
ret = qtnf_cmd_resp_proc_hw_info(bus, resp, info_len);
@@ -1807,10 +1714,9 @@ int qtnf_cmd_band_info_get(struct qtnf_wmac *mac,
struct ieee80211_supported_band *band)
{
struct sk_buff *cmd_skb, *resp_skb = NULL;
- size_t info_len;
struct qlink_cmd_band_info_get *cmd;
struct qlink_resp_band_info_get *resp;
- u16 res_code = QLINK_CMD_RESULT_OK;
+ size_t info_len = 0;
int ret = 0;
u8 qband;
@@ -1838,18 +1744,10 @@ int qtnf_cmd_band_info_get(struct qtnf_wmac *mac,
cmd->band = qband;
qtnf_bus_lock(mac->bus);
-
- ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, &res_code,
+ ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb,
sizeof(*resp), &info_len);
-
- if (unlikely(ret))
- goto out;
-
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("MAC%u: CMD failed: %u\n", mac->macid, res_code);
- ret = -EFAULT;
+ if (ret)
goto out;
- }
resp = (struct qlink_resp_band_info_get *)resp_skb->data;
if (resp->band != qband) {
@@ -1871,9 +1769,8 @@ out:
int qtnf_cmd_send_get_phy_params(struct qtnf_wmac *mac)
{
struct sk_buff *cmd_skb, *resp_skb = NULL;
- size_t response_size;
struct qlink_resp_phy_params *resp;
- u16 res_code = QLINK_CMD_RESULT_OK;
+ size_t response_size = 0;
int ret = 0;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, 0,
@@ -1883,18 +1780,10 @@ int qtnf_cmd_send_get_phy_params(struct qtnf_wmac *mac)
return -ENOMEM;
qtnf_bus_lock(mac->bus);
-
- ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, &res_code,
+ ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb,
sizeof(*resp), &response_size);
-
- if (unlikely(ret))
- goto out;
-
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("MAC%u: CMD failed: %u\n", mac->macid, res_code);
- ret = -EFAULT;
+ if (ret)
goto out;
- }
resp = (struct qlink_resp_phy_params *)resp_skb->data;
ret = qtnf_cmd_resp_proc_phy_params(mac, resp->info, response_size);
@@ -1910,7 +1799,6 @@ int qtnf_cmd_send_update_phy_params(struct qtnf_wmac *mac, u32 changed)
{
struct wiphy *wiphy = priv_to_wiphy(mac);
struct sk_buff *cmd_skb;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret = 0;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, 0,
@@ -1931,26 +1819,19 @@ int qtnf_cmd_send_update_phy_params(struct qtnf_wmac *mac, u32 changed)
qtnf_cmd_skb_put_tlv_u8(cmd_skb, QTN_TLV_ID_COVERAGE_CLASS,
wiphy->coverage_class);
- ret = qtnf_cmd_send(mac->bus, cmd_skb, &res_code);
-
- if (unlikely(ret))
- goto out;
-
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("MAC%u: CMD failed: %u\n", mac->macid, res_code);
- ret = -EFAULT;
+ ret = qtnf_cmd_send(mac->bus, cmd_skb);
+ if (ret)
goto out;
- }
out:
qtnf_bus_unlock(mac->bus);
+
return ret;
}
int qtnf_cmd_send_init_fw(struct qtnf_bus *bus)
{
struct sk_buff *cmd_skb;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret = 0;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD,
@@ -1960,20 +1841,13 @@ int qtnf_cmd_send_init_fw(struct qtnf_bus *bus)
return -ENOMEM;
qtnf_bus_lock(bus);
-
- ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
-
- if (unlikely(ret))
- goto out;
-
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("cmd exec failed: 0x%.4X\n", res_code);
- ret = -EFAULT;
+ ret = qtnf_cmd_send(bus, cmd_skb);
+ if (ret)
goto out;
- }
out:
qtnf_bus_unlock(bus);
+
return ret;
}
@@ -1988,9 +1862,7 @@ void qtnf_cmd_send_deinit_fw(struct qtnf_bus *bus)
return;
qtnf_bus_lock(bus);
-
- qtnf_cmd_send(bus, cmd_skb, NULL);
-
+ qtnf_cmd_send(bus, cmd_skb);
qtnf_bus_unlock(bus);
}
@@ -1999,7 +1871,6 @@ int qtnf_cmd_send_add_key(struct qtnf_vif *vif, u8 key_index, bool pairwise,
{
struct sk_buff *cmd_skb;
struct qlink_cmd_add_key *cmd;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret = 0;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -2031,19 +1902,13 @@ int qtnf_cmd_send_add_key(struct qtnf_vif *vif, u8 key_index, bool pairwise,
params->seq,
params->seq_len);
- ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
- if (unlikely(ret))
- goto out;
-
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("VIF%u.%u: CMD failed: %u\n",
- vif->mac->macid, vif->vifid, res_code);
- ret = -EFAULT;
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+ if (ret)
goto out;
- }
out:
qtnf_bus_unlock(vif->mac->bus);
+
return ret;
}
@@ -2052,7 +1917,6 @@ int qtnf_cmd_send_del_key(struct qtnf_vif *vif, u8 key_index, bool pairwise,
{
struct sk_buff *cmd_skb;
struct qlink_cmd_del_key *cmd;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret = 0;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -2072,19 +1936,14 @@ int qtnf_cmd_send_del_key(struct qtnf_vif *vif, u8 key_index, bool pairwise,
cmd->key_index = key_index;
cmd->pairwise = pairwise;
- ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
- if (unlikely(ret))
- goto out;
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("VIF%u.%u: CMD failed: %u\n",
- vif->mac->macid, vif->vifid, res_code);
- ret = -EFAULT;
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+ if (ret)
goto out;
- }
out:
qtnf_bus_unlock(vif->mac->bus);
+
return ret;
}
@@ -2093,7 +1952,6 @@ int qtnf_cmd_send_set_default_key(struct qtnf_vif *vif, u8 key_index,
{
struct sk_buff *cmd_skb;
struct qlink_cmd_set_def_key *cmd;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret = 0;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -2108,19 +1966,14 @@ int qtnf_cmd_send_set_default_key(struct qtnf_vif *vif, u8 key_index,
cmd->key_index = key_index;
cmd->unicast = unicast;
cmd->multicast = multicast;
- ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
- if (unlikely(ret))
- goto out;
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
- vif->vifid, res_code);
- ret = -EFAULT;
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+ if (ret)
goto out;
- }
out:
qtnf_bus_unlock(vif->mac->bus);
+
return ret;
}
@@ -2128,7 +1981,6 @@ int qtnf_cmd_send_set_default_mgmt_key(struct qtnf_vif *vif, u8 key_index)
{
struct sk_buff *cmd_skb;
struct qlink_cmd_set_def_mgmt_key *cmd;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret = 0;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -2141,19 +1993,14 @@ int qtnf_cmd_send_set_default_mgmt_key(struct qtnf_vif *vif, u8 key_index)
cmd = (struct qlink_cmd_set_def_mgmt_key *)cmd_skb->data;
cmd->key_index = key_index;
- ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
- if (unlikely(ret))
- goto out;
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
- vif->vifid, res_code);
- ret = -EFAULT;
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+ if (ret)
goto out;
- }
out:
qtnf_bus_unlock(vif->mac->bus);
+
return ret;
}
@@ -2183,7 +2030,6 @@ int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac,
{
struct sk_buff *cmd_skb;
struct qlink_cmd_change_sta *cmd;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret = 0;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -2214,19 +2060,13 @@ int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac,
goto out;
}
- ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
- if (unlikely(ret))
- goto out;
-
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
- vif->vifid, res_code);
- ret = -EFAULT;
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+ if (ret)
goto out;
- }
out:
qtnf_bus_unlock(vif->mac->bus);
+
return ret;
}
@@ -2235,7 +2075,6 @@ int qtnf_cmd_send_del_sta(struct qtnf_vif *vif,
{
struct sk_buff *cmd_skb;
struct qlink_cmd_del_sta *cmd;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret = 0;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -2256,19 +2095,13 @@ int qtnf_cmd_send_del_sta(struct qtnf_vif *vif,
cmd->subtype = params->subtype;
cmd->reason_code = cpu_to_le16(params->reason_code);
- ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
- if (unlikely(ret))
- goto out;
-
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
- vif->vifid, res_code);
- ret = -EFAULT;
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+ if (ret)
goto out;
- }
out:
qtnf_bus_unlock(vif->mac->bus);
+
return ret;
}
@@ -2312,7 +2145,6 @@ static void qtnf_cmd_randmac_tlv_add(struct sk_buff *cmd_skb,
int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
{
struct sk_buff *cmd_skb;
- u16 res_code = QLINK_CMD_RESULT_OK;
struct ieee80211_channel *sc;
struct cfg80211_scan_request *scan_req = mac->scan_req;
int n_channels;
@@ -2370,20 +2202,28 @@ int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
scan_req->mac_addr_mask);
}
- ret = qtnf_cmd_send(mac->bus, cmd_skb, &res_code);
+ if (scan_req->flags & NL80211_SCAN_FLAG_FLUSH) {
+ pr_debug("MAC%u: flush cache before scan\n", mac->macid);
- if (unlikely(ret))
- goto out;
+ qtnf_cmd_skb_put_tlv_tag(cmd_skb, QTN_TLV_ID_SCAN_FLUSH);
+ }
- pr_debug("MAC%u: scan started\n", mac->macid);
+ if (scan_req->duration) {
+ pr_debug("MAC%u: %s scan duration %u\n", mac->macid,
+ scan_req->duration_mandatory ? "mandatory" : "max",
+ scan_req->duration);
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("MAC%u: CMD failed: %u\n", mac->macid, res_code);
- ret = -EFAULT;
- goto out;
+ qtnf_cmd_skb_put_tlv_u16(cmd_skb, QTN_TLV_ID_SCAN_DWELL,
+ scan_req->duration);
}
+
+ ret = qtnf_cmd_send(mac->bus, cmd_skb);
+ if (ret)
+ goto out;
+
out:
qtnf_bus_unlock(mac->bus);
+
return ret;
}
@@ -2393,7 +2233,6 @@ int qtnf_cmd_send_connect(struct qtnf_vif *vif,
struct sk_buff *cmd_skb;
struct qlink_cmd_connect *cmd;
struct qlink_auth_encr *aen;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret;
int i;
u32 connect_flags = 0;
@@ -2474,20 +2313,13 @@ int qtnf_cmd_send_connect(struct qtnf_vif *vif,
qtnf_cmd_channel_tlv_add(cmd_skb, sme->channel);
qtnf_bus_lock(vif->mac->bus);
-
- ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
-
- if (unlikely(ret))
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+ if (ret)
goto out;
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
- vif->vifid, res_code);
- ret = -EFAULT;
- goto out;
- }
out:
qtnf_bus_unlock(vif->mac->bus);
+
return ret;
}
@@ -2495,7 +2327,6 @@ int qtnf_cmd_send_disconnect(struct qtnf_vif *vif, u16 reason_code)
{
struct sk_buff *cmd_skb;
struct qlink_cmd_disconnect *cmd;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -2509,19 +2340,13 @@ int qtnf_cmd_send_disconnect(struct qtnf_vif *vif, u16 reason_code)
cmd = (struct qlink_cmd_disconnect *)cmd_skb->data;
cmd->reason = cpu_to_le16(reason_code);
- ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
-
- if (unlikely(ret))
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+ if (ret)
goto out;
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
- vif->vifid, res_code);
- ret = -EFAULT;
- goto out;
- }
out:
qtnf_bus_unlock(vif->mac->bus);
+
return ret;
}
@@ -2529,7 +2354,6 @@ int qtnf_cmd_send_updown_intf(struct qtnf_vif *vif, bool up)
{
struct sk_buff *cmd_skb;
struct qlink_cmd_updown *cmd;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -2542,20 +2366,13 @@ int qtnf_cmd_send_updown_intf(struct qtnf_vif *vif, bool up)
cmd->if_up = !!up;
qtnf_bus_lock(vif->mac->bus);
-
- ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
-
- if (unlikely(ret))
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+ if (ret)
goto out;
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
- vif->vifid, res_code);
- ret = -EFAULT;
- goto out;
- }
out:
qtnf_bus_unlock(vif->mac->bus);
+
return ret;
}
@@ -2563,7 +2380,6 @@ int qtnf_cmd_reg_notify(struct qtnf_bus *bus, struct regulatory_request *req)
{
struct sk_buff *cmd_skb;
int ret;
- u16 res_code;
struct qlink_cmd_reg_notify *cmd;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD,
@@ -2604,29 +2420,10 @@ int qtnf_cmd_reg_notify(struct qtnf_bus *bus, struct regulatory_request *req)
}
qtnf_bus_lock(bus);
-
- ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
+ ret = qtnf_cmd_send(bus, cmd_skb);
if (ret)
goto out;
- switch (res_code) {
- case QLINK_CMD_RESULT_ENOTSUPP:
- pr_warn("reg update not supported\n");
- ret = -EOPNOTSUPP;
- break;
- case QLINK_CMD_RESULT_EALREADY:
- pr_info("regulatory domain is already set to %c%c",
- req->alpha2[0], req->alpha2[1]);
- ret = -EALREADY;
- break;
- case QLINK_CMD_RESULT_OK:
- ret = 0;
- break;
- default:
- ret = -EFAULT;
- break;
- }
-
out:
qtnf_bus_unlock(bus);
@@ -2639,8 +2436,7 @@ int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel,
struct sk_buff *cmd_skb, *resp_skb = NULL;
struct qlink_cmd_get_chan_stats *cmd;
struct qlink_resp_get_chan_stats *resp;
- size_t var_data_len;
- u16 res_code = QLINK_CMD_RESULT_OK;
+ size_t var_data_len = 0;
int ret = 0;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD,
@@ -2654,25 +2450,10 @@ int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel,
cmd = (struct qlink_cmd_get_chan_stats *)cmd_skb->data;
cmd->channel = cpu_to_le16(channel);
- ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, &res_code,
+ ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb,
sizeof(*resp), &var_data_len);
- if (unlikely(ret)) {
- qtnf_bus_unlock(mac->bus);
- return ret;
- }
-
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- switch (res_code) {
- case QLINK_CMD_RESULT_ENOTFOUND:
- ret = -ENOENT;
- break;
- default:
- pr_err("cmd exec failed: 0x%.4X\n", res_code);
- ret = -EFAULT;
- break;
- }
+ if (ret)
goto out;
- }
resp = (struct qlink_resp_get_chan_stats *)resp_skb->data;
ret = qtnf_cmd_resp_proc_chan_stat_info(stats, resp->info,
@@ -2681,6 +2462,7 @@ int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel,
out:
qtnf_bus_unlock(mac->bus);
consume_skb(resp_skb);
+
return ret;
}
@@ -2690,7 +2472,6 @@ int qtnf_cmd_send_chan_switch(struct qtnf_vif *vif,
struct qtnf_wmac *mac = vif->mac;
struct qlink_cmd_chan_switch *cmd;
struct sk_buff *cmd_skb;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, vif->vifid,
@@ -2707,32 +2488,13 @@ int qtnf_cmd_send_chan_switch(struct qtnf_vif *vif,
cmd->block_tx = params->block_tx;
cmd->beacon_count = params->count;
- ret = qtnf_cmd_send(mac->bus, cmd_skb, &res_code);
-
- if (unlikely(ret))
+ ret = qtnf_cmd_send(mac->bus, cmd_skb);
+ if (ret)
goto out;
- switch (res_code) {
- case QLINK_CMD_RESULT_OK:
- ret = 0;
- break;
- case QLINK_CMD_RESULT_ENOTFOUND:
- ret = -ENOENT;
- break;
- case QLINK_CMD_RESULT_ENOTSUPP:
- ret = -EOPNOTSUPP;
- break;
- case QLINK_CMD_RESULT_EALREADY:
- ret = -EALREADY;
- break;
- case QLINK_CMD_RESULT_INVALID:
- default:
- ret = -EFAULT;
- break;
- }
-
out:
qtnf_bus_unlock(mac->bus);
+
return ret;
}
@@ -2742,7 +2504,6 @@ int qtnf_cmd_get_channel(struct qtnf_vif *vif, struct cfg80211_chan_def *chdef)
const struct qlink_resp_channel_get *resp;
struct sk_buff *cmd_skb;
struct sk_buff *resp_skb = NULL;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -2752,25 +2513,18 @@ int qtnf_cmd_get_channel(struct qtnf_vif *vif, struct cfg80211_chan_def *chdef)
return -ENOMEM;
qtnf_bus_lock(bus);
-
- ret = qtnf_cmd_send_with_reply(bus, cmd_skb, &resp_skb, &res_code,
+ ret = qtnf_cmd_send_with_reply(bus, cmd_skb, &resp_skb,
sizeof(*resp), NULL);
-
- qtnf_bus_unlock(bus);
-
- if (unlikely(ret))
- goto out;
-
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- ret = -ENODATA;
+ if (ret)
goto out;
- }
resp = (const struct qlink_resp_channel_get *)resp_skb->data;
qlink_chandef_q2cfg(priv_to_wiphy(vif->mac), &resp->chan, chdef);
out:
+ qtnf_bus_unlock(bus);
consume_skb(resp_skb);
+
return ret;
}
@@ -2782,7 +2536,6 @@ int qtnf_cmd_start_cac(const struct qtnf_vif *vif,
struct sk_buff *cmd_skb;
struct qlink_cmd_start_cac *cmd;
int ret;
- u16 res_code;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
QLINK_CMD_START_CAC,
@@ -2795,19 +2548,12 @@ int qtnf_cmd_start_cac(const struct qtnf_vif *vif,
qlink_chandef_cfg2q(chdef, &cmd->chan);
qtnf_bus_lock(bus);
- ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
- qtnf_bus_unlock(bus);
-
+ ret = qtnf_cmd_send(bus, cmd_skb);
if (ret)
- return ret;
+ goto out;
- switch (res_code) {
- case QLINK_CMD_RESULT_OK:
- break;
- default:
- ret = -EOPNOTSUPP;
- break;
- }
+out:
+ qtnf_bus_unlock(bus);
return ret;
}
@@ -2819,7 +2565,6 @@ int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif,
struct sk_buff *cmd_skb;
struct qlink_tlv_hdr *tlv;
size_t acl_size = qtnf_cmd_acl_data_size(params);
- u16 res_code;
int ret;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -2834,22 +2579,12 @@ int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif,
qlink_acl_data_cfg2q(params, (struct qlink_acl_data *)tlv->val);
qtnf_bus_lock(bus);
- ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
- qtnf_bus_unlock(bus);
-
- if (unlikely(ret))
- return ret;
+ ret = qtnf_cmd_send(bus, cmd_skb);
+ if (ret)
+ goto out;
- switch (res_code) {
- case QLINK_CMD_RESULT_OK:
- break;
- case QLINK_CMD_RESULT_INVALID:
- ret = -EINVAL;
- break;
- default:
- ret = -EOPNOTSUPP;
- break;
- }
+out:
+ qtnf_bus_unlock(bus);
return ret;
}
@@ -2858,7 +2593,6 @@ int qtnf_cmd_send_pm_set(const struct qtnf_vif *vif, u8 pm_mode, int timeout)
{
struct qtnf_bus *bus = vif->mac->bus;
struct sk_buff *cmd_skb;
- u16 res_code = QLINK_CMD_RESULT_OK;
struct qlink_cmd_pm_set *cmd;
int ret = 0;
@@ -2873,18 +2607,13 @@ int qtnf_cmd_send_pm_set(const struct qtnf_vif *vif, u8 pm_mode, int timeout)
qtnf_bus_lock(bus);
- ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
-
- if (unlikely(ret))
+ ret = qtnf_cmd_send(bus, cmd_skb);
+ if (ret)
goto out;
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("cmd exec failed: 0x%.4X\n", res_code);
- ret = -EFAULT;
- }
-
out:
qtnf_bus_unlock(bus);
+
return ret;
}
@@ -2893,7 +2622,6 @@ int qtnf_cmd_send_wowlan_set(const struct qtnf_vif *vif,
{
struct qtnf_bus *bus = vif->mac->bus;
struct sk_buff *cmd_skb;
- u16 res_code = QLINK_CMD_RESULT_OK;
struct qlink_cmd_wowlan_set *cmd;
u32 triggers = 0;
int count = 0;
@@ -2929,16 +2657,10 @@ int qtnf_cmd_send_wowlan_set(const struct qtnf_vif *vif,
cmd->triggers = cpu_to_le32(triggers);
- ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
-
- if (unlikely(ret))
+ ret = qtnf_cmd_send(bus, cmd_skb);
+ if (ret)
goto out;
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("cmd exec failed: 0x%.4X\n", res_code);
- ret = -EFAULT;
- }
-
out:
qtnf_bus_unlock(bus);
return ret;
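
Note on the command-path hunks above: every per-caller check of res_code against the QLINK_CMD_RESULT_* values is dropped, which only holds together if qtnf_cmd_send() and qtnf_cmd_send_with_reply() now translate the firmware result code into a negative errno themselves. That helper is not part of the hunks shown here, so the following is only a sketch of the mapping implied by the deleted switch statements; the function name is a placeholder, not taken from the patch.

/* Sketch only: assumed translation of the qlink result code inside the
 * shared send helpers, mirroring the per-command switches removed above
 * (reg_notify, chan_stats, chan_switch, mac_acl).
 */
static int example_qlink_result_to_errno(u16 result)
{
	switch (result) {
	case QLINK_CMD_RESULT_OK:
		return 0;
	case QLINK_CMD_RESULT_INVALID:
		return -EINVAL;
	case QLINK_CMD_RESULT_ENOTSUPP:
		return -EOPNOTSUPP;
	case QLINK_CMD_RESULT_ENOTFOUND:
		return -ENOENT;
	case QLINK_CMD_RESULT_EALREADY:
		return -EALREADY;
	default:
		return -EFAULT;
	}
}
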
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
index 19abbc4e23e0..5d18a4a917c9 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
@@ -304,6 +304,19 @@ void qtnf_mac_iface_comb_free(struct qtnf_wmac *mac)
}
}
+void qtnf_mac_ext_caps_free(struct qtnf_wmac *mac)
+{
+ if (mac->macinfo.extended_capabilities_len) {
+ kfree(mac->macinfo.extended_capabilities);
+ mac->macinfo.extended_capabilities = NULL;
+
+ kfree(mac->macinfo.extended_capabilities_mask);
+ mac->macinfo.extended_capabilities_mask = NULL;
+
+ mac->macinfo.extended_capabilities_len = 0;
+ }
+}
+
static void qtnf_vif_reset_handler(struct work_struct *work)
{
struct qtnf_vif *vif = container_of(work, struct qtnf_vif, reset_work);
@@ -370,6 +383,7 @@ static void qtnf_mac_scan_timeout(struct work_struct *work)
static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
unsigned int macid)
{
+ struct qtnf_vif *vif;
struct wiphy *wiphy;
struct qtnf_wmac *mac;
unsigned int i;
@@ -382,18 +396,20 @@ static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
mac->macid = macid;
mac->bus = bus;
+ mutex_init(&mac->mac_lock);
+ INIT_DELAYED_WORK(&mac->scan_timeout, qtnf_mac_scan_timeout);
for (i = 0; i < QTNF_MAX_INTF; i++) {
- memset(&mac->iflist[i], 0, sizeof(struct qtnf_vif));
- mac->iflist[i].wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
- mac->iflist[i].mac = mac;
- mac->iflist[i].vifid = i;
- qtnf_sta_list_init(&mac->iflist[i].sta_list);
- mutex_init(&mac->mac_lock);
- INIT_DELAYED_WORK(&mac->scan_timeout, qtnf_mac_scan_timeout);
- mac->iflist[i].stats64 =
- netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!mac->iflist[i].stats64)
+ vif = &mac->iflist[i];
+
+ memset(vif, 0, sizeof(*vif));
+ vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
+ vif->mac = mac;
+ vif->vifid = i;
+ qtnf_sta_list_init(&vif->sta_list);
+
+ vif->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!vif->stats64)
pr_warn("VIF%u.%u: per cpu stats allocation failed\n",
macid, i);
}
@@ -493,8 +509,7 @@ static void qtnf_core_mac_detach(struct qtnf_bus *bus, unsigned int macid)
}
qtnf_mac_iface_comb_free(mac);
- kfree(mac->macinfo.extended_capabilities);
- kfree(mac->macinfo.extended_capabilities_mask);
+ qtnf_mac_ext_caps_free(mac);
kfree(mac->macinfo.wowlan);
wiphy_free(wiphy);
bus->mac[macid] = NULL;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h
index a1e338a1f055..293055049caa 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.h
@@ -64,12 +64,6 @@ struct qtnf_sta_list {
atomic_t size;
};
-enum qtnf_sta_state {
- QTNF_STA_DISCONNECTED,
- QTNF_STA_CONNECTING,
- QTNF_STA_CONNECTED
-};
-
struct qtnf_vif {
struct wireless_dev wdev;
u8 bssid[ETH_ALEN];
@@ -77,7 +71,6 @@ struct qtnf_vif {
u8 vifid;
u8 bss_priority;
u8 bss_status;
- enum qtnf_sta_state sta_state;
u16 mgmt_frames_bitmask;
struct net_device *netdev;
struct qtnf_wmac *mac;
@@ -151,6 +144,7 @@ struct qtnf_hw_info {
struct qtnf_vif *qtnf_mac_get_free_vif(struct qtnf_wmac *mac);
struct qtnf_vif *qtnf_mac_get_base_vif(struct qtnf_wmac *mac);
void qtnf_mac_iface_comb_free(struct qtnf_wmac *mac);
+void qtnf_mac_ext_caps_free(struct qtnf_wmac *mac);
struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus);
int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *priv,
const char *name, unsigned char name_assign_type);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
index 68da81bec4e9..8b542b431b75 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/event.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
@@ -171,24 +171,14 @@ qtnf_event_handle_bss_join(struct qtnf_vif *vif,
return -EPROTO;
}
- if (vif->sta_state != QTNF_STA_CONNECTING) {
- pr_err("VIF%u.%u: BSS_JOIN event when STA is not connecting\n",
- vif->mac->macid, vif->vifid);
- return -EPROTO;
- }
-
pr_debug("VIF%u.%u: BSSID:%pM\n", vif->mac->macid, vif->vifid,
join_info->bssid);
cfg80211_connect_result(vif->netdev, join_info->bssid, NULL, 0, NULL,
0, le16_to_cpu(join_info->status), GFP_KERNEL);
- if (le16_to_cpu(join_info->status) == WLAN_STATUS_SUCCESS) {
- vif->sta_state = QTNF_STA_CONNECTED;
+ if (le16_to_cpu(join_info->status) == WLAN_STATUS_SUCCESS)
netif_carrier_on(vif->netdev);
- } else {
- vif->sta_state = QTNF_STA_DISCONNECTED;
- }
return 0;
}
@@ -211,16 +201,10 @@ qtnf_event_handle_bss_leave(struct qtnf_vif *vif,
return -EPROTO;
}
- if (vif->sta_state != QTNF_STA_CONNECTED)
- pr_warn("VIF%u.%u: BSS_LEAVE event when STA is not connected\n",
- vif->mac->macid, vif->vifid);
-
pr_debug("VIF%u.%u: disconnected\n", vif->mac->macid, vif->vifid);
cfg80211_disconnected(vif->netdev, le16_to_cpu(leave_info->reason),
NULL, 0, 0, GFP_KERNEL);
-
- vif->sta_state = QTNF_STA_DISCONNECTED;
netif_carrier_off(vif->netdev);
return 0;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
index d1637f2354a6..c3a32effa6f0 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2018 Quantenna Communications, Inc. All rights reserved. */
+#include <linux/module.h>
#include <linux/printk.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
@@ -15,14 +16,37 @@
#include "shm_ipc.h"
#include "core.h"
#include "debug.h"
-
-#undef pr_fmt
-#define pr_fmt(fmt) "qtnf_pcie: %s: " fmt, __func__
+#include "util.h"
+#include "qtn_hw_ids.h"
#define QTN_SYSCTL_BAR 0
#define QTN_SHMEM_BAR 2
#define QTN_DMA_BAR 3
+#define QTN_PCIE_MAX_FW_BUFSZ (1 * 1024 * 1024)
+
+static bool use_msi = true;
+module_param(use_msi, bool, 0644);
+MODULE_PARM_DESC(use_msi, "set 0 to use legacy interrupt");
+
+static unsigned int tx_bd_size_param;
+module_param(tx_bd_size_param, uint, 0644);
+MODULE_PARM_DESC(tx_bd_size_param, "Tx descriptors queue size");
+
+static unsigned int rx_bd_size_param = 256;
+module_param(rx_bd_size_param, uint, 0644);
+MODULE_PARM_DESC(rx_bd_size_param, "Rx descriptors queue size");
+
+static u8 flashboot = 1;
+module_param(flashboot, byte, 0644);
+MODULE_PARM_DESC(flashboot, "set to 0 to use FW binary file on FS");
+
+static unsigned int fw_blksize_param = QTN_PCIE_MAX_FW_BUFSZ;
+module_param(fw_blksize_param, uint, 0644);
+MODULE_PARM_DESC(fw_blksize_param, "firmware loading block size in bytes");
+
+#define DRV_NAME "qtnfmac_pcie"
+
int qtnf_pcie_control_tx(struct qtnf_bus *bus, struct sk_buff *skb)
{
struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
@@ -58,7 +82,7 @@ int qtnf_pcie_alloc_skb_array(struct qtnf_pcie_bus_priv *priv)
return 0;
}
-void qtnf_pcie_bringup_fw_async(struct qtnf_bus *bus)
+static void qtnf_pcie_bringup_fw_async(struct qtnf_bus *bus)
{
struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
struct pci_dev *pdev = priv->pdev;
@@ -72,7 +96,7 @@ static int qtnf_dbg_mps_show(struct seq_file *s, void *data)
struct qtnf_bus *bus = dev_get_drvdata(s->private);
struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
- seq_printf(s, "%d\n", priv->mps);
+ seq_printf(s, "%d\n", pcie_get_mps(priv->pdev));
return 0;
}
@@ -104,8 +128,7 @@ static int qtnf_dbg_shm_stats(struct seq_file *s, void *data)
return 0;
}
-void qtnf_pcie_fw_boot_done(struct qtnf_bus *bus, bool boot_success,
- const char *drv_name)
+void qtnf_pcie_fw_boot_done(struct qtnf_bus *bus, bool boot_success)
{
struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
struct pci_dev *pdev = priv->pdev;
@@ -122,7 +145,7 @@ void qtnf_pcie_fw_boot_done(struct qtnf_bus *bus, bool boot_success,
}
if (boot_success) {
- qtnf_debugfs_init(bus, drv_name);
+ qtnf_debugfs_init(bus, DRV_NAME);
qtnf_debugfs_add_entry(bus, "mps", qtnf_dbg_mps_show);
qtnf_debugfs_add_entry(bus, "msi_enabled", qtnf_dbg_msi_show);
qtnf_debugfs_add_entry(bus, "shm_stats", qtnf_dbg_shm_stats);
@@ -133,9 +156,8 @@ void qtnf_pcie_fw_boot_done(struct qtnf_bus *bus, bool boot_success,
put_device(&pdev->dev);
}
-static void qtnf_tune_pcie_mps(struct qtnf_pcie_bus_priv *priv)
+static void qtnf_tune_pcie_mps(struct pci_dev *pdev)
{
- struct pci_dev *pdev = priv->pdev;
struct pci_dev *parent;
int mps_p, mps_o, mps_m, mps;
int ret;
@@ -163,12 +185,10 @@ static void qtnf_tune_pcie_mps(struct qtnf_pcie_bus_priv *priv)
if (ret) {
pr_err("failed to set mps to %d, keep using current %d\n",
mps, mps_o);
- priv->mps = mps_o;
return;
}
pr_debug("set mps to %d (was %d, max %d)\n", mps, mps_o, mps_m);
- priv->mps = mps;
}
static void qtnf_pcie_init_irq(struct qtnf_pcie_bus_priv *priv, bool use_msi)
@@ -194,20 +214,20 @@ static void qtnf_pcie_init_irq(struct qtnf_pcie_bus_priv *priv, bool use_msi)
}
}
-static void __iomem *qtnf_map_bar(struct qtnf_pcie_bus_priv *priv, u8 index)
+static void __iomem *qtnf_map_bar(struct pci_dev *pdev, u8 index)
{
void __iomem *vaddr;
dma_addr_t busaddr;
size_t len;
int ret;
- ret = pcim_iomap_regions(priv->pdev, 1 << index, "qtnfmac_pcie");
+ ret = pcim_iomap_regions(pdev, 1 << index, "qtnfmac_pcie");
if (ret)
return IOMEM_ERR_PTR(ret);
- busaddr = pci_resource_start(priv->pdev, index);
- len = pci_resource_len(priv->pdev, index);
- vaddr = pcim_iomap_table(priv->pdev)[index];
+ busaddr = pci_resource_start(pdev, index);
+ len = pci_resource_len(pdev, index);
+ vaddr = pcim_iomap_table(pdev)[index];
if (!vaddr)
return IOMEM_ERR_PTR(-ENOMEM);
@@ -217,32 +237,8 @@ static void __iomem *qtnf_map_bar(struct qtnf_pcie_bus_priv *priv, u8 index)
return vaddr;
}
-static int qtnf_pcie_init_memory(struct qtnf_pcie_bus_priv *priv)
-{
- int ret = -ENOMEM;
-
- priv->sysctl_bar = qtnf_map_bar(priv, QTN_SYSCTL_BAR);
- if (IS_ERR(priv->sysctl_bar)) {
- pr_err("failed to map BAR%u\n", QTN_SYSCTL_BAR);
- return ret;
- }
-
- priv->dmareg_bar = qtnf_map_bar(priv, QTN_DMA_BAR);
- if (IS_ERR(priv->dmareg_bar)) {
- pr_err("failed to map BAR%u\n", QTN_DMA_BAR);
- return ret;
- }
-
- priv->epmem_bar = qtnf_map_bar(priv, QTN_SHMEM_BAR);
- if (IS_ERR(priv->epmem_bar)) {
- pr_err("failed to map BAR%u\n", QTN_SHMEM_BAR);
- return ret;
- }
-
- return 0;
-}
-
-static void qtnf_pcie_control_rx_callback(void *arg, const u8 *buf, size_t len)
+static void qtnf_pcie_control_rx_callback(void *arg, const u8 __iomem *buf,
+ size_t len)
{
struct qtnf_pcie_bus_priv *priv = arg;
struct qtnf_bus *bus = pci_get_drvdata(priv->pdev);
@@ -260,7 +256,7 @@ static void qtnf_pcie_control_rx_callback(void *arg, const u8 *buf, size_t len)
return;
}
- skb_put_data(skb, buf, len);
+ memcpy_fromio(skb_put(skb, len), buf, len);
qtnf_trans_handle_rx_ctl_packet(bus, skb);
}
@@ -281,27 +277,83 @@ void qtnf_pcie_init_shm_ipc(struct qtnf_pcie_bus_priv *priv,
ipc_int, &rx_callback);
}
-int qtnf_pcie_probe(struct pci_dev *pdev, size_t priv_size,
- const struct qtnf_bus_ops *bus_ops, u64 dma_mask,
- bool use_msi)
+static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct qtnf_pcie_bus_priv *pcie_priv;
struct qtnf_bus *bus;
+ void __iomem *sysctl_bar;
+ void __iomem *epmem_bar;
+ void __iomem *dmareg_bar;
+ unsigned int chipid;
int ret;
- bus = devm_kzalloc(&pdev->dev,
- sizeof(*bus) + priv_size, GFP_KERNEL);
+ if (!pci_is_pcie(pdev)) {
+ pr_err("device %s is not PCI Express\n", pci_name(pdev));
+ return -EIO;
+ }
+
+ qtnf_tune_pcie_mps(pdev);
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ pr_err("failed to init PCI device %x\n", pdev->device);
+ return ret;
+ }
+
+ pci_set_master(pdev);
+
+ sysctl_bar = qtnf_map_bar(pdev, QTN_SYSCTL_BAR);
+ if (IS_ERR(sysctl_bar)) {
+ pr_err("failed to map BAR%u\n", QTN_SYSCTL_BAR);
+ return ret;
+ }
+
+ dmareg_bar = qtnf_map_bar(pdev, QTN_DMA_BAR);
+ if (IS_ERR(dmareg_bar)) {
+ pr_err("failed to map BAR%u\n", QTN_DMA_BAR);
+ return ret;
+ }
+
+ epmem_bar = qtnf_map_bar(pdev, QTN_SHMEM_BAR);
+ if (IS_ERR(epmem_bar)) {
+ pr_err("failed to map BAR%u\n", QTN_SHMEM_BAR);
+ return ret;
+ }
+
+ chipid = qtnf_chip_id_get(sysctl_bar);
+
+ pr_info("identified device: %s\n", qtnf_chipid_to_string(chipid));
+
+ switch (chipid) {
+ case QTN_CHIP_ID_PEARL:
+ case QTN_CHIP_ID_PEARL_B:
+ case QTN_CHIP_ID_PEARL_C:
+ bus = qtnf_pcie_pearl_alloc(pdev);
+ break;
+ case QTN_CHIP_ID_TOPAZ:
+ bus = qtnf_pcie_topaz_alloc(pdev);
+ break;
+ default:
+ pr_err("unsupported chip ID 0x%x\n", chipid);
+ return -ENOTSUPP;
+ }
+
if (!bus)
return -ENOMEM;
pcie_priv = get_bus_priv(bus);
-
pci_set_drvdata(pdev, bus);
- bus->bus_ops = bus_ops;
bus->dev = &pdev->dev;
bus->fw_state = QTNF_FW_STATE_RESET;
pcie_priv->pdev = pdev;
pcie_priv->tx_stopped = 0;
+ pcie_priv->rx_bd_num = rx_bd_size_param;
+ pcie_priv->flashboot = flashboot;
+
+ if (fw_blksize_param > QTN_PCIE_MAX_FW_BUFSZ)
+ pcie_priv->fw_blksize = QTN_PCIE_MAX_FW_BUFSZ;
+ else
+ pcie_priv->fw_blksize = fw_blksize_param;
mutex_init(&bus->bus_lock);
spin_lock_init(&pcie_priv->tx_lock);
@@ -316,53 +368,35 @@ int qtnf_pcie_probe(struct pci_dev *pdev, size_t priv_size,
pcie_priv->workqueue = create_singlethread_workqueue("QTNF_PCIE");
if (!pcie_priv->workqueue) {
pr_err("failed to alloc bus workqueue\n");
- ret = -ENODEV;
- goto err_init;
- }
-
- init_dummy_netdev(&bus->mux_dev);
-
- if (!pci_is_pcie(pdev)) {
- pr_err("device %s is not PCI Express\n", pci_name(pdev));
- ret = -EIO;
- goto err_base;
- }
-
- qtnf_tune_pcie_mps(pcie_priv);
-
- ret = pcim_enable_device(pdev);
- if (ret) {
- pr_err("failed to init PCI device %x\n", pdev->device);
- goto err_base;
- } else {
- pr_debug("successful init of PCI device %x\n", pdev->device);
+ return -ENODEV;
}
- ret = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
+ ret = dma_set_mask_and_coherent(&pdev->dev,
+ pcie_priv->dma_mask_get_cb());
if (ret) {
- pr_err("PCIE DMA coherent mask init failed\n");
- goto err_base;
+ pr_err("PCIE DMA coherent mask init failed 0x%llx\n",
+ pcie_priv->dma_mask_get_cb());
+ goto error;
}
- pci_set_master(pdev);
+ init_dummy_netdev(&bus->mux_dev);
qtnf_pcie_init_irq(pcie_priv, use_msi);
-
- ret = qtnf_pcie_init_memory(pcie_priv);
- if (ret < 0) {
- pr_err("PCIE memory init failed\n");
- goto err_base;
- }
-
+ pcie_priv->sysctl_bar = sysctl_bar;
+ pcie_priv->dmareg_bar = dmareg_bar;
+ pcie_priv->epmem_bar = epmem_bar;
pci_save_state(pdev);
+ ret = pcie_priv->probe_cb(bus, tx_bd_size_param);
+ if (ret)
+ goto error;
+
+ qtnf_pcie_bringup_fw_async(bus);
return 0;
-err_base:
+error:
flush_workqueue(pcie_priv->workqueue);
destroy_workqueue(pcie_priv->workqueue);
-err_init:
pci_set_drvdata(pdev, NULL);
-
return ret;
}
@@ -372,8 +406,17 @@ static void qtnf_pcie_free_shm_ipc(struct qtnf_pcie_bus_priv *priv)
qtnf_shm_ipc_free(&priv->shm_ipc_ep_out);
}
-void qtnf_pcie_remove(struct qtnf_bus *bus, struct qtnf_pcie_bus_priv *priv)
+static void qtnf_pcie_remove(struct pci_dev *dev)
{
+ struct qtnf_pcie_bus_priv *priv;
+ struct qtnf_bus *bus;
+
+ bus = pci_get_drvdata(dev);
+ if (!bus)
+ return;
+
+ priv = get_bus_priv(bus);
+
cancel_work_sync(&bus->fw_work);
if (bus->fw_state == QTNF_FW_STATE_ACTIVE ||
@@ -387,5 +430,77 @@ void qtnf_pcie_remove(struct qtnf_bus *bus, struct qtnf_pcie_bus_priv *priv)
qtnf_pcie_free_shm_ipc(priv);
qtnf_debugfs_remove(bus);
+ priv->remove_cb(bus);
pci_set_drvdata(priv->pdev, NULL);
}
+
+#ifdef CONFIG_PM_SLEEP
+static int qtnf_pcie_suspend(struct device *dev)
+{
+ struct qtnf_pcie_bus_priv *priv;
+ struct qtnf_bus *bus;
+
+ bus = pci_get_drvdata(to_pci_dev(dev));
+ if (!bus)
+ return -EFAULT;
+
+ priv = get_bus_priv(bus);
+ return priv->suspend_cb(bus);
+}
+
+static int qtnf_pcie_resume(struct device *dev)
+{
+ struct qtnf_pcie_bus_priv *priv;
+ struct qtnf_bus *bus;
+
+ bus = pci_get_drvdata(to_pci_dev(dev));
+ if (!bus)
+ return -EFAULT;
+
+ priv = get_bus_priv(bus);
+ return priv->resume_cb(bus);
+}
+
+/* Power Management Hooks */
+static SIMPLE_DEV_PM_OPS(qtnf_pcie_pm_ops, qtnf_pcie_suspend,
+ qtnf_pcie_resume);
+#endif
+
+static const struct pci_device_id qtnf_pcie_devid_table[] = {
+ {
+ PCIE_VENDOR_ID_QUANTENNA, PCIE_DEVICE_ID_QSR,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ },
+ { },
+};
+
+MODULE_DEVICE_TABLE(pci, qtnf_pcie_devid_table);
+
+static struct pci_driver qtnf_pcie_drv_data = {
+ .name = DRV_NAME,
+ .id_table = qtnf_pcie_devid_table,
+ .probe = qtnf_pcie_probe,
+ .remove = qtnf_pcie_remove,
+#ifdef CONFIG_PM_SLEEP
+ .driver = {
+ .pm = &qtnf_pcie_pm_ops,
+ },
+#endif
+};
+
+static int __init qtnf_pcie_register(void)
+{
+ return pci_register_driver(&qtnf_pcie_drv_data);
+}
+
+static void __exit qtnf_pcie_exit(void)
+{
+ pci_unregister_driver(&qtnf_pcie_drv_data);
+}
+
+module_init(qtnf_pcie_register);
+module_exit(qtnf_pcie_exit);
+
+MODULE_AUTHOR("Quantenna Communications");
+MODULE_DESCRIPTION("Quantenna PCIe bus driver for 802.11 wireless LAN.");
+MODULE_LICENSE("GPL");
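
The generic probe above no longer takes bus_ops/dma_mask arguments; it dispatches on the chip ID to a per-chip allocator and then drives everything through the probe_cb/remove_cb/suspend_cb/resume_cb/dma_mask_get_cb hooks added to struct qtnf_pcie_bus_priv in the next file. As a rough sketch of the contract a chip backend is expected to satisfy (the Pearl backend later in this patch follows this shape; every name with an "example_" prefix is a placeholder, not part of the patch):

/* Illustrative sketch only: minimal backend wiring for the common PCIe layer.
 * When CONFIG_PM_SLEEP is enabled, suspend_cb/resume_cb should be filled in
 * the same way, as the Pearl code below does.
 */
static int example_probe(struct qtnf_bus *bus, unsigned int tx_bd_size)
{
	/* chip-specific init: map state, size DMA rings from tx_bd_size, ... */
	return 0;
}

static void example_remove(struct qtnf_bus *bus)
{
	/* undo whatever example_probe() set up */
}

static u64 example_dma_mask_get(void)
{
	return DMA_BIT_MASK(32);
}

struct qtnf_bus *qtnf_pcie_example_alloc(struct pci_dev *pdev)
{
	struct qtnf_bus *bus;
	struct qtnf_pcie_bus_priv *priv;

	bus = devm_kzalloc(&pdev->dev, sizeof(*bus) + sizeof(*priv),
			   GFP_KERNEL);
	if (!bus)
		return NULL;

	priv = get_bus_priv(bus);
	priv->probe_cb = example_probe;
	priv->remove_cb = example_remove;
	priv->dma_mask_get_cb = example_dma_mask_get;

	return bus;
}
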
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h
index 5c70fb4c0f92..bbc074e1f34d 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h
@@ -23,9 +23,14 @@
struct qtnf_pcie_bus_priv {
struct pci_dev *pdev;
+ int (*probe_cb)(struct qtnf_bus *bus, unsigned int tx_bd_size);
+ void (*remove_cb)(struct qtnf_bus *bus);
+ int (*suspend_cb)(struct qtnf_bus *bus);
+ int (*resume_cb)(struct qtnf_bus *bus);
+ u64 (*dma_mask_get_cb)(void);
+
spinlock_t tx_reclaim_lock;
spinlock_t tx_lock;
- int mps;
struct workqueue_struct *workqueue;
struct tasklet_struct reclaim_tq;
@@ -43,6 +48,8 @@ struct qtnf_pcie_bus_priv {
struct sk_buff **tx_skb;
struct sk_buff **rx_skb;
+ unsigned int fw_blksize;
+
u32 rx_bd_w_index;
u32 rx_bd_r_index;
@@ -58,21 +65,18 @@ struct qtnf_pcie_bus_priv {
u8 msi_enabled;
u8 tx_stopped;
+ bool flashboot;
};
int qtnf_pcie_control_tx(struct qtnf_bus *bus, struct sk_buff *skb);
int qtnf_pcie_alloc_skb_array(struct qtnf_pcie_bus_priv *priv);
-void qtnf_pcie_bringup_fw_async(struct qtnf_bus *bus);
-void qtnf_pcie_fw_boot_done(struct qtnf_bus *bus, bool boot_success,
- const char *drv_name);
+void qtnf_pcie_fw_boot_done(struct qtnf_bus *bus, bool boot_success);
void qtnf_pcie_init_shm_ipc(struct qtnf_pcie_bus_priv *priv,
struct qtnf_shm_ipc_region __iomem *ipc_tx_reg,
struct qtnf_shm_ipc_region __iomem *ipc_rx_reg,
const struct qtnf_shm_ipc_int *ipc_int);
-int qtnf_pcie_probe(struct pci_dev *pdev, size_t priv_size,
- const struct qtnf_bus_ops *bus_ops, u64 dma_mask,
- bool use_msi);
-void qtnf_pcie_remove(struct qtnf_bus *bus, struct qtnf_pcie_bus_priv *priv);
+struct qtnf_bus *qtnf_pcie_pearl_alloc(struct pci_dev *pdev);
+struct qtnf_bus *qtnf_pcie_topaz_alloc(struct pci_dev *pdev);
static inline void qtnf_non_posted_write(u32 val, void __iomem *basereg)
{
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
index 5aca12a51fe3..1f5facbb8905 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
@@ -1,21 +1,7 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2018 Quantenna Communications */
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
@@ -37,23 +23,7 @@
#include "shm_ipc.h"
#include "debug.h"
-static bool use_msi = true;
-module_param(use_msi, bool, 0644);
-MODULE_PARM_DESC(use_msi, "set 0 to use legacy interrupt");
-
-static unsigned int tx_bd_size_param = 32;
-module_param(tx_bd_size_param, uint, 0644);
-MODULE_PARM_DESC(tx_bd_size_param, "Tx descriptors queue size, power of two");
-
-static unsigned int rx_bd_size_param = 256;
-module_param(rx_bd_size_param, uint, 0644);
-MODULE_PARM_DESC(rx_bd_size_param, "Rx descriptors queue size, power of two");
-
-static u8 flashboot = 1;
-module_param(flashboot, byte, 0644);
-MODULE_PARM_DESC(flashboot, "set to 0 to use FW binary file on FS");
-
-#define DRV_NAME "qtnfmac_pearl_pcie"
+#define PEARL_TX_BD_SIZE_DEFAULT 32
struct qtnf_pearl_bda {
__le16 bda_len;
@@ -428,30 +398,28 @@ static int pearl_hhbm_init(struct qtnf_pcie_pearl_state *ps)
return 0;
}
-static int qtnf_pcie_pearl_init_xfer(struct qtnf_pcie_pearl_state *ps)
+static int qtnf_pcie_pearl_init_xfer(struct qtnf_pcie_pearl_state *ps,
+ unsigned int tx_bd_size)
{
struct qtnf_pcie_bus_priv *priv = &ps->base;
int ret;
u32 val;
- priv->tx_bd_num = tx_bd_size_param;
- priv->rx_bd_num = rx_bd_size_param;
- priv->rx_bd_w_index = 0;
- priv->rx_bd_r_index = 0;
+ if (tx_bd_size == 0)
+ tx_bd_size = PEARL_TX_BD_SIZE_DEFAULT;
- if (!priv->tx_bd_num || !is_power_of_2(priv->tx_bd_num)) {
- pr_err("tx_bd_size_param %u is not power of two\n",
- priv->tx_bd_num);
- return -EINVAL;
- }
+ val = tx_bd_size * sizeof(struct qtnf_pearl_tx_bd);
- val = priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd);
- if (val > PCIE_HHBM_MAX_SIZE) {
- pr_err("tx_bd_size_param %u is too large\n",
- priv->tx_bd_num);
- return -EINVAL;
+ if (!is_power_of_2(tx_bd_size) || val > PCIE_HHBM_MAX_SIZE) {
+ pr_warn("bad tx_bd_size value %u\n", tx_bd_size);
+ priv->tx_bd_num = PEARL_TX_BD_SIZE_DEFAULT;
+ } else {
+ priv->tx_bd_num = tx_bd_size;
}
+ priv->rx_bd_w_index = 0;
+ priv->rx_bd_r_index = 0;
+
if (!priv->rx_bd_num || !is_power_of_2(priv->rx_bd_num)) {
pr_err("rx_bd_size_param %u is not power of two\n",
priv->rx_bd_num);
@@ -1019,7 +987,7 @@ static void qtnf_pearl_fw_work_handler(struct work_struct *work)
const char *fwname = QTN_PCI_PEARL_FW_NAME;
bool fw_boot_success = false;
- if (flashboot) {
+ if (ps->base.flashboot) {
state |= QTN_RC_FW_FLASHBOOT;
} else {
ret = request_firmware(&fw, fwname, &pdev->dev);
@@ -1035,7 +1003,7 @@ static void qtnf_pearl_fw_work_handler(struct work_struct *work)
QTN_FW_DL_TIMEOUT_MS)) {
pr_err("card is not ready\n");
- if (!flashboot)
+ if (!ps->base.flashboot)
release_firmware(fw);
goto fw_load_exit;
@@ -1043,7 +1011,7 @@ static void qtnf_pearl_fw_work_handler(struct work_struct *work)
qtnf_clear_state(&ps->bda->bda_ep_state, QTN_EP_FW_LOADRDY);
- if (flashboot) {
+ if (ps->base.flashboot) {
pr_info("booting firmware from flash\n");
} else {
@@ -1074,7 +1042,7 @@ static void qtnf_pearl_fw_work_handler(struct work_struct *work)
fw_boot_success = true;
fw_load_exit:
- qtnf_pcie_fw_boot_done(bus, fw_boot_success, DRV_NAME);
+ qtnf_pcie_fw_boot_done(bus, fw_boot_success);
if (fw_boot_success) {
qtnf_debugfs_add_entry(bus, "hdp_stats", qtnf_dbg_hdp_stats);
@@ -1090,74 +1058,34 @@ static void qtnf_pearl_reclaim_tasklet_fn(unsigned long data)
qtnf_en_txdone_irq(ps);
}
-static int qtnf_pearl_check_chip_id(struct qtnf_pcie_pearl_state *ps)
+static u64 qtnf_pearl_dma_mask_get(void)
{
- unsigned int chipid;
-
- chipid = qtnf_chip_id_get(ps->base.sysctl_bar);
-
- switch (chipid) {
- case QTN_CHIP_ID_PEARL:
- case QTN_CHIP_ID_PEARL_B:
- case QTN_CHIP_ID_PEARL_C:
- pr_info("chip ID is 0x%x\n", chipid);
- break;
- default:
- pr_err("incorrect chip ID 0x%x\n", chipid);
- return -ENODEV;
- }
-
- return 0;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ return DMA_BIT_MASK(64);
+#else
+ return DMA_BIT_MASK(32);
+#endif
}
-static int qtnf_pcie_pearl_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int qtnf_pcie_pearl_probe(struct qtnf_bus *bus, unsigned int tx_bd_size)
{
struct qtnf_shm_ipc_int ipc_int;
- struct qtnf_pcie_pearl_state *ps;
- struct qtnf_bus *bus;
+ struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
+ struct pci_dev *pdev = ps->base.pdev;
int ret;
- u64 dma_mask;
-
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- dma_mask = DMA_BIT_MASK(64);
-#else
- dma_mask = DMA_BIT_MASK(32);
-#endif
-
- ret = qtnf_pcie_probe(pdev, sizeof(*ps), &qtnf_pcie_pearl_bus_ops,
- dma_mask, use_msi);
- if (ret)
- return ret;
-
- bus = pci_get_drvdata(pdev);
- ps = get_bus_priv(bus);
+ bus->bus_ops = &qtnf_pcie_pearl_bus_ops;
spin_lock_init(&ps->irq_lock);
-
- tasklet_init(&ps->base.reclaim_tq, qtnf_pearl_reclaim_tasklet_fn,
- (unsigned long)ps);
- netif_napi_add(&bus->mux_dev, &bus->mux_napi,
- qtnf_pcie_pearl_rx_poll, 10);
INIT_WORK(&bus->fw_work, qtnf_pearl_fw_work_handler);
ps->pcie_reg_base = ps->base.dmareg_bar;
ps->bda = ps->base.epmem_bar;
writel(ps->base.msi_enabled, &ps->bda->bda_rc_msi_enabled);
- ipc_int.fn = qtnf_pcie_pearl_ipc_gen_ep_int;
- ipc_int.arg = ps;
- qtnf_pcie_init_shm_ipc(&ps->base, &ps->bda->bda_shm_reg1,
- &ps->bda->bda_shm_reg2, &ipc_int);
-
- ret = qtnf_pearl_check_chip_id(ps);
- if (ret)
- goto error;
-
- ret = qtnf_pcie_pearl_init_xfer(ps);
+ ret = qtnf_pcie_pearl_init_xfer(ps, tx_bd_size);
if (ret) {
pr_err("PCIE xfer init failed\n");
- goto error;
+ return ret;
}
/* init default irq settings */
@@ -1168,95 +1096,63 @@ static int qtnf_pcie_pearl_probe(struct pci_dev *pdev,
ret = devm_request_irq(&pdev->dev, pdev->irq,
&qtnf_pcie_pearl_interrupt, 0,
- "qtnf_pcie_irq", (void *)bus);
+ "qtnf_pearl_irq", (void *)bus);
if (ret) {
pr_err("failed to request pcie irq %d\n", pdev->irq);
- goto err_xfer;
+ qtnf_pearl_free_xfer_buffers(ps);
+ return ret;
}
- qtnf_pcie_bringup_fw_async(bus);
-
- return 0;
+ tasklet_init(&ps->base.reclaim_tq, qtnf_pearl_reclaim_tasklet_fn,
+ (unsigned long)ps);
+ netif_napi_add(&bus->mux_dev, &bus->mux_napi,
+ qtnf_pcie_pearl_rx_poll, 10);
-err_xfer:
- qtnf_pearl_free_xfer_buffers(ps);
-error:
- qtnf_pcie_remove(bus, &ps->base);
+ ipc_int.fn = qtnf_pcie_pearl_ipc_gen_ep_int;
+ ipc_int.arg = ps;
+ qtnf_pcie_init_shm_ipc(&ps->base, &ps->bda->bda_shm_reg1,
+ &ps->bda->bda_shm_reg2, &ipc_int);
- return ret;
+ return 0;
}
-static void qtnf_pcie_pearl_remove(struct pci_dev *pdev)
+static void qtnf_pcie_pearl_remove(struct qtnf_bus *bus)
{
- struct qtnf_pcie_pearl_state *ps;
- struct qtnf_bus *bus;
-
- bus = pci_get_drvdata(pdev);
- if (!bus)
- return;
-
- ps = get_bus_priv(bus);
+ struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
- qtnf_pcie_remove(bus, &ps->base);
qtnf_pearl_reset_ep(ps);
qtnf_pearl_free_xfer_buffers(ps);
}
#ifdef CONFIG_PM_SLEEP
-static int qtnf_pcie_pearl_suspend(struct device *dev)
+static int qtnf_pcie_pearl_suspend(struct qtnf_bus *bus)
{
return -EOPNOTSUPP;
}
-static int qtnf_pcie_pearl_resume(struct device *dev)
+static int qtnf_pcie_pearl_resume(struct qtnf_bus *bus)
{
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
-
-#ifdef CONFIG_PM_SLEEP
-/* Power Management Hooks */
-static SIMPLE_DEV_PM_OPS(qtnf_pcie_pearl_pm_ops, qtnf_pcie_pearl_suspend,
- qtnf_pcie_pearl_resume);
#endif
-static const struct pci_device_id qtnf_pcie_devid_table[] = {
- {
- PCIE_VENDOR_ID_QUANTENNA, PCIE_DEVICE_ID_QTN_PEARL,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- },
- { },
-};
+struct qtnf_bus *qtnf_pcie_pearl_alloc(struct pci_dev *pdev)
+{
+ struct qtnf_bus *bus;
+ struct qtnf_pcie_pearl_state *ps;
-MODULE_DEVICE_TABLE(pci, qtnf_pcie_devid_table);
+ bus = devm_kzalloc(&pdev->dev, sizeof(*bus) + sizeof(*ps), GFP_KERNEL);
+ if (!bus)
+ return NULL;
-static struct pci_driver qtnf_pcie_pearl_drv_data = {
- .name = DRV_NAME,
- .id_table = qtnf_pcie_devid_table,
- .probe = qtnf_pcie_pearl_probe,
- .remove = qtnf_pcie_pearl_remove,
+ ps = get_bus_priv(bus);
+ ps->base.probe_cb = qtnf_pcie_pearl_probe;
+ ps->base.remove_cb = qtnf_pcie_pearl_remove;
+ ps->base.dma_mask_get_cb = qtnf_pearl_dma_mask_get;
#ifdef CONFIG_PM_SLEEP
- .driver = {
- .pm = &qtnf_pcie_pearl_pm_ops,
- },
+ ps->base.resume_cb = qtnf_pcie_pearl_resume;
+ ps->base.suspend_cb = qtnf_pcie_pearl_suspend;
#endif
-};
-
-static int __init qtnf_pcie_pearl_register(void)
-{
- pr_info("register Quantenna QSR10g FullMAC PCIE driver\n");
- return pci_register_driver(&qtnf_pcie_pearl_drv_data);
-}
-static void __exit qtnf_pcie_pearl_exit(void)
-{
- pr_info("unregister Quantenna QSR10g FullMAC PCIE driver\n");
- pci_unregister_driver(&qtnf_pcie_pearl_drv_data);
+ return bus;
}
-
-module_init(qtnf_pcie_pearl_register);
-module_exit(qtnf_pcie_pearl_exit);
-
-MODULE_AUTHOR("Quantenna Communications");
-MODULE_DESCRIPTION("Quantenna QSR10g PCIe bus driver for 802.11 wireless LAN.");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_ipc.h b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_ipc.h
index f21e97ede090..634480fe6a64 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_ipc.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_ipc.h
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2015-2016 Quantenna Communications */
#ifndef _QTN_FMAC_PCIE_IPC_H_
#define _QTN_FMAC_PCIE_IPC_H_
@@ -85,11 +72,6 @@
#define QTN_EP_LHOST_TQE_PORT 4
-enum qtnf_pcie_bda_ipc_flags {
- QTN_PCIE_IPC_FLAG_HBM_MAGIC = BIT(0),
- QTN_PCIE_IPC_FLAG_SHM_PIO = BIT(1),
-};
-
enum qtnf_fw_loadtype {
QTN_FW_DBEGIN,
QTN_FW_DSUB,
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_regs.h b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_regs.h
index 0bfe285b6b48..6e9a5c61d46f 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_regs.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_regs.h
@@ -1,28 +1,10 @@
-/*
- * Copyright (c) 2015 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2015 Quantenna Communications */
#ifndef __PEARL_PCIE_H
#define __PEARL_PCIE_H
-#define PCIE_GEN2_BASE (0xe9000000)
-#define PCIE_GEN3_BASE (0xe7000000)
-
-#define PEARL_CUR_PCIE_BASE (PCIE_GEN2_BASE)
-#define PCIE_HDP_OFFSET (0x2000)
-
+/* Pearl PCIe HDP registers */
#define PCIE_HDP_CTRL(base) ((base) + 0x2c00)
#define PCIE_HDP_AXI_CTRL(base) ((base) + 0x2c04)
#define PCIE_HDP_HOST_WR_DESC0(base) ((base) + 0x2c10)
@@ -86,7 +68,7 @@
#define PCIE_HDP_TX_HOST_Q_RD_PTR(base) ((base) + 0x2d3c)
#define PCIE_HDP_TX_HOST_Q_STS(base) ((base) + 0x2d40)
-/* Host HBM pool registers */
+/* Pearl PCIe HBM pool registers */
#define PCIE_HHBM_CSR_REG(base) ((base) + 0x2e00)
#define PCIE_HHBM_Q_BASE_REG(base) ((base) + 0x2e04)
#define PCIE_HHBM_Q_LIMIT_REG(base) ((base) + 0x2e08)
@@ -104,230 +86,13 @@
#define HBM_INT_STATUS(base) ((base) + 0x2f9c)
#define PCIE_HHBM_POOL_CNFIG(base) ((base) + 0x2f9c)
-/* host HBM bit field definition */
+/* Pearl PCIe HBM bit field definitions */
#define HHBM_CONFIG_SOFT_RESET (BIT(8))
#define HHBM_WR_REQ (BIT(0))
#define HHBM_RD_REQ (BIT(1))
#define HHBM_DONE (BIT(31))
#define HHBM_64BIT (BIT(10))
-/* offsets for dual PCIE */
-#define PCIE_PORT_LINK_CTL(base) ((base) + 0x0710)
-#define PCIE_GEN2_CTL(base) ((base) + 0x080C)
-#define PCIE_GEN3_OFF(base) ((base) + 0x0890)
-#define PCIE_ATU_CTRL1(base) ((base) + 0x0904)
-#define PCIE_ATU_CTRL2(base) ((base) + 0x0908)
-#define PCIE_ATU_BASE_LOW(base) ((base) + 0x090C)
-#define PCIE_ATU_BASE_HIGH(base) ((base) + 0x0910)
-#define PCIE_ATU_BASE_LIMIT(base) ((base) + 0x0914)
-#define PCIE_ATU_TGT_LOW(base) ((base) + 0x0918)
-#define PCIE_ATU_TGT_HIGH(base) ((base) + 0x091C)
-#define PCIE_DMA_WR_ENABLE(base) ((base) + 0x097C)
-#define PCIE_DMA_WR_CHWTLOW(base) ((base) + 0x0988)
-#define PCIE_DMA_WR_CHWTHIG(base) ((base) + 0x098C)
-#define PCIE_DMA_WR_INTSTS(base) ((base) + 0x09BC)
-#define PCIE_DMA_WR_INTMASK(base) ((base) + 0x09C4)
-#define PCIE_DMA_WR_INTCLER(base) ((base) + 0x09C8)
-#define PCIE_DMA_WR_DONE_IMWR_ADDR_L(base) ((base) + 0x09D0)
-#define PCIE_DMA_WR_DONE_IMWR_ADDR_H(base) ((base) + 0x09D4)
-#define PCIE_DMA_WR_ABORT_IMWR_ADDR_L(base) ((base) + 0x09D8)
-#define PCIE_DMA_WR_ABORT_IMWR_ADDR_H(base) ((base) + 0x09DC)
-#define PCIE_DMA_WR_IMWR_DATA(base) ((base) + 0x09E0)
-#define PCIE_DMA_WR_LL_ERR_EN(base) ((base) + 0x0A00)
-#define PCIE_DMA_WR_DOORBELL(base) ((base) + 0x0980)
-#define PCIE_DMA_RD_ENABLE(base) ((base) + 0x099C)
-#define PCIE_DMA_RD_DOORBELL(base) ((base) + 0x09A0)
-#define PCIE_DMA_RD_CHWTLOW(base) ((base) + 0x09A8)
-#define PCIE_DMA_RD_CHWTHIG(base) ((base) + 0x09AC)
-#define PCIE_DMA_RD_INTSTS(base) ((base) + 0x0A10)
-#define PCIE_DMA_RD_INTMASK(base) ((base) + 0x0A18)
-#define PCIE_DMA_RD_INTCLER(base) ((base) + 0x0A1C)
-#define PCIE_DMA_RD_ERR_STS_L(base) ((base) + 0x0A24)
-#define PCIE_DMA_RD_ERR_STS_H(base) ((base) + 0x0A28)
-#define PCIE_DMA_RD_LL_ERR_EN(base) ((base) + 0x0A34)
-#define PCIE_DMA_RD_DONE_IMWR_ADDR_L(base) ((base) + 0x0A3C)
-#define PCIE_DMA_RD_DONE_IMWR_ADDR_H(base) ((base) + 0x0A40)
-#define PCIE_DMA_RD_ABORT_IMWR_ADDR_L(base) ((base) + 0x0A44)
-#define PCIE_DMA_RD_ABORT_IMWR_ADDR_H(base) ((base) + 0x0A48)
-#define PCIE_DMA_RD_IMWR_DATA(base) ((base) + 0x0A4C)
-#define PCIE_DMA_CHNL_CONTEXT(base) ((base) + 0x0A6C)
-#define PCIE_DMA_CHNL_CNTRL(base) ((base) + 0x0A70)
-#define PCIE_DMA_XFR_SIZE(base) ((base) + 0x0A78)
-#define PCIE_DMA_SAR_LOW(base) ((base) + 0x0A7C)
-#define PCIE_DMA_SAR_HIGH(base) ((base) + 0x0A80)
-#define PCIE_DMA_DAR_LOW(base) ((base) + 0x0A84)
-#define PCIE_DMA_DAR_HIGH(base) ((base) + 0x0A88)
-#define PCIE_DMA_LLPTR_LOW(base) ((base) + 0x0A8C)
-#define PCIE_DMA_LLPTR_HIGH(base) ((base) + 0x0A90)
-#define PCIE_DMA_WRLL_ERR_ENB(base) ((base) + 0x0A00)
-#define PCIE_DMA_RDLL_ERR_ENB(base) ((base) + 0x0A34)
-#define PCIE_DMABD_CHNL_CNTRL(base) ((base) + 0x8000)
-#define PCIE_DMABD_XFR_SIZE(base) ((base) + 0x8004)
-#define PCIE_DMABD_SAR_LOW(base) ((base) + 0x8008)
-#define PCIE_DMABD_SAR_HIGH(base) ((base) + 0x800c)
-#define PCIE_DMABD_DAR_LOW(base) ((base) + 0x8010)
-#define PCIE_DMABD_DAR_HIGH(base) ((base) + 0x8014)
-#define PCIE_DMABD_LLPTR_LOW(base) ((base) + 0x8018)
-#define PCIE_DMABD_LLPTR_HIGH(base) ((base) + 0x801c)
-#define PCIE_WRDMA0_CHNL_CNTRL(base) ((base) + 0x8000)
-#define PCIE_WRDMA0_XFR_SIZE(base) ((base) + 0x8004)
-#define PCIE_WRDMA0_SAR_LOW(base) ((base) + 0x8008)
-#define PCIE_WRDMA0_SAR_HIGH(base) ((base) + 0x800c)
-#define PCIE_WRDMA0_DAR_LOW(base) ((base) + 0x8010)
-#define PCIE_WRDMA0_DAR_HIGH(base) ((base) + 0x8014)
-#define PCIE_WRDMA0_LLPTR_LOW(base) ((base) + 0x8018)
-#define PCIE_WRDMA0_LLPTR_HIGH(base) ((base) + 0x801c)
-#define PCIE_WRDMA1_CHNL_CNTRL(base) ((base) + 0x8020)
-#define PCIE_WRDMA1_XFR_SIZE(base) ((base) + 0x8024)
-#define PCIE_WRDMA1_SAR_LOW(base) ((base) + 0x8028)
-#define PCIE_WRDMA1_SAR_HIGH(base) ((base) + 0x802c)
-#define PCIE_WRDMA1_DAR_LOW(base) ((base) + 0x8030)
-#define PCIE_WRDMA1_DAR_HIGH(base) ((base) + 0x8034)
-#define PCIE_WRDMA1_LLPTR_LOW(base) ((base) + 0x8038)
-#define PCIE_WRDMA1_LLPTR_HIGH(base) ((base) + 0x803c)
-#define PCIE_RDDMA0_CHNL_CNTRL(base) ((base) + 0x8040)
-#define PCIE_RDDMA0_XFR_SIZE(base) ((base) + 0x8044)
-#define PCIE_RDDMA0_SAR_LOW(base) ((base) + 0x8048)
-#define PCIE_RDDMA0_SAR_HIGH(base) ((base) + 0x804c)
-#define PCIE_RDDMA0_DAR_LOW(base) ((base) + 0x8050)
-#define PCIE_RDDMA0_DAR_HIGH(base) ((base) + 0x8054)
-#define PCIE_RDDMA0_LLPTR_LOW(base) ((base) + 0x8058)
-#define PCIE_RDDMA0_LLPTR_HIGH(base) ((base) + 0x805c)
-#define PCIE_RDDMA1_CHNL_CNTRL(base) ((base) + 0x8060)
-#define PCIE_RDDMA1_XFR_SIZE(base) ((base) + 0x8064)
-#define PCIE_RDDMA1_SAR_LOW(base) ((base) + 0x8068)
-#define PCIE_RDDMA1_SAR_HIGH(base) ((base) + 0x806c)
-#define PCIE_RDDMA1_DAR_LOW(base) ((base) + 0x8070)
-#define PCIE_RDDMA1_DAR_HIGH(base) ((base) + 0x8074)
-#define PCIE_RDDMA1_LLPTR_LOW(base) ((base) + 0x8078)
-#define PCIE_RDDMA1_LLPTR_HIGH(base) ((base) + 0x807c)
-
-#define PCIE_ID(base) ((base) + 0x0000)
-#define PCIE_CMD(base) ((base) + 0x0004)
-#define PCIE_BAR(base, n) ((base) + 0x0010 + ((n) << 2))
-#define PCIE_CAP_PTR(base) ((base) + 0x0034)
-#define PCIE_MSI_LBAR(base) ((base) + 0x0054)
-#define PCIE_MSI_CTRL(base) ((base) + 0x0050)
-#define PCIE_MSI_ADDR_L(base) ((base) + 0x0054)
-#define PCIE_MSI_ADDR_H(base) ((base) + 0x0058)
-#define PCIE_MSI_DATA(base) ((base) + 0x005C)
-#define PCIE_MSI_MASK_BIT(base) ((base) + 0x0060)
-#define PCIE_MSI_PEND_BIT(base) ((base) + 0x0064)
-#define PCIE_DEVCAP(base) ((base) + 0x0074)
-#define PCIE_DEVCTLSTS(base) ((base) + 0x0078)
-
-#define PCIE_CMDSTS(base) ((base) + 0x0004)
-#define PCIE_LINK_STAT(base) ((base) + 0x80)
-#define PCIE_LINK_CTL2(base) ((base) + 0xa0)
-#define PCIE_ASPM_L1_CTRL(base) ((base) + 0x70c)
-#define PCIE_ASPM_LINK_CTRL(base) (PCIE_LINK_STAT)
-#define PCIE_ASPM_L1_SUBSTATE_TIMING(base) ((base) + 0xB44)
-#define PCIE_L1SUB_CTRL1(base) ((base) + 0x150)
-#define PCIE_PMCSR(base) ((base) + 0x44)
-#define PCIE_CFG_SPACE_LIMIT(base) ((base) + 0x100)
-
-/* PCIe link defines */
-#define PEARL_PCIE_LINKUP (0x7)
-#define PEARL_PCIE_DATA_LINK (BIT(0))
-#define PEARL_PCIE_PHY_LINK (BIT(1))
-#define PEARL_PCIE_LINK_RST (BIT(3))
-#define PEARL_PCIE_FATAL_ERR (BIT(5))
-#define PEARL_PCIE_NONFATAL_ERR (BIT(6))
-
-/* PCIe Lane defines */
-#define PCIE_G2_LANE_X1 ((BIT(0)) << 16)
-#define PCIE_G2_LANE_X2 ((BIT(0) | BIT(1)) << 16)
-
-/* PCIe DLL link enable */
-#define PCIE_DLL_LINK_EN ((BIT(0)) << 5)
-
-#define PCIE_LINK_GEN1 (BIT(0))
-#define PCIE_LINK_GEN2 (BIT(1))
-#define PCIE_LINK_GEN3 (BIT(2))
-#define PCIE_LINK_MODE(x) (((x) >> 16) & 0x7)
-
-#define MSI_EN (BIT(0))
-#define MSI_64_EN (BIT(7))
-#define PCIE_MSI_ADDR_OFFSET(a) ((a) & 0xFFFF)
-#define PCIE_MSI_ADDR_ALIGN(a) ((a) & (~0xFFFF))
-
-#define PCIE_BAR_MASK(base, n) ((base) + 0x1010 + ((n) << 2))
-#define PCIE_MAX_BAR (6)
-
-#define PCIE_ATU_VIEW(base) ((base) + 0x0900)
-#define PCIE_ATU_CTL1(base) ((base) + 0x0904)
-#define PCIE_ATU_CTL2(base) ((base) + 0x0908)
-#define PCIE_ATU_LBAR(base) ((base) + 0x090c)
-#define PCIE_ATU_UBAR(base) ((base) + 0x0910)
-#define PCIE_ATU_LAR(base) ((base) + 0x0914)
-#define PCIE_ATU_LTAR(base) ((base) + 0x0918)
-#define PCIE_ATU_UTAR(base) ((base) + 0x091c)
-
-#define PCIE_MSI_ADDR_LOWER(base) ((base) + 0x0820)
-#define PCIE_MSI_ADDR_UPPER(base) ((base) + 0x0824)
-#define PCIE_MSI_ENABLE(base) ((base) + 0x0828)
-#define PCIE_MSI_MASK_RC(base) ((base) + 0x082c)
-#define PCIE_MSI_STATUS(base) ((base) + 0x0830)
-#define PEARL_PCIE_MSI_REGION (0xce000000)
-#define PEARL_PCIE_MSI_DATA (0)
-#define PCIE_MSI_GPIO(base) ((base) + 0x0888)
-
-#define PCIE_HDP_HOST_QUEUE_FULL (BIT(17))
-#define USE_BAR_MATCH_MODE
-#define PCIE_ATU_OB_REGION (BIT(0))
-#define PCIE_ATU_EN_REGION (BIT(31))
-#define PCIE_ATU_EN_MATCH (BIT(30))
-#define PCIE_BASE_REGION (0xb0000000)
-#define PCIE_MEM_MAP_SIZE (512 * 1024)
-
-#define PCIE_OB_REG_REGION (0xcf000000)
-#define PCIE_CONFIG_REGION (0xcf000000)
-#define PCIE_CONFIG_SIZE (4096)
-#define PCIE_CONFIG_CH (1)
-
-/* inbound mapping */
-#define PCIE_IB_BAR0 (0x00000000) /* ddr */
-#define PCIE_IB_BAR0_CH (0)
-#define PCIE_IB_BAR3 (0xe0000000) /* sys_reg */
-#define PCIE_IB_BAR3_CH (1)
-
-/* outbound mapping */
-#define PCIE_MEM_CH (0)
-#define PCIE_REG_CH (1)
-#define PCIE_MEM_REGION (0xc0000000)
-#define PCIE_MEM_SIZE (0x000fffff)
-#define PCIE_MEM_TAR (0x80000000)
-
-#define PCIE_MSI_REGION (0xce000000)
-#define PCIE_MSI_SIZE (KBYTE(4) - 1)
-#define PCIE_MSI_CH (1)
-
-/* size of config region */
-#define PCIE_CFG_SIZE (0x0000ffff)
-
-#define PCIE_ATU_DIR_IB (BIT(31))
-#define PCIE_ATU_DIR_OB (0)
-#define PCIE_ATU_DIR_CFG (2)
-#define PCIE_ATU_DIR_MATCH_IB (BIT(31) | BIT(30))
-
-#define PCIE_DMA_WR_0 (0)
-#define PCIE_DMA_WR_1 (1)
-#define PCIE_DMA_RD_0 (2)
-#define PCIE_DMA_RD_1 (3)
-
-#define PCIE_DMA_CHNL_CNTRL_CB (BIT(0))
-#define PCIE_DMA_CHNL_CNTRL_TCB (BIT(1))
-#define PCIE_DMA_CHNL_CNTRL_LLP (BIT(2))
-#define PCIE_DMA_CHNL_CNTRL_LIE (BIT(3))
-#define PCIE_DMA_CHNL_CNTRL_RIE (BIT(4))
-#define PCIE_DMA_CHNL_CNTRL_CSS (BIT(8))
-#define PCIE_DMA_CHNL_CNTRL_LLE (BIT(9))
-#define PCIE_DMA_CHNL_CNTRL_TLP (BIT(26))
-
-#define PCIE_DMA_CHNL_CONTEXT_RD (BIT(31))
-#define PCIE_DMA_CHNL_CONTEXT_WR (0)
-#define PCIE_MAX_BAR (6)
-
/* PCIe HDP interrupt status definition */
#define PCIE_HDP_INT_EP_RXDMA (BIT(0))
#define PCIE_HDP_INT_HBM_UF (BIT(1))
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
new file mode 100644
index 000000000000..598edb814421
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
@@ -0,0 +1,1219 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2018 Quantenna Communications */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/crc32.h>
+#include <linux/completion.h>
+#include <linux/spinlock.h>
+#include <linux/circ_buf.h>
+
+#include "pcie_priv.h"
+#include "topaz_pcie_regs.h"
+#include "topaz_pcie_ipc.h"
+#include "qtn_hw_ids.h"
+#include "core.h"
+#include "bus.h"
+#include "shm_ipc.h"
+#include "debug.h"
+
+#define TOPAZ_TX_BD_SIZE_DEFAULT 128
+
+struct qtnf_topaz_tx_bd {
+ __le32 addr;
+ __le32 info;
+} __packed;
+
+struct qtnf_topaz_rx_bd {
+ __le32 addr;
+ __le32 info;
+} __packed;
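+
+/*
+ * Both descriptors carry only the low 32 bits of a DMA address, which is
+ * consistent with the 32-bit DMA mask reported by qtnf_topaz_dma_mask_get()
+ * further below.
+ */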
+
+struct qtnf_extra_bd_params {
+ __le32 param1;
+ __le32 param2;
+ __le32 param3;
+ __le32 param4;
+} __packed;
+
+#define QTNF_BD_PARAM_OFFSET(n) offsetof(struct qtnf_extra_bd_params, param##n)
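+/*
+ * With four packed __le32 members, QTNF_BD_PARAM_OFFSET(1)..(4) resolve to
+ * byte offsets 0, 4, 8 and 12 inside struct qtnf_extra_bd_params; e.g.
+ * QTNF_BD_PARAM_OFFSET(3) == 8.
+ */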
+
+struct vmac_pkt_info {
+ __le32 addr;
+ __le32 info;
+};
+
+struct qtnf_topaz_bda {
+ __le16 bda_len;
+ __le16 bda_version;
+ __le32 bda_bootstate;
+ __le32 bda_dma_mask;
+ __le32 bda_dma_offset;
+ __le32 bda_flags;
+ __le32 bda_img;
+ __le32 bda_img_size;
+ __le32 bda_ep2h_irqstatus;
+ __le32 bda_h2ep_irqstatus;
+ __le32 bda_msi_addr;
+ u8 reserved1[56];
+ __le32 bda_flashsz;
+ u8 bda_boardname[PCIE_BDA_NAMELEN];
+ __le32 bda_pci_pre_status;
+ __le32 bda_pci_endian;
+ __le32 bda_pci_post_status;
+ __le32 bda_h2ep_txd_budget;
+ __le32 bda_ep2h_txd_budget;
+ __le32 bda_rc_rx_bd_base;
+ __le32 bda_rc_rx_bd_num;
+ __le32 bda_rc_tx_bd_base;
+ __le32 bda_rc_tx_bd_num;
+ u8 bda_ep_link_state;
+ u8 bda_rc_link_state;
+ u8 bda_rc_msi_enabled;
+ u8 reserved2;
+ __le32 bda_ep_next_pkt;
+ struct vmac_pkt_info request[QTN_PCIE_RC_TX_QUEUE_LEN];
+ struct qtnf_shm_ipc_region bda_shm_reg1 __aligned(4096);
+ struct qtnf_shm_ipc_region bda_shm_reg2 __aligned(4096);
+} __packed;
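+
+/*
+ * Boot data area shared with the card: it is mapped from the endpoint
+ * memory BAR (ts->bda = ts->base.epmem_bar in qtnf_pcie_topaz_probe()),
+ * every multi-byte field is little-endian, and the two shared-memory IPC
+ * regions are page aligned.
+ */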
+
+struct qtnf_pcie_topaz_state {
+ struct qtnf_pcie_bus_priv base;
+ struct qtnf_topaz_bda __iomem *bda;
+
+ dma_addr_t dma_msi_dummy;
+ u32 dma_msi_imwr;
+
+ struct qtnf_topaz_tx_bd *tx_bd_vbase;
+ struct qtnf_topaz_rx_bd *rx_bd_vbase;
+
+ __le32 __iomem *ep_next_rx_pkt;
+ __le32 __iomem *txqueue_wake;
+ __le32 __iomem *ep_pmstate;
+
+ unsigned long rx_pkt_count;
+};
+
+static void qtnf_deassert_intx(struct qtnf_pcie_topaz_state *ts)
+{
+ void __iomem *reg = ts->base.sysctl_bar + TOPAZ_PCIE_CFG0_OFFSET;
+ u32 cfg;
+
+ cfg = readl(reg);
+ cfg &= ~TOPAZ_ASSERT_INTX;
+ qtnf_non_posted_write(cfg, reg);
+}
+
+static inline int qtnf_topaz_intx_asserted(struct qtnf_pcie_topaz_state *ts)
+{
+ void __iomem *reg = ts->base.sysctl_bar + TOPAZ_PCIE_CFG0_OFFSET;
+ u32 cfg = readl(reg);
+
+ return !!(cfg & TOPAZ_ASSERT_INTX);
+}
+
+static void qtnf_topaz_reset_ep(struct qtnf_pcie_topaz_state *ts)
+{
+ writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_RST_EP_IRQ),
+ TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));
+ msleep(QTN_EP_RESET_WAIT_MS);
+ pci_restore_state(ts->base.pdev);
+}
+
+static void setup_rx_irqs(struct qtnf_pcie_topaz_state *ts)
+{
+ void __iomem *reg = PCIE_DMA_WR_DONE_IMWR_ADDR_LOW(ts->base.dmareg_bar);
+
+ ts->dma_msi_imwr = readl(reg);
+}
+
+static void enable_rx_irqs(struct qtnf_pcie_topaz_state *ts)
+{
+ void __iomem *reg = PCIE_DMA_WR_DONE_IMWR_ADDR_LOW(ts->base.dmareg_bar);
+
+ qtnf_non_posted_write(ts->dma_msi_imwr, reg);
+}
+
+static void disable_rx_irqs(struct qtnf_pcie_topaz_state *ts)
+{
+ void __iomem *reg = PCIE_DMA_WR_DONE_IMWR_ADDR_LOW(ts->base.dmareg_bar);
+
+ qtnf_non_posted_write(QTN_HOST_LO32(ts->dma_msi_dummy), reg);
+}
+
+static void qtnf_topaz_ipc_gen_ep_int(void *arg)
+{
+ struct qtnf_pcie_topaz_state *ts = arg;
+
+ writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_CTRL_IRQ),
+ TOPAZ_CTL_M2L_INT(ts->base.sysctl_bar));
+}
+
+static int qtnf_is_state(__le32 __iomem *reg, u32 state)
+{
+ u32 s = readl(reg);
+
+ return (s == state);
+}
+
+static void qtnf_set_state(__le32 __iomem *reg, u32 state)
+{
+ qtnf_non_posted_write(state, reg);
+}
+
+static int qtnf_poll_state(__le32 __iomem *reg, u32 state, u32 delay_in_ms)
+{
+ u32 timeout = 0;
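+
+ /*
+  * The poll granularity is roughly 1 ms (usleep_range(1000, 1200)), so
+  * delay_in_ms is effectively a retry count and the real timeout may run
+  * slightly longer than the requested number of milliseconds.
+  */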
+
+ while ((qtnf_is_state(reg, state) == 0)) {
+ usleep_range(1000, 1200);
+ if (++timeout > delay_in_ms)
+ return -1;
+ }
+
+ return 0;
+}
+
+static int topaz_alloc_bd_table(struct qtnf_pcie_topaz_state *ts,
+ struct qtnf_topaz_bda __iomem *bda)
+{
+ struct qtnf_extra_bd_params __iomem *extra_params;
+ struct qtnf_pcie_bus_priv *priv = &ts->base;
+ dma_addr_t paddr;
+ void *vaddr;
+ int len;
+ int i;
+
+ /* bd table */
+
+ len = priv->tx_bd_num * sizeof(struct qtnf_topaz_tx_bd) +
+ priv->rx_bd_num * sizeof(struct qtnf_topaz_rx_bd) +
+ sizeof(struct qtnf_extra_bd_params);
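+
+ /*
+  * Both descriptor types are two __le32 words (8 bytes each), so with,
+  * for example, 128 TX and 128 RX descriptors this single allocation is
+  * 128 * 8 + 128 * 8 + 16 = 2064 bytes: the TX ring, the RX ring and the
+  * 16-byte extra-params block carved out further below.
+  */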
+
+ vaddr = dmam_alloc_coherent(&priv->pdev->dev, len, &paddr, GFP_KERNEL);
+ if (!vaddr)
+ return -ENOMEM;
+
+ memset(vaddr, 0, len);
+
+ /* tx bd */
+
+ ts->tx_bd_vbase = vaddr;
+ qtnf_non_posted_write(paddr, &bda->bda_rc_tx_bd_base);
+
+ for (i = 0; i < priv->tx_bd_num; i++)
+ ts->tx_bd_vbase[i].info |= cpu_to_le32(QTN_BD_EMPTY);
+
+ pr_debug("TX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);
+
+ priv->tx_bd_r_index = 0;
+ priv->tx_bd_w_index = 0;
+
+ /* rx bd */
+
+ vaddr = ((struct qtnf_topaz_tx_bd *)vaddr) + priv->tx_bd_num;
+ paddr += priv->tx_bd_num * sizeof(struct qtnf_topaz_tx_bd);
+
+ ts->rx_bd_vbase = vaddr;
+ qtnf_non_posted_write(paddr, &bda->bda_rc_rx_bd_base);
+
+ pr_debug("RX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);
+
+ /* extra shared params */
+
+ vaddr = ((struct qtnf_topaz_rx_bd *)vaddr) + priv->rx_bd_num;
+ paddr += priv->rx_bd_num * sizeof(struct qtnf_topaz_rx_bd);
+
+ extra_params = (struct qtnf_extra_bd_params __iomem *)vaddr;
+
+ ts->ep_next_rx_pkt = &extra_params->param1;
+ qtnf_non_posted_write(paddr + QTNF_BD_PARAM_OFFSET(1),
+ &bda->bda_ep_next_pkt);
+ ts->txqueue_wake = &extra_params->param2;
+ ts->ep_pmstate = &extra_params->param3;
+ ts->dma_msi_dummy = paddr + QTNF_BD_PARAM_OFFSET(4);
+
+ return 0;
+}
+
+static int
+topaz_skb2rbd_attach(struct qtnf_pcie_topaz_state *ts, u16 index, u32 wrap)
+{
+ struct qtnf_topaz_rx_bd *rxbd = &ts->rx_bd_vbase[index];
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+
+ skb = __netdev_alloc_skb_ip_align(NULL, SKB_BUF_SIZE, GFP_ATOMIC);
+ if (!skb) {
+ ts->base.rx_skb[index] = NULL;
+ return -ENOMEM;
+ }
+
+ ts->base.rx_skb[index] = skb;
+
+ paddr = pci_map_single(ts->base.pdev, skb->data,
+ SKB_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(ts->base.pdev, paddr)) {
+ pr_err("skb mapping error: %pad\n", &paddr);
+ return -ENOMEM;
+ }
+
+ rxbd->addr = cpu_to_le32(QTN_HOST_LO32(paddr));
+ rxbd->info = cpu_to_le32(QTN_BD_EMPTY | wrap);
+
+ ts->base.rx_bd_w_index = index;
+
+ return 0;
+}
+
+static int topaz_alloc_rx_buffers(struct qtnf_pcie_topaz_state *ts)
+{
+ u16 i;
+ int ret = 0;
+
+ memset(ts->rx_bd_vbase, 0x0,
+ ts->base.rx_bd_num * sizeof(struct qtnf_topaz_rx_bd));
+
+ for (i = 0; i < ts->base.rx_bd_num; i++) {
+ ret = topaz_skb2rbd_attach(ts, i, 0);
+ if (ret)
+ break;
+ }
+
+ ts->rx_bd_vbase[ts->base.rx_bd_num - 1].info |=
+ cpu_to_le32(QTN_BD_WRAP);
+
+ return ret;
+}
+
+/* all rx/tx activity should have ceased before calling this function */
+static void qtnf_topaz_free_xfer_buffers(struct qtnf_pcie_topaz_state *ts)
+{
+ struct qtnf_pcie_bus_priv *priv = &ts->base;
+ struct qtnf_topaz_rx_bd *rxbd;
+ struct qtnf_topaz_tx_bd *txbd;
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+ int i;
+
+ /* free rx buffers */
+ for (i = 0; i < priv->rx_bd_num; i++) {
+ if (priv->rx_skb && priv->rx_skb[i]) {
+ rxbd = &ts->rx_bd_vbase[i];
+ skb = priv->rx_skb[i];
+ paddr = QTN_HOST_ADDR(0x0, le32_to_cpu(rxbd->addr));
+ pci_unmap_single(priv->pdev, paddr, SKB_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb_any(skb);
+ priv->rx_skb[i] = NULL;
+ rxbd->addr = 0;
+ rxbd->info = 0;
+ }
+ }
+
+ /* free tx buffers */
+ for (i = 0; i < priv->tx_bd_num; i++) {
+ if (priv->tx_skb && priv->tx_skb[i]) {
+ txbd = &ts->tx_bd_vbase[i];
+ skb = priv->tx_skb[i];
+ paddr = QTN_HOST_ADDR(0x0, le32_to_cpu(txbd->addr));
+ pci_unmap_single(priv->pdev, paddr, SKB_BUF_SIZE,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(skb);
+ priv->tx_skb[i] = NULL;
+ txbd->addr = 0;
+ txbd->info = 0;
+ }
+ }
+}
+
+static int qtnf_pcie_topaz_init_xfer(struct qtnf_pcie_topaz_state *ts,
+ unsigned int tx_bd_size)
+{
+ struct qtnf_topaz_bda __iomem *bda = ts->bda;
+ struct qtnf_pcie_bus_priv *priv = &ts->base;
+ int ret;
+
+ if (tx_bd_size == 0)
+ tx_bd_size = TOPAZ_TX_BD_SIZE_DEFAULT;
+
+ /* check TX BD queue max length according to struct qtnf_topaz_bda */
+ if (tx_bd_size > QTN_PCIE_RC_TX_QUEUE_LEN) {
+ pr_warn("TX BD queue cannot exceed %d\n",
+ QTN_PCIE_RC_TX_QUEUE_LEN);
+ tx_bd_size = QTN_PCIE_RC_TX_QUEUE_LEN;
+ }
+
+ priv->tx_bd_num = tx_bd_size;
+ qtnf_non_posted_write(priv->tx_bd_num, &bda->bda_rc_tx_bd_num);
+ qtnf_non_posted_write(priv->rx_bd_num, &bda->bda_rc_rx_bd_num);
+
+ priv->rx_bd_w_index = 0;
+ priv->rx_bd_r_index = 0;
+
+ ret = qtnf_pcie_alloc_skb_array(priv);
+ if (ret) {
+ pr_err("failed to allocate skb array\n");
+ return ret;
+ }
+
+ ret = topaz_alloc_bd_table(ts, bda);
+ if (ret) {
+ pr_err("failed to allocate bd table\n");
+ return ret;
+ }
+
+ ret = topaz_alloc_rx_buffers(ts);
+ if (ret) {
+ pr_err("failed to allocate rx buffers\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static void qtnf_topaz_data_tx_reclaim(struct qtnf_pcie_topaz_state *ts)
+{
+ struct qtnf_pcie_bus_priv *priv = &ts->base;
+ struct qtnf_topaz_tx_bd *txbd;
+ struct sk_buff *skb;
+ unsigned long flags;
+ dma_addr_t paddr;
+ u32 tx_done_index;
+ int count = 0;
+ int i;
+
+ spin_lock_irqsave(&priv->tx_reclaim_lock, flags);
+
+ tx_done_index = readl(ts->ep_next_rx_pkt);
+ i = priv->tx_bd_r_index;
+
+ if (CIRC_CNT(priv->tx_bd_w_index, tx_done_index, priv->tx_bd_num))
+ writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_TX_DONE_IRQ),
+ TOPAZ_LH_IPC4_INT(priv->sysctl_bar));
+
+ while (CIRC_CNT(tx_done_index, i, priv->tx_bd_num)) {
+ skb = priv->tx_skb[i];
+
+ if (likely(skb)) {
+ txbd = &ts->tx_bd_vbase[i];
+ paddr = QTN_HOST_ADDR(0x0, le32_to_cpu(txbd->addr));
+ pci_unmap_single(priv->pdev, paddr, skb->len,
+ PCI_DMA_TODEVICE);
+
+ if (skb->dev) {
+ qtnf_update_tx_stats(skb->dev, skb);
+ if (unlikely(priv->tx_stopped)) {
+ qtnf_wake_all_queues(skb->dev);
+ priv->tx_stopped = 0;
+ }
+ }
+
+ dev_kfree_skb_any(skb);
+ }
+
+ priv->tx_skb[i] = NULL;
+ count++;
+
+ if (++i >= priv->tx_bd_num)
+ i = 0;
+ }
+
+ priv->tx_reclaim_done += count;
+ priv->tx_reclaim_req++;
+ priv->tx_bd_r_index = i;
+
+ spin_unlock_irqrestore(&priv->tx_reclaim_lock, flags);
+}
+
+static void qtnf_try_stop_xmit(struct qtnf_bus *bus, struct net_device *ndev)
+{
+ struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
+
+ if (ndev) {
+ netif_tx_stop_all_queues(ndev);
+ ts->base.tx_stopped = 1;
+ }
+
+ writel(0x0, ts->txqueue_wake);
+
+ /* sync up tx queue status before generating interrupt */
+ dma_wmb();
+
+ /* send irq to card: tx stopped */
+ writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_TX_STOP_IRQ),
+ TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));
+
+ /* schedule reclaim attempt */
+ tasklet_hi_schedule(&ts->base.reclaim_tq);
+}
+
+static void qtnf_try_wake_xmit(struct qtnf_bus *bus, struct net_device *ndev)
+{
+ struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
+ int ready;
+
+ ready = readl(ts->txqueue_wake);
+ if (ready) {
+ netif_wake_queue(ndev);
+ } else {
+ /* re-send irq to card: tx stopped */
+ writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_TX_STOP_IRQ),
+ TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));
+ }
+}
+
+static int qtnf_tx_queue_ready(struct qtnf_pcie_topaz_state *ts)
+{
+ struct qtnf_pcie_bus_priv *priv = &ts->base;
+
+ if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
+ priv->tx_bd_num)) {
+ qtnf_topaz_data_tx_reclaim(ts);
+
+ if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
+ priv->tx_bd_num)) {
+ priv->tx_full_count++;
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
+{
+ struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
+ struct qtnf_pcie_bus_priv *priv = &ts->base;
+ struct qtnf_topaz_bda __iomem *bda = ts->bda;
+ struct qtnf_topaz_tx_bd *txbd;
+ dma_addr_t skb_paddr;
+ unsigned long flags;
+ int ret = 0;
+ int len;
+ int i;
+
+ spin_lock_irqsave(&priv->tx_lock, flags);
+
+ if (!qtnf_tx_queue_ready(ts)) {
+ qtnf_try_stop_xmit(bus, skb->dev);
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+
+ i = priv->tx_bd_w_index;
+ priv->tx_skb[i] = skb;
+ len = skb->len;
+
+ skb_paddr = pci_map_single(priv->pdev, skb->data,
+ skb->len, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(priv->pdev, skb_paddr)) {
+ ret = -ENOMEM;
+ goto tx_done;
+ }
+
+ txbd = &ts->tx_bd_vbase[i];
+ txbd->addr = cpu_to_le32(QTN_HOST_LO32(skb_paddr));
+
+ writel(QTN_HOST_LO32(skb_paddr), &bda->request[i].addr);
+ writel(len | QTN_PCIE_TX_VALID_PKT, &bda->request[i].info);
+
+ /* sync up descriptor updates before generating interrupt */
+ dma_wmb();
+
+ /* generate irq to card: tx done */
+ writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_TX_DONE_IRQ),
+ TOPAZ_LH_IPC4_INT(priv->sysctl_bar));
+
+ if (++i >= priv->tx_bd_num)
+ i = 0;
+
+ priv->tx_bd_w_index = i;
+
+tx_done:
+ if (ret) {
+ if (skb->dev)
+ skb->dev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ }
+
+ priv->tx_done_count++;
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ qtnf_topaz_data_tx_reclaim(ts);
+
+ return NETDEV_TX_OK;
+}
+
+static irqreturn_t qtnf_pcie_topaz_interrupt(int irq, void *data)
+{
+ struct qtnf_bus *bus = (struct qtnf_bus *)data;
+ struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
+ struct qtnf_pcie_bus_priv *priv = &ts->base;
+
+ if (!priv->msi_enabled && !qtnf_topaz_intx_asserted(ts))
+ return IRQ_NONE;
+
+ priv->pcie_irq_count++;
+
+ qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_in);
+ qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_out);
+
+ if (napi_schedule_prep(&bus->mux_napi)) {
+ disable_rx_irqs(ts);
+ __napi_schedule(&bus->mux_napi);
+ }
+
+ tasklet_hi_schedule(&priv->reclaim_tq);
+
+ if (!priv->msi_enabled)
+ qtnf_deassert_intx(ts);
+
+ return IRQ_HANDLED;
+}
+
+static int qtnf_rx_data_ready(struct qtnf_pcie_topaz_state *ts)
+{
+ u16 index = ts->base.rx_bd_r_index;
+ struct qtnf_topaz_rx_bd *rxbd;
+ u32 descw;
+
+ rxbd = &ts->rx_bd_vbase[index];
+ descw = le32_to_cpu(rxbd->info);
+
+ if (descw & QTN_BD_EMPTY)
+ return 0;
+
+ return 1;
+}
+
+static int qtnf_topaz_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct qtnf_bus *bus = container_of(napi, struct qtnf_bus, mux_napi);
+ struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
+ struct qtnf_pcie_bus_priv *priv = &ts->base;
+ struct net_device *ndev = NULL;
+ struct sk_buff *skb = NULL;
+ int processed = 0;
+ struct qtnf_topaz_rx_bd *rxbd;
+ dma_addr_t skb_paddr;
+ int consume;
+ u32 descw;
+ u32 poffset;
+ u32 psize;
+ u16 r_idx;
+ u16 w_idx;
+ int ret;
+
+ while (processed < budget) {
+ if (!qtnf_rx_data_ready(ts))
+ goto rx_out;
+
+ r_idx = priv->rx_bd_r_index;
+ rxbd = &ts->rx_bd_vbase[r_idx];
+ descw = le32_to_cpu(rxbd->info);
+
+ skb = priv->rx_skb[r_idx];
+ poffset = QTN_GET_OFFSET(descw);
+ psize = QTN_GET_LEN(descw);
+ consume = 1;
+
+ if (descw & QTN_BD_EMPTY) {
+ pr_warn("skip invalid rxbd[%d]\n", r_idx);
+ consume = 0;
+ }
+
+ if (!skb) {
+ pr_warn("skip missing rx_skb[%d]\n", r_idx);
+ consume = 0;
+ }
+
+ if (skb && (skb_tailroom(skb) < psize)) {
+ pr_err("skip packet with invalid length: %u > %u\n",
+ psize, skb_tailroom(skb));
+ consume = 0;
+ }
+
+ if (skb) {
+ skb_paddr = QTN_HOST_ADDR(0x0, le32_to_cpu(rxbd->addr));
+ pci_unmap_single(priv->pdev, skb_paddr, SKB_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
+ }
+
+ if (consume) {
+ skb_reserve(skb, poffset);
+ skb_put(skb, psize);
+ ndev = qtnf_classify_skb(bus, skb);
+ if (likely(ndev)) {
+ qtnf_update_rx_stats(ndev, skb);
+ skb->protocol = eth_type_trans(skb, ndev);
+ netif_receive_skb(skb);
+ } else {
+ pr_debug("drop untagged skb\n");
+ bus->mux_dev.stats.rx_dropped++;
+ dev_kfree_skb_any(skb);
+ }
+ } else {
+ if (skb) {
+ bus->mux_dev.stats.rx_dropped++;
+ dev_kfree_skb_any(skb);
+ }
+ }
+
+ /* notify the card about received packets once every several packets */
+ if (((++ts->rx_pkt_count) & RX_DONE_INTR_MSK) == 0)
+ writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_RX_DONE_IRQ),
+ TOPAZ_LH_IPC4_INT(priv->sysctl_bar));
+
+ priv->rx_skb[r_idx] = NULL;
+ if (++r_idx >= priv->rx_bd_num)
+ r_idx = 0;
+
+ priv->rx_bd_r_index = r_idx;
+
+ /* replace the processed buffer with a new one */
+ w_idx = priv->rx_bd_w_index;
+ while (CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
+ priv->rx_bd_num) > 0) {
+ if (++w_idx >= priv->rx_bd_num)
+ w_idx = 0;
+
+ ret = topaz_skb2rbd_attach(ts, w_idx,
+ descw & QTN_BD_WRAP);
+ if (ret) {
+ pr_err("failed to allocate new rx_skb[%d]\n",
+ w_idx);
+ break;
+ }
+ }
+
+ processed++;
+ }
+
+rx_out:
+ if (processed < budget) {
+ napi_complete(napi);
+ enable_rx_irqs(ts);
+ }
+
+ return processed;
+}
+
+static void
+qtnf_pcie_data_tx_timeout(struct qtnf_bus *bus, struct net_device *ndev)
+{
+ struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
+
+ qtnf_try_wake_xmit(bus, ndev);
+ tasklet_hi_schedule(&ts->base.reclaim_tq);
+}
+
+static void qtnf_pcie_data_rx_start(struct qtnf_bus *bus)
+{
+ struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
+
+ napi_enable(&bus->mux_napi);
+ enable_rx_irqs(ts);
+}
+
+static void qtnf_pcie_data_rx_stop(struct qtnf_bus *bus)
+{
+ struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
+
+ disable_rx_irqs(ts);
+ napi_disable(&bus->mux_napi);
+}
+
+static const struct qtnf_bus_ops qtnf_pcie_topaz_bus_ops = {
+ /* control path methods */
+ .control_tx = qtnf_pcie_control_tx,
+
+ /* data path methods */
+ .data_tx = qtnf_pcie_data_tx,
+ .data_tx_timeout = qtnf_pcie_data_tx_timeout,
+ .data_rx_start = qtnf_pcie_data_rx_start,
+ .data_rx_stop = qtnf_pcie_data_rx_stop,
+};
+
+static int qtnf_dbg_irq_stats(struct seq_file *s, void *data)
+{
+ struct qtnf_bus *bus = dev_get_drvdata(s->private);
+ struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
+
+ seq_printf(s, "pcie_irq_count(%u)\n", ts->base.pcie_irq_count);
+
+ return 0;
+}
+
+static int qtnf_dbg_pkt_stats(struct seq_file *s, void *data)
+{
+ struct qtnf_bus *bus = dev_get_drvdata(s->private);
+ struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
+ struct qtnf_pcie_bus_priv *priv = &ts->base;
+ u32 tx_done_index = readl(ts->ep_next_rx_pkt);
+
+ seq_printf(s, "tx_full_count(%u)\n", priv->tx_full_count);
+ seq_printf(s, "tx_done_count(%u)\n", priv->tx_done_count);
+ seq_printf(s, "tx_reclaim_done(%u)\n", priv->tx_reclaim_done);
+ seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req);
+
+ seq_printf(s, "tx_bd_r_index(%u)\n", priv->tx_bd_r_index);
+ seq_printf(s, "tx_done_index(%u)\n", tx_done_index);
+ seq_printf(s, "tx_bd_w_index(%u)\n", priv->tx_bd_w_index);
+
+ seq_printf(s, "tx host queue len(%u)\n",
+ CIRC_CNT(priv->tx_bd_w_index, priv->tx_bd_r_index,
+ priv->tx_bd_num));
+ seq_printf(s, "tx reclaim queue len(%u)\n",
+ CIRC_CNT(tx_done_index, priv->tx_bd_r_index,
+ priv->tx_bd_num));
+ seq_printf(s, "tx card queue len(%u)\n",
+ CIRC_CNT(priv->tx_bd_w_index, tx_done_index,
+ priv->tx_bd_num));
+
+ seq_printf(s, "rx_bd_r_index(%u)\n", priv->rx_bd_r_index);
+ seq_printf(s, "rx_bd_w_index(%u)\n", priv->rx_bd_w_index);
+ seq_printf(s, "rx alloc queue len(%u)\n",
+ CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
+ priv->rx_bd_num));
+
+ return 0;
+}
+
+static void qtnf_reset_dma_offset(struct qtnf_pcie_topaz_state *ts)
+{
+ struct qtnf_topaz_bda __iomem *bda = ts->bda;
+ u32 offset = readl(&bda->bda_dma_offset);
+
+ if ((offset & PCIE_DMA_OFFSET_ERROR_MASK) != PCIE_DMA_OFFSET_ERROR)
+ return;
+
+ writel(0x0, &bda->bda_dma_offset);
+}
+
+static int qtnf_pcie_endian_detect(struct qtnf_pcie_topaz_state *ts)
+{
+ struct qtnf_topaz_bda __iomem *bda = ts->bda;
+ u32 timeout = 0;
+ u32 endian;
+ int ret = 0;
+
+ writel(QTN_PCI_ENDIAN_DETECT_DATA, &bda->bda_pci_endian);
+
+ /* flush endian modifications before status update */
+ dma_wmb();
+
+ writel(QTN_PCI_ENDIAN_VALID_STATUS, &bda->bda_pci_pre_status);
+
+ while (readl(&bda->bda_pci_post_status) !=
+ QTN_PCI_ENDIAN_VALID_STATUS) {
+ usleep_range(1000, 1200);
+ if (++timeout > QTN_FW_DL_TIMEOUT_MS) {
+ pr_err("card endianness detection timed out\n");
+ ret = -ETIMEDOUT;
+ goto endian_out;
+ }
+ }
+
+ /* do not read before status is updated */
+ dma_rmb();
+
+ endian = readl(&bda->bda_pci_endian);
+ WARN(endian != QTN_PCI_LITTLE_ENDIAN,
+ "%s: unexpected card endianness", __func__);
+
+endian_out:
+ writel(0, &bda->bda_pci_pre_status);
+ writel(0, &bda->bda_pci_post_status);
+ writel(0, &bda->bda_pci_endian);
+
+ return ret;
+}
+
+static int qtnf_pre_init_ep(struct qtnf_bus *bus)
+{
+ struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
+ struct qtnf_topaz_bda __iomem *bda = ts->bda;
+ u32 flags;
+ int ret;
+
+ ret = qtnf_pcie_endian_detect(ts);
+ if (ret < 0) {
+ pr_err("failed to detect card endianness\n");
+ return ret;
+ }
+
+ writeb(ts->base.msi_enabled, &ts->bda->bda_rc_msi_enabled);
+ qtnf_reset_dma_offset(ts);
+
+ /* notify card about driver type and boot mode */
+ flags = readl(&bda->bda_flags) | QTN_BDA_HOST_QLINK_DRV;
+
+ if (ts->base.flashboot)
+ flags |= QTN_BDA_FLASH_BOOT;
+ else
+ flags &= ~QTN_BDA_FLASH_BOOT;
+
+ writel(flags, &bda->bda_flags);
+
+ qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_HOST_RDY);
+ if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_TARGET_RDY,
+ QTN_FW_DL_TIMEOUT_MS)) {
+ pr_err("card is not ready to boot...\n");
+ return -ETIMEDOUT;
+ }
+
+ return ret;
+}
+
+static int qtnf_post_init_ep(struct qtnf_pcie_topaz_state *ts)
+{
+ struct pci_dev *pdev = ts->base.pdev;
+
+ setup_rx_irqs(ts);
+ disable_rx_irqs(ts);
+
+ if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_QLINK_DONE,
+ QTN_FW_QLINK_TIMEOUT_MS))
+ return -ETIMEDOUT;
+
+ enable_irq(pdev->irq);
+ return 0;
+}
+
+static int
+qtnf_ep_fw_load(struct qtnf_pcie_topaz_state *ts, const u8 *fw, u32 fw_size)
+{
+ struct qtnf_topaz_bda __iomem *bda = ts->bda;
+ struct pci_dev *pdev = ts->base.pdev;
+ u32 remaining = fw_size;
+ u8 *curr = (u8 *)fw;
+ u32 blksize;
+ u32 nblocks;
+ u32 offset;
+ u32 count;
+ u32 size;
+ dma_addr_t paddr;
+ void *data;
+ int ret = 0;
+
+ pr_debug("FW upload started: fw_addr = 0x%p, size=%d\n", fw, fw_size);
+
+ blksize = ts->base.fw_blksize;
+
+ if (blksize < PAGE_SIZE)
+ blksize = PAGE_SIZE;
+
+ while (blksize >= PAGE_SIZE) {
+ pr_debug("allocating %u bytes to upload FW\n", blksize);
+ data = dma_alloc_coherent(&pdev->dev, blksize,
+ &paddr, GFP_KERNEL);
+ if (data)
+ break;
+ blksize /= 2;
+ }
+
+ if (!data) {
+ pr_err("failed to allocate DMA buffer for FW upload\n");
+ ret = -ENOMEM;
+ goto fw_load_out;
+ }
+
+ nblocks = NBLOCKS(fw_size, blksize);
+ offset = readl(&bda->bda_dma_offset);
+
+ qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_HOST_LOAD);
+ if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_EP_RDY,
+ QTN_FW_DL_TIMEOUT_MS)) {
+ pr_err("card is not ready to download FW\n");
+ ret = -ETIMEDOUT;
+ goto fw_load_map;
+ }
+
+ for (count = 0 ; count < nblocks; count++) {
+ size = (remaining > blksize) ? blksize : remaining;
+
+ memcpy(data, curr, size);
+ qtnf_non_posted_write(paddr + offset, &bda->bda_img);
+ qtnf_non_posted_write(size, &bda->bda_img_size);
+
+ pr_debug("chunk[%u] VA[0x%p] PA[%pad] sz[%u]\n",
+ count, (void *)curr, &paddr, size);
+
+ qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_BLOCK_RDY);
+ if (qtnf_poll_state(&ts->bda->bda_bootstate,
+ QTN_BDA_FW_BLOCK_DONE,
+ QTN_FW_DL_TIMEOUT_MS)) {
+ pr_err("confirmation for block #%d timed out\n", count);
+ ret = -ETIMEDOUT;
+ goto fw_load_map;
+ }
+
+ remaining = (remaining < size) ? remaining : (remaining - size);
+ curr += size;
+ }
+
+ /* upload completion mark: zero-sized block */
+ qtnf_non_posted_write(0, &bda->bda_img);
+ qtnf_non_posted_write(0, &bda->bda_img_size);
+
+ qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_BLOCK_RDY);
+ if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_BLOCK_DONE,
+ QTN_FW_DL_TIMEOUT_MS)) {
+ pr_err("confirmation for the last block timed out\n");
+ ret = -ETIMEDOUT;
+ goto fw_load_map;
+ }
+
+ /* RC is done */
+ qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_BLOCK_END);
+ if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_LOAD_DONE,
+ QTN_FW_DL_TIMEOUT_MS)) {
+ pr_err("confirmation for FW upload completion timed out\n");
+ ret = -ETIMEDOUT;
+ goto fw_load_map;
+ }
+
+ pr_debug("FW upload completed: %d blocks sent in total\n", count);
+
+fw_load_map:
+ dma_free_coherent(&pdev->dev, blksize, data, paddr);
+
+fw_load_out:
+ return ret;
+}
+
+static int qtnf_topaz_fw_upload(struct qtnf_pcie_topaz_state *ts,
+ const char *fwname)
+{
+ const struct firmware *fw;
+ struct pci_dev *pdev = ts->base.pdev;
+ int ret;
+
+ if (qtnf_poll_state(&ts->bda->bda_bootstate,
+ QTN_BDA_FW_LOAD_RDY,
+ QTN_FW_DL_TIMEOUT_MS)) {
+ pr_err("%s: card is not ready\n", fwname);
+ return -1;
+ }
+
+ pr_info("starting firmware upload: %s\n", fwname);
+
+ ret = request_firmware(&fw, fwname, &pdev->dev);
+ if (ret < 0) {
+ pr_err("%s: request_firmware error %d\n", fwname, ret);
+ return -1;
+ }
+
+ ret = qtnf_ep_fw_load(ts, fw->data, fw->size);
+ release_firmware(fw);
+
+ if (ret)
+ pr_err("%s: FW upload error\n", fwname);
+
+ return ret;
+}
+
+static void qtnf_topaz_fw_work_handler(struct work_struct *work)
+{
+ struct qtnf_bus *bus = container_of(work, struct qtnf_bus, fw_work);
+ struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
+ int ret;
+ int bootloader_needed = readl(&ts->bda->bda_flags) & QTN_BDA_XMIT_UBOOT;
+
+ qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_TARGET_BOOT);
+
+ if (bootloader_needed) {
+ ret = qtnf_topaz_fw_upload(ts, QTN_PCI_TOPAZ_BOOTLD_NAME);
+ if (ret)
+ goto fw_load_exit;
+
+ ret = qtnf_pre_init_ep(bus);
+ if (ret)
+ goto fw_load_exit;
+
+ qtnf_set_state(&ts->bda->bda_bootstate,
+ QTN_BDA_FW_TARGET_BOOT);
+ }
+
+ if (ts->base.flashboot) {
+ pr_info("booting firmware from flash\n");
+
+ ret = qtnf_poll_state(&ts->bda->bda_bootstate,
+ QTN_BDA_FW_FLASH_BOOT,
+ QTN_FW_DL_TIMEOUT_MS);
+ if (ret)
+ goto fw_load_exit;
+ } else {
+ ret = qtnf_topaz_fw_upload(ts, QTN_PCI_TOPAZ_FW_NAME);
+ if (ret)
+ goto fw_load_exit;
+
+ qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_START);
+ ret = qtnf_poll_state(&ts->bda->bda_bootstate,
+ QTN_BDA_FW_CONFIG,
+ QTN_FW_QLINK_TIMEOUT_MS);
+ if (ret) {
+ pr_err("FW bringup timed out\n");
+ goto fw_load_exit;
+ }
+
+ qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_RUN);
+ ret = qtnf_poll_state(&ts->bda->bda_bootstate,
+ QTN_BDA_FW_RUNNING,
+ QTN_FW_QLINK_TIMEOUT_MS);
+ if (ret) {
+ pr_err("card bringup timed out\n");
+ goto fw_load_exit;
+ }
+ }
+
+ pr_info("firmware is up and running\n");
+
+ ret = qtnf_post_init_ep(ts);
+ if (ret)
+ pr_err("FW runtime failure\n");
+
+fw_load_exit:
+ qtnf_pcie_fw_boot_done(bus, ret ? false : true);
+
+ if (ret == 0) {
+ qtnf_debugfs_add_entry(bus, "pkt_stats", qtnf_dbg_pkt_stats);
+ qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats);
+ }
+}
+
+static void qtnf_reclaim_tasklet_fn(unsigned long data)
+{
+ struct qtnf_pcie_topaz_state *ts = (void *)data;
+
+ qtnf_topaz_data_tx_reclaim(ts);
+}
+
+static u64 qtnf_topaz_dma_mask_get(void)
+{
+ return DMA_BIT_MASK(32);
+}
+
+static int qtnf_pcie_topaz_probe(struct qtnf_bus *bus, unsigned int tx_bd_num)
+{
+ struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
+ struct pci_dev *pdev = ts->base.pdev;
+ struct qtnf_shm_ipc_int ipc_int;
+ unsigned long irqflags;
+ int ret;
+
+ bus->bus_ops = &qtnf_pcie_topaz_bus_ops;
+ INIT_WORK(&bus->fw_work, qtnf_topaz_fw_work_handler);
+ ts->bda = ts->base.epmem_bar;
+
+ /* assign host msi irq before card init */
+ if (ts->base.msi_enabled)
+ irqflags = IRQF_NOBALANCING;
+ else
+ irqflags = IRQF_NOBALANCING | IRQF_SHARED;
+
+ ret = devm_request_irq(&pdev->dev, pdev->irq,
+ &qtnf_pcie_topaz_interrupt,
+ irqflags, "qtnf_topaz_irq", (void *)bus);
+ if (ret) {
+ pr_err("failed to request pcie irq %d\n", pdev->irq);
+ return ret;
+ }
+
+ disable_irq(pdev->irq);
+
+ ret = qtnf_pre_init_ep(bus);
+ if (ret) {
+ pr_err("failed to init card\n");
+ return ret;
+ }
+
+ ret = qtnf_pcie_topaz_init_xfer(ts, tx_bd_num);
+ if (ret) {
+ pr_err("PCIE xfer init failed\n");
+ return ret;
+ }
+
+ tasklet_init(&ts->base.reclaim_tq, qtnf_reclaim_tasklet_fn,
+ (unsigned long)ts);
+ netif_napi_add(&bus->mux_dev, &bus->mux_napi,
+ qtnf_topaz_rx_poll, 10);
+
+ ipc_int.fn = qtnf_topaz_ipc_gen_ep_int;
+ ipc_int.arg = ts;
+ qtnf_pcie_init_shm_ipc(&ts->base, &ts->bda->bda_shm_reg1,
+ &ts->bda->bda_shm_reg2, &ipc_int);
+
+ return 0;
+}
+
+static void qtnf_pcie_topaz_remove(struct qtnf_bus *bus)
+{
+ struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
+
+ qtnf_topaz_reset_ep(ts);
+ qtnf_topaz_free_xfer_buffers(ts);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int qtnf_pcie_topaz_suspend(struct qtnf_bus *bus)
+{
+ struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
+ struct pci_dev *pdev = ts->base.pdev;
+
+ writel((u32 __force)PCI_D3hot, ts->ep_pmstate);
+ dma_wmb();
+ writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_PM_EP_IRQ),
+ TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));
+
+ pci_save_state(pdev);
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+static int qtnf_pcie_topaz_resume(struct qtnf_bus *bus)
+{
+ struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
+ struct pci_dev *pdev = ts->base.pdev;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_enable_wake(pdev, PCI_D0, 0);
+
+ writel((u32 __force)PCI_D0, ts->ep_pmstate);
+ dma_wmb();
+ writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_PM_EP_IRQ),
+ TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));
+
+ return 0;
+}
+#endif
+
+struct qtnf_bus *qtnf_pcie_topaz_alloc(struct pci_dev *pdev)
+{
+ struct qtnf_bus *bus;
+ struct qtnf_pcie_topaz_state *ts;
+
+ bus = devm_kzalloc(&pdev->dev, sizeof(*bus) + sizeof(*ts), GFP_KERNEL);
+ if (!bus)
+ return NULL;
+
+ ts = get_bus_priv(bus);
+ ts->base.probe_cb = qtnf_pcie_topaz_probe;
+ ts->base.remove_cb = qtnf_pcie_topaz_remove;
+ ts->base.dma_mask_get_cb = qtnf_topaz_dma_mask_get;
+#ifdef CONFIG_PM_SLEEP
+ ts->base.resume_cb = qtnf_pcie_topaz_resume;
+ ts->base.suspend_cb = qtnf_pcie_topaz_suspend;
+#endif
+
+ return bus;
+}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie_ipc.h b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie_ipc.h
new file mode 100644
index 000000000000..eb30e9d08de2
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie_ipc.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2018 Quantenna Communications */
+
+#ifndef _QTN_FMAC_PCIE_IPC_H_
+#define _QTN_FMAC_PCIE_IPC_H_
+
+#include <linux/types.h>
+
+#include "shm_ipc_defs.h"
+
+/* EP/RC status and flags */
+#define QTN_BDA_PCIE_INIT 0x01
+#define QTN_BDA_PCIE_RDY 0x02
+#define QTN_BDA_FW_LOAD_RDY 0x03
+#define QTN_BDA_FW_LOAD_DONE 0x04
+#define QTN_BDA_FW_START 0x05
+#define QTN_BDA_FW_RUN 0x06
+#define QTN_BDA_FW_HOST_RDY 0x07
+#define QTN_BDA_FW_TARGET_RDY 0x11
+#define QTN_BDA_FW_TARGET_BOOT 0x12
+#define QTN_BDA_FW_FLASH_BOOT 0x13
+#define QTN_BDA_FW_QLINK_DONE 0x14
+#define QTN_BDA_FW_HOST_LOAD 0x08
+#define QTN_BDA_FW_BLOCK_DONE 0x09
+#define QTN_BDA_FW_BLOCK_RDY 0x0A
+#define QTN_BDA_FW_EP_RDY 0x0B
+#define QTN_BDA_FW_BLOCK_END 0x0C
+#define QTN_BDA_FW_CONFIG 0x0D
+#define QTN_BDA_FW_RUNNING 0x0E
+#define QTN_BDA_PCIE_FAIL 0x82
+#define QTN_BDA_FW_LOAD_FAIL 0x85
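+
+/*
+ * As exercised by topaz_pcie.c, a non-flash boot walks these states roughly
+ * as: HOST_RDY/TARGET_RDY (pre-init), TARGET_BOOT, LOAD_RDY, then
+ * HOST_LOAD/EP_RDY, BLOCK_RDY/BLOCK_DONE per firmware chunk,
+ * BLOCK_END/LOAD_DONE, and finally FW_START, FW_CONFIG, FW_RUN, FW_RUNNING
+ * and QLINK_DONE.
+ */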
+
+#define QTN_BDA_RCMODE BIT(1)
+#define QTN_BDA_MSI BIT(2)
+#define QTN_BDA_HOST_CALCMD BIT(3)
+#define QTN_BDA_FLASH_PRESENT BIT(4)
+#define QTN_BDA_FLASH_BOOT BIT(5)
+#define QTN_BDA_XMIT_UBOOT BIT(6)
+#define QTN_BDA_HOST_QLINK_DRV BIT(7)
+#define QTN_BDA_TARGET_FBOOT_ERR BIT(8)
+#define QTN_BDA_TARGET_FWLOAD_ERR BIT(9)
+#define QTN_BDA_HOST_NOFW_ERR BIT(12)
+#define QTN_BDA_HOST_MEMALLOC_ERR BIT(13)
+#define QTN_BDA_HOST_MEMMAP_ERR BIT(14)
+#define QTN_BDA_VER(x) (((x) >> 4) & 0xFF)
+#define QTN_BDA_ERROR_MASK 0xFF00
+
+/* registers and shmem address macros */
+#if BITS_PER_LONG == 64
+#define QTN_HOST_HI32(a) ((u32)(((u64)a) >> 32))
+#define QTN_HOST_LO32(a) ((u32)(((u64)a) & 0xffffffffUL))
+#define QTN_HOST_ADDR(h, l) ((((u64)h) << 32) | ((u64)l))
+#elif BITS_PER_LONG == 32
+#define QTN_HOST_HI32(a) 0
+#define QTN_HOST_LO32(a) ((u32)(((u32)a) & 0xffffffffUL))
+#define QTN_HOST_ADDR(h, l) ((u32)l)
+#else
+#error Unexpected BITS_PER_LONG value
+#endif
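+
+/*
+ * Example on a 64-bit host: for a == 0x0000000123456780ULL,
+ * QTN_HOST_HI32(a) == 0x1, QTN_HOST_LO32(a) == 0x23456780, and
+ * QTN_HOST_ADDR(0x1, 0x23456780) reassembles the original value; on a
+ * 32-bit host the high word is simply dropped.
+ */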
+
+#define QTN_PCIE_BDA_VERSION 0x1001
+
+#define PCIE_BDA_NAMELEN 32
+
+#define QTN_PCIE_RC_TX_QUEUE_LEN 256
+#define QTN_PCIE_TX_VALID_PKT 0x80000000
+#define QTN_PCIE_PKT_LEN_MASK 0xffff
+
+#define QTN_BD_EMPTY ((uint32_t)0x00000001)
+#define QTN_BD_WRAP ((uint32_t)0x00000002)
+#define QTN_BD_MASK_LEN ((uint32_t)0xFFFF0000)
+#define QTN_BD_MASK_OFFSET ((uint32_t)0x0000FF00)
+
+#define QTN_GET_LEN(x) (((x) >> 16) & 0xFFFF)
+#define QTN_GET_OFFSET(x) (((x) >> 8) & 0xFF)
+#define QTN_SET_LEN(len) (((len) & 0xFFFF) << 16)
+#define QTN_SET_OFFSET(of) (((of) & 0xFF) << 8)
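+
+/*
+ * Descriptor "info" word layout implied by the accessors above: bits 31..16
+ * hold the packet length, bits 15..8 the payload offset, and bits 1..0 the
+ * QTN_BD_WRAP and QTN_BD_EMPTY flags.
+ */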
+
+#define RX_DONE_INTR_MSK ((0x1 << 6) - 1)
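+/*
+ * ((0x1 << 6) - 1) == 0x3f, so the RX-done doorbell in qtnf_topaz_rx_poll()
+ * is rung once for every 64 received packets.
+ */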
+
+#define PCIE_DMA_OFFSET_ERROR 0xFFFF
+#define PCIE_DMA_OFFSET_ERROR_MASK 0xFFFF
+
+#define QTN_PCI_ENDIAN_DETECT_DATA 0x12345678
+#define QTN_PCI_ENDIAN_REVERSE_DATA 0x78563412
+#define QTN_PCI_ENDIAN_VALID_STATUS 0x3c3c3c3c
+#define QTN_PCI_ENDIAN_INVALID_STATUS 0
+#define QTN_PCI_LITTLE_ENDIAN 0
+#define QTN_PCI_BIG_ENDIAN 0xffffffff
+
+#define NBLOCKS(size, blksize) \
+ ((size) / (blksize) + (((size) % (blksize) > 0) ? 1 : 0))
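+
+/*
+ * NBLOCKS() is a plain ceiling division (equivalent to DIV_ROUND_UP()); for
+ * example NBLOCKS(40000, 16384) == 3, i.e. two full 16 KB chunks plus a
+ * 7232-byte tail, matching the chunked upload loop in qtnf_ep_fw_load().
+ */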
+
+#endif /* _QTN_FMAC_PCIE_IPC_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie_regs.h b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie_regs.h
new file mode 100644
index 000000000000..4782e1ed3c2c
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie_regs.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2018 Quantenna Communications */
+
+#ifndef __TOPAZ_PCIE_H
+#define __TOPAZ_PCIE_H
+
+/* Topaz PCIe DMA registers */
+#define PCIE_DMA_WR_INTR_STATUS(base) ((base) + 0x9bc)
+#define PCIE_DMA_WR_INTR_MASK(base) ((base) + 0x9c4)
+#define PCIE_DMA_WR_INTR_CLR(base) ((base) + 0x9c8)
+#define PCIE_DMA_WR_ERR_STATUS(base) ((base) + 0x9cc)
+#define PCIE_DMA_WR_DONE_IMWR_ADDR_LOW(base) ((base) + 0x9D0)
+#define PCIE_DMA_WR_DONE_IMWR_ADDR_HIGH(base) ((base) + 0x9d4)
+
+#define PCIE_DMA_RD_INTR_STATUS(base) ((base) + 0x310)
+#define PCIE_DMA_RD_INTR_MASK(base) ((base) + 0x319)
+#define PCIE_DMA_RD_INTR_CLR(base) ((base) + 0x31c)
+#define PCIE_DMA_RD_ERR_STATUS_LOW(base) ((base) + 0x324)
+#define PCIE_DMA_RD_ERR_STATUS_HIGH(base) ((base) + 0x328)
+#define PCIE_DMA_RD_DONE_IMWR_ADDR_LOW(base) ((base) + 0x33c)
+#define PCIE_DMA_RD_DONE_IMWR_ADDR_HIGH(base) ((base) + 0x340)
+
+/* Topaz LHost IPC4 interrupt */
+#define TOPAZ_LH_IPC4_INT(base) ((base) + 0x13C)
+#define TOPAZ_LH_IPC4_INT_MASK(base) ((base) + 0x140)
+
+#define TOPAZ_RC_TX_DONE_IRQ (0)
+#define TOPAZ_RC_RST_EP_IRQ (1)
+#define TOPAZ_RC_TX_STOP_IRQ (2)
+#define TOPAZ_RC_RX_DONE_IRQ (3)
+#define TOPAZ_RC_PM_EP_IRQ (4)
+
+/* Topaz LHost M2L interrupt */
+#define TOPAZ_CTL_M2L_INT(base) ((base) + 0x2C)
+#define TOPAZ_CTL_M2L_INT_MASK(base) ((base) + 0x30)
+
+#define TOPAZ_RC_CTRL_IRQ (6)
+
+#define TOPAZ_IPC_IRQ_WORD(irq) (BIT(irq) | BIT(irq + 16))
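+
+/*
+ * TOPAZ_IPC_IRQ_WORD() sets the IRQ bit together with its copy in the upper
+ * halfword; for instance TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_RX_DONE_IRQ), with
+ * irq == 3, expands to BIT(3) | BIT(19) == 0x00080008.
+ */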
+
+/* PCIe legacy INTx */
+#define TOPAZ_PCIE_CFG0_OFFSET (0x6C)
+#define TOPAZ_ASSERT_INTX BIT(9)
+
+#endif /* __TOPAZ_PCIE_H */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
index 99d37e3efba6..8d62addea895 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
@@ -71,6 +71,7 @@ struct qlink_msg_header {
* @QLINK_HW_CAPAB_DFS_OFFLOAD: device implements DFS offload functionality
* @QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR: device supports MAC Address
* Randomization in probe requests.
+ * @QLINK_HW_CAPAB_OBSS_SCAN: device can perform OBSS scanning.
*/
enum qlink_hw_capab {
QLINK_HW_CAPAB_REG_UPDATE = BIT(0),
@@ -78,6 +79,8 @@ enum qlink_hw_capab {
QLINK_HW_CAPAB_DFS_OFFLOAD = BIT(2),
QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR = BIT(3),
QLINK_HW_CAPAB_PWR_MGMT = BIT(4),
+ QLINK_HW_CAPAB_OBSS_SCAN = BIT(5),
+ QLINK_HW_CAPAB_SCAN_DWELL = BIT(6),
};
enum qlink_iface_type {
@@ -1149,6 +1152,8 @@ enum qlink_tlv_id {
QTN_TLV_ID_MAX_SCAN_SSIDS = 0x0409,
QTN_TLV_ID_WOWLAN_CAPAB = 0x0410,
QTN_TLV_ID_WOWLAN_PATTERN = 0x0411,
+ QTN_TLV_ID_SCAN_FLUSH = 0x0412,
+ QTN_TLV_ID_SCAN_DWELL = 0x0413,
};
struct qlink_tlv_hdr {
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
index 54caeb38917c..960d5d97492f 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
@@ -40,6 +40,14 @@ static inline void qtnf_cmd_skb_put_tlv_arr(struct sk_buff *skb,
memcpy(hdr->val, arr, arr_len);
}
+static inline void qtnf_cmd_skb_put_tlv_tag(struct sk_buff *skb, u16 tlv_id)
+{
+ struct qlink_tlv_hdr *hdr = skb_put(skb, sizeof(*hdr));
+
+ hdr->type = cpu_to_le16(tlv_id);
+ hdr->len = cpu_to_le16(0);
+}
+
static inline void qtnf_cmd_skb_put_tlv_u8(struct sk_buff *skb, u16 tlv_id,
u8 value)
{
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h b/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h
index 1fe798a9a667..40295a511224 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h
@@ -23,7 +23,7 @@
/* PCIE Device IDs */
-#define PCIE_DEVICE_ID_QTN_PEARL (0x0008)
+#define PCIE_DEVICE_ID_QSR (0x0008)
#define QTN_REG_SYS_CTRL_CSR 0x14
#define QTN_CHIP_ID_MASK 0xF0
@@ -35,6 +35,8 @@
/* FW names */
#define QTN_PCI_PEARL_FW_NAME "qtn/fmac_qsr10g.img"
+#define QTN_PCI_TOPAZ_FW_NAME "qtn/fmac_qsr1000.img"
+#define QTN_PCI_TOPAZ_BOOTLD_NAME "qtn/uboot_qsr1000.img"
static inline unsigned int qtnf_chip_id_get(const void __iomem *regs_base)
{
diff --git a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c
index aa106dd0a14b..2ec334199c2b 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c
@@ -42,19 +42,18 @@ static void qtnf_shm_handle_new_data(struct qtnf_shm_ipc *ipc)
if (unlikely(size == 0 || size > QTN_IPC_MAX_DATA_SZ)) {
pr_err("wrong rx packet size: %zu\n", size);
rx_buff_ok = false;
- } else {
- memcpy_fromio(ipc->rx_data, ipc->shm_region->data, size);
+ }
+
+ if (likely(rx_buff_ok)) {
+ ipc->rx_packet_count++;
+ ipc->rx_callback.fn(ipc->rx_callback.arg,
+ ipc->shm_region->data, size);
}
writel(QTNF_SHM_IPC_ACK, &shm_reg_hdr->flags);
readl(&shm_reg_hdr->flags); /* flush PCIe write */
ipc->interrupt.fn(ipc->interrupt.arg);
-
- if (likely(rx_buff_ok)) {
- ipc->rx_packet_count++;
- ipc->rx_callback.fn(ipc->rx_callback.arg, ipc->rx_data, size);
- }
}
static void qtnf_shm_ipc_irq_work(struct work_struct *work)
diff --git a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h
index 453dd6477b12..c2a3702a9ee7 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h
@@ -32,7 +32,7 @@ struct qtnf_shm_ipc_int {
};
struct qtnf_shm_ipc_rx_callback {
- void (*fn)(void *arg, const u8 *buf, size_t len);
+ void (*fn)(void *arg, const u8 __iomem *buf, size_t len);
void *arg;
};
@@ -51,8 +51,6 @@ struct qtnf_shm_ipc {
u8 waiting_for_ack;
- u8 rx_data[QTN_IPC_MAX_DATA_SZ] __aligned(sizeof(u32));
-
struct qtnf_shm_ipc_int interrupt;
struct qtnf_shm_ipc_rx_callback rx_callback;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/util.c b/drivers/net/wireless/quantenna/qtnfmac/util.c
index e745733ba417..3bc96b264769 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/util.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/util.c
@@ -15,6 +15,7 @@
*/
#include "util.h"
+#include "qtn_hw_ids.h"
void qtnf_sta_list_init(struct qtnf_sta_list *list)
{
@@ -116,3 +117,20 @@ void qtnf_sta_list_free(struct qtnf_sta_list *list)
INIT_LIST_HEAD(&list->head);
}
+
+const char *qtnf_chipid_to_string(unsigned long chip_id)
+{
+ switch (chip_id) {
+ case QTN_CHIP_ID_TOPAZ:
+ return "Topaz";
+ case QTN_CHIP_ID_PEARL:
+ return "Pearl revA";
+ case QTN_CHIP_ID_PEARL_B:
+ return "Pearl revB";
+ case QTN_CHIP_ID_PEARL_C:
+ return "Pearl revC";
+ default:
+ return "unknown";
+ }
+}
+EXPORT_SYMBOL_GPL(qtnf_chipid_to_string);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/util.h b/drivers/net/wireless/quantenna/qtnfmac/util.h
index 0d4d92b11540..b8744baac332 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/util.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/util.h
@@ -20,6 +20,8 @@
#include <linux/kernel.h>
#include "core.h"
+const char *qtnf_chipid_to_string(unsigned long chip_id);
+
void qtnf_sta_list_init(struct qtnf_sta_list *list);
struct qtnf_sta_node *qtnf_sta_list_lookup(struct qtnf_sta_list *list,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
index 0bc8b0249c57..49a732798395 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
@@ -1302,7 +1302,7 @@ static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev,
break;
case 2: /* Failure, excessive retries */
__set_bit(TXDONE_EXCESSIVE_RETRY, &txdesc.flags);
- /* Don't break, this is a failed frame! */
+ /* Fall through - this is a failed frame! */
default: /* Failure */
__set_bit(TXDONE_FAILURE, &txdesc.flags);
}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
index 1ff5434798ec..e8e7bfe1ba9b 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
@@ -1430,7 +1430,7 @@ static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev,
break;
case 2: /* Failure, excessive retries */
__set_bit(TXDONE_EXCESSIVE_RETRY, &txdesc.flags);
- /* Don't break, this is a failed frame! */
+ /* Fall through - this is a failed frame! */
default: /* Failure */
__set_bit(TXDONE_FAILURE, &txdesc.flags);
}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index 9e7b8933d30c..0e95555aec62 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -2482,6 +2482,7 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
switch (rt2x00dev->default_ant.tx_chain_num) {
case 1:
rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
+ /* fall through */
case 2:
rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 1);
break;
@@ -2490,6 +2491,7 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
switch (rt2x00dev->default_ant.rx_chain_num) {
case 1:
rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
+ /* fall through */
case 2:
rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 1);
break;
@@ -9457,8 +9459,10 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
switch (rx_chains) {
case 3:
spec->ht.mcs.rx_mask[2] = 0xff;
+ /* fall through */
case 2:
spec->ht.mcs.rx_mask[1] = 0xff;
+ /* fall through */
case 1:
spec->ht.mcs.rx_mask[0] = 0xff;
spec->ht.mcs.rx_mask[4] = 0x1; /* MCS32 */
diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
index cb0e1196f2c2..4c5de8fc8f12 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
@@ -2226,7 +2226,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
break;
case 6: /* Failure, excessive retries */
__set_bit(TXDONE_EXCESSIVE_RETRY, &txdesc.flags);
- /* Don't break, this is a failed frame! */
+ /* Fall through - this is a failed frame! */
default: /* Failure */
__set_bit(TXDONE_FAILURE, &txdesc.flags);
}
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 08c607c031bc..33ad87528d9a 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -889,8 +889,10 @@ static int ray_hw_xmit(unsigned char *data, int len, struct net_device *dev,
switch (ccsindex = get_free_tx_ccs(local)) {
case ECCSBUSY:
pr_debug("ray_hw_xmit tx_ccs table busy\n");
+ /* fall through */
case ECCSFULL:
pr_debug("ray_hw_xmit No free tx ccs\n");
+ /* fall through */
case ECARDGONE:
netif_stop_queue(dev);
return XMIT_NO_CCS;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 73f6fc0d4a01..2bd43057dda3 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -1153,6 +1153,7 @@ void rtl8xxxu_gen1_config_channel(struct ieee80211_hw *hw)
switch (hw->conf.chandef.width) {
case NL80211_CHAN_WIDTH_20_NOHT:
ht = false;
+ /* fall through */
case NL80211_CHAN_WIDTH_20:
opmode |= BW_OPMODE_20MHZ;
rtl8xxxu_write8(priv, REG_BW_OPMODE, opmode);
@@ -1280,6 +1281,7 @@ void rtl8xxxu_gen2_config_channel(struct ieee80211_hw *hw)
switch (hw->conf.chandef.width) {
case NL80211_CHAN_WIDTH_20_NOHT:
ht = false;
+ /* fall through */
case NL80211_CHAN_WIDTH_20:
rf_mode_bw |= WMAC_TRXPTCL_CTL_BW_20;
subchannel = 0;
@@ -1748,9 +1750,11 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
case 3:
priv->ep_tx_low_queue = 1;
priv->ep_tx_count++;
+ /* fall through */
case 2:
priv->ep_tx_normal_queue = 1;
priv->ep_tx_count++;
+ /* fall through */
case 1:
priv->ep_tx_high_queue = 1;
priv->ep_tx_count++;
@@ -4918,11 +4922,10 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
struct device *dev = &priv->udev->dev;
u32 queue, rts_rate;
u16 pktlen = skb->len;
- u16 seq_number;
u16 rate_flag = tx_info->control.rates[0].flags;
int tx_desc_size = priv->fops->tx_desc_size;
int ret;
- bool usedesc40, ampdu_enable, sgi = false, short_preamble = false;
+ bool ampdu_enable, sgi = false, short_preamble = false;
if (skb_headroom(skb) < tx_desc_size) {
dev_warn(dev,
@@ -4946,7 +4949,6 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
if (ieee80211_is_action(hdr->frame_control))
rtl8xxxu_dump_action(dev, hdr);
- usedesc40 = (tx_desc_size == 40);
tx_info->rate_driver_data[0] = hw;
if (control && control->sta)
@@ -5013,7 +5015,6 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
else
rts_rate = 0;
- seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
priv->fops->fill_txdesc(hw, hdr, tx_info, tx_desc, sgi, short_preamble,
ampdu_enable, rts_rate);
@@ -5691,6 +5692,7 @@ static int rtl8xxxu_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
break;
case WLAN_CIPHER_SUITE_TKIP:
key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ break;
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
index 6fbf8845a2ab..d748aab66aa2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
@@ -292,11 +292,9 @@ bool halbtc_send_bt_mp_operation(struct btc_coexist *btcoexist, u8 op_code,
static void halbtc_leave_lps(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv;
- struct rtl_ps_ctl *ppsc;
bool ap_enable = false;
rtlpriv = btcoexist->adapter;
- ppsc = rtl_psc(rtlpriv);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE,
&ap_enable);
@@ -315,11 +313,9 @@ static void halbtc_leave_lps(struct btc_coexist *btcoexist)
static void halbtc_enter_lps(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv;
- struct rtl_ps_ctl *ppsc;
bool ap_enable = false;
rtlpriv = btcoexist->adapter;
- ppsc = rtl_psc(rtlpriv);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE,
&ap_enable);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
index 4c1f8b08fc10..14dcb0816bc0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
@@ -29,7 +29,6 @@
#include "../stats.h"
#include "reg.h"
#include "def.h"
-#include "phy.h"
#include "trx.h"
#include "led.h"
#include "dm.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
index 85cedd083d2b..75bfa9dfef4a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
@@ -173,7 +173,7 @@ static int _rtl92d_fw_init(struct ieee80211_hw *hw)
rtl_read_byte(rtlpriv, FW_MAC1_READY));
}
RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- "Polling FW ready fail!! REG_MCUFWDL:0x%08ul\n",
+ "Polling FW ready fail!! REG_MCUFWDL:0x%08x\n",
rtl_read_dword(rtlpriv, REG_MCUFWDL));
return -1;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
index 5cf29f5a4b54..3f3327878b51 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
@@ -509,13 +509,10 @@ bool rtl8723e_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
int i;
bool rtstatus = true;
u32 *radioa_array_table;
- u32 *radiob_array_table;
- u16 radioa_arraylen, radiob_arraylen;
+ u16 radioa_arraylen;
radioa_arraylen = RTL8723ERADIOA_1TARRAYLENGTH;
radioa_array_table = RTL8723E_RADIOA_1TARRAY;
- radiob_arraylen = RTL8723E_RADIOB_1TARRAYLENGTH;
- radiob_array_table = RTL8723E_RADIOB_1TARRAY;
rtstatus = true;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.c
index 61e86045f15c..1bbee0bfac23 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.c
@@ -475,10 +475,6 @@ u32 RTL8723E_RADIOA_1TARRAY[RTL8723ERADIOA_1TARRAYLENGTH] = {
0x000, 0x00030159,
};
-u32 RTL8723E_RADIOB_1TARRAY[RTL8723E_RADIOB_1TARRAYLENGTH] = {
- 0x0,
-};
-
u32 RTL8723EMAC_ARRAY[RTL8723E_MACARRAYLENGTH] = {
0x420, 0x00000080,
0x423, 0x00000000,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.h
index 57a548ceba7d..a044f3c456fa 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.h
@@ -36,8 +36,6 @@ extern u32 RTL8723EPHY_REG_1TARRAY[RTL8723E_PHY_REG_1TARRAY_LENGTH];
extern u32 RTL8723EPHY_REG_ARRAY_PG[RTL8723E_PHY_REG_ARRAY_PGLENGTH];
#define RTL8723ERADIOA_1TARRAYLENGTH 282
extern u32 RTL8723E_RADIOA_1TARRAY[RTL8723ERADIOA_1TARRAYLENGTH];
-#define RTL8723E_RADIOB_1TARRAYLENGTH 1
-extern u32 RTL8723E_RADIOB_1TARRAY[RTL8723E_RADIOB_1TARRAYLENGTH];
#define RTL8723E_MACARRAYLENGTH 172
extern u32 RTL8723EMAC_ARRAY[RTL8723E_MACARRAYLENGTH];
#define RTL8723E_AGCTAB_1TARRAYLENGTH 320
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index 317c1b3101da..ba258318ee9f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -3404,75 +3404,6 @@ static void rtl8821ae_update_hal_rate_table(struct ieee80211_hw *hw,
"%x\n", rtl_read_dword(rtlpriv, REG_ARFR0));
}
-static u8 _rtl8821ae_mrate_idx_to_arfr_id(
- struct ieee80211_hw *hw, u8 rate_index,
- enum wireless_mode wirelessmode)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- u8 ret = 0;
- switch (rate_index) {
- case RATR_INX_WIRELESS_NGB:
- if (rtlphy->rf_type == RF_1T1R)
- ret = 1;
- else
- ret = 0;
- ; break;
- case RATR_INX_WIRELESS_N:
- case RATR_INX_WIRELESS_NG:
- if (rtlphy->rf_type == RF_1T1R)
- ret = 5;
- else
- ret = 4;
- ; break;
- case RATR_INX_WIRELESS_NB:
- if (rtlphy->rf_type == RF_1T1R)
- ret = 3;
- else
- ret = 2;
- ; break;
- case RATR_INX_WIRELESS_GB:
- ret = 6;
- break;
- case RATR_INX_WIRELESS_G:
- ret = 7;
- break;
- case RATR_INX_WIRELESS_B:
- ret = 8;
- break;
- case RATR_INX_WIRELESS_MC:
- if ((wirelessmode == WIRELESS_MODE_B)
- || (wirelessmode == WIRELESS_MODE_G)
- || (wirelessmode == WIRELESS_MODE_N_24G)
- || (wirelessmode == WIRELESS_MODE_AC_24G))
- ret = 6;
- else
- ret = 7;
- case RATR_INX_WIRELESS_AC_5N:
- if (rtlphy->rf_type == RF_1T1R)
- ret = 10;
- else
- ret = 9;
- break;
- case RATR_INX_WIRELESS_AC_24N:
- if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) {
- if (rtlphy->rf_type == RF_1T1R)
- ret = 10;
- else
- ret = 9;
- } else {
- if (rtlphy->rf_type == RF_1T1R)
- ret = 11;
- else
- ret = 12;
- }
- break;
- default:
- ret = 0; break;
- }
- return ret;
-}
-
static u32 _rtl8821ae_rate_to_bitmap_2ssvht(__le16 vht_rate)
{
u8 i, j, tmp_rate;
@@ -3761,7 +3692,7 @@ static void rtl8821ae_update_hal_rate_mask(struct ieee80211_hw *hw,
break;
}
- ratr_index = _rtl8821ae_mrate_idx_to_arfr_id(hw, ratr_index, wirelessmode);
+ ratr_index = rtl_mrate_idx_to_arfr_id(hw, ratr_index, wirelessmode);
sta_entry->ratr_index = ratr_index;
ratr_bitmap = _rtl8821ae_set_ra_vht_ratr_bitmap(hw, wirelessmode,
ratr_bitmap);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
index 176deb2b5386..a75451c246fd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
@@ -394,6 +394,7 @@ static void _rtl8812ae_phy_set_rfe_reg_24g(struct ieee80211_hw *hw)
rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
break;
}
+ /* fall through */
case 0:
case 2:
default:
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
index d7960dd5bf1a..b01c3c5e21c7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
@@ -29,7 +29,6 @@
#include "../stats.h"
#include "reg.h"
#include "def.h"
-#include "phy.h"
#include "trx.h"
#include "led.h"
#include "dm.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index 0f3b98c5227f..87bc21bb5e8b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -1905,10 +1905,6 @@ struct rtl_efuse {
u8 efuse_map[2][EFUSE_MAX_LOGICAL_SIZE];
u16 efuse_usedbytes;
u8 efuse_usedpercentage;
-#ifdef EFUSE_REPG_WORKAROUND
- bool efuse_re_pg_sec1flag;
- u8 efuse_re_pg_data[8];
-#endif
u8 autoload_failflag;
u8 autoload_status;
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
index 612c211e21a1..449f6d23c5e3 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
@@ -210,7 +210,7 @@ int rsi_init_sdio_slave_regs(struct rsi_hw *adapter)
}
/* This tells SDIO FIFO when to start read to host */
- rsi_dbg(INIT_ZONE, "%s: Initialzing SDIO read start level\n", __func__);
+ rsi_dbg(INIT_ZONE, "%s: Initializing SDIO read start level\n", __func__);
byte = 0x24;
status = rsi_sdio_write_register(adapter,
@@ -223,7 +223,7 @@ int rsi_init_sdio_slave_regs(struct rsi_hw *adapter)
return -1;
}
- rsi_dbg(INIT_ZONE, "%s: Initialzing FIFO ctrl registers\n", __func__);
+ rsi_dbg(INIT_ZONE, "%s: Initializing FIFO ctrl registers\n", __func__);
byte = (128 - 32);
status = rsi_sdio_write_register(adapter,
diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c
index 38678e9a0562..8dae92a79fe1 100644
--- a/drivers/net/wireless/st/cw1200/sta.c
+++ b/drivers/net/wireless/st/cw1200/sta.c
@@ -1123,7 +1123,7 @@ int cw1200_setup_mac(struct cw1200_common *priv)
*
* NOTE2: RSSI based reports have been switched to RCPI, since
* FW has a bug and RSSI reported values are not stable,
- * what can leads to signal level oscilations in user-end applications
+ * which can lead to signal level oscillations in user-end applications
*/
struct wsm_rcpi_rssi_threshold threshold = {
.rssiRcpiMode = WSM_RCPI_RSSI_THRESHOLD_ENABLE |
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 89b0d0fade9f..26b187336875 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -27,6 +27,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
#include "wlcore.h"
#include "debug.h"
@@ -957,6 +958,8 @@ static void wl1271_recovery_work(struct work_struct *work)
BUG_ON(wl->conf.recovery.bug_on_recovery &&
!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
+ clear_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
+
if (wl->conf.recovery.no_recovery) {
wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
goto out_unlock;
@@ -6625,13 +6628,25 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context)
}
#ifdef CONFIG_PM
+ device_init_wakeup(wl->dev, true);
+
ret = enable_irq_wake(wl->irq);
if (!ret) {
wl->irq_wake_enabled = true;
- device_init_wakeup(wl->dev, 1);
if (pdev_data->pwr_in_suspend)
wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
}
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+ if (res) {
+ wl->wakeirq = res->start;
+ wl->wakeirq_flags = res->flags & IRQF_TRIGGER_MASK;
+ ret = dev_pm_set_dedicated_wake_irq(wl->dev, wl->wakeirq);
+ if (ret)
+ wl->wakeirq = -ENODEV;
+ } else {
+ wl->wakeirq = -ENODEV;
+ }
#endif
disable_irq(wl->irq);
wl1271_power_off(wl);
@@ -6659,6 +6674,9 @@ out_unreg:
wl1271_unregister_hw(wl);
out_irq:
+ if (wl->wakeirq >= 0)
+ dev_pm_clear_wake_irq(wl->dev);
+ device_init_wakeup(wl->dev, false);
free_irq(wl->irq, wl);
out_free_nvs:
@@ -6710,6 +6728,7 @@ static int __maybe_unused wlcore_runtime_resume(struct device *dev)
int ret;
unsigned long start_time = jiffies;
bool pending = false;
+ bool recovery = false;
/* Nothing to do if no ELP mode requested */
if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
@@ -6726,7 +6745,7 @@ static int __maybe_unused wlcore_runtime_resume(struct device *dev)
ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
if (ret < 0) {
- wl12xx_queue_recovery_work(wl);
+ recovery = true;
goto err;
}
@@ -6734,11 +6753,12 @@ static int __maybe_unused wlcore_runtime_resume(struct device *dev)
ret = wait_for_completion_timeout(&compl,
msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
if (ret == 0) {
- wl1271_error("ELP wakeup timeout!");
- wl12xx_queue_recovery_work(wl);
+ wl1271_warning("ELP wakeup timeout!");
/* Return no error for runtime PM for recovery */
- return 0;
+ ret = 0;
+ recovery = true;
+ goto err;
}
}
@@ -6753,6 +6773,12 @@ err:
spin_lock_irqsave(&wl->wl_lock, flags);
wl->elp_compl = NULL;
spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+ if (recovery) {
+ set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
+ wl12xx_queue_recovery_work(wl);
+ }
+
return ret;
}
@@ -6815,10 +6841,16 @@ int wlcore_remove(struct platform_device *pdev)
if (!wl->initialized)
return 0;
- if (wl->irq_wake_enabled) {
- device_init_wakeup(wl->dev, 0);
- disable_irq_wake(wl->irq);
+ if (wl->wakeirq >= 0) {
+ dev_pm_clear_wake_irq(wl->dev);
+ wl->wakeirq = -ENODEV;
}
+
+ device_init_wakeup(wl->dev, false);
+
+ if (wl->irq_wake_enabled)
+ disable_irq_wake(wl->irq);
+
wl1271_unregister_hw(wl);
pm_runtime_put_sync(wl->dev);
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index 750bea3574ee..4c2154b9e6a3 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -241,7 +241,7 @@ static const struct of_device_id wlcore_sdio_of_match_table[] = {
{ }
};
-static int wlcore_probe_of(struct device *dev, int *irq,
+static int wlcore_probe_of(struct device *dev, int *irq, int *wakeirq,
struct wlcore_platdev_data *pdev_data)
{
struct device_node *np = dev->of_node;
@@ -259,6 +259,8 @@ static int wlcore_probe_of(struct device *dev, int *irq,
return -EINVAL;
}
+ *wakeirq = irq_of_parse_and_map(np, 1);
+
/* optional clock frequency params */
of_property_read_u32(np, "ref-clock-frequency",
&pdev_data->ref_clock_freq);
@@ -268,7 +270,7 @@ static int wlcore_probe_of(struct device *dev, int *irq,
return 0;
}
#else
-static int wlcore_probe_of(struct device *dev, int *irq,
+static int wlcore_probe_of(struct device *dev, int *irq, int *wakeirq,
struct wlcore_platdev_data *pdev_data)
{
return -ENODATA;
@@ -280,10 +282,10 @@ static int wl1271_probe(struct sdio_func *func,
{
struct wlcore_platdev_data *pdev_data;
struct wl12xx_sdio_glue *glue;
- struct resource res[1];
+ struct resource res[2];
mmc_pm_flag_t mmcflags;
int ret = -ENOMEM;
- int irq;
+ int irq, wakeirq;
const char *chip_family;
/* We are only able to handle the wlan function */
@@ -308,7 +310,7 @@ static int wl1271_probe(struct sdio_func *func,
/* Use block mode for transferring over one block size of data */
func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
- ret = wlcore_probe_of(&func->dev, &irq, pdev_data);
+ ret = wlcore_probe_of(&func->dev, &irq, &wakeirq, pdev_data);
if (ret)
goto out;
@@ -351,6 +353,11 @@ static int wl1271_probe(struct sdio_func *func,
irqd_get_trigger_type(irq_get_irq_data(irq));
res[0].name = "irq";
+ res[1].start = wakeirq;
+ res[1].flags = IORESOURCE_IRQ |
+ irqd_get_trigger_type(irq_get_irq_data(wakeirq));
+ res[1].name = "wakeirq";
+
ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res));
if (ret) {
dev_err(glue->dev, "can't add resources\n");
diff --git a/drivers/net/wireless/ti/wlcore/vendor_cmd.c b/drivers/net/wireless/ti/wlcore/vendor_cmd.c
index dbe78d8491ef..7f34ec077ee5 100644
--- a/drivers/net/wireless/ti/wlcore/vendor_cmd.c
+++ b/drivers/net/wireless/ti/wlcore/vendor_cmd.c
@@ -70,7 +70,7 @@ wlcore_vendor_cmd_smart_config_start(struct wiphy *wiphy,
out:
mutex_unlock(&wl->mutex);
- return 0;
+ return ret;
}
static int
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index d4b1f66ef457..dd14850b0603 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -199,8 +199,10 @@ struct wl1271 {
struct wl1271_if_operations *if_ops;
int irq;
+ int wakeirq;
int irq_flags;
+ int wakeirq_flags;
spinlock_t wl_lock;
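The wlcore changes above wire up an optional second interrupt as a dedicated wake IRQ: the SDIO glue parses it from the device tree and exports it as a second IORESOURCE_IRQ, and the core registers it with dev_pm_set_dedicated_wake_irq() after enabling wakeup on the device; the error and remove paths mirror this with dev_pm_clear_wake_irq() and device_init_wakeup(dev, false). A condensed sketch of the probe-side pattern, assuming a generic platform driver (the function and variable names are illustrative, not wlcore's):

	#include <linux/platform_device.h>
	#include <linux/pm_wakeup.h>
	#include <linux/pm_wakeirq.h>

	static int example_probe_wakeirq(struct platform_device *pdev, int *wakeirq)
	{
		struct resource *res;
		int ret;

		/* Mark the device wakeup-capable before attaching a wake IRQ. */
		device_init_wakeup(&pdev->dev, true);

		/* Optional second IRQ resource ("wakeirq" in the hunks above). */
		res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
		if (!res) {
			*wakeirq = -ENODEV;
			return 0;	/* the wake IRQ is optional */
		}

		*wakeirq = res->start;
		ret = dev_pm_set_dedicated_wake_irq(&pdev->dev, *wakeirq);
		if (ret)
			*wakeirq = -ENODEV;

		return 0;
	}

Teardown would undo both steps in reverse: dev_pm_clear_wake_irq(&pdev->dev) if a wake IRQ was set, then device_init_wakeup(&pdev->dev, false).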
diff --git a/drivers/net/wireless/zydas/zd1201.c b/drivers/net/wireless/zydas/zd1201.c
index 253403899fe9..22c70f1f568c 100644
--- a/drivers/net/wireless/zydas/zd1201.c
+++ b/drivers/net/wireless/zydas/zd1201.c
@@ -969,6 +969,7 @@ static int zd1201_set_mode(struct net_device *dev,
*/
zd1201_join(zd, "\0-*#\0", 5);
/* Put port in pIBSS */
+ /* Fall through */
case 8: /* No pseudo-IBSS in wireless extensions (yet) */
porttype = ZD1201_PORTTYPE_PSEUDOIBSS;
break;
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
index 1f6d9f357e57..9ccd780695f0 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
@@ -235,7 +235,7 @@ void zd_mac_clear(struct zd_mac *mac)
{
flush_workqueue(zd_workqueue);
zd_chip_clear(&mac->chip);
- ZD_ASSERT(!spin_is_locked(&mac->lock));
+ lockdep_assert_held(&mac->lock);
ZD_MEMCLEAR(mac, sizeof(struct zd_mac));
}
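The zd1211rw hunk replaces a spin_is_locked()-based assertion with lockdep_assert_held(), which compiles away when lockdep is disabled and verifies that the current context holds the lock. (Note the polarity: the removed assertion checked that the lock was not held.) A minimal sketch of the lockdep form, with an illustrative struct:

	#include <linux/spinlock.h>
	#include <linux/lockdep.h>

	struct example_mac {
		spinlock_t lock;
		int state;
	};

	/* Caller must hold mac->lock; lockdep checks this on debug builds
	 * and the assertion vanishes entirely otherwise. */
	static void example_clear_state(struct example_mac *mac)
	{
		lockdep_assert_held(&mac->lock);
		mac->state = 0;
	}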
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index a46a1e94505d..936c0b3e0ba2 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -241,8 +241,9 @@ struct xenvif_hash_cache {
struct xenvif_hash {
unsigned int alg;
u32 flags;
+ bool mapping_sel;
u8 key[XEN_NETBK_MAX_HASH_KEY_SIZE];
- u32 mapping[XEN_NETBK_MAX_HASH_MAPPING_SIZE];
+ u32 mapping[2][XEN_NETBK_MAX_HASH_MAPPING_SIZE];
unsigned int size;
struct xenvif_hash_cache cache;
};
diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
index 3c4c58b9fe76..0ccb021f1e78 100644
--- a/drivers/net/xen-netback/hash.c
+++ b/drivers/net/xen-netback/hash.c
@@ -324,7 +324,8 @@ u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
vif->hash.size = size;
- memset(vif->hash.mapping, 0, sizeof(u32) * size);
+ memset(vif->hash.mapping[vif->hash.mapping_sel], 0,
+ sizeof(u32) * size);
return XEN_NETIF_CTRL_STATUS_SUCCESS;
}
@@ -332,31 +333,49 @@ u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
u32 off)
{
- u32 *mapping = &vif->hash.mapping[off];
- struct gnttab_copy copy_op = {
+ u32 *mapping = vif->hash.mapping[!vif->hash.mapping_sel];
+ unsigned int nr = 1;
+ struct gnttab_copy copy_op[2] = {{
.source.u.ref = gref,
.source.domid = vif->domid,
- .dest.u.gmfn = virt_to_gfn(mapping),
.dest.domid = DOMID_SELF,
- .dest.offset = xen_offset_in_page(mapping),
- .len = len * sizeof(u32),
+ .len = len * sizeof(*mapping),
.flags = GNTCOPY_source_gref
- };
+ }};
- if ((off + len > vif->hash.size) || copy_op.len > XEN_PAGE_SIZE)
+ if ((off + len < off) || (off + len > vif->hash.size) ||
+ len > XEN_PAGE_SIZE / sizeof(*mapping))
return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
- while (len-- != 0)
- if (mapping[off++] >= vif->num_queues)
- return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+ copy_op[0].dest.u.gmfn = virt_to_gfn(mapping + off);
+ copy_op[0].dest.offset = xen_offset_in_page(mapping + off);
+ if (copy_op[0].dest.offset + copy_op[0].len > XEN_PAGE_SIZE) {
+ copy_op[1] = copy_op[0];
+ copy_op[1].source.offset = XEN_PAGE_SIZE - copy_op[0].dest.offset;
+ copy_op[1].dest.u.gmfn = virt_to_gfn(mapping + off + len);
+ copy_op[1].dest.offset = 0;
+ copy_op[1].len = copy_op[0].len - copy_op[1].source.offset;
+ copy_op[0].len = copy_op[1].source.offset;
+ nr = 2;
+ }
- if (copy_op.len != 0) {
- gnttab_batch_copy(&copy_op, 1);
+ memcpy(mapping, vif->hash.mapping[vif->hash.mapping_sel],
+ vif->hash.size * sizeof(*mapping));
- if (copy_op.status != GNTST_okay)
+ if (copy_op[0].len != 0) {
+ gnttab_batch_copy(copy_op, nr);
+
+ if (copy_op[0].status != GNTST_okay ||
+ copy_op[nr - 1].status != GNTST_okay)
return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
}
+ while (len-- != 0)
+ if (mapping[off++] >= vif->num_queues)
+ return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+
+ vif->hash.mapping_sel = !vif->hash.mapping_sel;
+
return XEN_NETIF_CTRL_STATUS_SUCCESS;
}
@@ -408,6 +427,8 @@ void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
}
if (vif->hash.size != 0) {
+ const u32 *mapping = vif->hash.mapping[vif->hash.mapping_sel];
+
seq_puts(m, "\nHash Mapping:\n");
for (i = 0; i < vif->hash.size; ) {
@@ -420,7 +441,7 @@ void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
seq_printf(m, "[%4u - %4u]: ", i, i + n - 1);
for (j = 0; j < n; j++, i++)
- seq_printf(m, "%4u ", vif->hash.mapping[i]);
+ seq_printf(m, "%4u ", mapping[i]);
seq_puts(m, "\n");
}
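The hash.c rework above keeps two copies of the queue-mapping table plus a mapping_sel flag: readers always use the currently selected copy, while xenvif_set_hash_mapping() copies the grant data into the inactive copy (splitting the grant copy when the destination straddles a XEN_PAGE_SIZE boundary), validates it, and only then flips the selector, so a partially written or invalid table is never visible to the transmit path. A small user-space sketch of the same double-buffer-and-flip idea, using plain memcpy instead of grant copies and illustrative types rather than the xen-netback structures:

	#include <stdbool.h>
	#include <string.h>

	#define MAP_SIZE 64

	struct hash_map {
		bool sel;			/* which copy readers use */
		unsigned int map[2][MAP_SIZE];
		unsigned int num_queues;
	};

	/* Readers only ever look at the active copy. */
	static unsigned int map_lookup(const struct hash_map *h, unsigned int hash)
	{
		return h->map[h->sel][hash % MAP_SIZE];
	}

	/* Writers stage the update in the inactive copy, validate it, then flip. */
	static int map_update(struct hash_map *h, const unsigned int *new_entries,
			      unsigned int off, unsigned int len)
	{
		unsigned int *shadow = h->map[!h->sel];
		unsigned int i;

		if (off + len < off || off + len > MAP_SIZE)
			return -1;

		/* Start from the live table so untouched entries stay valid. */
		memcpy(shadow, h->map[h->sel], sizeof(h->map[0]));
		memcpy(shadow + off, new_entries, len * sizeof(*shadow));

		for (i = 0; i < len; i++)
			if (shadow[off + i] >= h->num_queues)
				return -1;	/* reject without touching the live copy */

		h->sel = !h->sel;		/* flip; safe for a single writer */
		return 0;
	}

The patch performs the same ordering (copy, validate, flip), which is why the interface.c and seq_file readers below can simply index mapping[vif->hash.mapping_sel] without extra locking against an in-progress update.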
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 7e3ea39a1b39..182d6770f102 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -162,7 +162,8 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
if (size == 0)
return skb_get_hash_raw(skb) % dev->real_num_tx_queues;
- return vif->hash.mapping[skb_get_hash_raw(skb) % size];
+ return vif->hash.mapping[vif->hash.mapping_sel]
+ [skb_get_hash_raw(skb) % size];
}
static netdev_tx_t