author    Linus Torvalds <torvalds@linux-foundation.org>  2020-10-15 18:42:13 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2020-10-15 18:42:13 -0700
commit    9ff9b0d392ea08090cd1780fb196f36dbb586529 (patch)
tree      276a3a5c4525b84dee64eda30b423fc31bf94850 /drivers/net/ethernet/marvell
parent    840e5bb326bbcb16ce82dd2416d2769de4839aea (diff)
parent    105faa8742437c28815b2a3eb8314ebc5fd9288c (diff)
Merge tag 'net-next-5.10' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
Pull networking updates from Jakub Kicinski:

 - Add redirect_neigh() BPF packet redirect helper, allowing to limit stack traversal in common container configs and improving TCP back-pressure. Daniel reports ~10Gbps => ~15Gbps single stream TCP performance gain.

 - Expand netlink policy support and improve policy export to user space. (Ge)netlink core performs request validation according to declared policies. Expand the expressiveness of those policies (min/max length and bitmasks). Allow dumping policies for particular commands. This is used for feature discovery by user space (instead of kernel version parsing or trial and error).

 - Support IGMPv3/MLDv2 multicast listener discovery protocols in bridge.

 - Allow more than 255 IPv4 multicast interfaces.

 - Add support for Type of Service (ToS) reflection in SYN/SYN-ACK packets of TCPv6.

 - In Multi-Path TCP (MPTCP) support concurrent transmission of data on multiple subflows in a load balancing scenario. Enhance advertising addresses via the RM_ADDR/ADD_ADDR options.

 - Support SMC-Dv2 version of SMC, which enables multi-subnet deployments.

 - Allow more calls to same peer in RxRPC.

 - Support two new Controller Area Network (CAN) protocols - CAN-FD and ISO 15765-2:2016.

 - Add xfrm/IPsec compat layer, solving the 32bit user space on 64bit kernel problem.

 - Add TC actions for implementing MPLS L2 VPNs.

 - Improve nexthop code - e.g. handle various corner cases when nexthop objects are removed from groups better, skip unnecessary notifications and make it easier to offload nexthops into HW by converting to a blocking notifier.

 - Support adding and consuming TCP header options by BPF programs, opening the doors for easy experimental and deployment-specific TCP option use.

 - Reorganize TCP congestion control (CC) initialization to simplify life of TCP CC implemented in BPF.

 - Add support for shipping BPF programs with the kernel and loading them early on boot via the User Mode Driver mechanism, hence reusing all the user space infra we have.

 - Support sleepable BPF programs, initially targeting LSM and tracing.

 - Add bpf_d_path() helper for returning full path for given 'struct path' (a usage sketch follows this log).

 - Make bpf_tail_call compatible with bpf-to-bpf calls.

 - Allow BPF programs to call map_update_elem on sockmaps.

 - Add BPF Type Format (BTF) support for type and enum discovery, as well as support for using BTF within the kernel itself (current use is for pretty printing structures).

 - Support listing and getting information about bpf_links via the bpf syscall.

 - Enhance kernel interfaces around NIC firmware update. Allow specifying overwrite mask to control if settings etc. are reset during update; report expected max time operation may take to users; support firmware activation without machine reboot incl. limits of how much impact reset may have (e.g. dropping link or not).

 - Extend ethtool configuration interface to report IEEE-standard counters, to limit the need for per-vendor logic in user space.

 - Adopt or extend devlink use for debug, monitoring, fw update in many drivers (dsa loop, ice, ionic, sja1105, qed, mlxsw, mv88e6xxx, dpaa2-eth).

 - In mlxsw expose critical and emergency SFP module temperature alarms. Refactor port buffer handling to make the defaults more suitable and support setting these values explicitly via the DCBNL interface.

 - Add XDP support for Intel's igb driver.

 - Support offloading TC flower classification and filtering rules to mscc_ocelot switches.

 - Add PTP support for Marvell Octeontx2 and PP2.2 hardware, as well as fixed interval period pulse generator and one-step timestamping in dpaa-eth.

 - Add support for various auth offloads in WiFi APs, e.g. SAE (WPA3) offload.

 - Add Lynx PHY/PCS MDIO module, and convert various drivers which have this HW to use it. Convert mvpp2 to split PCS.

 - Support Marvell Prestera 98DX3255 24-port switch ASICs, as well as 7-port Mediatek MT7531 IP.

 - Add initial support for QCA6390 and IPQ6018 in ath11k WiFi driver, and wcn3680 support in wcn36xx.

 - Improve performance for packets which don't require much offloads on recent Mellanox NICs by 20% by making multiple packets share a descriptor entry.

 - Move chelsio inline crypto drivers (for TLS and IPsec) from the crypto subtree to drivers/net. Move MDIO drivers out of the phy directory.

 - Clean up a lot of W=1 warnings, reportedly the actively developed subsections of networking drivers should now build W=1 warning free.

 - Make sure drivers don't use in_interrupt() to dynamically adapt their code. Convert tasklets to use new tasklet_setup API (sadly this conversion is not yet complete).

* tag 'net-next-5.10' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (2583 commits)
  Revert "bpfilter: Fix build error with CONFIG_BPFILTER_UMH"
  net, sockmap: Don't call bpf_prog_put() on NULL pointer
  bpf, selftest: Fix flaky tcp_hdr_options test when adding addr to lo
  bpf, sockmap: Add locking annotations to iterator
  netfilter: nftables: allow re-computing sctp CRC-32C in 'payload' statements
  net: fix pos incrementment in ipv6_route_seq_next
  net/smc: fix invalid return code in smcd_new_buf_create()
  net/smc: fix valid DMBE buffer sizes
  net/smc: fix use-after-free of delayed events
  bpfilter: Fix build error with CONFIG_BPFILTER_UMH
  cxgb4/ch_ipsec: Replace the module name to ch_ipsec from chcr
  net: sched: Fix suspicious RCU usage while accessing tcf_tunnel_info
  bpf: Fix register equivalence tracking.
  rxrpc: Fix loss of final ack on shutdown
  rxrpc: Fix bundle counting for exclusive connections
  netfilter: restore NF_INET_NUMHOOKS
  ibmveth: Identify ingress large send packets.
  ibmveth: Switch order of ibmveth_helper calls.
  cxgb4: handle 4-tuple PEDIT to NAT mode translation
  selftests: Add VRF route leaking tests
  ...
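A quick illustration of the bpf_d_path() item above (a minimal sketch, not code from this tree): the helper resolves a 'struct path' to a full pathname from a restricted set of tracing attach points. This assumes a libbpf-style build with a generated vmlinux.h and attaches to filp_close, one of the helper's allow-listed hooks; the program name is illustrative.

/* SPDX-License-Identifier: GPL-2.0 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

SEC("fentry/filp_close")
int BPF_PROG(log_close, struct file *file)
{
	char buf[256];
	long len;

	/* Resolve the file's embedded struct path to a full pathname. */
	len = bpf_d_path(&file->f_path, buf, sizeof(buf));
	if (len > 0)
		bpf_printk("close: %s", buf);
	return 0;
}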
Diffstat (limited to 'drivers/net/ethernet/marvell')
-rw-r--r--  drivers/net/ethernet/marvell/Kconfig | 7
-rw-r--r--  drivers/net/ethernet/marvell/Makefile | 1
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 47
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/Makefile | 3
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 203
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 878
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_tai.c | 457
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/Makefile | 5
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 29
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.h | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.c | 11
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 22
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc.h | 47
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h | 541
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/ptp.c | 275
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/ptp.h | 25
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 36
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 22
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 41
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 87
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c | 239
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c | 12
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h | 103
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/Makefile | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 98
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 26
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 35
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 180
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c | 212
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h | 13
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 112
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h | 1
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c | 5
-rw-r--r--  drivers/net/ethernet/marvell/prestera/Kconfig | 25
-rw-r--r--  drivers/net/ethernet/marvell/prestera/Makefile | 7
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera.h | 206
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_devlink.c | 112
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_devlink.h | 23
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_dsa.c | 104
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_dsa.h | 35
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_ethtool.c | 780
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_ethtool.h | 11
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_hw.c | 1253
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_hw.h | 182
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_main.c | 667
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_pci.c | 769
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_rxtx.c | 820
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_rxtx.h | 19
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_switchdev.c | 1277
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_switchdev.h | 13
-rw-r--r--  drivers/net/ethernet/marvell/pxa168_eth.c | 7
-rw-r--r--  drivers/net/ethernet/marvell/skge.c | 6
52 files changed, 9560 insertions(+), 536 deletions(-)
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index ef4f35ba077d..41815b609569 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -92,6 +92,12 @@ config MVPP2
This driver supports the network interface units in the
Marvell ARMADA 375, 7K and 8K SoCs.
+config MVPP2_PTP
+ bool "Marvell Armada 8K Enable PTP support"
+ depends on NETWORK_PHY_TIMESTAMPING
+ depends on (PTP_1588_CLOCK = y && MVPP2 = y) || \
+ (PTP_1588_CLOCK && MVPP2 = m)
+
config PXA168_ETH
tristate "Marvell pxa168 ethernet support"
depends on HAS_IOMEM
@@ -172,5 +178,6 @@ config SKY2_DEBUG
source "drivers/net/ethernet/marvell/octeontx2/Kconfig"
+source "drivers/net/ethernet/marvell/prestera/Kconfig"
endif # NET_VENDOR_MARVELL
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
index 89dea7284d5b..9f88fe822555 100644
--- a/drivers/net/ethernet/marvell/Makefile
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -12,3 +12,4 @@ obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
obj-$(CONFIG_SKGE) += skge.o
obj-$(CONFIG_SKY2) += sky2.o
obj-y += octeontx2/
+obj-y += prestera/
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 5bf0409f5d42..54b0bf574c05 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -330,7 +330,6 @@
#define MVNETA_SKB_HEADROOM ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8)
#define MVNETA_SKB_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \
MVNETA_SKB_HEADROOM))
-#define MVNETA_SKB_SIZE(len) (SKB_DATA_ALIGN(len) + MVNETA_SKB_PAD)
#define MVNETA_MAX_RX_BUF_SIZE (PAGE_SIZE - MVNETA_SKB_PAD)
#define IS_TSO_HEADER(txq, addr) \
@@ -752,13 +751,12 @@ static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
int i;
- u32 dummy;
/* Perform dummy reads from MIB counters */
for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
- dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
- dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
- dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
+ mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
+ mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
+ mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
}
/* Get System Network Statistics */
@@ -1833,7 +1831,7 @@ static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
/* Free tx queue skbuffs */
static void mvneta_txq_bufs_free(struct mvneta_port *pp,
struct mvneta_tx_queue *txq, int num,
- struct netdev_queue *nq)
+ struct netdev_queue *nq, bool napi)
{
unsigned int bytes_compl = 0, pkts_compl = 0;
int i;
@@ -1856,7 +1854,10 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
dev_kfree_skb_any(buf->skb);
} else if (buf->type == MVNETA_TYPE_XDP_TX ||
buf->type == MVNETA_TYPE_XDP_NDO) {
- xdp_return_frame(buf->xdpf);
+ if (napi && buf->type == MVNETA_TYPE_XDP_TX)
+ xdp_return_frame_rx_napi(buf->xdpf);
+ else
+ xdp_return_frame(buf->xdpf);
}
}
@@ -1874,7 +1875,7 @@ static void mvneta_txq_done(struct mvneta_port *pp,
if (!tx_done)
return;
- mvneta_txq_bufs_free(pp, txq, tx_done, nq);
+ mvneta_txq_bufs_free(pp, txq, tx_done, nq, true);
txq->count -= tx_done;
@@ -2227,8 +2228,7 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
struct mvneta_rx_desc *rx_desc,
struct mvneta_rx_queue *rxq,
struct xdp_buff *xdp, int *size,
- struct page *page,
- struct mvneta_stats *stats)
+ struct page *page)
{
unsigned char *data = page_address(page);
int data_len = -MVNETA_MH_SIZE, len;
@@ -2236,19 +2236,22 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
enum dma_data_direction dma_dir;
struct skb_shared_info *sinfo;
- if (MVNETA_SKB_SIZE(rx_desc->data_size) > PAGE_SIZE) {
+ if (*size > MVNETA_MAX_RX_BUF_SIZE) {
len = MVNETA_MAX_RX_BUF_SIZE;
data_len += len;
} else {
- len = rx_desc->data_size;
+ len = *size;
data_len += len - ETH_FCS_LEN;
}
+ *size = *size - len;
dma_dir = page_pool_get_dma_dir(rxq->page_pool);
dma_sync_single_for_cpu(dev->dev.parent,
rx_desc->buf_phys_addr,
len, dma_dir);
+ rx_desc->buf_phys_addr = 0;
+
/* Prefetch header */
prefetch(data);
@@ -2259,9 +2262,6 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
sinfo = xdp_get_shared_info_from_buff(xdp);
sinfo->nr_frags = 0;
-
- *size = rx_desc->data_size - len;
- rx_desc->buf_phys_addr = 0;
}
static void
@@ -2307,11 +2307,8 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
{
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
int i, num_frags = sinfo->nr_frags;
- skb_frag_t frags[MAX_SKB_FRAGS];
struct sk_buff *skb;
- memcpy(frags, sinfo->frags, sizeof(skb_frag_t) * num_frags);
-
skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
if (!skb)
return ERR_PTR(-ENOMEM);
@@ -2323,12 +2320,12 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
mvneta_rx_csum(pp, desc_status, skb);
for (i = 0; i < num_frags; i++) {
- struct page *page = skb_frag_page(&frags[i]);
+ skb_frag_t *frag = &sinfo->frags[i];
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- page, skb_frag_off(&frags[i]),
- skb_frag_size(&frags[i]), PAGE_SIZE);
- page_pool_release_page(rxq->page_pool, page);
+ skb_frag_page(frag), skb_frag_off(frag),
+ skb_frag_size(frag), PAGE_SIZE);
+ page_pool_release_page(rxq->page_pool, skb_frag_page(frag));
}
return skb;
@@ -2378,10 +2375,10 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
size = rx_desc->data_size;
frame_sz = size - ETH_FCS_LEN;
- desc_status = rx_desc->status;
+ desc_status = rx_status;
mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
- &size, page, &ps);
+ &size, page);
} else {
if (unlikely(!xdp_buf.data_hard_start)) {
rx_desc->buf_phys_addr = 0;
@@ -2865,7 +2862,7 @@ static void mvneta_txq_done_force(struct mvneta_port *pp,
struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
int tx_done = txq->count;
- mvneta_txq_bufs_free(pp, txq, tx_done, nq);
+ mvneta_txq_bufs_free(pp, txq, tx_done, nq, false);
/* reset txq */
txq->count = 0;
diff --git a/drivers/net/ethernet/marvell/mvpp2/Makefile b/drivers/net/ethernet/marvell/mvpp2/Makefile
index 51f65a202c6e..9bd8e7964b40 100644
--- a/drivers/net/ethernet/marvell/mvpp2/Makefile
+++ b/drivers/net/ethernet/marvell/mvpp2/Makefile
@@ -4,4 +4,5 @@
#
obj-$(CONFIG_MVPP2) := mvpp2.o
-mvpp2-objs := mvpp2_main.o mvpp2_prs.o mvpp2_cls.o mvpp2_debugfs.o
+mvpp2-y := mvpp2_main.o mvpp2_prs.o mvpp2_cls.o mvpp2_debugfs.o
+mvpp2-$(CONFIG_MVPP2_PTP) += mvpp2_tai.o
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index 32753cc771bf..834775843067 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -12,6 +12,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <net/flow_offload.h>
@@ -461,8 +462,12 @@
#define MVPP22_CTRL4_DP_CLK_SEL BIT(5)
#define MVPP22_CTRL4_SYNC_BYPASS_DIS BIT(6)
#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE BIT(7)
+#define MVPP22_GMAC_INT_SUM_STAT 0xa0
+#define MVPP22_GMAC_INT_SUM_STAT_INTERNAL BIT(1)
+#define MVPP22_GMAC_INT_SUM_STAT_PTP BIT(2)
#define MVPP22_GMAC_INT_SUM_MASK 0xa4
#define MVPP22_GMAC_INT_SUM_MASK_LINK_STAT BIT(1)
+#define MVPP22_GMAC_INT_SUM_MASK_PTP BIT(2)
/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
* relative to port->base.
@@ -488,9 +493,13 @@
#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_10G (1 << 13)
+#define MVPP22_XLG_EXT_INT_STAT 0x158
+#define MVPP22_XLG_EXT_INT_STAT_XLG BIT(1)
+#define MVPP22_XLG_EXT_INT_STAT_PTP BIT(7)
#define MVPP22_XLG_EXT_INT_MASK 0x15c
#define MVPP22_XLG_EXT_INT_MASK_XLG BIT(1)
#define MVPP22_XLG_EXT_INT_MASK_GIG BIT(2)
+#define MVPP22_XLG_EXT_INT_MASK_PTP BIT(7)
#define MVPP22_XLG_CTRL4_REG 0x184
#define MVPP22_XLG_CTRL4_FWD_FC BIT(5)
#define MVPP22_XLG_CTRL4_FWD_PFC BIT(6)
@@ -501,6 +510,70 @@
#define MVPP22_SMI_MISC_CFG_REG 0x1204
#define MVPP22_SMI_POLLING_EN BIT(10)
+/* TAI registers, PPv2.2 only, relative to priv->iface_base */
+#define MVPP22_TAI_INT_CAUSE 0x1400
+#define MVPP22_TAI_INT_MASK 0x1404
+#define MVPP22_TAI_CR0 0x1408
+#define MVPP22_TAI_CR1 0x140c
+#define MVPP22_TAI_TCFCR0 0x1410
+#define MVPP22_TAI_TCFCR1 0x1414
+#define MVPP22_TAI_TCFCR2 0x1418
+#define MVPP22_TAI_FATWR 0x141c
+#define MVPP22_TAI_TOD_STEP_NANO_CR 0x1420
+#define MVPP22_TAI_TOD_STEP_FRAC_HIGH 0x1424
+#define MVPP22_TAI_TOD_STEP_FRAC_LOW 0x1428
+#define MVPP22_TAI_TAPDC_HIGH 0x142c
+#define MVPP22_TAI_TAPDC_LOW 0x1430
+#define MVPP22_TAI_TGTOD_SEC_HIGH 0x1434
+#define MVPP22_TAI_TGTOD_SEC_MED 0x1438
+#define MVPP22_TAI_TGTOD_SEC_LOW 0x143c
+#define MVPP22_TAI_TGTOD_NANO_HIGH 0x1440
+#define MVPP22_TAI_TGTOD_NANO_LOW 0x1444
+#define MVPP22_TAI_TGTOD_FRAC_HIGH 0x1448
+#define MVPP22_TAI_TGTOD_FRAC_LOW 0x144c
+#define MVPP22_TAI_TLV_SEC_HIGH 0x1450
+#define MVPP22_TAI_TLV_SEC_MED 0x1454
+#define MVPP22_TAI_TLV_SEC_LOW 0x1458
+#define MVPP22_TAI_TLV_NANO_HIGH 0x145c
+#define MVPP22_TAI_TLV_NANO_LOW 0x1460
+#define MVPP22_TAI_TLV_FRAC_HIGH 0x1464
+#define MVPP22_TAI_TLV_FRAC_LOW 0x1468
+#define MVPP22_TAI_TCV0_SEC_HIGH 0x146c
+#define MVPP22_TAI_TCV0_SEC_MED 0x1470
+#define MVPP22_TAI_TCV0_SEC_LOW 0x1474
+#define MVPP22_TAI_TCV0_NANO_HIGH 0x1478
+#define MVPP22_TAI_TCV0_NANO_LOW 0x147c
+#define MVPP22_TAI_TCV0_FRAC_HIGH 0x1480
+#define MVPP22_TAI_TCV0_FRAC_LOW 0x1484
+#define MVPP22_TAI_TCV1_SEC_HIGH 0x1488
+#define MVPP22_TAI_TCV1_SEC_MED 0x148c
+#define MVPP22_TAI_TCV1_SEC_LOW 0x1490
+#define MVPP22_TAI_TCV1_NANO_HIGH 0x1494
+#define MVPP22_TAI_TCV1_NANO_LOW 0x1498
+#define MVPP22_TAI_TCV1_FRAC_HIGH 0x149c
+#define MVPP22_TAI_TCV1_FRAC_LOW 0x14a0
+#define MVPP22_TAI_TCSR 0x14a4
+#define MVPP22_TAI_TUC_LSB 0x14a8
+#define MVPP22_TAI_GFM_SEC_HIGH 0x14ac
+#define MVPP22_TAI_GFM_SEC_MED 0x14b0
+#define MVPP22_TAI_GFM_SEC_LOW 0x14b4
+#define MVPP22_TAI_GFM_NANO_HIGH 0x14b8
+#define MVPP22_TAI_GFM_NANO_LOW 0x14bc
+#define MVPP22_TAI_GFM_FRAC_HIGH 0x14c0
+#define MVPP22_TAI_GFM_FRAC_LOW 0x14c4
+#define MVPP22_TAI_PCLK_DA_HIGH 0x14c8
+#define MVPP22_TAI_PCLK_DA_LOW 0x14cc
+#define MVPP22_TAI_CTCR 0x14d0
+#define MVPP22_TAI_PCLK_CCC_HIGH 0x14d4
+#define MVPP22_TAI_PCLK_CCC_LOW 0x14d8
+#define MVPP22_TAI_DTC_HIGH 0x14dc
+#define MVPP22_TAI_DTC_LOW 0x14e0
+#define MVPP22_TAI_CCC_HIGH 0x14e4
+#define MVPP22_TAI_CCC_LOW 0x14e8
+#define MVPP22_TAI_ICICE 0x14f4
+#define MVPP22_TAI_ICICC_LOW 0x14f8
+#define MVPP22_TAI_TUC_MSB 0x14fc
+
#define MVPP22_GMAC_BASE(port) (0x7000 + (port) * 0x1000 + 0xe00)
#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
@@ -527,6 +600,46 @@
#define MVPP22_XPCS_CFG0_PCS_MODE(n) ((n) << 3)
#define MVPP22_XPCS_CFG0_ACTIVE_LANE(n) ((n) << 5)
+/* PTP registers. PPv2.2 only */
+#define MVPP22_PTP_BASE(port) (0x7800 + (port * 0x1000))
+#define MVPP22_PTP_INT_CAUSE 0x00
+#define MVPP22_PTP_INT_CAUSE_QUEUE1 BIT(6)
+#define MVPP22_PTP_INT_CAUSE_QUEUE0 BIT(5)
+#define MVPP22_PTP_INT_MASK 0x04
+#define MVPP22_PTP_INT_MASK_QUEUE1 BIT(6)
+#define MVPP22_PTP_INT_MASK_QUEUE0 BIT(5)
+#define MVPP22_PTP_GCR 0x08
+#define MVPP22_PTP_GCR_RX_RESET BIT(13)
+#define MVPP22_PTP_GCR_TX_RESET BIT(1)
+#define MVPP22_PTP_GCR_TSU_ENABLE BIT(0)
+#define MVPP22_PTP_TX_Q0_R0 0x0c
+#define MVPP22_PTP_TX_Q0_R1 0x10
+#define MVPP22_PTP_TX_Q0_R2 0x14
+#define MVPP22_PTP_TX_Q1_R0 0x18
+#define MVPP22_PTP_TX_Q1_R1 0x1c
+#define MVPP22_PTP_TX_Q1_R2 0x20
+#define MVPP22_PTP_TPCR 0x24
+#define MVPP22_PTP_V1PCR 0x28
+#define MVPP22_PTP_V2PCR 0x2c
+#define MVPP22_PTP_Y1731PCR 0x30
+#define MVPP22_PTP_NTPTSPCR 0x34
+#define MVPP22_PTP_NTPRXPCR 0x38
+#define MVPP22_PTP_NTPTXPCR 0x3c
+#define MVPP22_PTP_WAMPPCR 0x40
+#define MVPP22_PTP_NAPCR 0x44
+#define MVPP22_PTP_FAPCR 0x48
+#define MVPP22_PTP_CAPCR 0x50
+#define MVPP22_PTP_ATAPCR 0x54
+#define MVPP22_PTP_ACTAPCR 0x58
+#define MVPP22_PTP_CATAPCR 0x5c
+#define MVPP22_PTP_CACTAPCR 0x60
+#define MVPP22_PTP_AITAPCR 0x64
+#define MVPP22_PTP_CAITAPCR 0x68
+#define MVPP22_PTP_CITAPCR 0x6c
+#define MVPP22_PTP_NTP_OFF_HIGH 0x70
+#define MVPP22_PTP_NTP_OFF_LOW 0x74
+#define MVPP22_PTP_TX_PIPE_STATUS_DELAY 0x78
+
/* System controller registers. Accessed through a regmap. */
#define GENCONF_SOFT_RESET1 0x1108
#define GENCONF_SOFT_RESET1_GOP BIT(6)
@@ -692,6 +805,43 @@ enum mvpp2_prs_l3_cast {
MVPP2_PRS_L3_BROAD_CAST
};
+/* PTP descriptor constants. The low bits of the descriptor are stored
+ * separately from the high bits.
+ */
+#define MVPP22_PTP_DESC_MASK_LOW 0xfff
+
+/* PTPAction */
+enum mvpp22_ptp_action {
+ MVPP22_PTP_ACTION_NONE = 0,
+ MVPP22_PTP_ACTION_FORWARD = 1,
+ MVPP22_PTP_ACTION_CAPTURE = 3,
+ /* The following have not been verified */
+ MVPP22_PTP_ACTION_ADDTIME = 4,
+ MVPP22_PTP_ACTION_ADDCORRECTEDTIME = 5,
+ MVPP22_PTP_ACTION_CAPTUREADDTIME = 6,
+ MVPP22_PTP_ACTION_CAPTUREADDCORRECTEDTIME = 7,
+ MVPP22_PTP_ACTION_ADDINGRESSTIME = 8,
+ MVPP22_PTP_ACTION_CAPTUREADDINGRESSTIME = 9,
+ MVPP22_PTP_ACTION_CAPTUREINGRESSTIME = 10,
+};
+
+/* PTPPacketFormat */
+enum mvpp22_ptp_packet_format {
+ MVPP22_PTP_PKT_FMT_PTPV2 = 0,
+ MVPP22_PTP_PKT_FMT_PTPV1 = 1,
+ MVPP22_PTP_PKT_FMT_Y1731 = 2,
+ MVPP22_PTP_PKT_FMT_NTPTS = 3,
+ MVPP22_PTP_PKT_FMT_NTPRX = 4,
+ MVPP22_PTP_PKT_FMT_NTPTX = 5,
+ MVPP22_PTP_PKT_FMT_TWAMP = 6,
+};
+
+#define MVPP22_PTP_ACTION(x) (((x) & 15) << 0)
+#define MVPP22_PTP_PACKETFORMAT(x) (((x) & 7) << 4)
+#define MVPP22_PTP_MACTIMESTAMPINGEN BIT(11)
+#define MVPP22_PTP_TIMESTAMPENTRYID(x) (((x) & 31) << 12)
+#define MVPP22_PTP_TIMESTAMPQUEUESELECT BIT(18)
+
/* BM constants */
#define MVPP2_BM_JUMBO_BUF_NUM 512
#define MVPP2_BM_LONG_BUF_NUM 1024
@@ -759,6 +909,8 @@ enum mvpp2_prs_l3_cast {
#define MVPP2_DESC_DMA_MASK DMA_BIT_MASK(40)
+struct mvpp2_tai;
+
/* Definitions */
struct mvpp2_dbgfs_entries;
@@ -794,6 +946,7 @@ struct mvpp2 {
/* List of pointers to port structures */
int port_count;
struct mvpp2_port *port_list[MVPP2_MAX_PORTS];
+ struct mvpp2_tai *tai;
/* Number of Tx threads used */
unsigned int nthreads;
@@ -907,6 +1060,11 @@ struct mvpp2_ethtool_fs {
struct ethtool_rxnfc rxnfc;
};
+struct mvpp2_hwtstamp_queue {
+ struct sk_buff *skb[32];
+ u8 next;
+};
+
struct mvpp2_port {
u8 id;
@@ -915,7 +1073,7 @@ struct mvpp2_port {
*/
int gop_id;
- int link_irq;
+ int port_irq;
struct mvpp2 *priv;
@@ -967,6 +1125,7 @@ struct mvpp2_port {
phy_interface_t phy_interface;
struct phylink *phylink;
struct phylink_config phylink_config;
+ struct phylink_pcs phylink_pcs;
struct phy *comphy;
struct mvpp2_bm_pool *pool_long;
@@ -989,6 +1148,11 @@ struct mvpp2_port {
* them from 0
*/
int rss_ctx[MVPP22_N_RSS_TABLES];
+
+ bool hwtstamp;
+ bool rx_hwtstamp;
+ enum hwtstamp_tx_types tx_hwtstamp_type;
+ struct mvpp2_hwtstamp_queue tx_hwtstamp_queue[2];
};
/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
@@ -1057,7 +1221,8 @@ struct mvpp22_tx_desc {
u8 packet_offset;
u8 phys_txq;
__le16 data_size;
- __le64 reserved1;
+ __le32 ptp_descriptor;
+ __le32 reserved2;
__le64 buf_dma_addr_ptp;
__le64 buf_cookie_misc;
};
@@ -1068,7 +1233,7 @@ struct mvpp22_rx_desc {
__le16 reserved1;
__le16 data_size;
__le32 reserved2;
- __le32 reserved3;
+ __le32 timestamp;
__le64 buf_dma_addr_key_hash;
__le64 buf_cookie_misc;
};
@@ -1248,4 +1413,36 @@ void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name);
void mvpp2_dbgfs_cleanup(struct mvpp2 *priv);
+#ifdef CONFIG_MVPP2_PTP
+int mvpp22_tai_probe(struct device *dev, struct mvpp2 *priv);
+void mvpp22_tai_tstamp(struct mvpp2_tai *tai, u32 tstamp,
+ struct skb_shared_hwtstamps *hwtstamp);
+void mvpp22_tai_start(struct mvpp2_tai *tai);
+void mvpp22_tai_stop(struct mvpp2_tai *tai);
+int mvpp22_tai_ptp_clock_index(struct mvpp2_tai *tai);
+#else
+static inline int mvpp22_tai_probe(struct device *dev, struct mvpp2 *priv)
+{
+ return 0;
+}
+static inline void mvpp22_tai_tstamp(struct mvpp2_tai *tai, u32 tstamp,
+ struct skb_shared_hwtstamps *hwtstamp)
+{
+}
+static inline void mvpp22_tai_start(struct mvpp2_tai *tai)
+{
+}
+static inline void mvpp22_tai_stop(struct mvpp2_tai *tai)
+{
+}
+static inline int mvpp22_tai_ptp_clock_index(struct mvpp2_tai *tai)
+{
+ return -1;
+}
+#endif
+
+static inline bool mvpp22_rx_hwtstamping(struct mvpp2_port *port)
+{
+ return IS_ENABLED(CONFIG_MVPP2_PTP) && port->rx_hwtstamp;
+}
#endif
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 6e140d1b8967..f6616c8933ca 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -28,6 +28,7 @@
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/phy/phy.h>
+#include <linux/ptp_classify.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
@@ -57,13 +58,7 @@ static struct {
/* The prototype is added here to be used in start_dev when using ACPI. This
* will be removed once phylink is used for all modes (dt+ACPI).
*/
-static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
- const struct phylink_link_state *state);
-static void mvpp2_mac_link_up(struct phylink_config *config,
- struct phy_device *phy,
- unsigned int mode, phy_interface_t interface,
- int speed, int duplex,
- bool tx_pause, bool rx_pause);
+static void mvpp2_acpi_start(struct mvpp2_port *port);
/* Queue modes */
#define MVPP2_QDIST_SINGLE_MODE 0
@@ -1385,6 +1380,10 @@ static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
{
u32 val;
+ mvpp2_modify(port->base + MVPP22_GMAC_INT_SUM_MASK,
+ MVPP22_GMAC_INT_SUM_MASK_PTP,
+ MVPP22_GMAC_INT_SUM_MASK_PTP);
+
if (port->phylink ||
phy_interface_mode_is_rgmii(port->phy_interface) ||
phy_interface_mode_is_8023z(port->phy_interface) ||
@@ -1398,6 +1397,10 @@ static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
val = readl(port->base + MVPP22_XLG_INT_MASK);
val |= MVPP22_XLG_INT_MASK_LINK;
writel(val, port->base + MVPP22_XLG_INT_MASK);
+
+ mvpp2_modify(port->base + MVPP22_XLG_EXT_INT_MASK,
+ MVPP22_XLG_EXT_INT_MASK_PTP,
+ MVPP22_XLG_EXT_INT_MASK_PTP);
}
mvpp22_gop_unmask_irq(port);
@@ -1485,8 +1488,8 @@ static void mvpp2_port_loopback_set(struct mvpp2_port *port,
else
val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
- if (phy_interface_mode_is_8023z(port->phy_interface) ||
- port->phy_interface == PHY_INTERFACE_MODE_SGMII)
+ if (phy_interface_mode_is_8023z(state->interface) ||
+ state->interface == PHY_INTERFACE_MODE_SGMII)
val |= MVPP2_GMAC_PCS_LB_EN_MASK;
else
val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
@@ -2980,44 +2983,67 @@ static irqreturn_t mvpp2_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/* Per-port interrupt for link status changes */
-static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id)
+static void mvpp2_isr_handle_ptp_queue(struct mvpp2_port *port, int nq)
{
- struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
- struct net_device *dev = port->dev;
- bool event = false, link = false;
- u32 val;
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct mvpp2_hwtstamp_queue *queue;
+ struct sk_buff *skb;
+ void __iomem *ptp_q;
+ unsigned int id;
+ u32 r0, r1, r2;
- mvpp22_gop_mask_irq(port);
+ ptp_q = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
+ if (nq)
+ ptp_q += MVPP22_PTP_TX_Q1_R0 - MVPP22_PTP_TX_Q0_R0;
- if (mvpp2_port_supports_xlg(port) &&
- mvpp2_is_xlg(port->phy_interface)) {
- val = readl(port->base + MVPP22_XLG_INT_STAT);
- if (val & MVPP22_XLG_INT_STAT_LINK) {
- event = true;
- val = readl(port->base + MVPP22_XLG_STATUS);
- if (val & MVPP22_XLG_STATUS_LINK_UP)
- link = true;
- }
- } else if (phy_interface_mode_is_rgmii(port->phy_interface) ||
- phy_interface_mode_is_8023z(port->phy_interface) ||
- port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
- val = readl(port->base + MVPP22_GMAC_INT_STAT);
- if (val & MVPP22_GMAC_INT_STAT_LINK) {
- event = true;
- val = readl(port->base + MVPP2_GMAC_STATUS0);
- if (val & MVPP2_GMAC_STATUS0_LINK_UP)
- link = true;
+ queue = &port->tx_hwtstamp_queue[nq];
+
+ while (1) {
+ r0 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R0) & 0xffff;
+ if (!r0)
+ break;
+
+ r1 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R1) & 0xffff;
+ r2 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R2) & 0xffff;
+
+ id = (r0 >> 1) & 31;
+
+ skb = queue->skb[id];
+ queue->skb[id] = NULL;
+ if (skb) {
+ u32 ts = r2 << 19 | r1 << 3 | r0 >> 13;
+
+ mvpp22_tai_tstamp(port->priv->tai, ts, &shhwtstamps);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
}
}
+}
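(Editor's sketch, not part of the patch: the shifts above splice one 32-bit timestamp out of three 16-bit queue registers, while bits 5:1 of the first register carry the entry id used to find the saved skb. Restated standalone, with the bit layout inferred from the code itself rather than a datasheet:

/* r0[15:13] -> ts[2:0], r1[15:0] -> ts[18:3], r2[12:0] -> ts[31:19] */
static inline u32 mvpp2_ptp_queue_ts(u32 r0, u32 r1, u32 r2)
{
	return (r2 << 19) | (r1 << 3) | (r0 >> 13);
}
)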
+
+static void mvpp2_isr_handle_ptp(struct mvpp2_port *port)
+{
+ void __iomem *ptp;
+ u32 val;
+
+ ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
+ val = readl(ptp + MVPP22_PTP_INT_CAUSE);
+ if (val & MVPP22_PTP_INT_CAUSE_QUEUE0)
+ mvpp2_isr_handle_ptp_queue(port, 0);
+ if (val & MVPP22_PTP_INT_CAUSE_QUEUE1)
+ mvpp2_isr_handle_ptp_queue(port, 1);
+}
+
+static void mvpp2_isr_handle_link(struct mvpp2_port *port, bool link)
+{
+ struct net_device *dev = port->dev;
if (port->phylink) {
phylink_mac_change(port->phylink, link);
- goto handled;
+ return;
}
- if (!netif_running(dev) || !event)
- goto handled;
+ if (!netif_running(dev))
+ return;
if (link) {
mvpp2_interrupts_enable(port);
@@ -3034,8 +3060,65 @@ static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id)
mvpp2_interrupts_disable(port);
}
+}
+
+static void mvpp2_isr_handle_xlg(struct mvpp2_port *port)
+{
+ bool link;
+ u32 val;
+
+ val = readl(port->base + MVPP22_XLG_INT_STAT);
+ if (val & MVPP22_XLG_INT_STAT_LINK) {
+ val = readl(port->base + MVPP22_XLG_STATUS);
+ link = (val & MVPP22_XLG_STATUS_LINK_UP);
+ mvpp2_isr_handle_link(port, link);
+ }
+}
+
+static void mvpp2_isr_handle_gmac_internal(struct mvpp2_port *port)
+{
+ bool link;
+ u32 val;
+
+ if (phy_interface_mode_is_rgmii(port->phy_interface) ||
+ phy_interface_mode_is_8023z(port->phy_interface) ||
+ port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
+ val = readl(port->base + MVPP22_GMAC_INT_STAT);
+ if (val & MVPP22_GMAC_INT_STAT_LINK) {
+ val = readl(port->base + MVPP2_GMAC_STATUS0);
+ link = (val & MVPP2_GMAC_STATUS0_LINK_UP);
+ mvpp2_isr_handle_link(port, link);
+ }
+ }
+}
+
+/* Per-port interrupt for link status changes */
+static irqreturn_t mvpp2_port_isr(int irq, void *dev_id)
+{
+ struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
+ u32 val;
+
+ mvpp22_gop_mask_irq(port);
+
+ if (mvpp2_port_supports_xlg(port) &&
+ mvpp2_is_xlg(port->phy_interface)) {
+ /* Check the external status register */
+ val = readl(port->base + MVPP22_XLG_EXT_INT_STAT);
+ if (val & MVPP22_XLG_EXT_INT_STAT_XLG)
+ mvpp2_isr_handle_xlg(port);
+ if (val & MVPP22_XLG_EXT_INT_STAT_PTP)
+ mvpp2_isr_handle_ptp(port);
+ } else {
+ /* If it's not the XLG, we must be using the GMAC.
+ * Check the summary status.
+ */
+ val = readl(port->base + MVPP22_GMAC_INT_SUM_STAT);
+ if (val & MVPP22_GMAC_INT_SUM_STAT_INTERNAL)
+ mvpp2_isr_handle_gmac_internal(port);
+ if (val & MVPP22_GMAC_INT_SUM_STAT_PTP)
+ mvpp2_isr_handle_ptp(port);
+ }
-handled:
mvpp22_gop_unmask_irq(port);
return IRQ_HANDLED;
}
@@ -3427,7 +3510,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
unsigned int frag_size;
dma_addr_t dma_addr;
phys_addr_t phys_addr;
- u32 rx_status;
+ u32 rx_status, timestamp;
int pool, rx_bytes, err, ret;
void *data;
@@ -3505,6 +3588,15 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
goto err_drop_frame;
}
+ /* If we have RX hardware timestamping enabled, grab the
+ * timestamp from the queue and convert.
+ */
+ if (mvpp22_rx_hwtstamping(port)) {
+ timestamp = le32_to_cpu(rx_desc->pp22.timestamp);
+ mvpp22_tai_tstamp(port->priv->tai, timestamp,
+ skb_hwtstamps(skb));
+ }
+
err = mvpp2_rx_refill(port, bm_pool, pp, pool);
if (err) {
netdev_err(port->dev, "failed to refill BM pools\n");
@@ -3579,6 +3671,94 @@ tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
mvpp2_txq_desc_put(txq);
}
+static void mvpp2_txdesc_clear_ptp(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *desc)
+{
+ /* We only need to clear the low bits */
+ if (port->priv->hw_version != MVPP21)
+ desc->pp22.ptp_descriptor &=
+ cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW);
+}
+
+static bool mvpp2_tx_hw_tstamp(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc,
+ struct sk_buff *skb)
+{
+ struct mvpp2_hwtstamp_queue *queue;
+ unsigned int mtype, type, i;
+ struct ptp_header *hdr;
+ u64 ptpdesc;
+
+ if (port->priv->hw_version == MVPP21 ||
+ port->tx_hwtstamp_type == HWTSTAMP_TX_OFF)
+ return false;
+
+ type = ptp_classify_raw(skb);
+ if (!type)
+ return false;
+
+ hdr = ptp_parse_header(skb, type);
+ if (!hdr)
+ return false;
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ ptpdesc = MVPP22_PTP_MACTIMESTAMPINGEN |
+ MVPP22_PTP_ACTION_CAPTURE;
+ queue = &port->tx_hwtstamp_queue[0];
+
+ switch (type & PTP_CLASS_VMASK) {
+ case PTP_CLASS_V1:
+ ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV1);
+ break;
+
+ case PTP_CLASS_V2:
+ ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV2);
+ mtype = hdr->tsmt & 15;
+ /* Direct PTP Sync messages to queue 1 */
+ if (mtype == 0) {
+ ptpdesc |= MVPP22_PTP_TIMESTAMPQUEUESELECT;
+ queue = &port->tx_hwtstamp_queue[1];
+ }
+ break;
+ }
+
+ /* Take a reference on the skb and insert into our queue */
+ i = queue->next;
+ queue->next = (i + 1) & 31;
+ if (queue->skb[i])
+ dev_kfree_skb_any(queue->skb[i]);
+ queue->skb[i] = skb_get(skb);
+
+ ptpdesc |= MVPP22_PTP_TIMESTAMPENTRYID(i);
+
+ /*
+ * 3:0 - PTPAction
+ * 6:4 - PTPPacketFormat
+ * 7 - PTP_CF_WraparoundCheckEn
+ * 9:8 - IngressTimestampSeconds[1:0]
+ * 10 - Reserved
+ * 11 - MACTimestampingEn
+ * 17:12 - PTP_TimestampQueueEntryID[5:0]
+ * 18 - PTPTimestampQueueSelect
+ * 19 - UDPChecksumUpdateEn
+ * 27:20 - TimestampOffset
+ * PTP, NTPTransmit, OWAMP/TWAMP - L3 to PTP header
+ * NTPTs, Y.1731 - L3 to timestamp entry
+ * 35:28 - UDP Checksum Offset
+ *
+ * stored in tx descriptor bits 75:64 (11:0) and 191:168 (35:12)
+ */
+ tx_desc->pp22.ptp_descriptor &=
+ cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW);
+ tx_desc->pp22.ptp_descriptor |=
+ cpu_to_le32(ptpdesc & MVPP22_PTP_DESC_MASK_LOW);
+ tx_desc->pp22.buf_dma_addr_ptp &= cpu_to_le64(~0xffffff0000000000ULL);
+ tx_desc->pp22.buf_dma_addr_ptp |= cpu_to_le64((ptpdesc >> 12) << 40);
+
+ return true;
+}
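(Editor's note: to make the split concrete, a worked example of the packing done by the last four statements above, with values chosen purely for illustration using the macros from mvpp2.h. A CAPTURE action with PTPv2 format, MAC timestamping enabled, entry id 5, directed to queue 1 gives

	ptpdesc = 3 | BIT(11) | (5 << 12) | BIT(18) = 0x45803

so ptp_descriptor receives the low 12 bits, 0x803, in descriptor bits 75:64, and buf_dma_addr_ptp receives 0x45803 >> 12 = 0x45 in its top 24 bits, descriptor bits 191:168, above the 40-bit DMA address.)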
+
/* Handle tx fragmentation processing */
static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
struct mvpp2_tx_queue *aggr_txq,
@@ -3595,6 +3775,7 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
void *addr = skb_frag_address(frag);
tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
+ mvpp2_txdesc_clear_ptp(port, tx_desc);
mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag));
@@ -3644,6 +3825,7 @@ static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
dma_addr_t addr;
+ mvpp2_txdesc_clear_ptp(port, tx_desc);
mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
mvpp2_txdesc_size_set(port, tx_desc, hdr_sz);
@@ -3668,6 +3850,7 @@ static inline int mvpp2_tso_put_data(struct sk_buff *skb,
struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
dma_addr_t buf_dma_addr;
+ mvpp2_txdesc_clear_ptp(port, tx_desc);
mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
mvpp2_txdesc_size_set(port, tx_desc, sz);
@@ -3784,6 +3967,9 @@ static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
/* Get a descriptor for the first part of the packet */
tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ||
+ !mvpp2_tx_hw_tstamp(port, tx_desc, skb))
+ mvpp2_txdesc_clear_ptp(port, tx_desc);
mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
@@ -4007,17 +4193,7 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
if (port->phylink) {
phylink_start(port->phylink);
} else {
- /* Phylink isn't used as of now for ACPI, so the MAC has to be
- * configured manually when the interface is started. This will
- * be removed as soon as the phylink ACPI support lands in.
- */
- struct phylink_link_state state = {
- .interface = port->phy_interface,
- };
- mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state);
- mvpp2_mac_link_up(&port->phylink_config, NULL,
- MLO_AN_INBAND, port->phy_interface,
- SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false);
+ mvpp2_acpi_start(port);
}
netif_tx_start_all_queues(port->dev);
@@ -4227,12 +4403,13 @@ static int mvpp2_open(struct net_device *dev)
valid = true;
}
- if (priv->hw_version == MVPP22 && port->link_irq) {
- err = request_irq(port->link_irq, mvpp2_link_status_isr, 0,
+ if (priv->hw_version == MVPP22 && port->port_irq) {
+ err = request_irq(port->port_irq, mvpp2_port_isr, 0,
dev->name, port);
if (err) {
- netdev_err(port->dev, "cannot request link IRQ %d\n",
- port->link_irq);
+ netdev_err(port->dev,
+ "cannot request port link/ptp IRQ %d\n",
+ port->port_irq);
goto err_free_irq;
}
@@ -4243,7 +4420,7 @@ static int mvpp2_open(struct net_device *dev)
valid = true;
} else {
- port->link_irq = 0;
+ port->port_irq = 0;
}
if (!valid) {
@@ -4287,8 +4464,8 @@ static int mvpp2_stop(struct net_device *dev)
if (port->phylink)
phylink_disconnect_phy(port->phylink);
- if (port->link_irq)
- free_irq(port->link_irq, port);
+ if (port->port_irq)
+ free_irq(port->port_irq, port);
mvpp2_irqs_deinit(port);
if (!port->has_tx_irqs) {
@@ -4548,10 +4725,124 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->tx_dropped = dev->stats.tx_dropped;
}
+static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ void __iomem *ptp;
+ u32 gcr, int_mask;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ if (config.flags)
+ return -EINVAL;
+
+ if (config.tx_type != HWTSTAMP_TX_OFF &&
+ config.tx_type != HWTSTAMP_TX_ON)
+ return -ERANGE;
+
+ ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
+
+ int_mask = gcr = 0;
+ if (config.tx_type != HWTSTAMP_TX_OFF) {
+ gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_TX_RESET;
+ int_mask |= MVPP22_PTP_INT_MASK_QUEUE1 |
+ MVPP22_PTP_INT_MASK_QUEUE0;
+ }
+
+ /* It seems we must also release the TX reset when enabling the TSU */
+ if (config.rx_filter != HWTSTAMP_FILTER_NONE)
+ gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_RX_RESET |
+ MVPP22_PTP_GCR_TX_RESET;
+
+ if (gcr & MVPP22_PTP_GCR_TSU_ENABLE)
+ mvpp22_tai_start(port->priv->tai);
+
+ if (config.rx_filter != HWTSTAMP_FILTER_NONE) {
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ mvpp2_modify(ptp + MVPP22_PTP_GCR,
+ MVPP22_PTP_GCR_RX_RESET |
+ MVPP22_PTP_GCR_TX_RESET |
+ MVPP22_PTP_GCR_TSU_ENABLE, gcr);
+ port->rx_hwtstamp = true;
+ } else {
+ port->rx_hwtstamp = false;
+ mvpp2_modify(ptp + MVPP22_PTP_GCR,
+ MVPP22_PTP_GCR_RX_RESET |
+ MVPP22_PTP_GCR_TX_RESET |
+ MVPP22_PTP_GCR_TSU_ENABLE, gcr);
+ }
+
+ mvpp2_modify(ptp + MVPP22_PTP_INT_MASK,
+ MVPP22_PTP_INT_MASK_QUEUE1 |
+ MVPP22_PTP_INT_MASK_QUEUE0, int_mask);
+
+ if (!(gcr & MVPP22_PTP_GCR_TSU_ENABLE))
+ mvpp22_tai_stop(port->priv->tai);
+
+ port->tx_hwtstamp_type = config.tx_type;
+
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int mvpp2_get_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+
+ memset(&config, 0, sizeof(config));
+
+ config.tx_type = port->tx_hwtstamp_type;
+ config.rx_filter = port->rx_hwtstamp ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int mvpp2_ethtool_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!port->hwtstamp)
+ return -EOPNOTSUPP;
+
+ info->phc_index = mvpp22_tai_ptp_clock_index(port->priv->tai);
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mvpp2_port *port = netdev_priv(dev);
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ if (port->hwtstamp)
+ return mvpp2_set_ts_config(port, ifr);
+ break;
+
+ case SIOCGHWTSTAMP:
+ if (port->hwtstamp)
+ return mvpp2_get_ts_config(port, ifr);
+ break;
+ }
+
if (!port->phylink)
return -ENOTSUPP;
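(Editor's note: the SIOCSHWTSTAMP/SIOCGHWTSTAMP handling above follows the standard kernel timestamping ABI, so no driver-specific user space is needed; "ethtool -T <dev>" reports the capabilities returned by mvpp2_ethtool_get_ts_info(). A minimal user space sketch, standard UAPI only, with an illustrative function name; fd is any socket, e.g. from socket(AF_INET, SOCK_DGRAM, 0):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_hw_tstamps(int fd, const char *ifname)
{
	/* .flags must stay zero: mvpp2_set_ts_config() rejects anything else */
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
		perror("SIOCSHWTSTAMP");
		return -1;
	}
	/* The driver writes back what it actually enabled; mvpp2 reports
	 * either HWTSTAMP_FILTER_ALL or HWTSTAMP_FILTER_NONE for RX.
	 */
	printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);
	return 0;
}
)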
@@ -5021,6 +5312,7 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = {
ETHTOOL_COALESCE_MAX_FRAMES,
.nway_reset = mvpp2_ethtool_nway_reset,
.get_link = ethtool_op_get_link,
+ .get_ts_info = mvpp2_ethtool_get_ts_info,
.set_coalesce = mvpp2_ethtool_set_coalesce,
.get_coalesce = mvpp2_ethtool_get_coalesce,
.get_drvinfo = mvpp2_ethtool_get_drvinfo,
@@ -5392,6 +5684,155 @@ static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config)
return container_of(config, struct mvpp2_port, phylink_config);
}
+static struct mvpp2_port *mvpp2_pcs_to_port(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct mvpp2_port, phylink_pcs);
+}
+
+static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
+ u32 val;
+
+ state->speed = SPEED_10000;
+ state->duplex = 1;
+ state->an_complete = 1;
+
+ val = readl(port->base + MVPP22_XLG_STATUS);
+ state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP);
+
+ state->pause = 0;
+ val = readl(port->base + MVPP22_XLG_CTRL0_REG);
+ if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN)
+ state->pause |= MLO_PAUSE_TX;
+ if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN)
+ state->pause |= MLO_PAUSE_RX;
+}
+
+static int mvpp2_xlg_pcs_config(struct phylink_pcs *pcs,
+ unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ return 0;
+}
+
+static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = {
+ .pcs_get_state = mvpp2_xlg_pcs_get_state,
+ .pcs_config = mvpp2_xlg_pcs_config,
+};
+
+static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
+ u32 val;
+
+ val = readl(port->base + MVPP2_GMAC_STATUS0);
+
+ state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE);
+ state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP);
+ state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX);
+
+ switch (port->phy_interface) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ state->speed = SPEED_1000;
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ state->speed = SPEED_2500;
+ break;
+ default:
+ if (val & MVPP2_GMAC_STATUS0_GMII_SPEED)
+ state->speed = SPEED_1000;
+ else if (val & MVPP2_GMAC_STATUS0_MII_SPEED)
+ state->speed = SPEED_100;
+ else
+ state->speed = SPEED_10;
+ }
+
+ state->pause = 0;
+ if (val & MVPP2_GMAC_STATUS0_RX_PAUSE)
+ state->pause |= MLO_PAUSE_RX;
+ if (val & MVPP2_GMAC_STATUS0_TX_PAUSE)
+ state->pause |= MLO_PAUSE_TX;
+}
+
+static int mvpp2_gmac_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
+ u32 mask, val, an, old_an, changed;
+
+ mask = MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
+ MVPP2_GMAC_IN_BAND_AUTONEG |
+ MVPP2_GMAC_AN_SPEED_EN |
+ MVPP2_GMAC_FLOW_CTRL_AUTONEG |
+ MVPP2_GMAC_AN_DUPLEX_EN;
+
+ if (phylink_autoneg_inband(mode)) {
+ mask |= MVPP2_GMAC_CONFIG_MII_SPEED |
+ MVPP2_GMAC_CONFIG_GMII_SPEED |
+ MVPP2_GMAC_CONFIG_FULL_DUPLEX;
+ val = MVPP2_GMAC_IN_BAND_AUTONEG;
+
+ if (interface == PHY_INTERFACE_MODE_SGMII) {
+ /* SGMII mode receives the speed and duplex from PHY */
+ val |= MVPP2_GMAC_AN_SPEED_EN |
+ MVPP2_GMAC_AN_DUPLEX_EN;
+ } else {
+ /* 802.3z mode has fixed speed and duplex */
+ val |= MVPP2_GMAC_CONFIG_GMII_SPEED |
+ MVPP2_GMAC_CONFIG_FULL_DUPLEX;
+
+ /* The FLOW_CTRL_AUTONEG bit selects either the hardware
+ * automatically or the bits in MVPP22_GMAC_CTRL_4_REG
+ * manually controls the GMAC pause modes.
+ */
+ if (permit_pause_to_mac)
+ val |= MVPP2_GMAC_FLOW_CTRL_AUTONEG;
+
+ /* Configure advertisement bits */
+ mask |= MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN;
+ if (phylink_test(advertising, Pause))
+ val |= MVPP2_GMAC_FC_ADV_EN;
+ if (phylink_test(advertising, Asym_Pause))
+ val |= MVPP2_GMAC_FC_ADV_ASM_EN;
+ }
+ } else {
+ val = 0;
+ }
+
+ old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ an = (an & ~mask) | val;
+ changed = an ^ old_an;
+ if (changed)
+ writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+
+ /* We are only interested in the advertisement bits changing */
+ return changed & (MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN);
+}
+
+static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs)
+{
+ struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
+ u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+
+ writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN,
+ port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN,
+ port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+}
+
+static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = {
+ .pcs_get_state = mvpp2_gmac_pcs_get_state,
+ .pcs_config = mvpp2_gmac_pcs_config,
+ .pcs_an_restart = mvpp2_gmac_pcs_an_restart,
+};
+
static void mvpp2_phylink_validate(struct phylink_config *config,
unsigned long *supported,
struct phylink_link_state *state)
@@ -5480,89 +5921,6 @@ empty_set:
bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static void mvpp22_xlg_pcs_get_state(struct mvpp2_port *port,
- struct phylink_link_state *state)
-{
- u32 val;
-
- state->speed = SPEED_10000;
- state->duplex = 1;
- state->an_complete = 1;
-
- val = readl(port->base + MVPP22_XLG_STATUS);
- state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP);
-
- state->pause = 0;
- val = readl(port->base + MVPP22_XLG_CTRL0_REG);
- if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN)
- state->pause |= MLO_PAUSE_TX;
- if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN)
- state->pause |= MLO_PAUSE_RX;
-}
-
-static void mvpp2_gmac_pcs_get_state(struct mvpp2_port *port,
- struct phylink_link_state *state)
-{
- u32 val;
-
- val = readl(port->base + MVPP2_GMAC_STATUS0);
-
- state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE);
- state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP);
- state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX);
-
- switch (port->phy_interface) {
- case PHY_INTERFACE_MODE_1000BASEX:
- state->speed = SPEED_1000;
- break;
- case PHY_INTERFACE_MODE_2500BASEX:
- state->speed = SPEED_2500;
- break;
- default:
- if (val & MVPP2_GMAC_STATUS0_GMII_SPEED)
- state->speed = SPEED_1000;
- else if (val & MVPP2_GMAC_STATUS0_MII_SPEED)
- state->speed = SPEED_100;
- else
- state->speed = SPEED_10;
- }
-
- state->pause = 0;
- if (val & MVPP2_GMAC_STATUS0_RX_PAUSE)
- state->pause |= MLO_PAUSE_RX;
- if (val & MVPP2_GMAC_STATUS0_TX_PAUSE)
- state->pause |= MLO_PAUSE_TX;
-}
-
-static void mvpp2_phylink_mac_pcs_get_state(struct phylink_config *config,
- struct phylink_link_state *state)
-{
- struct mvpp2_port *port = mvpp2_phylink_to_port(config);
-
- if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
- u32 mode = readl(port->base + MVPP22_XLG_CTRL3_REG);
- mode &= MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
-
- if (mode == MVPP22_XLG_CTRL3_MACMODESELECT_10G) {
- mvpp22_xlg_pcs_get_state(port, state);
- return;
- }
- }
-
- mvpp2_gmac_pcs_get_state(port, state);
-}
-
-static void mvpp2_mac_an_restart(struct phylink_config *config)
-{
- struct mvpp2_port *port = mvpp2_phylink_to_port(config);
- u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
-
- writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN,
- port->base + MVPP2_GMAC_AUTONEG_CONFIG);
- writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN,
- port->base + MVPP2_GMAC_AUTONEG_CONFIG);
-}
-
static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
const struct phylink_link_state *state)
{
@@ -5586,23 +5944,16 @@ static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
const struct phylink_link_state *state)
{
- u32 old_an, an;
u32 old_ctrl0, ctrl0;
u32 old_ctrl2, ctrl2;
u32 old_ctrl4, ctrl4;
- old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
old_ctrl0 = ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
old_ctrl2 = ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
- an &= ~(MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FC_ADV_EN |
- MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
- MVPP2_GMAC_AN_DUPLEX_EN | MVPP2_GMAC_IN_BAND_AUTONEG |
- MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS);
ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
- ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PORT_RESET_MASK |
- MVPP2_GMAC_PCS_ENABLE_MASK);
+ ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK);
/* Configure port type */
if (phy_interface_mode_is_8023z(state->interface)) {
@@ -5624,12 +5975,6 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
}
- /* Configure advertisement bits */
- if (phylink_test(state->advertising, Pause))
- an |= MVPP2_GMAC_FC_ADV_EN;
- if (phylink_test(state->advertising, Asym_Pause))
- an |= MVPP2_GMAC_FC_ADV_ASM_EN;
-
/* Configure negotiation style */
if (!phylink_autoneg_inband(mode)) {
/* Phy or fixed speed - no in-band AN, nothing to do, leave the
@@ -5638,14 +5983,6 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
/* SGMII in-band mode receives the speed and duplex from
* the PHY. Flow control information is not received. */
- an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN |
- MVPP2_GMAC_FORCE_LINK_PASS |
- MVPP2_GMAC_CONFIG_MII_SPEED |
- MVPP2_GMAC_CONFIG_GMII_SPEED |
- MVPP2_GMAC_CONFIG_FULL_DUPLEX);
- an |= MVPP2_GMAC_IN_BAND_AUTONEG |
- MVPP2_GMAC_AN_SPEED_EN |
- MVPP2_GMAC_AN_DUPLEX_EN;
} else if (phy_interface_mode_is_8023z(state->interface)) {
/* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can
* they negotiate duplex: they are always operating with a fixed
@@ -5653,42 +5990,6 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
* speed and full duplex here.
*/
ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
- an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN |
- MVPP2_GMAC_FORCE_LINK_PASS |
- MVPP2_GMAC_CONFIG_MII_SPEED |
- MVPP2_GMAC_CONFIG_GMII_SPEED |
- MVPP2_GMAC_CONFIG_FULL_DUPLEX);
- an |= MVPP2_GMAC_IN_BAND_AUTONEG |
- MVPP2_GMAC_CONFIG_GMII_SPEED |
- MVPP2_GMAC_CONFIG_FULL_DUPLEX;
-
- if (state->pause & MLO_PAUSE_AN && state->an_enabled)
- an |= MVPP2_GMAC_FLOW_CTRL_AUTONEG;
- }
-
-/* Some fields of the auto-negotiation register require the port to be down when
- * their value is updated.
- */
-#define MVPP2_GMAC_AN_PORT_DOWN_MASK \
- (MVPP2_GMAC_IN_BAND_AUTONEG | \
- MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS | \
- MVPP2_GMAC_CONFIG_MII_SPEED | MVPP2_GMAC_CONFIG_GMII_SPEED | \
- MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_CONFIG_FULL_DUPLEX | \
- MVPP2_GMAC_AN_DUPLEX_EN)
-
- if ((old_ctrl0 ^ ctrl0) & MVPP2_GMAC_PORT_TYPE_MASK ||
- (old_ctrl2 ^ ctrl2) & MVPP2_GMAC_INBAND_AN_MASK ||
- (old_an ^ an) & MVPP2_GMAC_AN_PORT_DOWN_MASK) {
- /* Force link down */
- old_an &= ~MVPP2_GMAC_FORCE_LINK_PASS;
- old_an |= MVPP2_GMAC_FORCE_LINK_DOWN;
- writel(old_an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
-
- /* Set the GMAC in a reset state - do this in a way that
- * ensures we clear it below.
- */
- old_ctrl2 |= MVPP2_GMAC_PORT_RESET_MASK;
- writel(old_ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
}
if (old_ctrl0 != ctrl0)
@@ -5697,41 +5998,85 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
if (old_ctrl4 != ctrl4)
writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG);
- if (old_an != an)
- writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
-
- if (old_ctrl2 & MVPP2_GMAC_PORT_RESET_MASK) {
- while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
- MVPP2_GMAC_PORT_RESET_MASK)
- continue;
- }
}
-static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
- const struct phylink_link_state *state)
+static int mvpp2__mac_prepare(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
{
struct mvpp2_port *port = mvpp2_phylink_to_port(config);
- bool change_interface = port->phy_interface != state->interface;
/* Check for invalid configuration */
- if (mvpp2_is_xlg(state->interface) && port->gop_id != 0) {
+ if (mvpp2_is_xlg(interface) && port->gop_id != 0) {
netdev_err(port->dev, "Invalid mode on %s\n", port->dev->name);
- return;
+ return -EINVAL;
+ }
+
+ if (port->phy_interface != interface ||
+ phylink_autoneg_inband(mode)) {
+ /* Force the link down when changing the interface or if in
+ * in-band mode to ensure we do not change the configuration
+ * while the hardware is indicating link is up. We force both
+ * XLG and GMAC down to ensure that they're both in a known
+ * state.
+ */
+ mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
+ MVPP2_GMAC_FORCE_LINK_PASS |
+ MVPP2_GMAC_FORCE_LINK_DOWN,
+ MVPP2_GMAC_FORCE_LINK_DOWN);
+
+ if (mvpp2_port_supports_xlg(port))
+ mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
+ MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
+ MVPP22_XLG_CTRL0_FORCE_LINK_DOWN,
+ MVPP22_XLG_CTRL0_FORCE_LINK_DOWN);
}
/* Make sure the port is disabled when reconfiguring the mode */
mvpp2_port_disable(port);
- if (port->priv->hw_version == MVPP22 && change_interface) {
- mvpp22_gop_mask_irq(port);
+ if (port->phy_interface != interface) {
+ /* Place GMAC into reset */
+ mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG,
+ MVPP2_GMAC_PORT_RESET_MASK,
+ MVPP2_GMAC_PORT_RESET_MASK);
- port->phy_interface = state->interface;
+ if (port->priv->hw_version == MVPP22) {
+ mvpp22_gop_mask_irq(port);
- /* Reconfigure the serdes lanes */
- phy_power_off(port->comphy);
- mvpp22_mode_reconfigure(port);
+ phy_power_off(port->comphy);
+ }
}
+ /* Select the appropriate PCS operations depending on the
+ * configured interface mode. We will only switch to a mode
+ * that the validate() checks have already passed.
+ */
+ if (mvpp2_is_xlg(interface))
+ port->phylink_pcs.ops = &mvpp2_phylink_xlg_pcs_ops;
+ else
+ port->phylink_pcs.ops = &mvpp2_phylink_gmac_pcs_ops;
+
+ return 0;
+}
+
+static int mvpp2_mac_prepare(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
+ int ret;
+
+ ret = mvpp2__mac_prepare(config, mode, interface);
+ if (ret == 0)
+ phylink_set_pcs(port->phylink, &port->phylink_pcs);
+
+ return ret;
+}
+
+static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
+
/* mac (re)configuration */
if (mvpp2_is_xlg(state->interface))
mvpp2_xlg_config(port, mode, state);
@@ -5742,11 +6087,51 @@ static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
mvpp2_port_loopback_set(port, state);
+}
+
+static int mvpp2_mac_finish(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
+
+ if (port->priv->hw_version == MVPP22 &&
+ port->phy_interface != interface) {
+ port->phy_interface = interface;
- if (port->priv->hw_version == MVPP22 && change_interface)
+ /* Reconfigure the serdes lanes */
+ mvpp22_mode_reconfigure(port);
+
+ /* Unmask interrupts */
mvpp22_gop_unmask_irq(port);
+ }
+
+ if (!mvpp2_is_xlg(interface)) {
+ /* Release GMAC reset and wait */
+ mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG,
+ MVPP2_GMAC_PORT_RESET_MASK, 0);
+
+ while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
+ MVPP2_GMAC_PORT_RESET_MASK)
+ continue;
+ }
mvpp2_port_enable(port);
+
+ /* Allow the link to come up if in in-band mode, otherwise the
+ * link is forced via mac_link_down()/mac_link_up()
+ */
+ if (phylink_autoneg_inband(mode)) {
+ if (mvpp2_is_xlg(interface))
+ mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
+ MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
+ MVPP22_XLG_CTRL0_FORCE_LINK_DOWN, 0);
+ else
+ mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
+ MVPP2_GMAC_FORCE_LINK_PASS |
+ MVPP2_GMAC_FORCE_LINK_DOWN, 0);
+ }
+
+ return 0;
}
static void mvpp2_mac_link_up(struct phylink_config *config,
@@ -5843,13 +6228,36 @@ static void mvpp2_mac_link_down(struct phylink_config *config,
static const struct phylink_mac_ops mvpp2_phylink_ops = {
.validate = mvpp2_phylink_validate,
- .mac_pcs_get_state = mvpp2_phylink_mac_pcs_get_state,
- .mac_an_restart = mvpp2_mac_an_restart,
+ .mac_prepare = mvpp2_mac_prepare,
.mac_config = mvpp2_mac_config,
+ .mac_finish = mvpp2_mac_finish,
.mac_link_up = mvpp2_mac_link_up,
.mac_link_down = mvpp2_mac_link_down,
};
+/* Work-around for ACPI */
+static void mvpp2_acpi_start(struct mvpp2_port *port)
+{
+ /* Phylink isn't used for ACPI as of now, so the MAC has to be
+ * configured manually when the interface is started. This will
+ * be removed as soon as phylink ACPI support lands.
+ */
+ struct phylink_link_state state = {
+ .interface = port->phy_interface,
+ };
+ mvpp2__mac_prepare(&port->phylink_config, MLO_AN_INBAND,
+ port->phy_interface);
+ mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state);
+ port->phylink_pcs.ops->pcs_config(&port->phylink_pcs, MLO_AN_INBAND,
+ port->phy_interface,
+ state.advertising, false);
+ mvpp2_mac_finish(&port->phylink_config, MLO_AN_INBAND,
+ port->phy_interface);
+ mvpp2_mac_link_up(&port->phylink_config, NULL,
+ MLO_AN_INBAND, port->phy_interface,
+ SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false);
+}
+
/* Ports initialization */
static int mvpp2_port_probe(struct platform_device *pdev,
struct fwnode_handle *port_fwnode,
@@ -5937,16 +6345,16 @@ static int mvpp2_port_probe(struct platform_device *pdev,
goto err_free_netdev;
if (port_node)
- port->link_irq = of_irq_get_byname(port_node, "link");
+ port->port_irq = of_irq_get_byname(port_node, "link");
else
- port->link_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
- if (port->link_irq == -EPROBE_DEFER) {
+ port->port_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
+ if (port->port_irq == -EPROBE_DEFER) {
err = -EPROBE_DEFER;
goto err_deinit_qvecs;
}
- if (port->link_irq <= 0)
+ if (port->port_irq <= 0)
/* the link irq is optional */
- port->link_irq = 0;
+ port->port_irq = 0;
if (fwnode_property_read_bool(port_fwnode, "marvell,loopback"))
port->flags |= MVPP2_F_LOOPBACK;
@@ -5983,6 +6391,12 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port->stats_base = port->priv->iface_base +
MVPP22_MIB_COUNTERS_OFFSET +
port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ;
+
+ /* We may want a property to describe whether we should use
+ * MAC hardware timestamping.
+ */
+ if (priv->tai)
+ port->hwtstamp = true;
}
/* Alloc per-cpu and ethtool stats */
@@ -6110,8 +6524,8 @@ err_free_txq_pcpu:
err_free_stats:
free_percpu(port->stats);
err_free_irq:
- if (port->link_irq)
- irq_dispose_mapping(port->link_irq);
+ if (port->port_irq)
+ irq_dispose_mapping(port->port_irq);
err_deinit_qvecs:
mvpp2_queue_vectors_deinit(port);
err_free_netdev:
@@ -6132,8 +6546,8 @@ static void mvpp2_port_remove(struct mvpp2_port *port)
for (i = 0; i < port->ntxqs; i++)
free_percpu(port->txqs[i]->pcpu);
mvpp2_queue_vectors_deinit(port);
- if (port->link_irq)
- irq_dispose_mapping(port->link_irq);
+ if (port->port_irq)
+ irq_dispose_mapping(port->port_irq);
free_netdev(port->dev);
}
@@ -6545,6 +6959,10 @@ static int mvpp2_probe(struct platform_device *pdev)
goto err_axi_clk;
}
+ err = mvpp22_tai_probe(&pdev->dev, priv);
+ if (err < 0)
+ goto err_axi_clk;
+
/* Initialize ports */
fwnode_for_each_available_child_node(fwnode, port_fwnode) {
err = mvpp2_port_probe(pdev, port_fwnode, priv);
@@ -6663,11 +7081,13 @@ static const struct of_device_id mvpp2_match[] = {
};
MODULE_DEVICE_TABLE(of, mvpp2_match);
+#ifdef CONFIG_ACPI
static const struct acpi_device_id mvpp2_acpi_match[] = {
{ "MRVL0110", MVPP22 },
{ },
};
MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);
+#endif
static struct platform_driver mvpp2_driver = {
.probe = mvpp2_probe,
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_tai.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_tai.c
new file mode 100644
index 000000000000..95862aff49f1
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_tai.c
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Marvell PP2.2 TAI support
+ *
+ * Note:
+ * Do NOT use the event capture support.
+ * Do not even set the MPP muxes to allow PTP_EVENT_REQ to be used.
+ * It will disrupt the operation of this driver, and there is nothing
+ * that this driver can do to prevent that. Even using PTP_EVENT_REQ
+ * as an output will be seen as a trigger input, which can't be masked.
+ * Whenever a trigger input is seen, the action in the TCFCR0_TCF
+ * field will be performed - whether it is a set, increment, decrement,
+ * read, or frequency update.
+ *
+ * Other notes (useful, not specified in the documentation):
+ * - PTP_PULSE_OUT (PTP_EVENT_REQ MPP)
+ * It looks like the hardware can't generate a pulse at nsec=0. (The
+ * output doesn't trigger if the nsec field is zero.)
+ * Note: when configured as an output via the register at 0xfX441120,
+ * the input is still very much alive, and will trigger the current TCF
+ * function.
+ * - PTP_CLK_OUT (PTP_TRIG_GEN MPP)
+ * This generates a "PPS" signal determined by the CCC registers. It
+ * seems this is not aligned to the TOD counter in any way (it may be
+ * initially, but if you specify a non-round second interval, it won't,
+ * and you can't easily get it back.)
+ * - PTP_PCLK_OUT
+ * This generates a 50% duty cycle clock based on the TOD counter, and
+ * it seems it can be set to any period with 1 ns resolution. It is probably
+ * limited by the TOD step size. Its period is defined by the PCLK_CCC
+ * registers. Again, its alignment to the second is questionable.
+ *
+ * Consequently, we support none of these.
+ */
+#include <linux/io.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/slab.h>
+
+#include "mvpp2.h"
+
+#define CR0_SW_NRESET BIT(0)
+
+#define TCFCR0_PHASE_UPDATE_ENABLE BIT(8)
+#define TCFCR0_TCF_MASK (7 << 2)
+#define TCFCR0_TCF_UPDATE (0 << 2)
+#define TCFCR0_TCF_FREQUPDATE (1 << 2)
+#define TCFCR0_TCF_INCREMENT (2 << 2)
+#define TCFCR0_TCF_DECREMENT (3 << 2)
+#define TCFCR0_TCF_CAPTURE (4 << 2)
+#define TCFCR0_TCF_NOP (7 << 2)
+#define TCFCR0_TCF_TRIGGER BIT(0)
+
+#define TCSR_CAPTURE_1_VALID BIT(1)
+#define TCSR_CAPTURE_0_VALID BIT(0)
+
+struct mvpp2_tai {
+ struct ptp_clock_info caps;
+ struct ptp_clock *ptp_clock;
+ void __iomem *base;
+ spinlock_t lock;
+ u64 period; // nanosecond period in 32.32 fixed point
+ /* This timestamp is updated every two seconds */
+ struct timespec64 stamp;
+};
+
+static void mvpp2_tai_modify(void __iomem *reg, u32 mask, u32 set)
+{
+ u32 val;
+
+ val = readl_relaxed(reg) & ~mask;
+ val |= set & mask;
+ writel(val, reg);
+}
+
+static void mvpp2_tai_write(u32 val, void __iomem *reg)
+{
+ writel_relaxed(val & 0xffff, reg);
+}
+
+static u32 mvpp2_tai_read(void __iomem *reg)
+{
+ return readl_relaxed(reg) & 0xffff;
+}
+
+static struct mvpp2_tai *ptp_to_tai(struct ptp_clock_info *ptp)
+{
+ return container_of(ptp, struct mvpp2_tai, caps);
+}
+
+static void mvpp22_tai_read_ts(struct timespec64 *ts, void __iomem *base)
+{
+ ts->tv_sec = (u64)mvpp2_tai_read(base + 0) << 32 |
+ mvpp2_tai_read(base + 4) << 16 |
+ mvpp2_tai_read(base + 8);
+
+ ts->tv_nsec = mvpp2_tai_read(base + 12) << 16 |
+ mvpp2_tai_read(base + 16);
+
+ /* Read and discard fractional part */
+ readl_relaxed(base + 20);
+ readl_relaxed(base + 24);
+}
+
+static void mvpp2_tai_write_tlv(const struct timespec64 *ts, u32 frac,
+ void __iomem *base)
+{
+ mvpp2_tai_write(ts->tv_sec >> 32, base + MVPP22_TAI_TLV_SEC_HIGH);
+ mvpp2_tai_write(ts->tv_sec >> 16, base + MVPP22_TAI_TLV_SEC_MED);
+ mvpp2_tai_write(ts->tv_sec, base + MVPP22_TAI_TLV_SEC_LOW);
+ mvpp2_tai_write(ts->tv_nsec >> 16, base + MVPP22_TAI_TLV_NANO_HIGH);
+ mvpp2_tai_write(ts->tv_nsec, base + MVPP22_TAI_TLV_NANO_LOW);
+ mvpp2_tai_write(frac >> 16, base + MVPP22_TAI_TLV_FRAC_HIGH);
+ mvpp2_tai_write(frac, base + MVPP22_TAI_TLV_FRAC_LOW);
+}
+
+static void mvpp2_tai_op(u32 op, void __iomem *base)
+{
+ /* Trigger the operation. Note that an external unmaskable
+ * event on PTP_EVENT_REQ will also trigger this action.
+ */
+ mvpp2_tai_modify(base + MVPP22_TAI_TCFCR0,
+ TCFCR0_TCF_MASK | TCFCR0_TCF_TRIGGER,
+ op | TCFCR0_TCF_TRIGGER);
+ mvpp2_tai_modify(base + MVPP22_TAI_TCFCR0, TCFCR0_TCF_MASK,
+ TCFCR0_TCF_NOP);
+}
+
+/* The adjustment has a range of +0.5ns to -0.5ns in 2^32 steps, so has units
+ * of 2^-32 ns.
+ *
+ * units(s) = 1 / (2^32 * 10^9)
+ * fractional = abs_scaled_ppm / (2^16 * 10^6)
+ *
+ * What we want to achieve:
+ * freq_adjusted = freq_nominal * (1 + fractional)
+ * freq_delta = freq_adjusted - freq_nominal => positive = faster
+ * freq_delta = freq_nominal * (1 + fractional) - freq_nominal
+ * So: freq_delta = freq_nominal * fractional
+ *
+ * However, we are dealing with periods, so:
+ * period_adjusted = period_nominal / (1 + fractional)
+ * period_delta = period_nominal - period_adjusted => positive = faster
+ * period_delta = period_nominal * fractional / (1 + fractional)
+ *
+ * Hence:
+ * period_delta = period_nominal * abs_scaled_ppm /
+ * (2^16 * 10^6 + abs_scaled_ppm)
+ *
+ * To avoid overflow, we reduce both sides of the divide operation by a factor
+ * of 16.
+ */
+static u64 mvpp22_calc_frac_ppm(struct mvpp2_tai *tai, long abs_scaled_ppm)
+{
+ u64 val = tai->period * abs_scaled_ppm >> 4;
+
+ return div_u64(val, (1000000 << 12) + (abs_scaled_ppm >> 4));
+}
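As a worked example of the reduced divide above (assuming the 3 ns nominal
period that the probe function programs, i.e. period = 3 << 32), take
scaled_ppm = 65536, one ppm in the 16.16 scaled-ppm format:

	val     = ((3ULL << 32) * 65536) >> 4     /* = 3 << 44      */
	divisor = (1000000 << 12) + (65536 >> 4)  /* = 4096004096   */
	result  = div_u64(val, divisor)           /* = 12884        */

and 12884 * 2^-32 ns is about 3 fs - one ppm of the 3 ns period, as expected.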
+
+static s32 mvpp22_calc_max_adj(struct mvpp2_tai *tai)
+{
+ return 1000000;
+}
+
+static int mvpp22_tai_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct mvpp2_tai *tai = ptp_to_tai(ptp);
+ unsigned long flags;
+ void __iomem *base;
+ bool neg_adj;
+ s32 frac;
+ u64 val;
+
+ neg_adj = scaled_ppm < 0;
+ if (neg_adj)
+ scaled_ppm = -scaled_ppm;
+
+ val = mvpp22_calc_frac_ppm(tai, scaled_ppm);
+
+ /* Convert to a signed 32-bit adjustment */
+ if (neg_adj) {
+ /* -S32_MIN warns, -val < S32_MIN fails, so go for the easy
+ * solution.
+ */
+ if (val > 0x80000000)
+ return -ERANGE;
+
+ frac = -val;
+ } else {
+ if (val > S32_MAX)
+ return -ERANGE;
+
+ frac = val;
+ }
+
+ base = tai->base;
+ spin_lock_irqsave(&tai->lock, flags);
+ mvpp2_tai_write(frac >> 16, base + MVPP22_TAI_TLV_FRAC_HIGH);
+ mvpp2_tai_write(frac, base + MVPP22_TAI_TLV_FRAC_LOW);
+ mvpp2_tai_op(TCFCR0_TCF_FREQUPDATE, base);
+ spin_unlock_irqrestore(&tai->lock, flags);
+
+ return 0;
+}
+
+static int mvpp22_tai_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct mvpp2_tai *tai = ptp_to_tai(ptp);
+ struct timespec64 ts;
+ unsigned long flags;
+ void __iomem *base;
+ u32 tcf;
+
+ /* We can't deal with S64_MIN */
+ if (delta == S64_MIN)
+ return -ERANGE;
+
+ if (delta < 0) {
+ delta = -delta;
+ tcf = TCFCR0_TCF_DECREMENT;
+ } else {
+ tcf = TCFCR0_TCF_INCREMENT;
+ }
+
+ ts = ns_to_timespec64(delta);
+
+ base = tai->base;
+ spin_lock_irqsave(&tai->lock, flags);
+ mvpp2_tai_write_tlv(&ts, 0, base);
+ mvpp2_tai_op(tcf, base);
+ spin_unlock_irqrestore(&tai->lock, flags);
+
+ return 0;
+}
+
+static int mvpp22_tai_gettimex64(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct mvpp2_tai *tai = ptp_to_tai(ptp);
+ unsigned long flags;
+ void __iomem *base;
+ u32 tcsr;
+ int ret;
+
+ base = tai->base;
+ spin_lock_irqsave(&tai->lock, flags);
+ /* XXX: the only way to read the PTP time is for the CPU to trigger
+ * an event. However, there is no way to distinguish between the CPU
+ * triggered event, and an external event on PTP_EVENT_REQ. So this
+ * is incompatible with external use of PTP_EVENT_REQ.
+ */
+ ptp_read_system_prets(sts);
+ mvpp2_tai_modify(base + MVPP22_TAI_TCFCR0,
+ TCFCR0_TCF_MASK | TCFCR0_TCF_TRIGGER,
+ TCFCR0_TCF_CAPTURE | TCFCR0_TCF_TRIGGER);
+ ptp_read_system_postts(sts);
+ mvpp2_tai_modify(base + MVPP22_TAI_TCFCR0, TCFCR0_TCF_MASK,
+ TCFCR0_TCF_NOP);
+
+ tcsr = readl(base + MVPP22_TAI_TCSR);
+ if (tcsr & TCSR_CAPTURE_1_VALID) {
+ mvpp22_tai_read_ts(ts, base + MVPP22_TAI_TCV1_SEC_HIGH);
+ ret = 0;
+ } else if (tcsr & TCSR_CAPTURE_0_VALID) {
+ mvpp22_tai_read_ts(ts, base + MVPP22_TAI_TCV0_SEC_HIGH);
+ ret = 0;
+ } else {
+ /* We don't seem to have a reading... */
+ ret = -EBUSY;
+ }
+ spin_unlock_irqrestore(&tai->lock, flags);
+
+ return ret;
+}
+
+static int mvpp22_tai_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct mvpp2_tai *tai = ptp_to_tai(ptp);
+ unsigned long flags;
+ void __iomem *base;
+
+ base = tai->base;
+ spin_lock_irqsave(&tai->lock, flags);
+ mvpp2_tai_write_tlv(ts, 0, base);
+
+ /* Trigger an update to load the value from the TLV registers
+ * into the TOD counter. Note that an external unmaskable event on
+ * PTP_EVENT_REQ will also trigger this action.
+ */
+ mvpp2_tai_modify(base + MVPP22_TAI_TCFCR0,
+ TCFCR0_PHASE_UPDATE_ENABLE |
+ TCFCR0_TCF_MASK | TCFCR0_TCF_TRIGGER,
+ TCFCR0_TCF_UPDATE | TCFCR0_TCF_TRIGGER);
+ mvpp2_tai_modify(base + MVPP22_TAI_TCFCR0, TCFCR0_TCF_MASK,
+ TCFCR0_TCF_NOP);
+ spin_unlock_irqrestore(&tai->lock, flags);
+
+ return 0;
+}
+
+static long mvpp22_tai_aux_work(struct ptp_clock_info *ptp)
+{
+ struct mvpp2_tai *tai = ptp_to_tai(ptp);
+
+ mvpp22_tai_gettimex64(ptp, &tai->stamp, NULL);
+
+ return msecs_to_jiffies(2000);
+}
+
+static void mvpp22_tai_set_step(struct mvpp2_tai *tai)
+{
+ void __iomem *base = tai->base;
+ u32 nano, frac;
+
+ nano = upper_32_bits(tai->period);
+ frac = lower_32_bits(tai->period);
+
+ /* As the fractional nanosecond is a signed offset, if the MSB (sign)
+ * bit is set, we have to increment the whole nanoseconds.
+ */
+ if (frac >= 0x80000000)
+ nano += 1;
+
+ mvpp2_tai_write(nano, base + MVPP22_TAI_TOD_STEP_NANO_CR);
+ mvpp2_tai_write(frac >> 16, base + MVPP22_TAI_TOD_STEP_FRAC_HIGH);
+ mvpp2_tai_write(frac, base + MVPP22_TAI_TOD_STEP_FRAC_LOW);
+}
+
+static void mvpp22_tai_init(struct mvpp2_tai *tai)
+{
+ void __iomem *base = tai->base;
+
+ mvpp22_tai_set_step(tai);
+
+ /* Release the TAI reset */
+ mvpp2_tai_modify(base + MVPP22_TAI_CR0, CR0_SW_NRESET, CR0_SW_NRESET);
+}
+
+int mvpp22_tai_ptp_clock_index(struct mvpp2_tai *tai)
+{
+ return ptp_clock_index(tai->ptp_clock);
+}
+
+void mvpp22_tai_tstamp(struct mvpp2_tai *tai, u32 tstamp,
+ struct skb_shared_hwtstamps *hwtstamp)
+{
+ struct timespec64 ts;
+ int delta;
+
+ /* The tstamp consists of 2 bits of seconds and 30 bits of nanoseconds.
+ * We use our stored timestamp (tai->stamp) to form a full timestamp,
+ * and we must read the seconds exactly once.
+ */
+ ts.tv_sec = READ_ONCE(tai->stamp.tv_sec);
+ ts.tv_nsec = tstamp & 0x3fffffff;
+
+ /* Calculate the delta in seconds between our stored timestamp and
+ * the value read from the queue. Allow timestamps one second in the
+ * past, otherwise consider them to be in the future.
+ */
+ delta = ((tstamp >> 30) - (ts.tv_sec & 3)) & 3;
+ if (delta == 3)
+ delta -= 4;
+ ts.tv_sec += delta;
+
+ memset(hwtstamp, 0, sizeof(*hwtstamp));
+ hwtstamp->hwtstamp = timespec64_to_ktime(ts);
+}
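A worked example of the wrap handling above: if the stored stamp has
tv_sec & 3 == 2 and the queued tstamp carries seconds bits 1, then
delta = (1 - 2) & 3 = 3, which is folded to -1 - the packet was stamped one
second before the stored stamp. With seconds bits 3 instead,
delta = (3 - 2) & 3 = 1 and the timestamp is taken as one second in the
future.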
+
+void mvpp22_tai_start(struct mvpp2_tai *tai)
+{
+ long delay;
+
+ delay = mvpp22_tai_aux_work(&tai->caps);
+
+ ptp_schedule_worker(tai->ptp_clock, delay);
+}
+
+void mvpp22_tai_stop(struct mvpp2_tai *tai)
+{
+ ptp_cancel_worker_sync(tai->ptp_clock);
+}
+
+static void mvpp22_tai_remove(void *priv)
+{
+ struct mvpp2_tai *tai = priv;
+
+ if (!IS_ERR(tai->ptp_clock))
+ ptp_clock_unregister(tai->ptp_clock);
+}
+
+int mvpp22_tai_probe(struct device *dev, struct mvpp2 *priv)
+{
+ struct mvpp2_tai *tai;
+ int ret;
+
+ tai = devm_kzalloc(dev, sizeof(*tai), GFP_KERNEL);
+ if (!tai)
+ return -ENOMEM;
+
+ spin_lock_init(&tai->lock);
+
+ tai->base = priv->iface_base;
+
+ /* The step size consists of three registers - a 16-bit nanosecond step
+ * size, and a 32-bit fractional nanosecond step size split over two
+ * registers. The fractional nanosecond step size has units of 2^-32ns.
+ *
+ * To calculate this, we calculate:
+ * (10^9 + freq / 2) / (freq * 2^-32)
+ * which gives us the nanosecond step to the nearest integer in 16.32
+ * fixed point format, and the fractional part of the step size with
+ * the MSB inverted. With rounding of the fractional nanosecond, and
+ * simplification, this becomes:
+ * (10^9 << 32 + freq << 31 + (freq + 1) >> 1) / freq
+ *
+ * So:
+ * div = (10^9 << 32 + freq << 31 + (freq + 1) >> 1) / freq
+ * nano = upper_32_bits(div);
+ * frac = lower_32_bits(div) ^ 0x80000000;
+ * Will give the values for the registers.
+ *
+ * This all seems perfect, but alas it is not when considering the
+ * whole story. The system is clocked from 25MHz, which is multiplied
+ * by a PLL to 1GHz, and then divided by three, giving 333333333Hz
+ * (recurring). This gives exactly 3ns, but using 333333333Hz with
+ * the above gives an error of 13*2^-32ns.
+ *
+ * Consequently, we use the period rather than calculating from the
+ * frequency.
+ */
+ tai->period = 3ULL << 32;
+
+ mvpp22_tai_init(tai);
+
+ tai->caps.owner = THIS_MODULE;
+ strscpy(tai->caps.name, "Marvell PP2.2", sizeof(tai->caps.name));
+ tai->caps.max_adj = mvpp22_calc_max_adj(tai);
+ tai->caps.adjfine = mvpp22_tai_adjfine;
+ tai->caps.adjtime = mvpp22_tai_adjtime;
+ tai->caps.gettimex64 = mvpp22_tai_gettimex64;
+ tai->caps.settime64 = mvpp22_tai_settime64;
+ tai->caps.do_aux_work = mvpp22_tai_aux_work;
+
+ ret = devm_add_action(dev, mvpp22_tai_remove, tai);
+ if (ret)
+ return ret;
+
+ tai->ptp_clock = ptp_clock_register(&tai->caps, dev);
+ if (IS_ERR(tai->ptp_clock))
+ return PTR_ERR(tai->ptp_clock);
+
+ priv->tai = tai;
+
+ return 0;
+}
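The frequency formula in the probe comment is deliberately not used here, but
for clocks whose rate is exactly representable, a minimal sketch of that
calculation might look like the following (a hypothetical helper, not part of
this driver):

	static void mvpp22_tai_step_from_freq(u32 freq, u32 *nano, u32 *frac)
	{
		/* (10^9 << 32 + freq << 31 + (freq + 1) >> 1) / freq,
		 * per the comment in mvpp22_tai_probe() above.
		 */
		u64 div = div_u64((1000000000ULL << 32) +
				  ((u64)freq << 31) + ((freq + 1) >> 1),
				  freq);

		*nano = upper_32_bits(div);	/* whole nanoseconds */
		*frac = lower_32_bits(div) ^ 0x80000000; /* MSB inverted */
	}

For freq = 250000000 this yields nano = 4 and frac = 0, i.e. an exact 4 ns
step.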
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
index 1b25948c662b..2f7a861d0c7b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -3,9 +3,10 @@
# Makefile for Marvell's OcteonTX2 RVU Admin Function driver
#
+ccflags-y += -I$(src)
obj-$(CONFIG_OCTEONTX2_MBOX) += octeontx2_mbox.o
obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o
-octeontx2_mbox-y := mbox.o
+octeontx2_mbox-y := mbox.o rvu_trace.o
octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
- rvu_reg.o rvu_npc.o rvu_debugfs.o
+ rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index a4e65da8d95b..8f17e26dca53 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -468,6 +468,35 @@ static void cgx_lmac_pause_frm_config(struct cgx *cgx, int lmac_id, bool enable)
}
}
+void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!cgx)
+ return;
+
+ if (enable) {
+ /* Enable inbound PTP timestamping */
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg |= CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg |= CGX_SMUX_RX_FRM_CTL_PTP_MODE;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ } else {
+ /* Disable inbound PTP stamping */
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_PTP_MODE;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ }
+}
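The CGX_PTP_RX_ENABLE/DISABLE mailbox IDs added to mbox.h below are expected
to funnel into this helper from the AF side; a rough sketch of such a handler,
assuming the existing rvu_get_pf()/rvu_get_cgx_lmac_id()/rvu_cgx_pdata()
lookups (the handler itself is not part of this hunk):

	static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc,
				      bool enable)
	{
		int pf = rvu_get_pf(pcifunc);
		u8 cgx_id, lmac_id;

		/* Map the PF to its CGX/LMAC pair, then program the
		 * RX timestamping bits via the helper above.
		 */
		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf],
				    &cgx_id, &lmac_id);
		cgx_lmac_ptp_config(rvu_cgx_pdata(cgx_id, rvu),
				    lmac_id, enable);

		return 0;
	}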
+
/* CGX Firmware interface low level support */
static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
{
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 394f96591feb..27ca3291682b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -58,8 +58,10 @@
#define CGXX_SMUX_RX_FRM_CTL 0x20020
#define CGX_SMUX_RX_FRM_CTL_CTL_BCK BIT_ULL(3)
+#define CGX_SMUX_RX_FRM_CTL_PTP_MODE BIT_ULL(12)
#define CGXX_GMP_GMI_RXX_FRM_CTL 0x38028
#define CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK BIT_ULL(3)
+#define CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE BIT_ULL(12)
#define CGXX_SMUX_TX_CTL 0x20178
#define CGXX_SMUX_TX_PAUSE_PKT_TIME 0x20110
#define CGXX_SMUX_TX_PAUSE_PKT_INTERVAL 0x20120
@@ -139,4 +141,6 @@ int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
u8 *tx_pause, u8 *rx_pause);
int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
u8 tx_pause, u8 rx_pause);
+void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable);
+
#endif /* CGX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
index 2718fe201c14..bbabb8e64201 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -14,6 +14,7 @@
#include "rvu_reg.h"
#include "mbox.h"
+#include "rvu_trace.h"
static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
@@ -207,6 +208,9 @@ void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
*/
tx_hdr->num_msgs = mdev->num_msgs;
rx_hdr->num_msgs = 0;
+
+ trace_otx2_msg_send(mbox->pdev, tx_hdr->num_msgs, tx_hdr->msg_size);
+
spin_unlock(&mdev->mbox_lock);
/* The interrupt should be fired after num_msgs is written
@@ -303,10 +307,15 @@ int otx2_mbox_check_rsp_msgs(struct otx2_mbox *mbox, int devid)
struct mbox_msghdr *preq = mdev->mbase + ireq;
struct mbox_msghdr *prsp = mdev->mbase + irsp;
- if (preq->id != prsp->id)
+ if (preq->id != prsp->id) {
+ trace_otx2_msg_check(mbox->pdev, preq->id,
+ prsp->id, prsp->rc);
goto exit;
+ }
if (prsp->rc) {
rc = prsp->rc;
+ trace_otx2_msg_check(mbox->pdev, preq->id,
+ prsp->id, prsp->rc);
goto exit;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index ab433789d2c3..263a21129416 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -128,6 +128,7 @@ M(ATTACH_RESOURCES, 0x002, attach_resources, rsrc_attach, msg_rsp) \
M(DETACH_RESOURCES, 0x003, detach_resources, rsrc_detach, msg_rsp) \
M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \
M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
+M(PTP_OP, 0x007, ptp_op, ptp_req, ptp_rsp) \
M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \
/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \
@@ -144,6 +145,8 @@ M(CGX_STOP_LINKEVENTS, 0x208, cgx_stop_linkevents, msg_req, msg_rsp) \
M(CGX_GET_LINKINFO, 0x209, cgx_get_linkinfo, msg_req, cgx_link_info_msg) \
M(CGX_INTLBK_ENABLE, 0x20A, cgx_intlbk_enable, msg_req, msg_rsp) \
M(CGX_INTLBK_DISABLE, 0x20B, cgx_intlbk_disable, msg_req, msg_rsp) \
+M(CGX_PTP_RX_ENABLE, 0x20C, cgx_ptp_rx_enable, msg_req, msg_rsp) \
+M(CGX_PTP_RX_DISABLE, 0x20D, cgx_ptp_rx_disable, msg_req, msg_rsp) \
M(CGX_CFG_PAUSE_FRM, 0x20E, cgx_cfg_pause_frm, cgx_pause_frm_cfg, \
cgx_pause_frm_cfg) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
@@ -214,6 +217,8 @@ M(NIX_LSO_FORMAT_CFG, 0x8011, nix_lso_format_cfg, \
nix_lso_format_cfg, \
nix_lso_format_cfg_rsp) \
M(NIX_RXVLAN_ALLOC, 0x8012, nix_rxvlan_alloc, msg_req, msg_rsp) \
+M(NIX_LF_PTP_TX_ENABLE, 0x8013, nix_lf_ptp_tx_enable, msg_req, msg_rsp) \
+M(NIX_LF_PTP_TX_DISABLE, 0x8014, nix_lf_ptp_tx_disable, msg_req, msg_rsp) \
M(NIX_BP_ENABLE, 0x8016, nix_bp_enable, nix_bp_cfg_req, \
nix_bp_cfg_rsp) \
M(NIX_BP_DISABLE, 0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp) \
@@ -621,6 +626,7 @@ struct nix_rss_flowkey_cfg {
#define NIX_FLOW_KEY_TYPE_INNR_UDP BIT(15)
#define NIX_FLOW_KEY_TYPE_INNR_SCTP BIT(16)
#define NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC BIT(17)
+#define NIX_FLOW_KEY_TYPE_VLAN BIT(20)
u32 flowkey_cfg; /* Flowkey types selected */
u8 group; /* RSS context or group */
};
@@ -859,4 +865,20 @@ struct npc_get_kex_cfg_rsp {
u8 mkex_pfl_name[MKEX_NAME_LEN];
};
+enum ptp_op {
+ PTP_OP_ADJFINE = 0,
+ PTP_OP_GET_CLOCK = 1,
+};
+
+struct ptp_req {
+ struct mbox_msghdr hdr;
+ u8 op;
+ s64 scaled_ppm;
+};
+
+struct ptp_rsp {
+ struct mbox_msghdr hdr;
+ u64 clk;
+};
+
#endif /* MBOX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index 3803af9231c6..91a9d00e4fb5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -49,6 +49,7 @@ enum npc_kpu_lb_ltype {
NPC_LT_LB_EDSA_VLAN,
NPC_LT_LB_EXDSA,
NPC_LT_LB_EXDSA_VLAN,
+ NPC_LT_LB_FDSA,
NPC_LT_LB_CUSTOM0 = 0xE,
NPC_LT_LB_CUSTOM1 = 0xF,
};
@@ -77,21 +78,21 @@ enum npc_kpu_ld_ltype {
NPC_LT_LD_ICMP,
NPC_LT_LD_SCTP,
NPC_LT_LD_ICMP6,
+ NPC_LT_LD_CUSTOM0,
+ NPC_LT_LD_CUSTOM1,
NPC_LT_LD_IGMP = 8,
- NPC_LT_LD_ESP,
NPC_LT_LD_AH,
NPC_LT_LD_GRE,
NPC_LT_LD_NVGRE,
NPC_LT_LD_NSH,
NPC_LT_LD_TU_MPLS_IN_NSH,
NPC_LT_LD_TU_MPLS_IN_IP,
- NPC_LT_LD_CUSTOM0 = 0xE,
- NPC_LT_LD_CUSTOM1 = 0xF,
};
enum npc_kpu_le_ltype {
NPC_LT_LE_VXLAN = 1,
NPC_LT_LE_GENEVE,
+ NPC_LT_LE_ESP,
NPC_LT_LE_GTPU = 4,
NPC_LT_LE_VXLANGPE,
NPC_LT_LE_GTPC,
@@ -173,8 +174,8 @@ struct npc_kpu_profile_action {
struct npc_kpu_profile {
int cam_entries;
int action_entries;
- struct npc_kpu_profile_cam *cam;
- struct npc_kpu_profile_action *action;
+ const struct npc_kpu_profile_cam *cam;
+ const struct npc_kpu_profile_action *action;
};
/* NPC KPU register formats */
@@ -296,6 +297,9 @@ struct nix_rx_action {
#endif
};
+/* NPC_AF_INTFX_KEX_CFG field masks */
+#define NPC_PARSE_NIBBLE GENMASK_ULL(30, 0)
+
/* NIX Receive Vtag Action Structure */
#define VTAG0_VALID_BIT BIT_ULL(15)
#define VTAG0_TYPE_MASK GENMASK_ULL(14, 12)
@@ -320,4 +324,37 @@ struct npc_mcam_kex {
u64 intf_ld_flags[NPC_MAX_INTF][NPC_MAX_LD][NPC_MAX_LFL];
} __packed;
+struct npc_lt_def {
+ u8 ltype_mask;
+ u8 ltype_match;
+ u8 lid;
+};
+
+struct npc_lt_def_ipsec {
+ u8 ltype_mask;
+ u8 ltype_match;
+ u8 lid;
+ u8 spi_offset;
+ u8 spi_nz;
+};
+
+struct npc_lt_def_cfg {
+ struct npc_lt_def rx_ol2;
+ struct npc_lt_def rx_oip4;
+ struct npc_lt_def rx_iip4;
+ struct npc_lt_def rx_oip6;
+ struct npc_lt_def rx_iip6;
+ struct npc_lt_def rx_otcp;
+ struct npc_lt_def rx_itcp;
+ struct npc_lt_def rx_oudp;
+ struct npc_lt_def rx_iudp;
+ struct npc_lt_def rx_osctp;
+ struct npc_lt_def rx_isctp;
+ struct npc_lt_def_ipsec rx_ipsec[2];
+ struct npc_lt_def pck_ol2;
+ struct npc_lt_def pck_oip4;
+ struct npc_lt_def pck_oip6;
+ struct npc_lt_def pck_iip4;
+};
+
#endif /* NPC_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
index aa2727e6211a..77bb4ed32600 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
@@ -63,6 +63,7 @@
#define NPC_UDP_PORT_VXLANGPE 4790
#define NPC_UDP_PORT_GENEVE 6081
#define NPC_UDP_PORT_MPLS 6635
+#define NPC_UDP_PORT_ESP 4500
#define NPC_VXLANGPE_NP_IP 0x1
#define NPC_VXLANGPE_NP_IP6 0x2
@@ -139,6 +140,13 @@
#define NPC_DSA_EXTEND 0x1000
#define NPC_DSA_EDSA 0x8000
+#define NPC_DSA_FDSA 0xc000
+
+#define NPC_KEXOF_DMAC 8
+#define MKEX_SIGN 0x19bbfdbd15f /* strtoull of "mkexprof" with base:36 */
+#define KEX_LD_CFG(bytesm1, hdr_ofs, ena, flags_ena, key_ofs) \
+ (((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) | \
+ ((flags_ena) << 6) | ((key_ofs) & 0x3F))
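For example, the Ethertype extractor used in the default MKEX profile below
expands as:

	KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x4)
		= (1 << 16) | (0xc << 8) | (1 << 7) | (0 << 6) | 0x4
		= 0x10c84

i.e. extract bytesm1 + 1 = 2 bytes from header offset 0xc (the Ethertype of
an untagged frame) into key offset 4, which is KW0[47:32].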
enum npc_kpu_parser_state {
NPC_S_NA = 0,
@@ -166,6 +174,7 @@ enum npc_kpu_parser_state {
NPC_S_KPU3_DSA,
NPC_S_KPU4_MPLS,
NPC_S_KPU4_NSH,
+ NPC_S_KPU4_FDSA,
NPC_S_KPU5_IP,
NPC_S_KPU5_IP6,
NPC_S_KPU5_ARP,
@@ -189,7 +198,6 @@ enum npc_kpu_parser_state {
NPC_S_KPU8_IGMP,
NPC_S_KPU8_ICMP6,
NPC_S_KPU8_GRE,
- NPC_S_KPU8_ESP,
NPC_S_KPU8_AH,
NPC_S_KPU9_TU_MPLS_IN_GRE,
NPC_S_KPU9_TU_MPLS_IN_NSH,
@@ -201,6 +209,7 @@ enum npc_kpu_parser_state {
NPC_S_KPU9_GENEVE,
NPC_S_KPU9_GTPC,
NPC_S_KPU9_GTPU,
+ NPC_S_KPU9_ESP,
NPC_S_KPU10_TU_MPLS_IN_VXLANGPE,
NPC_S_KPU10_TU_MPLS_PL,
NPC_S_KPU10_TU_MPLS,
@@ -271,6 +280,7 @@ enum npc_kpu_lb_lflag {
NPC_F_LB_L_EDSA_VLAN,
NPC_F_LB_L_EXDSA,
NPC_F_LB_L_EXDSA_VLAN,
+ NPC_F_LB_L_FDSA,
};
enum npc_kpu_lc_uflag {
@@ -418,7 +428,7 @@ enum NPC_ERRLEV_E {
NPC_ERRLEV_ENUM_LAST = 16,
};
-static struct npc_kpu_profile_action ikpu_action_entries[] = {
+static const struct npc_kpu_profile_action ikpu_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
12, 16, 20, 0, 0,
@@ -979,7 +989,7 @@ static struct npc_kpu_profile_action ikpu_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 12, 16, 20, 0, 0,
+ 12, 14, 20, 0, 0,
NPC_S_KPU1_EXDSA, 0, 0,
NPC_LID_LA, NPC_LT_NA,
0,
@@ -997,7 +1007,7 @@ static struct npc_kpu_profile_action ikpu_action_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
{
NPC_S_KPU1_ETHER, 0xff,
NPC_ETYPE_IP,
@@ -1351,10 +1361,19 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_EXDSA, 0xff,
+ 0x0000,
+ 0x0000,
NPC_DSA_EXTEND,
NPC_DSA_EXTEND,
0x0000,
0x0000,
+ },
+ {
+ NPC_S_KPU1_EXDSA, 0xff,
+ NPC_DSA_FDSA,
+ NPC_DSA_FDSA,
+ 0x0000,
+ 0x0000,
0x0000,
0x0000,
},
@@ -1666,7 +1685,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu2_cam_entries[] = {
{
NPC_S_KPU2_CTAG, 0xff,
NPC_ETYPE_IP,
@@ -2794,7 +2813,7 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu3_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu3_cam_entries[] = {
{
NPC_S_KPU3_CTAG, 0xff,
NPC_ETYPE_IP,
@@ -3913,7 +3932,7 @@ static struct npc_kpu_profile_cam kpu3_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu4_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu4_cam_entries[] = {
{
NPC_S_KPU4_MPLS, 0xff,
NPC_MPLS_S,
@@ -3996,6 +4015,69 @@ static struct npc_kpu_profile_cam kpu4_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ 0x0000,
+ NPC_DSA_FDSA,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -4006,7 +4088,7 @@ static struct npc_kpu_profile_cam kpu4_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
{
NPC_S_KPU5_IP, 0xff,
0x0000,
@@ -4576,7 +4658,7 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu6_cam_entries[] = {
{
NPC_S_KPU6_IP6_EXT, 0xff,
0x0000,
@@ -4921,7 +5003,7 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu7_cam_entries[] = {
{
NPC_S_KPU7_IP6_EXT, 0xff,
0x0000,
@@ -5140,7 +5222,7 @@ static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
{
NPC_S_KPU8_TCP, 0xff,
0x0000,
@@ -5341,15 +5423,24 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
},
{
NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_ESP,
+ 0xffff,
0x0000,
0x0000,
0x0000,
0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_UDP_PORT_ESP,
+ 0xffff,
0x0000,
0x0000,
},
{
- NPC_S_KPU8_SCTP, 0xff,
+ NPC_S_KPU8_UDP, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5358,7 +5449,7 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU8_ICMP, 0xff,
+ NPC_S_KPU8_SCTP, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5367,7 +5458,7 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU8_IGMP, 0xff,
+ NPC_S_KPU8_ICMP, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5376,7 +5467,7 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU8_ICMP6, 0xff,
+ NPC_S_KPU8_IGMP, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5385,7 +5476,7 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU8_ESP, 0xff,
+ NPC_S_KPU8_ICMP6, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5872,7 +5963,7 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu9_cam_entries[] = {
{
NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff,
NPC_MPLS_S,
@@ -6324,6 +6415,15 @@ static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
NPC_MPLS_S,
},
{
+ NPC_S_KPU9_ESP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -6334,7 +6434,7 @@ static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu10_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu10_cam_entries[] = {
{
NPC_S_KPU10_TU_MPLS, 0xff,
NPC_MPLS_S,
@@ -6499,7 +6599,7 @@ static struct npc_kpu_profile_cam kpu10_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu11_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu11_cam_entries[] = {
{
NPC_S_KPU11_TU_ETHER, 0xff,
NPC_ETYPE_IP,
@@ -6808,7 +6908,7 @@ static struct npc_kpu_profile_cam kpu11_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu12_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu12_cam_entries[] = {
{
NPC_S_KPU12_TU_IP, 0xff,
NPC_IPNH_TCP,
@@ -7063,7 +7163,7 @@ static struct npc_kpu_profile_cam kpu12_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu13_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu13_cam_entries[] = {
{
NPC_S_KPU13_TU_IP6_EXT, 0xff,
0x0000,
@@ -7075,7 +7175,7 @@ static struct npc_kpu_profile_cam kpu13_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu14_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu14_cam_entries[] = {
{
NPC_S_KPU14_TU_IP6_EXT, 0xff,
0x0000,
@@ -7087,7 +7187,7 @@ static struct npc_kpu_profile_cam kpu14_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu15_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu15_cam_entries[] = {
{
NPC_S_KPU15_TU_TCP, 0xff,
0x0000,
@@ -7288,7 +7388,7 @@ static struct npc_kpu_profile_cam kpu15_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu16_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu16_cam_entries[] = {
{
NPC_S_KPU16_TCP_DATA, 0xff,
0x0000,
@@ -7345,7 +7445,7 @@ static struct npc_kpu_profile_cam kpu16_cam_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu1_action_entries[] = {
+static const struct npc_kpu_profile_action kpu1_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 3, 0,
@@ -7673,6 +7773,14 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 16, 2, 0,
+ NPC_S_KPU4_FDSA, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LA, NPC_EC_EDSA_UNK,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
@@ -7962,7 +8070,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu2_action_entries[] = {
+static const struct npc_kpu_profile_action kpu2_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 2, 0,
@@ -8965,7 +9073,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu3_action_entries[] = {
+static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 1, 0,
@@ -9960,7 +10068,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu4_action_entries[] = {
+static const struct npc_kpu_profile_action kpu4_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 0,
@@ -10034,6 +10142,62 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_IP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_IP6, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_ARP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_RARP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_PTP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_FCOE, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_U_UNK_ETYPE | NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LB, NPC_EC_L2_K4,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -10043,7 +10207,7 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu5_action_entries[] = {
+static const struct npc_kpu_profile_action kpu5_action_entries[] = {
{
NPC_ERRLEV_LC, NPC_EC_IP_TTL_0,
0, 0, 0, 0, 1,
@@ -10102,8 +10266,8 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU8_ESP, 20, 1,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU9_ESP, 20, 1,
NPC_LID_LC, NPC_LT_LC_IP,
0,
0, 0, 0, 0,
@@ -10206,8 +10370,8 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU8_ESP, 0, 1,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU9_ESP, 0, 1,
NPC_LID_LC, NPC_LT_LC_IP_OPT,
0,
0, 0xf, 0, 2,
@@ -10414,8 +10578,8 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU8_ESP, 40, 1,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU9_ESP, 40, 1,
NPC_LID_LC, NPC_LT_LC_IP6_EXT,
0,
0, 0, 0, 0,
@@ -10550,7 +10714,7 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu6_action_entries[] = {
+static const struct npc_kpu_profile_action kpu6_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -10561,80 +10725,80 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 12, 0, 1, 0,
- NPC_S_KPU8_TCP, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 8, 10, 1, 0,
- NPC_S_KPU8_UDP, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU8_SCTP, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU8_ICMP, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU8_ICMP6, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU8_ESP, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU8_AH, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU8_GRE, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 5, 0,
- NPC_S_KPU12_TU_IP6, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 6, 10, 2, 0,
- NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -10689,8 +10853,8 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU8_ESP, 8, 0,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU9_ESP, 8, 0,
NPC_LID_LC, NPC_LT_NA,
0,
1, 0xff, 0, 3,
@@ -10793,8 +10957,8 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU8_ESP, 8, 0,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU9_ESP, 8, 0,
NPC_LID_LC, NPC_LT_NA,
0,
1, 0xff, 0, 3,
@@ -10857,7 +11021,7 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu7_action_entries[] = {
+static const struct npc_kpu_profile_action kpu7_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -10908,8 +11072,8 @@ static struct npc_kpu_profile_action kpu7_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 0,
- NPC_S_KPU8_ESP, 8, 0,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU9_ESP, 8, 0,
NPC_LID_LC, NPC_LT_NA,
0,
1, 0xff, 0, 3,
@@ -10956,80 +11120,80 @@ static struct npc_kpu_profile_action kpu7_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 12, 0, 0, 0,
- NPC_S_KPU8_TCP, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 8, 10, 0, 0,
- NPC_S_KPU8_UDP, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 0,
- NPC_S_KPU8_SCTP, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 0,
- NPC_S_KPU8_ICMP, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 0,
- NPC_S_KPU8_ICMP6, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 0,
- NPC_S_KPU8_ESP, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 0,
- NPC_S_KPU8_AH, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 0,
- NPC_S_KPU8_GRE, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 4, 0,
- NPC_S_KPU12_TU_IP6, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 6, 10, 1, 0,
- NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -11052,7 +11216,7 @@ static struct npc_kpu_profile_action kpu7_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu8_action_entries[] = {
+static const struct npc_kpu_profile_action kpu8_action_entries[] = {
{
NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_FIN_ONLY,
0, 0, 0, 0, 1,
@@ -11231,6 +11395,22 @@ static struct npc_kpu_profile_action kpu8_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU9_ESP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU9_ESP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 7, 0,
NPC_S_KPU16_UDP_DATA, 8, 1,
NPC_LID_LD, NPC_LT_LD_UDP,
@@ -11273,14 +11453,6 @@ static struct npc_kpu_profile_action kpu8_action_entries[] = {
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_ESP,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
NPC_LID_LD, NPC_LT_LD_AH,
0,
0, 0, 0, 0,
@@ -11703,7 +11875,7 @@ static struct npc_kpu_profile_action kpu8_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu9_action_entries[] = {
+static const struct npc_kpu_profile_action kpu9_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 0,
@@ -12105,6 +12277,14 @@ static struct npc_kpu_profile_action kpu9_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_ESP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LE, NPC_EC_UNK,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -12114,7 +12294,7 @@ static struct npc_kpu_profile_action kpu9_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu10_action_entries[] = {
+static const struct npc_kpu_profile_action kpu10_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 1, 0,
@@ -12261,7 +12441,7 @@ static struct npc_kpu_profile_action kpu10_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu11_action_entries[] = {
+static const struct npc_kpu_profile_action kpu11_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 0, 0,
@@ -12536,7 +12716,7 @@ static struct npc_kpu_profile_action kpu11_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu12_action_entries[] = {
+static const struct npc_kpu_profile_action kpu12_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 12, 0, 2, 0,
@@ -12763,7 +12943,7 @@ static struct npc_kpu_profile_action kpu12_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu13_action_entries[] = {
+static const struct npc_kpu_profile_action kpu13_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -12774,7 +12954,7 @@ static struct npc_kpu_profile_action kpu13_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu14_action_entries[] = {
+static const struct npc_kpu_profile_action kpu14_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -12785,7 +12965,7 @@ static struct npc_kpu_profile_action kpu14_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu15_action_entries[] = {
+static const struct npc_kpu_profile_action kpu15_action_entries[] = {
{
NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_FIN_ONLY,
0, 0, 0, 0, 1,
@@ -12964,7 +13144,7 @@ static struct npc_kpu_profile_action kpu15_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu16_action_entries[] = {
+static const struct npc_kpu_profile_action kpu16_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -13015,7 +13195,7 @@ static struct npc_kpu_profile_action kpu16_action_entries[] = {
},
};
-static struct npc_kpu_profile npc_kpu_profiles[] = {
+static const struct npc_kpu_profile npc_kpu_profiles[] = {
{
ARRAY_SIZE(kpu1_cam_entries),
ARRAY_SIZE(kpu1_action_entries),
@@ -13114,4 +13294,163 @@ static struct npc_kpu_profile npc_kpu_profiles[] = {
},
};
+static const struct npc_lt_def_cfg npc_lt_defaults = {
+ .rx_ol2 = {
+ .lid = NPC_LID_LA,
+ .ltype_match = NPC_LT_LA_ETHER,
+ .ltype_mask = 0x0F,
+ },
+ .rx_oip4 = {
+ .lid = NPC_LID_LC,
+ .ltype_match = NPC_LT_LC_IP,
+ .ltype_mask = 0x0E,
+ },
+ .rx_iip4 = {
+ .lid = NPC_LID_LG,
+ .ltype_match = NPC_LT_LG_TU_IP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_oip6 = {
+ .lid = NPC_LID_LC,
+ .ltype_match = NPC_LT_LC_IP6,
+ .ltype_mask = 0x0E,
+ },
+ .rx_iip6 = {
+ .lid = NPC_LID_LG,
+ .ltype_match = NPC_LT_LG_TU_IP6,
+ .ltype_mask = 0x0F,
+ },
+ .rx_otcp = {
+ .lid = NPC_LID_LD,
+ .ltype_match = NPC_LT_LD_TCP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_itcp = {
+ .lid = NPC_LID_LH,
+ .ltype_match = NPC_LT_LH_TU_TCP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_oudp = {
+ .lid = NPC_LID_LD,
+ .ltype_match = NPC_LT_LD_UDP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_iudp = {
+ .lid = NPC_LID_LH,
+ .ltype_match = NPC_LT_LH_TU_UDP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_osctp = {
+ .lid = NPC_LID_LD,
+ .ltype_match = NPC_LT_LD_SCTP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_isctp = {
+ .lid = NPC_LID_LH,
+ .ltype_match = NPC_LT_LH_TU_SCTP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_ipsec = {
+ {
+ .lid = NPC_LID_LE,
+ .ltype_match = NPC_LT_LE_ESP,
+ .ltype_mask = 0x0F,
+ },
+ {
+ .spi_offset = 8,
+ .lid = NPC_LID_LH,
+ .ltype_match = NPC_LT_LH_TU_ESP,
+ .ltype_mask = 0x0F,
+ },
+ },
+ .pck_ol2 = {
+ .lid = NPC_LID_LA,
+ .ltype_match = NPC_LT_LA_ETHER,
+ .ltype_mask = 0x0F,
+ },
+ .pck_oip4 = {
+ .lid = NPC_LID_LC,
+ .ltype_match = NPC_LT_LC_IP,
+ .ltype_mask = 0x0E,
+ },
+ .pck_iip4 = {
+ .lid = NPC_LID_LG,
+ .ltype_match = NPC_LT_LG_TU_IP,
+ .ltype_mask = 0x0F,
+ },
+};
+
+static const struct npc_mcam_kex npc_mkex_default = {
+ .mkex_sign = MKEX_SIGN,
+ .name = "default",
+ .kpu_version = NPC_KPU_PROFILE_VER,
+ .keyx_cfg = {
+ /* nibble: LA..LE (ltype only) + Channel */
+ [NIX_INTF_RX] = ((u64)NPC_MCAM_KEY_X2 << 32) | 0x49247,
+ [NIX_INTF_TX] = ((u64)NPC_MCAM_KEY_X2 << 32) | ((1ULL << 19) - 1),
+ },
+ .intf_lid_lt_ld = {
+ /* Default RX MCAM KEX profile */
+ [NIX_INTF_RX] = {
+ [NPC_LID_LA] = {
+ /* Layer A: Ethernet: */
+ [NPC_LT_LA_ETHER] = {
+ /* DMAC: 6 bytes, KW1[47:0] */
+ KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, NPC_KEXOF_DMAC),
+ /* Ethertype: 2 bytes, KW0[47:32] */
+ KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x4),
+ },
+ },
+ [NPC_LID_LB] = {
+ /* Layer B: Single VLAN (CTAG) */
+ /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
+ [NPC_LT_LB_CTAG] = {
+ KEX_LD_CFG(0x03, 0x0, 0x1, 0x0, 0x4),
+ },
+ /* Layer B: Stacked VLAN (STAG|QinQ) */
+ [NPC_LT_LB_STAG_QINQ] = {
+ /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
+ KEX_LD_CFG(0x03, 0x4, 0x1, 0x0, 0x4),
+ },
+ [NPC_LT_LB_FDSA] = {
+ /* SWITCH PORT: 1 byte, KW0[63:48] */
+ KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0x6),
+ /* Ethertype: 2 bytes, KW0[47:32] */
+ KEX_LD_CFG(0x01, 0x4, 0x1, 0x0, 0x4),
+ },
+ },
+ [NPC_LID_LC] = {
+ /* Layer C: IPv4 */
+ [NPC_LT_LC_IP] = {
+ /* SIP+DIP: 8 bytes, KW2[63:0] */
+ KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10),
+ /* TOS: 1 byte, KW1[63:56] */
+ KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0xf),
+ },
+ /* Layer C: IPv6 */
+ [NPC_LT_LC_IP6] = {
+ /* Everything up to SADDR: 8 bytes, KW2[63:0] */
+ KEX_LD_CFG(0x07, 0x0, 0x1, 0x0, 0x10),
+ },
+ },
+ [NPC_LID_LD] = {
+ /* Layer D:UDP */
+ [NPC_LT_LD_UDP] = {
+ /* SPORT: 2 bytes, KW3[15:0] */
+ KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18),
+ /* DPORT: 2 bytes, KW3[31:16] */
+ KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a),
+ },
+ /* Layer D:TCP */
+ [NPC_LT_LD_TCP] = {
+ /* SPORT: 2 bytes, KW3[15:0] */
+ KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18),
+ /* DPORT: 2 bytes, KW3[31:16] */
+ KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a),
+ },
+ },
+ },
+ },
+};
+
#endif /* NPC_PROFILE_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
new file mode 100644
index 000000000000..f69f4f35ae48
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell PTP driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "ptp.h"
+#include "mbox.h"
+#include "rvu.h"
+
+#define DRV_NAME "Marvell PTP Driver"
+
+#define PCI_DEVID_OCTEONTX2_PTP 0xA00C
+#define PCI_SUBSYS_DEVID_OCTX2_98xx_PTP 0xB100
+#define PCI_SUBSYS_DEVID_OCTX2_96XX_PTP 0xB200
+#define PCI_SUBSYS_DEVID_OCTX2_95XX_PTP 0xB300
+#define PCI_SUBSYS_DEVID_OCTX2_LOKI_PTP 0xB400
+#define PCI_SUBSYS_DEVID_OCTX2_95MM_PTP 0xB500
+#define PCI_DEVID_OCTEONTX2_RST 0xA085
+
+#define PCI_PTP_BAR_NO 0
+#define PCI_RST_BAR_NO 0
+
+#define PTP_CLOCK_CFG 0xF00ULL
+#define PTP_CLOCK_CFG_PTP_EN BIT_ULL(0)
+#define PTP_CLOCK_LO 0xF08ULL
+#define PTP_CLOCK_HI 0xF10ULL
+#define PTP_CLOCK_COMP 0xF18ULL
+
+#define RST_BOOT 0x1600ULL
+#define RST_MUL_BITS GENMASK_ULL(38, 33)
+#define CLOCK_BASE_RATE 50000000ULL
+
+static u64 get_clock_rate(void)
+{
+ u64 cfg, ret = CLOCK_BASE_RATE * 16;
+ struct pci_dev *pdev;
+ void __iomem *base;
+
+	/* To get the input clock frequency at which the PTP co-processor
+	 * block is running, the base frequency (50 MHz) needs to be multiplied
+	 * by the multiplier bits present in the RST_BOOT register of the RESET
+	 * block. Hence the code below reads the multiplier bits from the RESET
+	 * PCI device present in the system.
+	 */
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_RST, NULL);
+ if (!pdev)
+ goto error;
+
+ base = pci_ioremap_bar(pdev, PCI_RST_BAR_NO);
+ if (!base)
+ goto error_put_pdev;
+
+ cfg = readq(base + RST_BOOT);
+ ret = CLOCK_BASE_RATE * FIELD_GET(RST_MUL_BITS, cfg);
+
+ iounmap(base);
+
+error_put_pdev:
+ pci_dev_put(pdev);
+
+error:
+ return ret;
+}
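For example, with RST_MUL_BITS reading 20 this gives 50 MHz * 20 = 1 GHz; if
the RESET device cannot be found or its BAR cannot be mapped, the function
falls back to the preset 50 MHz * 16 = 800 MHz.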
+
+struct ptp *ptp_get(void)
+{
+ struct pci_dev *pdev;
+ struct ptp *ptp;
+
+	/* If the PTP PCI device is found on the system and the ptp
+	 * driver is bound to it, then the PTP PCI device is returned
+	 * to the caller (the rvu driver).
+	 */
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_PTP, NULL);
+ if (!pdev)
+ return ERR_PTR(-ENODEV);
+
+ ptp = pci_get_drvdata(pdev);
+ if (!ptp)
+ ptp = ERR_PTR(-EPROBE_DEFER);
+ if (IS_ERR(ptp))
+ pci_dev_put(pdev);
+
+ return ptp;
+}
+
+void ptp_put(struct ptp *ptp)
+{
+ if (!ptp)
+ return;
+
+ pci_dev_put(ptp->pdev);
+}
+
+static int ptp_adjfine(struct ptp *ptp, long scaled_ppm)
+{
+ bool neg_adj = false;
+ u64 comp;
+ u64 adj;
+ s64 ppb;
+
+ if (scaled_ppm < 0) {
+ neg_adj = true;
+ scaled_ppm = -scaled_ppm;
+ }
+
+	/* The hardware adds the clock compensation value to the PTP clock
+	 * on every coprocessor clock cycle. The typical convention is that
+	 * it represents the number of nanoseconds between each cycle. In
+	 * this convention the compensation value is in a 64-bit fixed-point
+	 * representation, where the upper 32 bits are the number of
+	 * nanoseconds and the lower 32 bits are fractions of a nanosecond.
+	 * The scaled_ppm represents the ratio in "parts per million" by
+	 * which the compensation value should be corrected.
+	 * To calculate the new compensation value we use 64-bit fixed-point
+	 * arithmetic on the following formula
+	 *   comp = tbase + tbase * scaled_ppm / (1M * 2^16)
+	 * where tbase is the basic compensation value calculated
+	 * initially in the probe function.
+	 */
+ comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
+ /* convert scaled_ppm to ppb */
+ ppb = 1 + scaled_ppm;
+ ppb *= 125;
+ ppb >>= 13;
+ adj = comp * ppb;
+ adj = div_u64(adj, 1000000000ull);
+ comp = neg_adj ? comp - adj : comp + adj;
+
+ writeq(comp, ptp->reg_base + PTP_CLOCK_COMP);
+
+ return 0;
+}
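A worked example of the fixed-point steps above, assuming a 1 GHz coprocessor
clock: comp = (10^9 << 32) / 10^9 = 2^32, i.e. exactly 1 ns per cycle. For
scaled_ppm = 65536 (one ppm), the * 125 >> 13 step (the exact
ppm * 2^16 to ppb conversion, since 1000 / 65536 = 125 / 8192) gives
ppb = (65537 * 125) >> 13 = 1000, adj = div_u64(2^32 * 1000, 10^9) = 4294,
and the programmed value becomes 2^32 + 4294 - roughly 10^-6 ns extra per
cycle, i.e. one ppm faster.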
+
+static int ptp_get_clock(struct ptp *ptp, u64 *clk)
+{
+ /* Return the current PTP clock */
+ *clk = readq(ptp->reg_base + PTP_CLOCK_HI);
+
+ return 0;
+}
+
+static int ptp_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct ptp *ptp;
+ u64 clock_comp;
+ u64 clock_cfg;
+ int err;
+
+ ptp = devm_kzalloc(dev, sizeof(*ptp), GFP_KERNEL);
+ if (!ptp) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ ptp->pdev = pdev;
+
+ err = pcim_enable_device(pdev);
+ if (err)
+ goto error_free;
+
+ err = pcim_iomap_regions(pdev, 1 << PCI_PTP_BAR_NO, pci_name(pdev));
+ if (err)
+ goto error_free;
+
+ ptp->reg_base = pcim_iomap_table(pdev)[PCI_PTP_BAR_NO];
+
+ ptp->clock_rate = get_clock_rate();
+
+ /* Enable PTP clock */
+ clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
+ clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
+ writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
+
+ clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
+ /* Initial compensation value to start the nanosecs counter */
+ writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP);
+
+ pci_set_drvdata(pdev, ptp);
+
+ return 0;
+
+error_free:
+ devm_kfree(dev, ptp);
+
+error:
+ /* For `ptp_get()` we need to differentiate between the case
+ * when the core has not tried to probe this device and the case when
+ * the probe failed. In the latter case we pretend that the
+ * initialization was successful and keep the error in
+ * `dev->driver_data`.
+ */
+ pci_set_drvdata(pdev, ERR_PTR(err));
+ return 0;
+}
+
+static void ptp_remove(struct pci_dev *pdev)
+{
+ struct ptp *ptp = pci_get_drvdata(pdev);
+ u64 clock_cfg;
+
+ if (IS_ERR_OR_NULL(ptp))
+ return;
+
+ /* Disable PTP clock */
+ clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
+ clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
+ writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
+}
+
+static const struct pci_device_id ptp_id_table[] = {
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_98xx_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_96XX_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_95XX_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_LOKI_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_95MM_PTP) },
+ { 0, }
+};
+
+struct pci_driver ptp_driver = {
+ .name = DRV_NAME,
+ .id_table = ptp_id_table,
+ .probe = ptp_probe,
+ .remove = ptp_remove,
+};
+
+int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req,
+ struct ptp_rsp *rsp)
+{
+ int err = 0;
+
+ /* This function is the PTP mailbox handler invoked by AF
+ * consumers/netdev drivers via the mailbox mechanism. The netdev
+ * driver uses it to read the PTP clock and to set frequency
+ * adjustments. Since the mailbox can be invoked without knowing
+ * whether the driver is bound to a ptp device, validating that
+ * is needed as the first step.
+ */
+ if (!rvu->ptp)
+ return -ENODEV;
+
+ switch (req->op) {
+ case PTP_OP_ADJFINE:
+ err = ptp_adjfine(rvu->ptp, req->scaled_ppm);
+ break;
+ case PTP_OP_GET_CLOCK:
+ err = ptp_get_clock(rvu->ptp, &rsp->clk);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
new file mode 100644
index 000000000000..878bc395d28f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell PTP driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef PTP_H
+#define PTP_H
+
+#include <linux/timecounter.h>
+#include <linux/time64.h>
+#include <linux/spinlock.h>
+
+struct ptp {
+ struct pci_dev *pdev;
+ void __iomem *reg_base;
+ u32 clock_rate;
+};
+
+struct ptp *ptp_get(void);
+void ptp_put(struct ptp *ptp);
+
+extern struct pci_driver ptp_driver;
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 557e4292c846..e1f918960730 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -18,6 +18,9 @@
#include "cgx.h"
#include "rvu.h"
#include "rvu_reg.h"
+#include "ptp.h"
+
+#include "rvu_trace.h"
#define DRV_NAME "octeontx2-af"
#define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver"
@@ -1548,6 +1551,7 @@ static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
if (rsp && err) \
rsp->hdr.rc = err; \
\
+ trace_otx2_msg_process(mbox->pdev, _id, err); \
return rsp ? err : -ENOMEM; \
}
MBOX_MESSAGES
@@ -1880,6 +1884,8 @@ static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
/* Clear interrupts */
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr);
+ if (intr)
+ trace_otx2_msg_interrupt(rvu->pdev, "PF(s) to AF", intr);
/* Sync with mbox memory region */
rmb();
@@ -1897,6 +1903,8 @@ static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0));
rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr);
+ if (intr)
+ trace_otx2_msg_interrupt(rvu->pdev, "VF(s) to AF", intr);
rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr);
@@ -2565,13 +2573,21 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
+ rvu->ptp = ptp_get();
+ if (IS_ERR(rvu->ptp)) {
+ err = PTR_ERR(rvu->ptp);
+ if (err == -EPROBE_DEFER)
+ goto err_release_regions;
+ rvu->ptp = NULL;
+ }
+
/* Map Admin function CSRs */
rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
if (!rvu->afreg_base || !rvu->pfreg_base) {
dev_err(dev, "Unable to map admin function CSRs, aborting\n");
err = -ENOMEM;
- goto err_release_regions;
+ goto err_put_ptp;
}
/* Store module params in rvu structure */
@@ -2586,7 +2602,7 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = rvu_setup_hw_resources(rvu);
if (err)
- goto err_release_regions;
+ goto err_put_ptp;
/* Init mailbox btw AF and PFs */
err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
@@ -2626,6 +2642,8 @@ err_hwsetup:
rvu_reset_all_blocks(rvu);
rvu_free_hw_resources(rvu);
rvu_clear_rvum_blk_revid(rvu);
+err_put_ptp:
+ ptp_put(rvu->ptp);
err_release_regions:
pci_release_regions(pdev);
err_disable_device:
@@ -2651,6 +2669,7 @@ static void rvu_remove(struct pci_dev *pdev)
rvu_reset_all_blocks(rvu);
rvu_free_hw_resources(rvu);
rvu_clear_rvum_blk_revid(rvu);
+ ptp_put(rvu->ptp);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
@@ -2676,9 +2695,19 @@ static int __init rvu_init_module(void)
if (err < 0)
return err;
+ err = pci_register_driver(&ptp_driver);
+ if (err < 0)
+ goto ptp_err;
+
err = pci_register_driver(&rvu_driver);
if (err < 0)
- pci_unregister_driver(&cgx_driver);
+ goto rvu_err;
+
+ return 0;
+rvu_err:
+ pci_unregister_driver(&ptp_driver);
+ptp_err:
+ pci_unregister_driver(&cgx_driver);
return err;
}
@@ -2686,6 +2715,7 @@ static int __init rvu_init_module(void)
static void __exit rvu_cleanup_module(void)
{
pci_unregister_driver(&rvu_driver);
+ pci_unregister_driver(&ptp_driver);
pci_unregister_driver(&cgx_driver);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index b89dde2c8b08..90eed3160915 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -289,6 +289,22 @@ struct rvu_fwdata {
u64 reserved[FWDATA_RESERVED_MEM];
};
+struct ptp;
+
+/* KPU profile adapter structure gathering all KPU configuration data and
+ * abstracting away the source it came from.
+ */
+struct npc_kpu_profile_adapter {
+ const char *name;
+ u64 version;
+ const struct npc_lt_def_cfg *lt_def;
+ const struct npc_kpu_profile_action *ikpu; /* array[pkinds] */
+ const struct npc_kpu_profile *kpu; /* array[kpus] */
+ const struct npc_mcam_kex *mkex;
+ size_t pkinds;
+ size_t kpus;
+};
+
struct rvu {
void __iomem *afreg_base;
void __iomem *pfreg_base;
@@ -337,6 +353,11 @@ struct rvu {
/* Firmware data */
struct rvu_fwdata *fwdata;
+ /* NPC KPU data */
+ struct npc_kpu_profile_adapter kpu;
+
+ struct ptp *ptp;
+
#ifdef CONFIG_DEBUG_FS
struct rvu_debugfs rvu_dbg;
#endif
@@ -470,6 +491,7 @@ int rvu_npc_init(struct rvu *rvu);
void rvu_npc_freemem(struct rvu *rvu);
int rvu_npc_get_pkind(struct rvu *rvu, u16 pf);
void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf);
+int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool en);
void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan, u8 *mac_addr);
void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index f3c82e489897..fa9152ff5e2a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -15,6 +15,7 @@
#include "rvu.h"
#include "cgx.h"
#include "rvu_reg.h"
+#include "rvu_trace.h"
struct cgx_evq_entry {
struct list_head evq_node;
@@ -34,6 +35,7 @@ static struct _req_type __maybe_unused \
return NULL; \
req->hdr.sig = OTX2_MBOX_REQ_SIG; \
req->hdr.id = _id; \
+ trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req)); \
return req; \
}
@@ -509,6 +511,45 @@ int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
return 0;
}
+static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ /* This msg is expected only from PFs that are mapped to CGX LMACs;
+ * if received from another PF/VF simply ACK, nothing to do.
+ */
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
+ !is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+
+ cgx_lmac_ptp_config(cgxd, lmac_id, enable);
+ /* If PTP is enabled, inform NPC that packets to be parsed by
+ * this PF will have their data shifted by 8 bytes; if PTP is
+ * disabled, no shift is required.
+ */
+ if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable))
+ return -EINVAL;
+
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true);
+}
+
+int rvu_mbox_handler_cgx_ptp_rx_disable(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, false);
+}
+
static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
{
int pf = rvu_get_pf(pcifunc);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 0fc70824fd6b..21a89dd76d3c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -2508,6 +2508,14 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
field->ltype_match = NPC_LT_LE_GTPU;
field->ltype_mask = 0xF;
break;
+ case NIX_FLOW_KEY_TYPE_VLAN:
+ field->lid = NPC_LID_LB;
+ field->hdr_offset = 2; /* Skip TPID (2 bytes) */
+ field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */
+ field->ltype_match = NPC_LT_LB_CTAG;
+ field->ltype_mask = 0xF;
+ field->fn_mask = 1; /* Mask out the first nibble */
+ break;
}
field->ena = 1;
@@ -3103,6 +3111,7 @@ static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
int rvu_nix_init(struct rvu *rvu)
{
+ const struct npc_lt_def_cfg *ltdefs;
struct rvu_hwinfo *hw = rvu->hw;
struct rvu_block *block;
int blkaddr, err;
@@ -3133,6 +3142,7 @@ int rvu_nix_init(struct rvu *rvu)
rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
}
+ ltdefs = rvu->kpu.lt_def;
/* Calibrate X2P bus to check if CGX/LBK links are fine */
err = nix_calibrate_x2p(rvu, blkaddr);
if (err)
@@ -3180,28 +3190,38 @@ int rvu_nix_init(struct rvu *rvu)
* and validate length and checksums.
*/
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
- (NPC_LID_LA << 8) | (NPC_LT_LA_ETHER << 4) | 0x0F);
+ (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
+ ltdefs->rx_ol2.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
- (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
+ (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
+ ltdefs->rx_oip4.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
- (NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP << 4) | 0x0F);
+ (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
+ ltdefs->rx_iip4.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
- (NPC_LID_LC << 8) | (NPC_LT_LC_IP6 << 4) | 0x0F);
+ (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
+ ltdefs->rx_oip6.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
- (NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP6 << 4) | 0x0F);
+ (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
+ ltdefs->rx_iip6.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
- (NPC_LID_LD << 8) | (NPC_LT_LD_TCP << 4) | 0x0F);
+ (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
+ ltdefs->rx_otcp.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
- (NPC_LID_LH << 8) | (NPC_LT_LH_TU_TCP << 4) | 0x0F);
+ (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
+ ltdefs->rx_itcp.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
- (NPC_LID_LD << 8) | (NPC_LT_LD_UDP << 4) | 0x0F);
+ (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
+ ltdefs->rx_oudp.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
- (NPC_LID_LH << 8) | (NPC_LT_LH_TU_UDP << 4) | 0x0F);
+ (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
+ ltdefs->rx_iudp.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
- (NPC_LID_LD << 8) | (NPC_LT_LD_SCTP << 4) | 0x0F);
+ (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
+ ltdefs->rx_osctp.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
- (NPC_LID_LH << 8) | (NPC_LT_LH_TU_SCTP << 4) |
- 0x0F);
+ (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
+ ltdefs->rx_isctp.ltype_mask);
err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
if (err)
@@ -3318,6 +3338,49 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
nix_ctx_free(rvu, pfvf);
}
+#define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32)
+
+static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkaddr;
+ int nixlf;
+ u64 cfg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+ nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
+
+ if (enable)
+ cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
+ else
+ cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
+
+ return 0;
+}
+
+int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
+}
+
+int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
+}
+
int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
struct nix_lso_format_cfg *req,
struct nix_lso_format_cfg_rsp *rsp)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index fbaf9bcd83f2..511b01dd03ed 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -27,6 +27,9 @@
#define NIXLF_PROMISC_ENTRY 2
#define NPC_PARSE_RESULT_DMAC_OFFSET 8
+#define NPC_HW_TSTAMP_OFFSET 8
+
+static const char def_pfl_name[] = "default";
static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, u16 pcifunc);
@@ -61,6 +64,36 @@ int rvu_npc_get_pkind(struct rvu *rvu, u16 pf)
return -1;
}
+#define NPC_AF_ACTION0_PTR_ADVANCE GENMASK_ULL(27, 20)
+
+int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool enable)
+{
+ int pkind, blkaddr;
+ u64 val;
+
+ pkind = rvu_npc_get_pkind(rvu, pf);
+ if (pkind < 0) {
+ dev_err(rvu->dev, "%s: pkind not mapped\n", __func__);
+ return -EINVAL;
+ }
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, pcifunc);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -EINVAL;
+ }
+
+ val = rvu_read64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind));
+ val &= ~NPC_AF_ACTION0_PTR_ADVANCE;
+ /* If timestamping is enabled, configure NPC to advance the parse pointer by 8 bytes */
+ if (enable)
+ val |= FIELD_PREP(NPC_AF_ACTION0_PTR_ADVANCE,
+ NPC_HW_TSTAMP_OFFSET);
+ rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind), val);
+
+ return 0;
+}
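
As a stand-alone illustration of the read-modify-write above (the register
value is hypothetical), FIELD_PREP() merely shifts the 8-byte advance into
bits 27:20 of ACTION0:

#include <stdint.h>
#include <stdio.h>

#define PTR_ADVANCE_MASK (0xffull << 20)	/* GENMASK_ULL(27, 20) */

int main(void)
{
	uint64_t val = 0xdeadbeefull;	/* hypothetical ACTION0 contents */

	val &= ~PTR_ADVANCE_MASK;	/* drop the old advance */
	val |= (8ull << 20) & PTR_ADVANCE_MASK;	/* FIELD_PREP(mask, 8) */
	printf("0x%llx\n", (unsigned long long)val);	/* 0xd08dbeef */
	return 0;
}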
+
static int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
u16 pcifunc, int nixlf, int type)
{
@@ -417,7 +450,7 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
entry.kw_mask[0] = 0xFFFULL;
if (allmulti) {
- kwi = NPC_PARSE_RESULT_DMAC_OFFSET / sizeof(u64);
+ kwi = NPC_KEXOF_DMAC / sizeof(u64);
entry.kw[kwi] = BIT_ULL(40); /* LSB bit of 1st byte in DMAC */
entry.kw_mask[kwi] = BIT_ULL(40);
}
@@ -699,88 +732,8 @@ void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
rvu_write64(rvu, blkaddr, \
NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg)
-#define KEX_LD_CFG(bytesm1, hdr_ofs, ena, flags_ena, key_ofs) \
- (((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) | \
- ((flags_ena) << 6) | ((key_ofs) & 0x3F))
-
-static void npc_config_ldata_extract(struct rvu *rvu, int blkaddr)
-{
- struct npc_mcam *mcam = &rvu->hw->mcam;
- int lid, ltype;
- int lid_count;
- u64 cfg;
-
- cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
- lid_count = (cfg >> 4) & 0xF;
-
- /* First clear any existing config i.e
- * disable LDATA and FLAGS extraction.
- */
- for (lid = 0; lid < lid_count; lid++) {
- for (ltype = 0; ltype < 16; ltype++) {
- SET_KEX_LD(NIX_INTF_RX, lid, ltype, 0, 0ULL);
- SET_KEX_LD(NIX_INTF_RX, lid, ltype, 1, 0ULL);
- SET_KEX_LD(NIX_INTF_TX, lid, ltype, 0, 0ULL);
- SET_KEX_LD(NIX_INTF_TX, lid, ltype, 1, 0ULL);
-
- SET_KEX_LDFLAGS(NIX_INTF_RX, 0, ltype, 0ULL);
- SET_KEX_LDFLAGS(NIX_INTF_RX, 1, ltype, 0ULL);
- SET_KEX_LDFLAGS(NIX_INTF_TX, 0, ltype, 0ULL);
- SET_KEX_LDFLAGS(NIX_INTF_TX, 1, ltype, 0ULL);
- }
- }
-
- if (mcam->keysize != NPC_MCAM_KEY_X2)
- return;
-
- /* Default MCAM KEX profile */
- /* Layer A: Ethernet: */
-
- /* DMAC: 6 bytes, KW1[47:0] */
- cfg = KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, NPC_PARSE_RESULT_DMAC_OFFSET);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_ETHER, 0, cfg);
-
- /* Ethertype: 2 bytes, KW0[47:32] */
- cfg = KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x4);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_ETHER, 1, cfg);
-
- /* Layer B: Single VLAN (CTAG) */
- /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
- cfg = KEX_LD_CFG(0x03, 0x0, 0x1, 0x0, 0x4);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_CTAG, 0, cfg);
-
- /* Layer B: Stacked VLAN (STAG|QinQ) */
- /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
- cfg = KEX_LD_CFG(0x03, 0x4, 0x1, 0x0, 0x4);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 0, cfg);
-
- /* Layer C: IPv4 */
- /* SIP+DIP: 8 bytes, KW2[63:0] */
- cfg = KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LC, NPC_LT_LC_IP, 0, cfg);
- /* TOS: 1 byte, KW1[63:56] */
- cfg = KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0xf);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LC, NPC_LT_LC_IP, 1, cfg);
-
- /* Layer D:UDP */
- /* SPORT: 2 bytes, KW3[15:0] */
- cfg = KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_UDP, 0, cfg);
- /* DPORT: 2 bytes, KW3[31:16] */
- cfg = KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_UDP, 1, cfg);
-
- /* Layer D:TCP */
- /* SPORT: 2 bytes, KW3[15:0] */
- cfg = KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_TCP, 0, cfg);
- /* DPORT: 2 bytes, KW3[31:16] */
- cfg = KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_TCP, 1, cfg);
-}
-
static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr,
- struct npc_mcam_kex *mkex)
+ const struct npc_mcam_kex *mkex)
{
int lid, lt, ld, fl;
@@ -820,34 +773,31 @@ static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr,
}
}
-/* strtoull of "mkexprof" with base:36 */
-#define MKEX_SIGN 0x19bbfdbd15f
#define MKEX_END_SIGN 0xdeadbeef
-static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr)
+static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr,
+ const char *mkex_profile)
{
- const char *mkex_profile = rvu->mkex_pfl_name;
struct device *dev = &rvu->pdev->dev;
- void __iomem *mkex_prfl_addr = NULL;
struct npc_mcam_kex *mcam_kex;
- u64 prfl_addr;
- u64 prfl_sz;
+ void *mkex_prfl_addr = NULL;
+ u64 prfl_addr, prfl_sz;
/* If the user has not selected an mkex profile */
- if (!strncmp(mkex_profile, "default", MKEX_NAME_LEN))
- goto load_default;
+ if (!strncmp(mkex_profile, def_pfl_name, MKEX_NAME_LEN))
+ goto program_mkex;
if (!rvu->fwdata)
- goto load_default;
+ goto program_mkex;
prfl_addr = rvu->fwdata->mcam_addr;
prfl_sz = rvu->fwdata->mcam_sz;
if (!prfl_addr || !prfl_sz)
- goto load_default;
+ goto program_mkex;
- mkex_prfl_addr = ioremap_wc(prfl_addr, prfl_sz);
+ mkex_prfl_addr = memremap(prfl_addr, prfl_sz, MEMREMAP_WC);
if (!mkex_prfl_addr)
- goto load_default;
+ goto program_mkex;
mcam_kex = (struct npc_mcam_kex *)mkex_prfl_addr;
@@ -859,35 +809,27 @@ static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr)
* parse nibble enable configuration has to be
* identical for both Rx and Tx interfaces.
*/
- if (is_rvu_96xx_B0(rvu) &&
- mcam_kex->keyx_cfg[NIX_INTF_RX] !=
- mcam_kex->keyx_cfg[NIX_INTF_TX])
- goto load_default;
-
- /* Program selected mkex profile */
- npc_program_mkex_profile(rvu, blkaddr, mcam_kex);
-
- goto unmap;
+ if (!is_rvu_96xx_B0(rvu) ||
+ mcam_kex->keyx_cfg[NIX_INTF_RX] == mcam_kex->keyx_cfg[NIX_INTF_TX])
+ rvu->kpu.mkex = mcam_kex;
+ goto program_mkex;
}
mcam_kex++;
prfl_sz -= sizeof(struct npc_mcam_kex);
}
- dev_warn(dev, "Failed to load requested profile: %s\n",
- rvu->mkex_pfl_name);
+ dev_warn(dev, "Failed to load requested profile: %s\n", mkex_profile);
-load_default:
- dev_info(rvu->dev, "Using default mkex profile\n");
- /* Config packet data and flags extraction into PARSE result */
- npc_config_ldata_extract(rvu, blkaddr);
-
-unmap:
+program_mkex:
+ dev_info(rvu->dev, "Using %s mkex profile\n", rvu->kpu.mkex->name);
+ /* Program selected mkex profile */
+ npc_program_mkex_profile(rvu, blkaddr, rvu->kpu.mkex);
if (mkex_prfl_addr)
- iounmap(mkex_prfl_addr);
+ memunmap(mkex_prfl_addr);
}
static void npc_config_kpuaction(struct rvu *rvu, int blkaddr,
- struct npc_kpu_profile_action *kpuaction,
+ const struct npc_kpu_profile_action *kpuaction,
int kpu, int entry, bool pkind)
{
struct npc_kpu_action0 action0 = {0};
@@ -929,7 +871,7 @@ static void npc_config_kpuaction(struct rvu *rvu, int blkaddr,
}
static void npc_config_kpucam(struct rvu *rvu, int blkaddr,
- struct npc_kpu_profile_cam *kpucam,
+ const struct npc_kpu_profile_cam *kpucam,
int kpu, int entry)
{
struct npc_kpu_cam cam0 = {0};
@@ -957,7 +899,7 @@ static inline u64 enable_mask(int count)
}
static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
- struct npc_kpu_profile *profile)
+ const struct npc_kpu_profile *profile)
{
int entry, num_entries, max_entries;
@@ -995,6 +937,27 @@ static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(kpu), 0x01);
}
+static int npc_prepare_default_kpu(struct npc_kpu_profile_adapter *profile)
+{
+ profile->name = def_pfl_name;
+ profile->version = NPC_KPU_PROFILE_VER;
+ profile->ikpu = ikpu_action_entries;
+ profile->pkinds = ARRAY_SIZE(ikpu_action_entries);
+ profile->kpu = npc_kpu_profiles;
+ profile->kpus = ARRAY_SIZE(npc_kpu_profiles);
+ profile->lt_def = &npc_lt_defaults;
+ profile->mkex = &npc_mkex_default;
+
+ return 0;
+}
+
+static void npc_load_kpu_profile(struct rvu *rvu)
+{
+ struct npc_kpu_profile_adapter *profile = &rvu->kpu;
+
+ npc_prepare_default_kpu(profile);
+}
+
static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -1013,25 +976,26 @@ static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(idx), 0x00);
}
+ /* Load and customize KPU profile. */
+ npc_load_kpu_profile(rvu);
+
/* First program IKPU profile i.e PKIND configs.
* Check HW max count to avoid configuring junk or
* writing to unsupported CSR addresses.
*/
pkind = &hw->pkind;
- num_pkinds = ARRAY_SIZE(ikpu_action_entries);
+ num_pkinds = rvu->kpu.pkinds;
num_pkinds = min_t(int, pkind->rsrc.max, num_pkinds);
for (idx = 0; idx < num_pkinds; idx++)
- npc_config_kpuaction(rvu, blkaddr,
- &ikpu_action_entries[idx], 0, idx, true);
+ npc_config_kpuaction(rvu, blkaddr, &rvu->kpu.ikpu[idx], 0, idx, true);
/* Program KPU CAM and Action profiles */
- num_kpus = ARRAY_SIZE(npc_kpu_profiles);
+ num_kpus = rvu->kpu.kpus;
num_kpus = min_t(int, hw->npc_kpus, num_kpus);
for (idx = 0; idx < num_kpus; idx++)
- npc_program_kpu_profile(rvu, blkaddr,
- idx, &npc_kpu_profiles[idx]);
+ npc_program_kpu_profile(rvu, blkaddr, idx, &rvu->kpu.kpu[idx]);
}
static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
@@ -1156,11 +1120,11 @@ free_mem:
int rvu_npc_init(struct rvu *rvu)
{
+ struct npc_kpu_profile_adapter *kpu = &rvu->kpu;
struct npc_pkind *pkind = &rvu->hw->pkind;
struct npc_mcam *mcam = &rvu->hw->mcam;
- u64 keyz = NPC_MCAM_KEY_X2;
+ u64 cfg, nibble_ena, rx_kex, tx_kex;
int blkaddr, entry, bank, err;
- u64 cfg, nibble_ena;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0) {
@@ -1194,13 +1158,16 @@ int rvu_npc_init(struct rvu *rvu)
/* Config Outer L2, IPv4's NPC layer info */
rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OL2,
- (NPC_LID_LA << 8) | (NPC_LT_LA_ETHER << 4) | 0x0F);
+ (kpu->lt_def->pck_ol2.lid << 8) | (kpu->lt_def->pck_ol2.ltype_match << 4) |
+ kpu->lt_def->pck_ol2.ltype_mask);
rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OIP4,
- (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
+ (kpu->lt_def->pck_oip4.lid << 8) | (kpu->lt_def->pck_oip4.ltype_match << 4) |
+ kpu->lt_def->pck_oip4.ltype_mask);
/* Config Inner IPV4 NPC layer info */
rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_IIP4,
- (NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP << 4) | 0x0F);
+ (kpu->lt_def->pck_iip4.lid << 8) | (kpu->lt_def->pck_iip4.ltype_match << 4) |
+ kpu->lt_def->pck_iip4.ltype_mask);
/* Enable below for Rx pkts.
* - Outer IPv4 header checksum validation.
@@ -1216,23 +1183,25 @@ int rvu_npc_init(struct rvu *rvu)
/* Set RX and TX side MCAM search key size.
* LA..LD (ltype only) + Channel
*/
- nibble_ena = 0x49247;
- rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX),
- ((keyz & 0x3) << 32) | nibble_ena);
+ rx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_RX];
+ tx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_TX];
+ nibble_ena = FIELD_GET(NPC_PARSE_NIBBLE, rx_kex);
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX), rx_kex);
/* Due to an errata (35786) in A0 pass silicon, parse nibble enable
* configuration has to be identical for both Rx and Tx interfaces.
*/
- if (!is_rvu_96xx_B0(rvu))
- nibble_ena = (1ULL << 19) - 1;
- rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX),
- ((keyz & 0x3) << 32) | nibble_ena);
+ if (is_rvu_96xx_B0(rvu)) {
+ tx_kex &= ~NPC_PARSE_NIBBLE;
+ tx_kex |= FIELD_PREP(NPC_PARSE_NIBBLE, nibble_ena);
+ }
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX), tx_kex);
err = npc_mcam_rsrcs_init(rvu, blkaddr);
if (err)
return err;
/* Configure MKEX profile */
- npc_load_mkex_profile(rvu, blkaddr);
+ npc_load_mkex_profile(rvu, blkaddr, rvu->mkex_pfl_name);
/* Set TX miss action to UCAST_DEFAULT i.e
* transmit the packet on NIX LF SQ's default channel.
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c
new file mode 100644
index 000000000000..56f90cf9c4c0
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver tracepoints
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#define CREATE_TRACE_POINTS
+#include "rvu_trace.h"
+
+EXPORT_TRACEPOINT_SYMBOL(otx2_msg_alloc);
+EXPORT_TRACEPOINT_SYMBOL(otx2_msg_interrupt);
+EXPORT_TRACEPOINT_SYMBOL(otx2_msg_process);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
new file mode 100644
index 000000000000..e6609068e81b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Admin Function driver tracepoints
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rvu
+
+#if !defined(__RVU_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __RVU_TRACE_H
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <linux/pci.h>
+
+TRACE_EVENT(otx2_msg_alloc,
+ TP_PROTO(const struct pci_dev *pdev, u16 id, u64 size),
+ TP_ARGS(pdev, id, size),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __field(u16, id)
+ __field(u64, size)
+ ),
+ TP_fast_assign(__assign_str(dev, pci_name(pdev))
+ __entry->id = id;
+ __entry->size = size;
+ ),
+ TP_printk("[%s] msg:(0x%x) size:%lld\n", __get_str(dev),
+ __entry->id, __entry->size)
+);
+
+TRACE_EVENT(otx2_msg_send,
+ TP_PROTO(const struct pci_dev *pdev, u16 num_msgs, u64 msg_size),
+ TP_ARGS(pdev, num_msgs, msg_size),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __field(u16, num_msgs)
+ __field(u64, msg_size)
+ ),
+ TP_fast_assign(__assign_str(dev, pci_name(pdev))
+ __entry->num_msgs = num_msgs;
+ __entry->msg_size = msg_size;
+ ),
+ TP_printk("[%s] sent %d msg(s) of size:%lld\n", __get_str(dev),
+ __entry->num_msgs, __entry->msg_size)
+);
+
+TRACE_EVENT(otx2_msg_check,
+ TP_PROTO(const struct pci_dev *pdev, u16 reqid, u16 rspid, int rc),
+ TP_ARGS(pdev, reqid, rspid, rc),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __field(u16, reqid)
+ __field(u16, rspid)
+ __field(int, rc)
+ ),
+ TP_fast_assign(__assign_str(dev, pci_name(pdev))
+ __entry->reqid = reqid;
+ __entry->rspid = rspid;
+ __entry->rc = rc;
+ ),
+ TP_printk("[%s] req->id:0x%x rsp->id:0x%x resp_code:%d\n",
+ __get_str(dev), __entry->reqid,
+ __entry->rspid, __entry->rc)
+);
+
+TRACE_EVENT(otx2_msg_interrupt,
+ TP_PROTO(const struct pci_dev *pdev, const char *msg, u64 intr),
+ TP_ARGS(pdev, msg, intr),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __string(str, msg)
+ __field(u64, intr)
+ ),
+ TP_fast_assign(__assign_str(dev, pci_name(pdev))
+ __assign_str(str, msg)
+ __entry->intr = intr;
+ ),
+ TP_printk("[%s] mbox interrupt %s (0x%llx)\n", __get_str(dev),
+ __get_str(str), __entry->intr)
+);
+
+TRACE_EVENT(otx2_msg_process,
+ TP_PROTO(const struct pci_dev *pdev, u16 id, int err),
+ TP_ARGS(pdev, id, err),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __field(u16, id)
+ __field(int, err)
+ ),
+ TP_fast_assign(__assign_str(dev, pci_name(pdev))
+ __entry->id = id;
+ __entry->err = err;
+ ),
+ TP_printk("[%s] msg:(0x%x) error:%d\n", __get_str(dev),
+ __entry->id, __entry->err)
+);
+
+#endif /* __RVU_TRACE_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE rvu_trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index 778df331c8ac..b2c6385707c9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -6,7 +6,8 @@
obj-$(CONFIG_OCTEONTX2_PF) += octeontx2_nicpf.o
obj-$(CONFIG_OCTEONTX2_VF) += octeontx2_nicvf.o
-octeontx2_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o
+octeontx2_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
+ otx2_ptp.o
octeontx2_nicvf-y := otx2_vf.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 93c4cf7fedbf..d2581090f9a4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -355,7 +355,7 @@ int otx2_rss_init(struct otx2_nic *pfvf)
rss->flowkey_cfg = rss->enable ? rss->flowkey_cfg :
NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6 |
NIX_FLOW_KEY_TYPE_TCP | NIX_FLOW_KEY_TYPE_UDP |
- NIX_FLOW_KEY_TYPE_SCTP;
+ NIX_FLOW_KEY_TYPE_SCTP | NIX_FLOW_KEY_TYPE_VLAN;
ret = otx2_set_flowkey_cfg(pfvf);
if (ret)
@@ -365,6 +365,95 @@ int otx2_rss_init(struct otx2_nic *pfvf)
return 0;
}
+/* Setup UDP segmentation algorithm in HW */
+static void otx2_setup_udp_segmentation(struct nix_lso_format_cfg *lso, bool v4)
+{
+ struct nix_lso_format *field;
+
+ field = (struct nix_lso_format *)&lso->fields[0];
+ lso->field_mask = GENMASK(18, 0);
+
+ /* IP's Length field */
+ field->layer = NIX_TXLAYER_OL3;
+ /* In IPv4 the length field is at offset 2; for IPv6 it is at offset 4 */
+ field->offset = v4 ? 2 : 4;
+ field->sizem1 = 1; /* i.e. 2 bytes */
+ field->alg = NIX_LSOALG_ADD_PAYLEN;
+ field++;
+
+ /* No ID field in IPv6 header */
+ if (v4) {
+ /* Increment IPID */
+ field->layer = NIX_TXLAYER_OL3;
+ field->offset = 4;
+ field->sizem1 = 1; /* i.e. 2 bytes */
+ field->alg = NIX_LSOALG_ADD_SEGNUM;
+ field++;
+ }
+
+ /* Update length in UDP header */
+ field->layer = NIX_TXLAYER_OL4;
+ field->offset = 4;
+ field->sizem1 = 1;
+ field->alg = NIX_LSOALG_ADD_PAYLEN;
+}
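
Per emitted segment, the hardware then performs fixups equivalent to this
host-side model (a sketch under the offsets programmed above; the function
name and byte-wise header access are illustrative):

#include <stdint.h>

/* Model of the UDPv4 per-segment fixups: IP tot_len (offset 2) and
 * UDP len (offset 4) grow by the segment payload via ADD_PAYLEN,
 * the IP id (offset 4) grows by the segment number via ADD_SEGNUM.
 */
void udp4_gso_seg_fixup(uint8_t *ip, uint8_t *udp,
			uint16_t payload, uint16_t segnum)
{
	uint16_t tot_len = (uint16_t)(((ip[2] << 8) | ip[3]) + payload);
	uint16_t id      = (uint16_t)(((ip[4] << 8) | ip[5]) + segnum);
	uint16_t ulen    = (uint16_t)(((udp[4] << 8) | udp[5]) + payload);

	ip[2] = tot_len >> 8;	ip[3] = tot_len & 0xff;
	ip[4] = id >> 8;	ip[5] = id & 0xff;
	udp[4] = ulen >> 8;	udp[5] = ulen & 0xff;
}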
+
+/* Set up segmentation algorithms in HW and retrieve the algorithm indices */
+void otx2_setup_segmentation(struct otx2_nic *pfvf)
+{
+ struct nix_lso_format_cfg_rsp *rsp;
+ struct nix_lso_format_cfg *lso;
+ struct otx2_hw *hw = &pfvf->hw;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ /* UDPv4 segmentation */
+ lso = otx2_mbox_alloc_msg_nix_lso_format_cfg(&pfvf->mbox);
+ if (!lso)
+ goto fail;
+
+ /* Setup UDP/IP header fields that HW should update per segment */
+ otx2_setup_udp_segmentation(lso, true);
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ goto fail;
+
+ rsp = (struct nix_lso_format_cfg_rsp *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &lso->hdr);
+ if (IS_ERR(rsp))
+ goto fail;
+
+ hw->lso_udpv4_idx = rsp->lso_format_idx;
+
+ /* UDPv6 segmentation */
+ lso = otx2_mbox_alloc_msg_nix_lso_format_cfg(&pfvf->mbox);
+ if (!lso)
+ goto fail;
+
+ /* Setup UDP/IP header fields that HW should update per segment */
+ otx2_setup_udp_segmentation(lso, false);
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ goto fail;
+
+ rsp = (struct nix_lso_format_cfg_rsp *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &lso->hdr);
+ if (IS_ERR(rsp))
+ goto fail;
+
+ hw->lso_udpv6_idx = rsp->lso_format_idx;
+ mutex_unlock(&pfvf->mbox.lock);
+ return;
+fail:
+ mutex_unlock(&pfvf->mbox.lock);
+ netdev_info(pfvf->netdev,
+ "Failed to get LSO index for UDP GSO offload, disabling\n");
+ pfvf->netdev->hw_features &= ~NETIF_F_GSO_UDP_L4;
+}
+
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx)
{
/* Configure CQE interrupt coalescing parameters
@@ -671,6 +760,13 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
if (!sq->sg)
return -ENOMEM;
+ if (pfvf->ptp) {
+ err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt,
+ sizeof(*sq->timestamps));
+ if (err)
+ return err;
+ }
+
sq->head = 0;
sq->sqe_per_sqb = (pfvf->hw.sqb_size / sq->sqe_size) - 1;
sq->num_sqbs = (qset->sqe_cnt + sq->sqe_per_sqb) / sq->sqe_per_sqb;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 2fa29889522e..d6253f2a414d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -13,10 +13,14 @@
#include <linux/pci.h>
#include <linux/iommu.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
#include <mbox.h>
#include "otx2_reg.h"
#include "otx2_txrx.h"
+#include <rvu_trace.h>
/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_PF 0xA063
@@ -174,9 +178,11 @@ struct otx2_hw {
u16 rq_skid;
u8 cq_time_wait;
- /* For TSO segmentation */
+ /* Segmentation */
u8 lso_tsov4_idx;
u8 lso_tsov6_idx;
+ u8 lso_udpv4_idx;
+ u8 lso_udpv6_idx;
u8 hw_tso;
/* MSI-X */
@@ -209,6 +215,17 @@ struct refill_work {
struct otx2_nic *pf;
};
+struct otx2_ptp {
+ struct ptp_clock_info ptp_info;
+ struct ptp_clock *ptp_clock;
+ struct otx2_nic *nic;
+
+ struct cyclecounter cycle_counter;
+ struct timecounter time_counter;
+};
+
+#define OTX2_HW_TIMESTAMP_LEN 8
+
struct otx2_nic {
void __iomem *reg_base;
struct net_device *netdev;
@@ -216,6 +233,8 @@ struct otx2_nic {
u16 max_frs;
u16 rbsize; /* Receive buffer size */
+#define OTX2_FLAG_RX_TSTAMP_ENABLED BIT_ULL(0)
+#define OTX2_FLAG_TX_TSTAMP_ENABLED BIT_ULL(1)
#define OTX2_FLAG_INTF_DOWN BIT_ULL(2)
#define OTX2_FLAG_RX_PAUSE_ENABLED BIT_ULL(9)
#define OTX2_FLAG_TX_PAUSE_ENABLED BIT_ULL(10)
@@ -251,6 +270,9 @@ struct otx2_nic {
/* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */
int nix_blkaddr;
+
+ struct otx2_ptp *ptp;
+ struct hwtstamp_config tstamp;
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
@@ -502,6 +524,7 @@ static struct _req_type __maybe_unused \
return NULL; \
req->hdr.sig = OTX2_MBOX_REQ_SIG; \
req->hdr.id = _id; \
+ trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req)); \
return req; \
}
@@ -561,6 +584,7 @@ void otx2_tx_timeout(struct net_device *netdev, unsigned int txq);
void otx2_get_mac_from_af(struct net_device *netdev);
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
int otx2_config_pause_frm(struct otx2_nic *pfvf);
+void otx2_setup_segmentation(struct otx2_nic *pfvf);
/* RVU block related APIs */
int otx2_attach_npa_nix(struct otx2_nic *pfvf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index d59f5a9c7273..662fb80dbb9d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -13,8 +13,10 @@
#include <linux/stddef.h>
#include <linux/etherdevice.h>
#include <linux/log2.h>
+#include <linux/net_tstamp.h>
#include "otx2_common.h"
+#include "otx2_ptp.h"
#define DRV_NAME "octeontx2-nicpf"
#define DRV_VF_NAME "octeontx2-nicvf"
@@ -426,6 +428,8 @@ static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf,
/* Minimum is IPv4 and IPv6, SIP/DIP */
nfc->data = RXH_IP_SRC | RXH_IP_DST;
+ if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_VLAN)
+ nfc->data |= RXH_VLAN;
switch (nfc->flow_type) {
case TCP_V4_FLOW:
@@ -475,6 +479,11 @@ static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf,
if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST))
return -EINVAL;
+ if (nfc->data & RXH_VLAN)
+ rss_cfg |= NIX_FLOW_KEY_TYPE_VLAN;
+ else
+ rss_cfg &= ~NIX_FLOW_KEY_TYPE_VLAN;
+
switch (nfc->flow_type) {
case TCP_V4_FLOW:
case TCP_V6_FLOW:
@@ -663,6 +672,31 @@ static u32 otx2_get_link(struct net_device *netdev)
return pfvf->linfo.link_up;
}
+static int otx2_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ if (!pfvf->ptp)
+ return ethtool_op_get_ts_info(netdev, info);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->phc_index = otx2_ptp_clock_index(pfvf);
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
static const struct ethtool_ops otx2_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
@@ -687,6 +721,7 @@ static const struct ethtool_ops otx2_ethtool_ops = {
.set_msglevel = otx2_set_msglevel,
.get_pauseparam = otx2_get_pauseparam,
.set_pauseparam = otx2_set_pauseparam,
+ .get_ts_info = otx2_get_ts_info,
};
void otx2_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 2fb45670aca4..66f1a212f1f4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -21,6 +21,8 @@
#include "otx2_common.h"
#include "otx2_txrx.h"
#include "otx2_struct.h"
+#include "otx2_ptp.h"
+#include <rvu_trace.h>
#define DRV_NAME "octeontx2-nicpf"
#define DRV_STRING "Marvell OcteonTX2 NIC Physical Function Driver"
@@ -41,6 +43,9 @@ enum {
TYPE_PFVF,
};
+static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable);
+static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable);
+
static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
{
bool if_up = netif_running(netdev);
@@ -554,6 +559,8 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF);
+ trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
+
return IRQ_HANDLED;
}
@@ -937,6 +944,9 @@ static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
mbox = &pf->mbox;
+
+ trace_otx2_msg_interrupt(mbox->mbox.pdev, "AF to PF", BIT_ULL(0));
+
otx2_queue_work(mbox, pf->mbox_wq, 0, 1, 1, TYPE_PFAF);
return IRQ_HANDLED;
@@ -1282,7 +1292,8 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
/* Get the size of receive buffers to allocate */
- pf->rbsize = RCV_FRAG_LEN(pf->netdev->mtu + OTX2_ETH_HLEN);
+ pf->rbsize = RCV_FRAG_LEN(OTX2_HW_TIMESTAMP_LEN + pf->netdev->mtu +
+ OTX2_ETH_HLEN);
mutex_lock(&mbox->lock);
/* NPA init */
@@ -1497,6 +1508,9 @@ int otx2_open(struct net_device *netdev)
if (err)
goto err_disable_napi;
+ /* Set up segmentation algorithms; on failure the offload capability is cleared */
+ otx2_setup_segmentation(pf);
+
/* Initialize RSS */
err = otx2_rss_init(pf);
if (err)
@@ -1548,6 +1562,16 @@ int otx2_open(struct net_device *netdev)
otx2_set_cints_affinity(pf);
+ /* When reinitializing, re-enable timestamping if it was enabled before */
+ if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED) {
+ pf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
+ otx2_config_hw_tx_tstamp(pf, true);
+ }
+ if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED) {
+ pf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
+ otx2_config_hw_rx_tstamp(pf, true);
+ }
+
pf->flags &= ~OTX2_FLAG_INTF_DOWN;
/* 'intf_down' may be checked on any cpu */
smp_wmb();
@@ -1742,6 +1766,143 @@ static void otx2_reset_task(struct work_struct *work)
rtnl_unlock();
}
+static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable)
+{
+ struct msg_req *req;
+ int err;
+
+ if (pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED && enable)
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ if (enable)
+ req = otx2_mbox_alloc_msg_cgx_ptp_rx_enable(&pfvf->mbox);
+ else
+ req = otx2_mbox_alloc_msg_cgx_ptp_rx_disable(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+ }
+
+ mutex_unlock(&pfvf->mbox.lock);
+ if (enable)
+ pfvf->flags |= OTX2_FLAG_RX_TSTAMP_ENABLED;
+ else
+ pfvf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
+ return 0;
+}
+
+static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable)
+{
+ struct msg_req *req;
+ int err;
+
+ if (pfvf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED && enable)
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ if (enable)
+ req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_enable(&pfvf->mbox);
+ else
+ req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_disable(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+ }
+
+ mutex_unlock(&pfvf->mbox.lock);
+ if (enable)
+ pfvf->flags |= OTX2_FLAG_TX_TSTAMP_ENABLED;
+ else
+ pfvf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
+ return 0;
+}
+
+static int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct hwtstamp_config config;
+
+ if (!pfvf->ptp)
+ return -ENODEV;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ otx2_config_hw_tx_tstamp(pfvf, false);
+ break;
+ case HWTSTAMP_TX_ON:
+ otx2_config_hw_tx_tstamp(pfvf, true);
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ otx2_config_hw_rx_tstamp(pfvf, false);
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ otx2_config_hw_rx_tstamp(pfvf, true);
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ memcpy(&pfvf->tstamp, &config, sizeof(config));
+
+ return copy_to_user(ifr->ifr_data, &config,
+ sizeof(config)) ? -EFAULT : 0;
+}
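
User space reaches this handler through the standard SIOCSHWTSTAMP ioctl; a
minimal sketch (the interface name "eth0" and an already-open AF_INET socket
fd are assumptions):

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int enable_hw_tstamp(int fd)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}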
+
+static int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct hwtstamp_config *cfg = &pfvf->tstamp;
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return otx2_config_hwtstamp(netdev, req);
+ case SIOCGHWTSTAMP:
+ return copy_to_user(req->ifr_data, cfg,
+ sizeof(*cfg)) ? -EFAULT : 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct net_device_ops otx2_netdev_ops = {
.ndo_open = otx2_open,
.ndo_stop = otx2_stop,
@@ -1752,6 +1913,7 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_set_features = otx2_set_features,
.ndo_tx_timeout = otx2_tx_timeout,
.ndo_get_stats64 = otx2_get_stats64,
+ .ndo_do_ioctl = otx2_ioctl,
};
static int otx2_wq_init(struct otx2_nic *pf)
@@ -1924,6 +2086,9 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Assign default mac address */
otx2_get_mac_from_af(netdev);
+ /* Don't check for error. Proceed without ptp */
+ otx2_ptp_init(pf);
+
/* NPA's pool is a stack to which SW frees buffer pointers via Aura.
* HW allocates buffer pointer from stack and uses it for DMA'ing
* ingress packet. In some scenarios HW can free back allocated buffer
@@ -1939,7 +2104,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
- NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6);
+ NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4);
netdev->features |= netdev->hw_features;
netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;
@@ -1956,7 +2122,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
- goto err_detach_rsrc;
+ goto err_ptp_destroy;
}
err = otx2_wq_init(pf);
@@ -1976,6 +2142,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_unreg_netdev:
unregister_netdev(netdev);
+err_ptp_destroy:
+ otx2_ptp_destroy(pf);
err_detach_rsrc:
otx2_detach_resources(&pf->mbox);
err_disable_mbox_intr:
@@ -2117,6 +2285,11 @@ static void otx2_remove(struct pci_dev *pdev)
pf = netdev_priv(netdev);
+ if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED)
+ otx2_config_hw_tx_tstamp(pf, false);
+ if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)
+ otx2_config_hw_rx_tstamp(pf, false);
+
cancel_work_sync(&pf->reset_task);
/* Disable link notifications */
otx2_cgx_config_linkevents(pf, false);
@@ -2126,6 +2299,7 @@ static void otx2_remove(struct pci_dev *pdev)
if (pf->otx2_wq)
destroy_workqueue(pf->otx2_wq);
+ otx2_ptp_destroy(pf);
otx2_detach_resources(&pf->mbox);
otx2_disable_mbox_intr(pf);
otx2_pfaf_mbox_destroy(pf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
new file mode 100644
index 000000000000..7bcf5246350f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 PTP support for ethernet driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include "otx2_common.h"
+#include "otx2_ptp.h"
+
+static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
+{
+ struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+ ptp_info);
+ struct ptp_req *req;
+ int err;
+
+ if (!ptp->nic)
+ return -ENODEV;
+
+ req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ req->op = PTP_OP_ADJFINE;
+ req->scaled_ppm = scaled_ppm;
+
+ err = otx2_sync_mbox_msg(&ptp->nic->mbox);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static u64 ptp_cc_read(const struct cyclecounter *cc)
+{
+ struct otx2_ptp *ptp = container_of(cc, struct otx2_ptp, cycle_counter);
+ struct ptp_req *req;
+ struct ptp_rsp *rsp;
+ int err;
+
+ if (!ptp->nic)
+ return 0;
+
+ req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
+ if (!req)
+ return 0;
+
+ req->op = PTP_OP_GET_CLOCK;
+
+ err = otx2_sync_mbox_msg(&ptp->nic->mbox);
+ if (err)
+ return 0;
+
+ rsp = (struct ptp_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0,
+ &req->hdr);
+ if (IS_ERR(rsp))
+ return 0;
+
+ return rsp->clk;
+}
+
+static int otx2_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
+{
+ struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+ ptp_info);
+ struct otx2_nic *pfvf = ptp->nic;
+
+ mutex_lock(&pfvf->mbox.lock);
+ timecounter_adjtime(&ptp->time_counter, delta);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ return 0;
+}
+
+static int otx2_ptp_gettime(struct ptp_clock_info *ptp_info,
+ struct timespec64 *ts)
+{
+ struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+ ptp_info);
+ struct otx2_nic *pfvf = ptp->nic;
+ u64 nsec;
+
+ mutex_lock(&pfvf->mbox.lock);
+ nsec = timecounter_read(&ptp->time_counter);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ *ts = ns_to_timespec64(nsec);
+
+ return 0;
+}
+
+static int otx2_ptp_settime(struct ptp_clock_info *ptp_info,
+ const struct timespec64 *ts)
+{
+ struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+ ptp_info);
+ struct otx2_nic *pfvf = ptp->nic;
+ u64 nsec;
+
+ nsec = timespec64_to_ns(ts);
+
+ mutex_lock(&pfvf->mbox.lock);
+ timecounter_init(&ptp->time_counter, &ptp->cycle_counter, nsec);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ return 0;
+}
+
+static int otx2_ptp_enable(struct ptp_clock_info *ptp_info,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+int otx2_ptp_init(struct otx2_nic *pfvf)
+{
+ struct otx2_ptp *ptp_ptr;
+ struct cyclecounter *cc;
+ struct ptp_req *req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ /* check if PTP block is available */
+ req = otx2_mbox_alloc_msg_ptp_op(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->op = PTP_OP_GET_CLOCK;
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+ }
+ mutex_unlock(&pfvf->mbox.lock);
+
+ ptp_ptr = kzalloc(sizeof(*ptp_ptr), GFP_KERNEL);
+ if (!ptp_ptr) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ ptp_ptr->nic = pfvf;
+
+ cc = &ptp_ptr->cycle_counter;
+ cc->read = ptp_cc_read;
+ cc->mask = CYCLECOUNTER_MASK(64);
+ cc->mult = 1;
+ cc->shift = 0;
+
+ timecounter_init(&ptp_ptr->time_counter, &ptp_ptr->cycle_counter,
+ ktime_to_ns(ktime_get_real()));
+
+ ptp_ptr->ptp_info = (struct ptp_clock_info) {
+ .owner = THIS_MODULE,
+ .name = "OcteonTX2 PTP",
+ .max_adj = 1000000000ull,
+ .n_ext_ts = 0,
+ .n_pins = 0,
+ .pps = 0,
+ .adjfine = otx2_ptp_adjfine,
+ .adjtime = otx2_ptp_adjtime,
+ .gettime64 = otx2_ptp_gettime,
+ .settime64 = otx2_ptp_settime,
+ .enable = otx2_ptp_enable,
+ };
+
+ ptp_ptr->ptp_clock = ptp_clock_register(&ptp_ptr->ptp_info, pfvf->dev);
+ if (IS_ERR_OR_NULL(ptp_ptr->ptp_clock)) {
+ err = ptp_ptr->ptp_clock ?
+ PTR_ERR(ptp_ptr->ptp_clock) : -ENODEV;
+ kfree(ptp_ptr);
+ goto error;
+ }
+
+ pfvf->ptp = ptp_ptr;
+
+error:
+ return err;
+}
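
Since the AF already reports the PTP counter in nanoseconds, the
cyclecounter above uses mult = 1 and shift = 0, so timecounter_cyc2time()
reduces to adding the software epoch offset set at init. Roughly (a sketch,
all values illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t nsec_base = 1600000000ull * 1000000000ull; /* epoch at init */
	uint64_t cycle_last = 1000, cyc = 6000;	/* two counter reads */
	uint32_t mult = 1, shift = 0;

	uint64_t delta = cyc - cycle_last;	/* masked to 64 bits */
	uint64_t nsec = nsec_base + ((delta * mult) >> shift);

	printf("%llu\n", (unsigned long long)nsec);
	return 0;
}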
+
+void otx2_ptp_destroy(struct otx2_nic *pfvf)
+{
+ struct otx2_ptp *ptp = pfvf->ptp;
+
+ if (!ptp)
+ return;
+
+ ptp_clock_unregister(ptp->ptp_clock);
+ kfree(ptp);
+ pfvf->ptp = NULL;
+}
+
+int otx2_ptp_clock_index(struct otx2_nic *pfvf)
+{
+ if (!pfvf->ptp)
+ return -ENODEV;
+
+ return ptp_clock_index(pfvf->ptp->ptp_clock);
+}
+
+int otx2_ptp_tstamp2time(struct otx2_nic *pfvf, u64 tstamp, u64 *tsns)
+{
+ if (!pfvf->ptp)
+ return -ENODEV;
+
+ *tsns = timecounter_cyc2time(&pfvf->ptp->time_counter, tstamp);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h
new file mode 100644
index 000000000000..706d63a43ae1
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 PTP support for ethernet driver */
+
+#ifndef OTX2_PTP_H
+#define OTX2_PTP_H
+
+int otx2_ptp_init(struct otx2_nic *pfvf);
+void otx2_ptp_destroy(struct otx2_nic *pfvf);
+
+int otx2_ptp_clock_index(struct otx2_nic *pfvf);
+int otx2_ptp_tstamp2time(struct otx2_nic *pfvf, u64 tstamp, u64 *tsns);
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index e46834e043be..d5d7a2f37493 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -16,6 +16,7 @@
#include "otx2_common.h"
#include "otx2_struct.h"
#include "otx2_txrx.h"
+#include "otx2_ptp.h"
#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
@@ -81,8 +82,11 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
int budget, int *tx_pkts, int *tx_bytes)
{
struct nix_send_comp_s *snd_comp = &cqe->comp;
+ struct skb_shared_hwtstamps ts;
struct sk_buff *skb = NULL;
+ u64 timestamp, tsns;
struct sg_list *sg;
+ int err;
if (unlikely(snd_comp->status) && netif_msg_tx_err(pfvf))
net_err_ratelimited("%s: TX%d: Error in send CQ status:%x\n",
@@ -94,6 +98,18 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
if (unlikely(!skb))
return;
+ if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
+ timestamp = ((u64 *)sq->timestamps->base)[snd_comp->sqe_id];
+ if (timestamp != 1) {
+ err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
+ if (!err) {
+ memset(&ts, 0, sizeof(ts));
+ ts.hwtstamp = ns_to_ktime(tsns);
+ skb_tstamp_tx(skb, &ts);
+ }
+ }
+ }
+
*tx_bytes += skb->len;
(*tx_pkts)++;
otx2_dma_unmap_skb_frags(pfvf, sg);
@@ -101,16 +117,47 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
sg->skb = (u64)NULL;
}
+static void otx2_set_rxtstamp(struct otx2_nic *pfvf,
+ struct sk_buff *skb, void *data)
+{
+ u64 tsns;
+ int err;
+
+ if (!(pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED))
+ return;
+
+ /* The first 8 bytes are the timestamp */
+ err = otx2_ptp_tstamp2time(pfvf, be64_to_cpu(*(__be64 *)data), &tsns);
+ if (err)
+ return;
+
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tsns);
+}
+
static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
- u64 iova, int len)
+ u64 iova, int len, struct nix_rx_parse_s *parse)
{
struct page *page;
+ int off = 0;
void *va;
va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova));
+
+ if (likely(!skb_shinfo(skb)->nr_frags)) {
+ /* Check if data starts at some nonzero offset
+ * from the start of the buffer. For now the
+ * only possible offset is 8 bytes in the case
+ * where the packet is prepended with a timestamp.
+ */
+ if (parse->laptr) {
+ otx2_set_rxtstamp(pfvf, skb, va);
+ off = OTX2_HW_TIMESTAMP_LEN;
+ }
+ }
+
page = virt_to_page(va);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- va - page_address(page), len, pfvf->rbsize);
+ va - page_address(page) + off, len - off, pfvf->rbsize);
otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM,
pfvf->rbsize, DMA_FROM_DEVICE);
@@ -239,7 +286,7 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
if (unlikely(!skb))
return;
- otx2_skb_add_frag(pfvf, skb, cqe->sg.seg_addr, cqe->sg.seg_size);
+ otx2_skb_add_frag(pfvf, skb, cqe->sg.seg_addr, cqe->sg.seg_size, parse);
cq->pool_ptrs++;
otx2_set_rxhash(pfvf, cqe, skb);
@@ -477,15 +524,55 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
*/
ip_hdr(skb)->tot_len =
htons(ext->lso_sb - skb_network_offset(skb));
- } else {
+ } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
ext->lso_format = pfvf->hw.lso_tsov6_idx;
+
ipv6_hdr(skb)->payload_len =
htons(ext->lso_sb - skb_network_offset(skb));
+ } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ __be16 l3_proto = vlan_get_protocol(skb);
+ struct udphdr *udph = udp_hdr(skb);
+ u16 iplen;
+
+ ext->lso_sb = skb_transport_offset(skb) +
+ sizeof(struct udphdr);
+
+ /* HW adds the payload size to the length fields in the IP
+ * and UDP headers during segmentation, hence adjust the
+ * lengths to just the header sizes.
+ */
+ iplen = htons(ext->lso_sb - skb_network_offset(skb));
+ if (l3_proto == htons(ETH_P_IP)) {
+ ip_hdr(skb)->tot_len = iplen;
+ ext->lso_format = pfvf->hw.lso_udpv4_idx;
+ } else {
+ ipv6_hdr(skb)->payload_len = iplen;
+ ext->lso_format = pfvf->hw.lso_udpv6_idx;
+ }
+
+ udph->len = htons(sizeof(struct udphdr));
}
+ } else if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ ext->tstmp = 1;
}
+
*offset += sizeof(*ext);
}
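+
+/* Append a SEND_MEM subdescriptor instructing the hardware to perform
+ * a memory operation at the given IOVA once the packet has been sent;
+ * with NIX_SENDMEMALG_E_SETTSTMP this stores the TX timestamp, and
+ * wmem makes the send wait for the memory write to complete.
+ */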
+static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
+ int alg, u64 iova)
+{
+ struct nix_sqe_mem_s *mem;
+
+ mem = (struct nix_sqe_mem_s *)(sq->sqe_base + *offset);
+ mem->subdc = NIX_SUBDC_MEM;
+ mem->alg = alg;
+ mem->wmem = 1; /* wait for the memory operation */
+ mem->addr = iova;
+
+ *offset += sizeof(*mem);
+}
+
/* Add SQE header subdescriptor structure */
static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
struct nix_sqe_hdr_s *sqe_hdr,
@@ -737,6 +824,21 @@ static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb)
return skb_shinfo(skb)->gso_segs;
}
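+
+/* Hardware TX timestamping is requested only for non-GSO packets that
+ * have SKBTX_HW_TSTAMP set, by appending a SEND_MEM subdescriptor;
+ * all other packets fall back to a software timestamp via
+ * skb_tx_timestamp().
+ */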
+static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb,
+ struct otx2_snd_queue *sq, int *offset)
+{
+ u64 iova;
+
+ if (!skb_shinfo(skb)->gso_size &&
+ skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ iova = sq->timestamps->iova + (sq->head * sizeof(u64));
+ otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova);
+ } else {
+ skb_tx_timestamp(skb);
+ }
+}
+
bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
struct sk_buff *skb, u16 qidx)
{
@@ -790,6 +892,8 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
return false;
}
+ otx2_set_txtstamp(pfvf, skb, sq, &offset);
+
sqe_hdr->sizem1 = (offset / 16) - 1;
netdev_tx_sent_queue(txq, skb->len);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index da97f2d4416f..73af15685657 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -91,6 +91,7 @@ struct otx2_snd_queue {
struct qmem *sqe;
struct qmem *tso_hdrs;
struct sg_list *sg;
+ struct qmem *timestamps;
struct queue_stats stats;
u16 sqb_count;
u64 *sqb_ptrs;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 2f90f1721441..67fabf265fe6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -187,6 +187,8 @@ static irqreturn_t otx2vf_vfaf_mbox_intr_handler(int irq, void *vf_irq)
mdev = &mbox->dev[0];
otx2_sync_mbox_bbuf(mbox, 0);
+ trace_otx2_msg_interrupt(mbox->pdev, "PF to VF", BIT_ULL(0));
+
hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
if (hdr->num_msgs) {
vf->mbox.num_msgs = hdr->num_msgs;
@@ -553,7 +555,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
- NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
+ NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4;
netdev->features = netdev->hw_features;
netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
diff --git a/drivers/net/ethernet/marvell/prestera/Kconfig b/drivers/net/ethernet/marvell/prestera/Kconfig
new file mode 100644
index 000000000000..b1fcc44f566a
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/Kconfig
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Marvell Prestera drivers configuration
+#
+
+config PRESTERA
+ tristate "Marvell Prestera Switch ASICs support"
+ depends on NET_SWITCHDEV && VLAN_8021Q
+ select NET_DEVLINK
+ help
+ This driver supports the Marvell Prestera Switch ASIC family.
+
+ To compile this driver as a module, choose M here: the
+ module will be called prestera.
+
+config PRESTERA_PCI
+ tristate "PCI interface driver for Marvell Prestera Switch ASICs family"
+ depends on PCI && HAS_IOMEM && PRESTERA
+ default PRESTERA
+ help
+ This is an implementation of PCI interface support for the Marvell
+ Prestera Switch ASIC family.
+
+ To compile this driver as a module, choose M here: the
+ module will be called prestera_pci.
diff --git a/drivers/net/ethernet/marvell/prestera/Makefile b/drivers/net/ethernet/marvell/prestera/Makefile
new file mode 100644
index 000000000000..93129e32ebc5
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PRESTERA) += prestera.o
+prestera-objs := prestera_main.o prestera_hw.o prestera_dsa.o \
+ prestera_rxtx.o prestera_devlink.o prestera_ethtool.o \
+ prestera_switchdev.o
+
+obj-$(CONFIG_PRESTERA_PCI) += prestera_pci.o
diff --git a/drivers/net/ethernet/marvell/prestera/prestera.h b/drivers/net/ethernet/marvell/prestera/prestera.h
new file mode 100644
index 000000000000..55aa4bf8a27c
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera.h
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_H_
+#define _PRESTERA_H_
+
+#include <linux/notifier.h>
+#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+#include <net/devlink.h>
+#include <uapi/linux/if_ether.h>
+
+#define PRESTERA_DRV_NAME "prestera"
+
+#define PRESTERA_DEFAULT_VID 1
+
+struct prestera_fw_rev {
+ u16 maj;
+ u16 min;
+ u16 sub;
+};
+
+struct prestera_port_stats {
+ u64 good_octets_received;
+ u64 bad_octets_received;
+ u64 mac_trans_error;
+ u64 broadcast_frames_received;
+ u64 multicast_frames_received;
+ u64 frames_64_octets;
+ u64 frames_65_to_127_octets;
+ u64 frames_128_to_255_octets;
+ u64 frames_256_to_511_octets;
+ u64 frames_512_to_1023_octets;
+ u64 frames_1024_to_max_octets;
+ u64 excessive_collision;
+ u64 multicast_frames_sent;
+ u64 broadcast_frames_sent;
+ u64 fc_sent;
+ u64 fc_received;
+ u64 buffer_overrun;
+ u64 undersize;
+ u64 fragments;
+ u64 oversize;
+ u64 jabber;
+ u64 rx_error_frame_received;
+ u64 bad_crc;
+ u64 collisions;
+ u64 late_collision;
+ u64 unicast_frames_received;
+ u64 unicast_frames_sent;
+ u64 sent_multiple;
+ u64 sent_deferred;
+ u64 good_octets_sent;
+};
+
+struct prestera_port_caps {
+ u64 supp_link_modes;
+ u8 supp_fec;
+ u8 type;
+ u8 transceiver;
+};
+
+struct prestera_port {
+ struct net_device *dev;
+ struct prestera_switch *sw;
+ struct devlink_port dl_port;
+ u32 id;
+ u32 hw_id;
+ u32 dev_id;
+ u16 fp_id;
+ u16 pvid;
+ bool autoneg;
+ u64 adver_link_modes;
+ u8 adver_fec;
+ struct prestera_port_caps caps;
+ struct list_head list;
+ struct list_head vlans_list;
+ struct {
+ struct prestera_port_stats stats;
+ struct delayed_work caching_dw;
+ } cached_hw_stats;
+};
+
+struct prestera_device {
+ struct device *dev;
+ u8 __iomem *ctl_regs;
+ u8 __iomem *pp_regs;
+ struct prestera_fw_rev fw_rev;
+ void *priv;
+
+ /* called by device driver to handle received packets */
+ void (*recv_pkt)(struct prestera_device *dev);
+
+ /* called by device driver to pass event up to the higher layer */
+ int (*recv_msg)(struct prestera_device *dev, void *msg, size_t size);
+
+ /* called by higher layer to send request to the firmware */
+ int (*send_req)(struct prestera_device *dev, void *in_msg,
+ size_t in_size, void *out_msg, size_t out_size,
+ unsigned int wait);
+};
+
+enum prestera_event_type {
+ PRESTERA_EVENT_TYPE_UNSPEC,
+
+ PRESTERA_EVENT_TYPE_PORT,
+ PRESTERA_EVENT_TYPE_FDB,
+ PRESTERA_EVENT_TYPE_RXTX,
+
+ PRESTERA_EVENT_TYPE_MAX
+};
+
+enum prestera_rxtx_event_id {
+ PRESTERA_RXTX_EVENT_UNSPEC,
+ PRESTERA_RXTX_EVENT_RCV_PKT,
+};
+
+enum prestera_port_event_id {
+ PRESTERA_PORT_EVENT_UNSPEC,
+ PRESTERA_PORT_EVENT_STATE_CHANGED,
+};
+
+struct prestera_port_event {
+ u32 port_id;
+ union {
+ u32 oper_state;
+ } data;
+};
+
+enum prestera_fdb_event_id {
+ PRESTERA_FDB_EVENT_UNSPEC,
+ PRESTERA_FDB_EVENT_LEARNED,
+ PRESTERA_FDB_EVENT_AGED,
+};
+
+struct prestera_fdb_event {
+ u32 port_id;
+ u32 vid;
+ union {
+ u8 mac[ETH_ALEN];
+ } data;
+};
+
+struct prestera_event {
+ u16 id;
+ union {
+ struct prestera_port_event port_evt;
+ struct prestera_fdb_event fdb_evt;
+ };
+};
+
+struct prestera_switchdev;
+struct prestera_rxtx;
+
+struct prestera_switch {
+ struct prestera_device *dev;
+ struct prestera_switchdev *swdev;
+ struct prestera_rxtx *rxtx;
+ struct list_head event_handlers;
+ struct notifier_block netdev_nb;
+ char base_mac[ETH_ALEN];
+ struct list_head port_list;
+ rwlock_t port_list_lock;
+ u32 port_count;
+ u32 mtu_min;
+ u32 mtu_max;
+ u8 id;
+};
+
+struct prestera_rxtx_params {
+ bool use_sdma;
+ u32 map_addr;
+};
+
+#define prestera_dev(sw) ((sw)->dev->dev)
+
+static inline void prestera_write(const struct prestera_switch *sw,
+ unsigned int reg, u32 val)
+{
+ writel(val, sw->dev->pp_regs + reg);
+}
+
+static inline u32 prestera_read(const struct prestera_switch *sw,
+ unsigned int reg)
+{
+ return readl(sw->dev->pp_regs + reg);
+}
+
+int prestera_device_register(struct prestera_device *dev);
+void prestera_device_unregister(struct prestera_device *dev);
+
+struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw,
+ u32 dev_id, u32 hw_id);
+
+int prestera_port_autoneg_set(struct prestera_port *port, bool enable,
+ u64 adver_link_modes, u8 adver_fec);
+
+struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id);
+
+struct prestera_port *prestera_port_dev_lower_find(struct net_device *dev);
+
+int prestera_port_pvid_set(struct prestera_port *port, u16 vid);
+
+bool prestera_netdev_check(const struct net_device *dev);
+
+#endif /* _PRESTERA_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
new file mode 100644
index 000000000000..94c185a0e2b8
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
+
+#include <net/devlink.h>
+
+#include "prestera_devlink.h"
+
+static int prestera_dl_info_get(struct devlink *dl,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct prestera_switch *sw = devlink_priv(dl);
+ char buf[16];
+ int err;
+
+ err = devlink_info_driver_name_put(req, PRESTERA_DRV_NAME);
+ if (err)
+ return err;
+
+ snprintf(buf, sizeof(buf), "%d.%d.%d",
+ sw->dev->fw_rev.maj,
+ sw->dev->fw_rev.min,
+ sw->dev->fw_rev.sub);
+
+ return devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ buf);
+}
+
+static const struct devlink_ops prestera_dl_ops = {
+ .info_get = prestera_dl_info_get,
+};
+
+struct prestera_switch *prestera_devlink_alloc(void)
+{
+ struct devlink *dl;
+
+ dl = devlink_alloc(&prestera_dl_ops, sizeof(struct prestera_switch));
+
+ return devlink_priv(dl);
+}
+
+void prestera_devlink_free(struct prestera_switch *sw)
+{
+ struct devlink *dl = priv_to_devlink(sw);
+
+ devlink_free(dl);
+}
+
+int prestera_devlink_register(struct prestera_switch *sw)
+{
+ struct devlink *dl = priv_to_devlink(sw);
+ int err;
+
+ err = devlink_register(dl, sw->dev->dev);
+ if (err)
+ dev_err(prestera_dev(sw), "devlink_register failed: %d\n", err);
+
+ return err;
+}
+
+void prestera_devlink_unregister(struct prestera_switch *sw)
+{
+ struct devlink *dl = priv_to_devlink(sw);
+
+ devlink_unregister(dl);
+}
+
+int prestera_devlink_port_register(struct prestera_port *port)
+{
+ struct prestera_switch *sw = port->sw;
+ struct devlink *dl = priv_to_devlink(sw);
+ struct devlink_port_attrs attrs = {};
+ int err;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ attrs.phys.port_number = port->fp_id;
+ attrs.switch_id.id_len = sizeof(sw->id);
+ memcpy(attrs.switch_id.id, &sw->id, attrs.switch_id.id_len);
+
+ devlink_port_attrs_set(&port->dl_port, &attrs);
+
+ err = devlink_port_register(dl, &port->dl_port, port->fp_id);
+ if (err) {
+ dev_err(prestera_dev(sw), "devlink_port_register failed: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+void prestera_devlink_port_unregister(struct prestera_port *port)
+{
+ devlink_port_unregister(&port->dl_port);
+}
+
+void prestera_devlink_port_set(struct prestera_port *port)
+{
+ devlink_port_type_eth_set(&port->dl_port, port->dev);
+}
+
+void prestera_devlink_port_clear(struct prestera_port *port)
+{
+ devlink_port_type_clear(&port->dl_port);
+}
+
+struct devlink_port *prestera_devlink_get_port(struct net_device *dev)
+{
+ struct prestera_port *port = netdev_priv(dev);
+
+ return &port->dl_port;
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.h b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h
new file mode 100644
index 000000000000..51bee9f75415
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_DEVLINK_H_
+#define _PRESTERA_DEVLINK_H_
+
+#include "prestera.h"
+
+struct prestera_switch *prestera_devlink_alloc(void);
+void prestera_devlink_free(struct prestera_switch *sw);
+
+int prestera_devlink_register(struct prestera_switch *sw);
+void prestera_devlink_unregister(struct prestera_switch *sw);
+
+int prestera_devlink_port_register(struct prestera_port *port);
+void prestera_devlink_port_unregister(struct prestera_port *port);
+
+void prestera_devlink_port_set(struct prestera_port *port);
+void prestera_devlink_port_clear(struct prestera_port *port);
+
+struct devlink_port *prestera_devlink_get_port(struct net_device *dev);
+
+#endif /* _PRESTERA_DEVLINK_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_dsa.c b/drivers/net/ethernet/marvell/prestera/prestera_dsa.c
new file mode 100644
index 000000000000..a5e01c7a307b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_dsa.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+
+#include "prestera_dsa.h"
+
+#define PRESTERA_DSA_W0_CMD GENMASK(31, 30)
+#define PRESTERA_DSA_W0_IS_TAGGED BIT(29)
+#define PRESTERA_DSA_W0_DEV_NUM GENMASK(28, 24)
+#define PRESTERA_DSA_W0_PORT_NUM GENMASK(23, 19)
+#define PRESTERA_DSA_W0_VPT GENMASK(15, 13)
+#define PRESTERA_DSA_W0_EXT_BIT BIT(12)
+#define PRESTERA_DSA_W0_VID GENMASK(11, 0)
+
+#define PRESTERA_DSA_W1_EXT_BIT BIT(31)
+#define PRESTERA_DSA_W1_CFI_BIT BIT(30)
+#define PRESTERA_DSA_W1_PORT_NUM GENMASK(11, 10)
+
+#define PRESTERA_DSA_W2_EXT_BIT BIT(31)
+#define PRESTERA_DSA_W2_PORT_NUM BIT(20)
+
+#define PRESTERA_DSA_W3_VID GENMASK(30, 27)
+#define PRESTERA_DSA_W3_DST_EPORT GENMASK(23, 7)
+#define PRESTERA_DSA_W3_DEV_NUM GENMASK(6, 0)
+
+#define PRESTERA_DSA_VID GENMASK(15, 12)
+#define PRESTERA_DSA_DEV_NUM GENMASK(11, 5)
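+
+/* The extended DSA tag is four 32-bit words in network byte order. The
+ * EXT bit in words 0..2 indicates that the next word is present, so a
+ * valid 16-byte tag must have all three EXT bits set. Wide fields such
+ * as the VID and the device and port numbers are split across several
+ * words and are reassembled using the masks above.
+ */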
+
+int prestera_dsa_parse(struct prestera_dsa *dsa, const u8 *dsa_buf)
+{
+ __be32 *dsa_words = (__be32 *)dsa_buf;
+ enum prestera_dsa_cmd cmd;
+ u32 words[4];
+ u32 field;
+
+ words[0] = ntohl(dsa_words[0]);
+ words[1] = ntohl(dsa_words[1]);
+ words[2] = ntohl(dsa_words[2]);
+ words[3] = ntohl(dsa_words[3]);
+
+ /* set the common parameters */
+ cmd = (enum prestera_dsa_cmd)FIELD_GET(PRESTERA_DSA_W0_CMD, words[0]);
+
+ /* only the "To CPU" command is supported */
+ if (unlikely(cmd != PRESTERA_DSA_CMD_TO_CPU))
+ return -EINVAL;
+
+ if (FIELD_GET(PRESTERA_DSA_W0_EXT_BIT, words[0]) == 0)
+ return -EINVAL;
+ if (FIELD_GET(PRESTERA_DSA_W1_EXT_BIT, words[1]) == 0)
+ return -EINVAL;
+ if (FIELD_GET(PRESTERA_DSA_W2_EXT_BIT, words[2]) == 0)
+ return -EINVAL;
+
+ field = FIELD_GET(PRESTERA_DSA_W3_VID, words[3]);
+
+ dsa->vlan.is_tagged = FIELD_GET(PRESTERA_DSA_W0_IS_TAGGED, words[0]);
+ dsa->vlan.cfi_bit = FIELD_GET(PRESTERA_DSA_W1_CFI_BIT, words[1]);
+ dsa->vlan.vpt = FIELD_GET(PRESTERA_DSA_W0_VPT, words[0]);
+ dsa->vlan.vid = FIELD_GET(PRESTERA_DSA_W0_VID, words[0]);
+ dsa->vlan.vid &= ~PRESTERA_DSA_VID;
+ dsa->vlan.vid |= FIELD_PREP(PRESTERA_DSA_VID, field);
+
+ field = FIELD_GET(PRESTERA_DSA_W3_DEV_NUM, words[3]);
+
+ dsa->hw_dev_num = FIELD_GET(PRESTERA_DSA_W0_DEV_NUM, words[0]);
+ dsa->hw_dev_num |= FIELD_PREP(PRESTERA_DSA_DEV_NUM, field);
+
+ dsa->port_num = (FIELD_GET(PRESTERA_DSA_W0_PORT_NUM, words[0]) << 0) |
+ (FIELD_GET(PRESTERA_DSA_W1_PORT_NUM, words[1]) << 5) |
+ (FIELD_GET(PRESTERA_DSA_W2_PORT_NUM, words[2]) << 7);
+
+ return 0;
+}
+
+int prestera_dsa_build(const struct prestera_dsa *dsa, u8 *dsa_buf)
+{
+ __be32 *dsa_words = (__be32 *)dsa_buf;
+ u32 dev_num = dsa->hw_dev_num;
+ u32 words[4] = { 0 };
+
+ words[0] |= FIELD_PREP(PRESTERA_DSA_W0_CMD, PRESTERA_DSA_CMD_FROM_CPU);
+
+ words[0] |= FIELD_PREP(PRESTERA_DSA_W0_DEV_NUM, dev_num);
+ dev_num = FIELD_GET(PRESTERA_DSA_DEV_NUM, dev_num);
+ words[3] |= FIELD_PREP(PRESTERA_DSA_W3_DEV_NUM, dev_num);
+
+ words[3] |= FIELD_PREP(PRESTERA_DSA_W3_DST_EPORT, dsa->port_num);
+
+ words[0] |= FIELD_PREP(PRESTERA_DSA_W0_EXT_BIT, 1);
+ words[1] |= FIELD_PREP(PRESTERA_DSA_W1_EXT_BIT, 1);
+ words[2] |= FIELD_PREP(PRESTERA_DSA_W2_EXT_BIT, 1);
+
+ dsa_words[0] = htonl(words[0]);
+ dsa_words[1] = htonl(words[1]);
+ dsa_words[2] = htonl(words[2]);
+ dsa_words[3] = htonl(words[3]);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_dsa.h b/drivers/net/ethernet/marvell/prestera/prestera_dsa.h
new file mode 100644
index 000000000000..67018629bdd2
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_dsa.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef __PRESTERA_DSA_H_
+#define __PRESTERA_DSA_H_
+
+#include <linux/types.h>
+
+#define PRESTERA_DSA_HLEN 16
+
+enum prestera_dsa_cmd {
+ /* DSA command is "To CPU" */
+ PRESTERA_DSA_CMD_TO_CPU = 0,
+
+ /* DSA command is "From CPU" */
+ PRESTERA_DSA_CMD_FROM_CPU,
+};
+
+struct prestera_dsa_vlan {
+ u16 vid;
+ u8 vpt;
+ u8 cfi_bit;
+ bool is_tagged;
+};
+
+struct prestera_dsa {
+ struct prestera_dsa_vlan vlan;
+ u32 hw_dev_num;
+ u32 port_num;
+};
+
+int prestera_dsa_parse(struct prestera_dsa *dsa, const u8 *dsa_buf);
+int prestera_dsa_build(const struct prestera_dsa *dsa, u8 *dsa_buf);
+
+#endif /* __PRESTERA_DSA_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
new file mode 100644
index 000000000000..93a5e2baf808
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
@@ -0,0 +1,780 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/ethtool.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+
+#include "prestera_ethtool.h"
+#include "prestera.h"
+#include "prestera_hw.h"
+
+#define PRESTERA_STATS_CNT \
+ (sizeof(struct prestera_port_stats) / sizeof(u64))
+#define PRESTERA_STATS_IDX(name) \
+ (offsetof(struct prestera_port_stats, name) / sizeof(u64))
+#define PRESTERA_STATS_FIELD(name) \
+ [PRESTERA_STATS_IDX(name)] = __stringify(name)
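+
+/* struct prestera_port_stats is a flat array of u64 counters, so the
+ * macros above can map each field name to its ethtool statistics index
+ * by dividing the field offset by sizeof(u64).
+ */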
+
+static const char driver_kind[] = "prestera";
+
+static const struct prestera_link_mode {
+ enum ethtool_link_mode_bit_indices eth_mode;
+ u32 speed;
+ u64 pr_mask;
+ u8 duplex;
+ u8 port_type;
+} port_link_modes[PRESTERA_LINK_MODE_MAX] = {
+ [PRESTERA_LINK_MODE_10baseT_Half] = {
+ .eth_mode = ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ .speed = 10,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_10baseT_Half,
+ .duplex = PRESTERA_PORT_DUPLEX_HALF,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_10baseT_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ .speed = 10,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_10baseT_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_100baseT_Half] = {
+ .eth_mode = ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ .speed = 100,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_100baseT_Half,
+ .duplex = PRESTERA_PORT_DUPLEX_HALF,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_100baseT_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ .speed = 100,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_100baseT_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_1000baseT_Half] = {
+ .eth_mode = ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ .speed = 1000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_1000baseT_Half,
+ .duplex = PRESTERA_PORT_DUPLEX_HALF,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_1000baseT_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ .speed = 1000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_1000baseT_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_1000baseX_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ .speed = 1000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_1000baseX_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_FIBRE,
+ },
+ [PRESTERA_LINK_MODE_1000baseKX_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ .speed = 1000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_1000baseKX_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_2500baseX_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
+ .speed = 2500,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_2500baseX_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ },
+ [PRESTERA_LINK_MODE_10GbaseKR_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ .speed = 10000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_10GbaseKR_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_10GbaseSR_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+ .speed = 10000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_10GbaseSR_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_FIBRE,
+ },
+ [PRESTERA_LINK_MODE_10GbaseLR_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+ .speed = 10000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_10GbaseLR_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_FIBRE,
+ },
+ [PRESTERA_LINK_MODE_20GbaseKR2_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
+ .speed = 20000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_20GbaseKR2_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_25GbaseCR_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ .speed = 25000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_25GbaseCR_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_DA,
+ },
+ [PRESTERA_LINK_MODE_25GbaseKR_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ .speed = 25000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_25GbaseKR_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_25GbaseSR_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ .speed = 25000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_25GbaseSR_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_FIBRE,
+ },
+ [PRESTERA_LINK_MODE_40GbaseKR4_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ .speed = 40000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_40GbaseKR4_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_40GbaseCR4_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ .speed = 40000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_40GbaseCR4_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_DA,
+ },
+ [PRESTERA_LINK_MODE_40GbaseSR4_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+ .speed = 40000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_40GbaseSR4_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_FIBRE,
+ },
+ [PRESTERA_LINK_MODE_50GbaseCR2_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+ .speed = 50000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_50GbaseCR2_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_DA,
+ },
+ [PRESTERA_LINK_MODE_50GbaseKR2_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+ .speed = 50000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_50GbaseKR2_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_50GbaseSR2_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+ .speed = 50000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_50GbaseSR2_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_FIBRE,
+ },
+ [PRESTERA_LINK_MODE_100GbaseKR4_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ .speed = 100000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_100GbaseKR4_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_100GbaseSR4_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ .speed = 100000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_100GbaseSR4_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_FIBRE,
+ },
+ [PRESTERA_LINK_MODE_100GbaseCR4_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ .speed = 100000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_100GbaseCR4_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_DA,
+ }
+};
+
+static const struct prestera_fec {
+ u32 eth_fec;
+ enum ethtool_link_mode_bit_indices eth_mode;
+ u8 pr_fec;
+} port_fec_caps[PRESTERA_PORT_FEC_MAX] = {
+ [PRESTERA_PORT_FEC_OFF] = {
+ .eth_fec = ETHTOOL_FEC_OFF,
+ .eth_mode = ETHTOOL_LINK_MODE_FEC_NONE_BIT,
+ .pr_fec = 1 << PRESTERA_PORT_FEC_OFF,
+ },
+ [PRESTERA_PORT_FEC_BASER] = {
+ .eth_fec = ETHTOOL_FEC_BASER,
+ .eth_mode = ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ .pr_fec = 1 << PRESTERA_PORT_FEC_BASER,
+ },
+ [PRESTERA_PORT_FEC_RS] = {
+ .eth_fec = ETHTOOL_FEC_RS,
+ .eth_mode = ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ .pr_fec = 1 << PRESTERA_PORT_FEC_RS,
+ }
+};
+
+static const struct prestera_port_type {
+ enum ethtool_link_mode_bit_indices eth_mode;
+ u8 eth_type;
+} port_types[PRESTERA_PORT_TYPE_MAX] = {
+ [PRESTERA_PORT_TYPE_NONE] = {
+ .eth_mode = __ETHTOOL_LINK_MODE_MASK_NBITS,
+ .eth_type = PORT_NONE,
+ },
+ [PRESTERA_PORT_TYPE_TP] = {
+ .eth_mode = ETHTOOL_LINK_MODE_TP_BIT,
+ .eth_type = PORT_TP,
+ },
+ [PRESTERA_PORT_TYPE_AUI] = {
+ .eth_mode = ETHTOOL_LINK_MODE_AUI_BIT,
+ .eth_type = PORT_AUI,
+ },
+ [PRESTERA_PORT_TYPE_MII] = {
+ .eth_mode = ETHTOOL_LINK_MODE_MII_BIT,
+ .eth_type = PORT_MII,
+ },
+ [PRESTERA_PORT_TYPE_FIBRE] = {
+ .eth_mode = ETHTOOL_LINK_MODE_FIBRE_BIT,
+ .eth_type = PORT_FIBRE,
+ },
+ [PRESTERA_PORT_TYPE_BNC] = {
+ .eth_mode = ETHTOOL_LINK_MODE_BNC_BIT,
+ .eth_type = PORT_BNC,
+ },
+ [PRESTERA_PORT_TYPE_DA] = {
+ .eth_mode = ETHTOOL_LINK_MODE_TP_BIT,
+ .eth_type = PORT_TP,
+ },
+ [PRESTERA_PORT_TYPE_OTHER] = {
+ .eth_mode = __ETHTOOL_LINK_MODE_MASK_NBITS,
+ .eth_type = PORT_OTHER,
+ }
+};
+
+static const char prestera_cnt_name[PRESTERA_STATS_CNT][ETH_GSTRING_LEN] = {
+ PRESTERA_STATS_FIELD(good_octets_received),
+ PRESTERA_STATS_FIELD(bad_octets_received),
+ PRESTERA_STATS_FIELD(mac_trans_error),
+ PRESTERA_STATS_FIELD(broadcast_frames_received),
+ PRESTERA_STATS_FIELD(multicast_frames_received),
+ PRESTERA_STATS_FIELD(frames_64_octets),
+ PRESTERA_STATS_FIELD(frames_65_to_127_octets),
+ PRESTERA_STATS_FIELD(frames_128_to_255_octets),
+ PRESTERA_STATS_FIELD(frames_256_to_511_octets),
+ PRESTERA_STATS_FIELD(frames_512_to_1023_octets),
+ PRESTERA_STATS_FIELD(frames_1024_to_max_octets),
+ PRESTERA_STATS_FIELD(excessive_collision),
+ PRESTERA_STATS_FIELD(multicast_frames_sent),
+ PRESTERA_STATS_FIELD(broadcast_frames_sent),
+ PRESTERA_STATS_FIELD(fc_sent),
+ PRESTERA_STATS_FIELD(fc_received),
+ PRESTERA_STATS_FIELD(buffer_overrun),
+ PRESTERA_STATS_FIELD(undersize),
+ PRESTERA_STATS_FIELD(fragments),
+ PRESTERA_STATS_FIELD(oversize),
+ PRESTERA_STATS_FIELD(jabber),
+ PRESTERA_STATS_FIELD(rx_error_frame_received),
+ PRESTERA_STATS_FIELD(bad_crc),
+ PRESTERA_STATS_FIELD(collisions),
+ PRESTERA_STATS_FIELD(late_collision),
+ PRESTERA_STATS_FIELD(unicast_frames_received),
+ PRESTERA_STATS_FIELD(unicast_frames_sent),
+ PRESTERA_STATS_FIELD(sent_multiple),
+ PRESTERA_STATS_FIELD(sent_deferred),
+ PRESTERA_STATS_FIELD(good_octets_sent),
+};
+
+static void prestera_ethtool_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ struct prestera_switch *sw = port->sw;
+
+ strlcpy(drvinfo->driver, driver_kind, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->bus_info, dev_name(prestera_dev(sw)),
+ sizeof(drvinfo->bus_info));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d",
+ sw->dev->fw_rev.maj,
+ sw->dev->fw_rev.min,
+ sw->dev->fw_rev.sub);
+}
+
+static u8 prestera_port_type_get(struct prestera_port *port)
+{
+ if (port->caps.type < PRESTERA_PORT_TYPE_MAX)
+ return port_types[port->caps.type].eth_type;
+
+ return PORT_OTHER;
+}
+
+static int prestera_port_type_set(const struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
+{
+ u32 new_mode = PRESTERA_LINK_MODE_MAX;
+ u32 type, mode;
+ int err;
+
+ for (type = 0; type < PRESTERA_PORT_TYPE_MAX; type++) {
+ if (port_types[type].eth_type == ecmd->base.port &&
+ test_bit(port_types[type].eth_mode,
+ ecmd->link_modes.supported)) {
+ break;
+ }
+ }
+
+ if (type == port->caps.type)
+ return 0;
+ if (type != port->caps.type && ecmd->base.autoneg == AUTONEG_ENABLE)
+ return -EINVAL;
+ if (type == PRESTERA_PORT_TYPE_MAX)
+ return -EOPNOTSUPP;
+
+ for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) {
+ if ((port_link_modes[mode].pr_mask &
+ port->caps.supp_link_modes) &&
+ type == port_link_modes[mode].port_type) {
+ new_mode = mode;
+ }
+ }
+
+ if (new_mode < PRESTERA_LINK_MODE_MAX)
+ err = prestera_hw_port_link_mode_set(port, new_mode);
+ else
+ err = -EINVAL;
+
+ if (err)
+ return err;
+
+ port->caps.type = type;
+ port->autoneg = false;
+
+ return 0;
+}
+
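+/* Translate a bitmap of PRESTERA_LINK_MODE_* bits plus FEC
+ * capabilities into ethtool link-mode bits; a type other than
+ * PRESTERA_PORT_TYPE_NONE restricts the result to modes of that
+ * port type.
+ */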
+static void prestera_modes_to_eth(unsigned long *eth_modes, u64 link_modes,
+ u8 fec, u8 type)
+{
+ u32 mode;
+
+ for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) {
+ if ((port_link_modes[mode].pr_mask & link_modes) == 0)
+ continue;
+
+ if (type != PRESTERA_PORT_TYPE_NONE &&
+ port_link_modes[mode].port_type != type)
+ continue;
+
+ __set_bit(port_link_modes[mode].eth_mode, eth_modes);
+ }
+
+ for (mode = 0; mode < PRESTERA_PORT_FEC_MAX; mode++) {
+ if ((port_fec_caps[mode].pr_fec & fec) == 0)
+ continue;
+
+ __set_bit(port_fec_caps[mode].eth_mode, eth_modes);
+ }
+}
+
+static void prestera_modes_from_eth(const unsigned long *eth_modes,
+ u64 *link_modes, u8 *fec, u8 type)
+{
+ u64 adver_modes = 0;
+ u32 fec_modes = 0;
+ u32 mode;
+
+ for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) {
+ if (!test_bit(port_link_modes[mode].eth_mode, eth_modes))
+ continue;
+
+ if (port_link_modes[mode].port_type != type)
+ continue;
+
+ adver_modes |= port_link_modes[mode].pr_mask;
+ }
+
+ for (mode = 0; mode < PRESTERA_PORT_FEC_MAX; mode++) {
+ if (!test_bit(port_fec_caps[mode].eth_mode, eth_modes))
+ continue;
+
+ fec_modes |= port_fec_caps[mode].pr_fec;
+ }
+
+ *link_modes = adver_modes;
+ *fec = fec_modes;
+}
+
+static void prestera_port_supp_types_get(struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
+{
+ u32 mode;
+ u8 ptype;
+
+ for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) {
+ if ((port_link_modes[mode].pr_mask &
+ port->caps.supp_link_modes) == 0)
+ continue;
+
+ ptype = port_link_modes[mode].port_type;
+ __set_bit(port_types[ptype].eth_mode,
+ ecmd->link_modes.supported);
+ }
+}
+
+static void prestera_port_remote_cap_get(struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
+{
+ bool asym_pause;
+ bool pause;
+ u64 bitmap;
+ int err;
+
+ err = prestera_hw_port_remote_cap_get(port, &bitmap);
+ if (!err) {
+ prestera_modes_to_eth(ecmd->link_modes.lp_advertising,
+ bitmap, 0, PRESTERA_PORT_TYPE_NONE);
+
+ if (!bitmap_empty(ecmd->link_modes.lp_advertising,
+ __ETHTOOL_LINK_MODE_MASK_NBITS)) {
+ ethtool_link_ksettings_add_link_mode(ecmd,
+ lp_advertising,
+ Autoneg);
+ }
+ }
+
+ err = prestera_hw_port_remote_fc_get(port, &pause, &asym_pause);
+ if (err)
+ return;
+
+ if (pause)
+ ethtool_link_ksettings_add_link_mode(ecmd,
+ lp_advertising,
+ Pause);
+ if (asym_pause)
+ ethtool_link_ksettings_add_link_mode(ecmd,
+ lp_advertising,
+ Asym_Pause);
+}
+
+static void prestera_port_speed_get(struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
+{
+ u32 speed;
+ int err;
+
+ err = prestera_hw_port_speed_get(port, &speed);
+ ecmd->base.speed = err ? SPEED_UNKNOWN : speed;
+}
+
+static void prestera_port_duplex_get(struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
+{
+ u8 duplex;
+ int err;
+
+ err = prestera_hw_port_duplex_get(port, &duplex);
+ if (err) {
+ ecmd->base.duplex = DUPLEX_UNKNOWN;
+ return;
+ }
+
+ ecmd->base.duplex = duplex == PRESTERA_PORT_DUPLEX_FULL ?
+ DUPLEX_FULL : DUPLEX_HALF;
+}
+
+static int
+prestera_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *ecmd)
+{
+ struct prestera_port *port = netdev_priv(dev);
+
+ ethtool_link_ksettings_zero_link_mode(ecmd, supported);
+ ethtool_link_ksettings_zero_link_mode(ecmd, advertising);
+ ethtool_link_ksettings_zero_link_mode(ecmd, lp_advertising);
+
+ ecmd->base.autoneg = port->autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+ if (port->caps.type == PRESTERA_PORT_TYPE_TP) {
+ ethtool_link_ksettings_add_link_mode(ecmd, supported, Autoneg);
+
+ if (netif_running(dev) &&
+ (port->autoneg ||
+ port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER))
+ ethtool_link_ksettings_add_link_mode(ecmd, advertising,
+ Autoneg);
+ }
+
+ prestera_modes_to_eth(ecmd->link_modes.supported,
+ port->caps.supp_link_modes,
+ port->caps.supp_fec,
+ port->caps.type);
+
+ prestera_port_supp_types_get(ecmd, port);
+
+ if (netif_carrier_ok(dev)) {
+ prestera_port_speed_get(ecmd, port);
+ prestera_port_duplex_get(ecmd, port);
+ } else {
+ ecmd->base.speed = SPEED_UNKNOWN;
+ ecmd->base.duplex = DUPLEX_UNKNOWN;
+ }
+
+ ecmd->base.port = prestera_port_type_get(port);
+
+ if (port->autoneg) {
+ if (netif_running(dev))
+ prestera_modes_to_eth(ecmd->link_modes.advertising,
+ port->adver_link_modes,
+ port->adver_fec,
+ port->caps.type);
+
+ if (netif_carrier_ok(dev) &&
+ port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER)
+ prestera_port_remote_cap_get(ecmd, port);
+ }
+
+ if (port->caps.type == PRESTERA_PORT_TYPE_TP &&
+ port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER)
+ prestera_hw_port_mdix_get(port, &ecmd->base.eth_tp_mdix,
+ &ecmd->base.eth_tp_mdix_ctrl);
+
+ return 0;
+}
+
+static int prestera_port_mdix_set(const struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
+{
+ if (ecmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_INVALID &&
+ port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER &&
+ port->caps.type == PRESTERA_PORT_TYPE_TP)
+ return prestera_hw_port_mdix_set(port,
+ ecmd->base.eth_tp_mdix_ctrl);
+
+ return 0;
+}
+
+static int prestera_port_link_mode_set(struct prestera_port *port,
+ u32 speed, u8 duplex, u8 type)
+{
+ u32 new_mode = PRESTERA_LINK_MODE_MAX;
+ u32 mode;
+
+ for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) {
+ if (speed != port_link_modes[mode].speed)
+ continue;
+
+ if (duplex != port_link_modes[mode].duplex)
+ continue;
+
+ if (!(port_link_modes[mode].pr_mask &
+ port->caps.supp_link_modes))
+ continue;
+
+ if (type != port_link_modes[mode].port_type)
+ continue;
+
+ new_mode = mode;
+ break;
+ }
+
+ if (new_mode == PRESTERA_LINK_MODE_MAX)
+ return -EOPNOTSUPP;
+
+ return prestera_hw_port_link_mode_set(port, new_mode);
+}
+
+static int
+prestera_port_speed_duplex_set(const struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
+{
+ u32 curr_mode;
+ u8 duplex;
+ u32 speed;
+ int err;
+
+ err = prestera_hw_port_link_mode_get(port, &curr_mode);
+ if (err)
+ return err;
+ if (curr_mode >= PRESTERA_LINK_MODE_MAX)
+ return -EINVAL;
+
+ if (ecmd->base.duplex != DUPLEX_UNKNOWN)
+ duplex = ecmd->base.duplex == DUPLEX_FULL ?
+ PRESTERA_PORT_DUPLEX_FULL : PRESTERA_PORT_DUPLEX_HALF;
+ else
+ duplex = port_link_modes[curr_mode].duplex;
+
+ if (ecmd->base.speed != SPEED_UNKNOWN)
+ speed = ecmd->base.speed;
+ else
+ speed = port_link_modes[curr_mode].speed;
+
+ return prestera_port_link_mode_set(port, speed, duplex,
+ port->caps.type);
+}
+
+static int
+prestera_ethtool_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *ecmd)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ u64 adver_modes;
+ u8 adver_fec;
+ int err;
+
+ err = prestera_port_type_set(ecmd, port);
+ if (err)
+ return err;
+
+ if (port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER) {
+ err = prestera_port_mdix_set(ecmd, port);
+ if (err)
+ return err;
+ }
+
+ prestera_modes_from_eth(ecmd->link_modes.advertising, &adver_modes,
+ &adver_fec, port->caps.type);
+
+ err = prestera_port_autoneg_set(port,
+ ecmd->base.autoneg == AUTONEG_ENABLE,
+ adver_modes, adver_fec);
+ if (err)
+ return err;
+
+ if (ecmd->base.autoneg == AUTONEG_DISABLE) {
+ err = prestera_port_speed_duplex_set(ecmd, port);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int prestera_ethtool_get_fecparam(struct net_device *dev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ u8 active;
+ u32 mode;
+ int err;
+
+ err = prestera_hw_port_fec_get(port, &active);
+ if (err)
+ return err;
+
+ fecparam->fec = 0;
+
+ for (mode = 0; mode < PRESTERA_PORT_FEC_MAX; mode++) {
+ if ((port_fec_caps[mode].pr_fec & port->caps.supp_fec) == 0)
+ continue;
+
+ fecparam->fec |= port_fec_caps[mode].eth_fec;
+ }
+
+ if (active < PRESTERA_PORT_FEC_MAX)
+ fecparam->active_fec = port_fec_caps[active].eth_fec;
+ else
+ fecparam->active_fec = ETHTOOL_FEC_AUTO;
+
+ return 0;
+}
+
+static int prestera_ethtool_set_fecparam(struct net_device *dev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ u8 fec, active;
+ u32 mode;
+ int err;
+
+ if (port->autoneg) {
+ netdev_err(dev, "FEC set is not allowed while autoneg is on\n");
+ return -EINVAL;
+ }
+
+ err = prestera_hw_port_fec_get(port, &active);
+ if (err)
+ return err;
+
+ fec = PRESTERA_PORT_FEC_MAX;
+ for (mode = 0; mode < PRESTERA_PORT_FEC_MAX; mode++) {
+ if ((port_fec_caps[mode].eth_fec & fecparam->fec) &&
+ (port_fec_caps[mode].pr_fec & port->caps.supp_fec)) {
+ fec = mode;
+ break;
+ }
+ }
+
+ if (fec == active)
+ return 0;
+
+ if (fec == PRESTERA_PORT_FEC_MAX)
+ return -EOPNOTSUPP;
+
+ return prestera_hw_port_fec_set(port, fec);
+}
+
+static int prestera_ethtool_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return PRESTERA_STATS_CNT;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void prestera_ethtool_get_strings(struct net_device *dev,
+ u32 stringset, u8 *data)
+{
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ memcpy(data, prestera_cnt_name, sizeof(prestera_cnt_name));
+}
+
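+/* Serve statistics from the per-port cache (cached_hw_stats), which is
+ * presumably refreshed in the background by the caching_dw delayed
+ * work, rather than querying the hardware on every call.
+ */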
+static void prestera_ethtool_get_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ struct prestera_port_stats *port_stats;
+
+ port_stats = &port->cached_hw_stats.stats;
+
+ memcpy(data, port_stats, sizeof(*port_stats));
+}
+
+static int prestera_ethtool_nway_reset(struct net_device *dev)
+{
+ struct prestera_port *port = netdev_priv(dev);
+
+ if (netif_running(dev) &&
+ port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER &&
+ port->caps.type == PRESTERA_PORT_TYPE_TP)
+ return prestera_hw_port_autoneg_restart(port);
+
+ return -EINVAL;
+}
+
+const struct ethtool_ops prestera_ethtool_ops = {
+ .get_drvinfo = prestera_ethtool_get_drvinfo,
+ .get_link_ksettings = prestera_ethtool_get_link_ksettings,
+ .set_link_ksettings = prestera_ethtool_set_link_ksettings,
+ .get_fecparam = prestera_ethtool_get_fecparam,
+ .set_fecparam = prestera_ethtool_set_fecparam,
+ .get_sset_count = prestera_ethtool_get_sset_count,
+ .get_strings = prestera_ethtool_get_strings,
+ .get_ethtool_stats = prestera_ethtool_get_stats,
+ .get_link = ethtool_op_get_link,
+ .nway_reset = prestera_ethtool_nway_reset
+};
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h
new file mode 100644
index 000000000000..523ef1f592ce
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef __PRESTERA_ETHTOOL_H_
+#define __PRESTERA_ETHTOOL_H_
+
+#include <linux/ethtool.h>
+
+extern const struct ethtool_ops prestera_ethtool_ops;
+
+#endif /* __PRESTERA_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
new file mode 100644
index 000000000000..0424718d5998
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
@@ -0,0 +1,1253 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/list.h>
+
+#include "prestera.h"
+#include "prestera_hw.h"
+
+#define PRESTERA_SWITCH_INIT_TIMEOUT_MS (30 * 1000)
+
+#define PRESTERA_MIN_MTU 64
+
+enum prestera_cmd_type_t {
+ PRESTERA_CMD_TYPE_SWITCH_INIT = 0x1,
+ PRESTERA_CMD_TYPE_SWITCH_ATTR_SET = 0x2,
+
+ PRESTERA_CMD_TYPE_PORT_ATTR_SET = 0x100,
+ PRESTERA_CMD_TYPE_PORT_ATTR_GET = 0x101,
+ PRESTERA_CMD_TYPE_PORT_INFO_GET = 0x110,
+
+ PRESTERA_CMD_TYPE_VLAN_CREATE = 0x200,
+ PRESTERA_CMD_TYPE_VLAN_DELETE = 0x201,
+ PRESTERA_CMD_TYPE_VLAN_PORT_SET = 0x202,
+ PRESTERA_CMD_TYPE_VLAN_PVID_SET = 0x203,
+
+ PRESTERA_CMD_TYPE_FDB_ADD = 0x300,
+ PRESTERA_CMD_TYPE_FDB_DELETE = 0x301,
+ PRESTERA_CMD_TYPE_FDB_FLUSH_PORT = 0x310,
+ PRESTERA_CMD_TYPE_FDB_FLUSH_VLAN = 0x311,
+ PRESTERA_CMD_TYPE_FDB_FLUSH_PORT_VLAN = 0x312,
+
+ PRESTERA_CMD_TYPE_BRIDGE_CREATE = 0x400,
+ PRESTERA_CMD_TYPE_BRIDGE_DELETE = 0x401,
+ PRESTERA_CMD_TYPE_BRIDGE_PORT_ADD = 0x402,
+ PRESTERA_CMD_TYPE_BRIDGE_PORT_DELETE = 0x403,
+
+ PRESTERA_CMD_TYPE_RXTX_INIT = 0x800,
+ PRESTERA_CMD_TYPE_RXTX_PORT_INIT = 0x801,
+
+ PRESTERA_CMD_TYPE_STP_PORT_SET = 0x1000,
+
+ PRESTERA_CMD_TYPE_ACK = 0x10000,
+ PRESTERA_CMD_TYPE_MAX
+};
+
+enum {
+ PRESTERA_CMD_PORT_ATTR_ADMIN_STATE = 1,
+ PRESTERA_CMD_PORT_ATTR_MTU = 3,
+ PRESTERA_CMD_PORT_ATTR_MAC = 4,
+ PRESTERA_CMD_PORT_ATTR_SPEED = 5,
+ PRESTERA_CMD_PORT_ATTR_ACCEPT_FRAME_TYPE = 6,
+ PRESTERA_CMD_PORT_ATTR_LEARNING = 7,
+ PRESTERA_CMD_PORT_ATTR_FLOOD = 8,
+ PRESTERA_CMD_PORT_ATTR_CAPABILITY = 9,
+ PRESTERA_CMD_PORT_ATTR_REMOTE_CAPABILITY = 10,
+ PRESTERA_CMD_PORT_ATTR_REMOTE_FC = 11,
+ PRESTERA_CMD_PORT_ATTR_LINK_MODE = 12,
+ PRESTERA_CMD_PORT_ATTR_TYPE = 13,
+ PRESTERA_CMD_PORT_ATTR_FEC = 14,
+ PRESTERA_CMD_PORT_ATTR_AUTONEG = 15,
+ PRESTERA_CMD_PORT_ATTR_DUPLEX = 16,
+ PRESTERA_CMD_PORT_ATTR_STATS = 17,
+ PRESTERA_CMD_PORT_ATTR_MDIX = 18,
+ PRESTERA_CMD_PORT_ATTR_AUTONEG_RESTART = 19,
+};
+
+enum {
+ PRESTERA_CMD_SWITCH_ATTR_MAC = 1,
+ PRESTERA_CMD_SWITCH_ATTR_AGEING = 2,
+};
+
+enum {
+ PRESTERA_CMD_ACK_OK,
+ PRESTERA_CMD_ACK_FAILED,
+
+ PRESTERA_CMD_ACK_MAX
+};
+
+enum {
+ PRESTERA_PORT_TP_NA,
+ PRESTERA_PORT_TP_MDI,
+ PRESTERA_PORT_TP_MDIX,
+ PRESTERA_PORT_TP_AUTO,
+};
+
+enum {
+ PRESTERA_PORT_GOOD_OCTETS_RCV_CNT,
+ PRESTERA_PORT_BAD_OCTETS_RCV_CNT,
+ PRESTERA_PORT_MAC_TRANSMIT_ERR_CNT,
+ PRESTERA_PORT_BRDC_PKTS_RCV_CNT,
+ PRESTERA_PORT_MC_PKTS_RCV_CNT,
+ PRESTERA_PORT_PKTS_64L_CNT,
+ PRESTERA_PORT_PKTS_65TO127L_CNT,
+ PRESTERA_PORT_PKTS_128TO255L_CNT,
+ PRESTERA_PORT_PKTS_256TO511L_CNT,
+ PRESTERA_PORT_PKTS_512TO1023L_CNT,
+ PRESTERA_PORT_PKTS_1024TOMAXL_CNT,
+ PRESTERA_PORT_EXCESSIVE_COLLISIONS_CNT,
+ PRESTERA_PORT_MC_PKTS_SENT_CNT,
+ PRESTERA_PORT_BRDC_PKTS_SENT_CNT,
+ PRESTERA_PORT_FC_SENT_CNT,
+ PRESTERA_PORT_GOOD_FC_RCV_CNT,
+ PRESTERA_PORT_DROP_EVENTS_CNT,
+ PRESTERA_PORT_UNDERSIZE_PKTS_CNT,
+ PRESTERA_PORT_FRAGMENTS_PKTS_CNT,
+ PRESTERA_PORT_OVERSIZE_PKTS_CNT,
+ PRESTERA_PORT_JABBER_PKTS_CNT,
+ PRESTERA_PORT_MAC_RCV_ERROR_CNT,
+ PRESTERA_PORT_BAD_CRC_CNT,
+ PRESTERA_PORT_COLLISIONS_CNT,
+ PRESTERA_PORT_LATE_COLLISIONS_CNT,
+ PRESTERA_PORT_GOOD_UC_PKTS_RCV_CNT,
+ PRESTERA_PORT_GOOD_UC_PKTS_SENT_CNT,
+ PRESTERA_PORT_MULTIPLE_PKTS_SENT_CNT,
+ PRESTERA_PORT_DEFERRED_PKTS_SENT_CNT,
+ PRESTERA_PORT_GOOD_OCTETS_SENT_CNT,
+
+ PRESTERA_PORT_CNT_MAX
+};
+
+enum {
+ PRESTERA_FC_NONE,
+ PRESTERA_FC_SYMMETRIC,
+ PRESTERA_FC_ASYMMETRIC,
+ PRESTERA_FC_SYMM_ASYMM,
+};
+
+struct prestera_fw_event_handler {
+ struct list_head list;
+ struct rcu_head rcu;
+ enum prestera_event_type type;
+ prestera_event_cb_t func;
+ void *arg;
+};
+
+struct prestera_msg_cmd {
+ u32 type;
+};
+
+struct prestera_msg_ret {
+ struct prestera_msg_cmd cmd;
+ u32 status;
+};
+
+struct prestera_msg_common_req {
+ struct prestera_msg_cmd cmd;
+};
+
+struct prestera_msg_common_resp {
+ struct prestera_msg_ret ret;
+};
+
+union prestera_msg_switch_param {
+ u8 mac[ETH_ALEN];
+ u32 ageing_timeout_ms;
+};
+
+struct prestera_msg_switch_attr_req {
+ struct prestera_msg_cmd cmd;
+ u32 attr;
+ union prestera_msg_switch_param param;
+};
+
+struct prestera_msg_switch_init_resp {
+ struct prestera_msg_ret ret;
+ u32 port_count;
+ u32 mtu_max;
+ u8 switch_id;
+};
+
+struct prestera_msg_port_autoneg_param {
+ u64 link_mode;
+ u8 enable;
+ u8 fec;
+};
+
+struct prestera_msg_port_cap_param {
+ u64 link_mode;
+ u8 type;
+ u8 fec;
+ u8 transceiver;
+};
+
+struct prestera_msg_port_mdix_param {
+ u8 status;
+ u8 admin_mode;
+};
+
+union prestera_msg_port_param {
+ u8 admin_state;
+ u8 oper_state;
+ u32 mtu;
+ u8 mac[ETH_ALEN];
+ u8 accept_frm_type;
+ u32 speed;
+ u8 learning;
+ u8 flood;
+ u32 link_mode;
+ u8 type;
+ u8 duplex;
+ u8 fec;
+ u8 fc;
+ struct prestera_msg_port_mdix_param mdix;
+ struct prestera_msg_port_autoneg_param autoneg;
+ struct prestera_msg_port_cap_param cap;
+};
+
+struct prestera_msg_port_attr_req {
+ struct prestera_msg_cmd cmd;
+ u32 attr;
+ u32 port;
+ u32 dev;
+ union prestera_msg_port_param param;
+};
+
+struct prestera_msg_port_attr_resp {
+ struct prestera_msg_ret ret;
+ union prestera_msg_port_param param;
+};
+
+struct prestera_msg_port_stats_resp {
+ struct prestera_msg_ret ret;
+ u64 stats[PRESTERA_PORT_CNT_MAX];
+};
+
+struct prestera_msg_port_info_req {
+ struct prestera_msg_cmd cmd;
+ u32 port;
+};
+
+struct prestera_msg_port_info_resp {
+ struct prestera_msg_ret ret;
+ u32 hw_id;
+ u32 dev_id;
+ u16 fp_id;
+};
+
+struct prestera_msg_vlan_req {
+ struct prestera_msg_cmd cmd;
+ u32 port;
+ u32 dev;
+ u16 vid;
+ u8 is_member;
+ u8 is_tagged;
+};
+
+struct prestera_msg_fdb_req {
+ struct prestera_msg_cmd cmd;
+ u8 dest_type;
+ u32 port;
+ u32 dev;
+ u8 mac[ETH_ALEN];
+ u16 vid;
+ u8 dynamic;
+ u32 flush_mode;
+};
+
+struct prestera_msg_bridge_req {
+ struct prestera_msg_cmd cmd;
+ u32 port;
+ u32 dev;
+ u16 bridge;
+};
+
+struct prestera_msg_bridge_resp {
+ struct prestera_msg_ret ret;
+ u16 bridge;
+};
+
+struct prestera_msg_stp_req {
+ struct prestera_msg_cmd cmd;
+ u32 port;
+ u32 dev;
+ u16 vid;
+ u8 state;
+};
+
+struct prestera_msg_rxtx_req {
+ struct prestera_msg_cmd cmd;
+ u8 use_sdma;
+};
+
+struct prestera_msg_rxtx_resp {
+ struct prestera_msg_ret ret;
+ u32 map_addr;
+};
+
+struct prestera_msg_rxtx_port_req {
+ struct prestera_msg_cmd cmd;
+ u32 port;
+ u32 dev;
+};
+
+struct prestera_msg_event {
+ u16 type;
+ u16 id;
+};
+
+union prestera_msg_event_port_param {
+ u32 oper_state;
+};
+
+struct prestera_msg_event_port {
+ struct prestera_msg_event id;
+ u32 port_id;
+ union prestera_msg_event_port_param param;
+};
+
+union prestera_msg_event_fdb_param {
+ u8 mac[ETH_ALEN];
+};
+
+struct prestera_msg_event_fdb {
+ struct prestera_msg_event id;
+ u8 dest_type;
+ u32 port_id;
+ u32 vid;
+ union prestera_msg_event_fdb_param param;
+};
+
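+/* Send a request to the firmware and validate the reply: every command
+ * must be answered with a PRESTERA_CMD_TYPE_ACK message whose status
+ * is PRESTERA_CMD_ACK_OK, otherwise the command is treated as failed.
+ */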
+static int __prestera_cmd_ret(struct prestera_switch *sw,
+ enum prestera_cmd_type_t type,
+ struct prestera_msg_cmd *cmd, size_t clen,
+ struct prestera_msg_ret *ret, size_t rlen,
+ int waitms)
+{
+ struct prestera_device *dev = sw->dev;
+ int err;
+
+ cmd->type = type;
+
+ err = dev->send_req(dev, cmd, clen, ret, rlen, waitms);
+ if (err)
+ return err;
+
+ if (ret->cmd.type != PRESTERA_CMD_TYPE_ACK)
+ return -EBADE;
+ if (ret->status != PRESTERA_CMD_ACK_OK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int prestera_cmd_ret(struct prestera_switch *sw,
+ enum prestera_cmd_type_t type,
+ struct prestera_msg_cmd *cmd, size_t clen,
+ struct prestera_msg_ret *ret, size_t rlen)
+{
+ return __prestera_cmd_ret(sw, type, cmd, clen, ret, rlen, 0);
+}
+
+static int prestera_cmd_ret_wait(struct prestera_switch *sw,
+ enum prestera_cmd_type_t type,
+ struct prestera_msg_cmd *cmd, size_t clen,
+ struct prestera_msg_ret *ret, size_t rlen,
+ int waitms)
+{
+ return __prestera_cmd_ret(sw, type, cmd, clen, ret, rlen, waitms);
+}
+
+static int prestera_cmd(struct prestera_switch *sw,
+ enum prestera_cmd_type_t type,
+ struct prestera_msg_cmd *cmd, size_t clen)
+{
+ struct prestera_msg_common_resp resp;
+
+ return prestera_cmd_ret(sw, type, cmd, clen, &resp.ret, sizeof(resp));
+}
+
+static int prestera_fw_parse_port_evt(void *msg, struct prestera_event *evt)
+{
+ struct prestera_msg_event_port *hw_evt = msg;
+
+ if (evt->id != PRESTERA_PORT_EVENT_STATE_CHANGED)
+ return -EINVAL;
+
+ evt->port_evt.data.oper_state = hw_evt->param.oper_state;
+ evt->port_evt.port_id = hw_evt->port_id;
+
+ return 0;
+}
+
+static int prestera_fw_parse_fdb_evt(void *msg, struct prestera_event *evt)
+{
+ struct prestera_msg_event_fdb *hw_evt = msg;
+
+ evt->fdb_evt.port_id = hw_evt->port_id;
+ evt->fdb_evt.vid = hw_evt->vid;
+
+ ether_addr_copy(evt->fdb_evt.data.mac, hw_evt->param.mac);
+
+ return 0;
+}
+
+static struct prestera_fw_evt_parser {
+ int (*func)(void *msg, struct prestera_event *evt);
+} fw_event_parsers[PRESTERA_EVENT_TYPE_MAX] = {
+ [PRESTERA_EVENT_TYPE_PORT] = { .func = prestera_fw_parse_port_evt },
+ [PRESTERA_EVENT_TYPE_FDB] = { .func = prestera_fw_parse_fdb_evt },
+};
+
+static struct prestera_fw_event_handler *
+__find_event_handler(const struct prestera_switch *sw,
+ enum prestera_event_type type)
+{
+ struct prestera_fw_event_handler *eh;
+
+ list_for_each_entry_rcu(eh, &sw->event_handlers, list) {
+ if (eh->type == type)
+ return eh;
+ }
+
+ return NULL;
+}
+
+static int prestera_find_event_handler(const struct prestera_switch *sw,
+ enum prestera_event_type type,
+ struct prestera_fw_event_handler *eh)
+{
+ struct prestera_fw_event_handler *tmp;
+ int err = 0;
+
+ rcu_read_lock();
+ tmp = __find_event_handler(sw, type);
+ if (tmp)
+ *eh = *tmp;
+ else
+ err = -ENOENT;
+ rcu_read_unlock();
+
+ return err;
+}
+
+static int prestera_evt_recv(struct prestera_device *dev, void *buf, size_t size)
+{
+ struct prestera_switch *sw = dev->priv;
+ struct prestera_msg_event *msg = buf;
+ struct prestera_fw_event_handler eh;
+ struct prestera_event evt;
+ int err;
+
+ if (msg->type >= PRESTERA_EVENT_TYPE_MAX)
+ return -EINVAL;
+ if (!fw_event_parsers[msg->type].func)
+ return -ENOENT;
+
+ err = prestera_find_event_handler(sw, msg->type, &eh);
+ if (err)
+ return err;
+
+ evt.id = msg->id;
+
+ err = fw_event_parsers[msg->type].func(buf, &evt);
+ if (err)
+ return err;
+
+ eh.func(sw, &evt, eh.arg);
+
+ return 0;
+}
+
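+/* Upcall from the underlying bus driver when a packet has been
+ * received; the event is forwarded to whichever handler is registered
+ * for PRESTERA_EVENT_TYPE_RXTX.
+ */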
+static void prestera_pkt_recv(struct prestera_device *dev)
+{
+ struct prestera_switch *sw = dev->priv;
+ struct prestera_fw_event_handler eh;
+ struct prestera_event ev;
+ int err;
+
+ ev.id = PRESTERA_RXTX_EVENT_RCV_PKT;
+
+ err = prestera_find_event_handler(sw, PRESTERA_EVENT_TYPE_RXTX, &eh);
+ if (err)
+ return;
+
+ eh.func(sw, &ev, eh.arg);
+}
+
+int prestera_hw_port_info_get(const struct prestera_port *port,
+ u32 *dev_id, u32 *hw_id, u16 *fp_id)
+{
+ struct prestera_msg_port_info_req req = {
+ .port = port->id,
+ };
+ struct prestera_msg_port_info_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_INFO_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *dev_id = resp.dev_id;
+ *hw_id = resp.hw_id;
+ *fp_id = resp.fp_id;
+
+ return 0;
+}
+
+int prestera_hw_switch_mac_set(struct prestera_switch *sw, const char *mac)
+{
+ struct prestera_msg_switch_attr_req req = {
+ .attr = PRESTERA_CMD_SWITCH_ATTR_MAC,
+ };
+
+ ether_addr_copy(req.param.mac, mac);
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_SWITCH_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_switch_init(struct prestera_switch *sw)
+{
+ struct prestera_msg_switch_init_resp resp;
+ struct prestera_msg_common_req req;
+ int err;
+
+ INIT_LIST_HEAD(&sw->event_handlers);
+
+ err = prestera_cmd_ret_wait(sw, PRESTERA_CMD_TYPE_SWITCH_INIT,
+ &req.cmd, sizeof(req),
+ &resp.ret, sizeof(resp),
+ PRESTERA_SWITCH_INIT_TIMEOUT_MS);
+ if (err)
+ return err;
+
+ sw->dev->recv_msg = prestera_evt_recv;
+ sw->dev->recv_pkt = prestera_pkt_recv;
+ sw->port_count = resp.port_count;
+ sw->mtu_min = PRESTERA_MIN_MTU;
+ sw->mtu_max = resp.mtu_max;
+ sw->id = resp.switch_id;
+
+ return 0;
+}
+
+void prestera_hw_switch_fini(struct prestera_switch *sw)
+{
+ WARN_ON(!list_empty(&sw->event_handlers));
+}
+
+int prestera_hw_switch_ageing_set(struct prestera_switch *sw, u32 ageing_ms)
+{
+ struct prestera_msg_switch_attr_req req = {
+ .attr = PRESTERA_CMD_SWITCH_ATTR_AGEING,
+ .param = {
+ .ageing_timeout_ms = ageing_ms,
+ },
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_SWITCH_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_state_set(const struct prestera_port *port,
+ bool admin_state)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_ADMIN_STATE,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .param = {
+ .admin_state = admin_state,
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_mtu_set(const struct prestera_port *port, u32 mtu)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_MTU,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .param = {
+ .mtu = mtu,
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_mac_set(const struct prestera_port *port, const char *mac)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_MAC,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+
+ ether_addr_copy(req.param.mac, mac);
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_accept_frm_type(struct prestera_port *port,
+ enum prestera_accept_frm_type type)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_ACCEPT_FRAME_TYPE,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .param = {
+ .accept_frm_type = type,
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_cap_get(const struct prestera_port *port,
+ struct prestera_port_caps *caps)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_CAPABILITY,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ caps->supp_link_modes = resp.param.cap.link_mode;
+ caps->transceiver = resp.param.cap.transceiver;
+ caps->supp_fec = resp.param.cap.fec;
+ caps->type = resp.param.cap.type;
+
+ return err;
+}
+
+int prestera_hw_port_remote_cap_get(const struct prestera_port *port,
+ u64 *link_mode_bitmap)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_REMOTE_CAPABILITY,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *link_mode_bitmap = resp.param.cap.link_mode;
+
+ return 0;
+}
+
+int prestera_hw_port_remote_fc_get(const struct prestera_port *port,
+ bool *pause, bool *asym_pause)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_REMOTE_FC,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ switch (resp.param.fc) {
+ case PRESTERA_FC_SYMMETRIC:
+ *pause = true;
+ *asym_pause = false;
+ break;
+ case PRESTERA_FC_ASYMMETRIC:
+ *pause = false;
+ *asym_pause = true;
+ break;
+ case PRESTERA_FC_SYMM_ASYMM:
+ *pause = true;
+ *asym_pause = true;
+ break;
+ default:
+ *pause = false;
+ *asym_pause = false;
+ }
+
+ return 0;
+}
+
+int prestera_hw_port_type_get(const struct prestera_port *port, u8 *type)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_TYPE,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *type = resp.param.type;
+
+ return 0;
+}
+
+int prestera_hw_port_fec_get(const struct prestera_port *port, u8 *fec)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_FEC,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *fec = resp.param.fec;
+
+ return 0;
+}
+
+int prestera_hw_port_fec_set(const struct prestera_port *port, u8 fec)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_FEC,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .param = {
+ .fec = fec,
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+static u8 prestera_hw_mdix_to_eth(u8 mode)
+{
+ switch (mode) {
+ case PRESTERA_PORT_TP_MDI:
+ return ETH_TP_MDI;
+ case PRESTERA_PORT_TP_MDIX:
+ return ETH_TP_MDI_X;
+ case PRESTERA_PORT_TP_AUTO:
+ return ETH_TP_MDI_AUTO;
+ default:
+ return ETH_TP_MDI_INVALID;
+ }
+}
+
+static u8 prestera_hw_mdix_from_eth(u8 mode)
+{
+ switch (mode) {
+ case ETH_TP_MDI:
+ return PRESTERA_PORT_TP_MDI;
+ case ETH_TP_MDI_X:
+ return PRESTERA_PORT_TP_MDIX;
+ case ETH_TP_MDI_AUTO:
+ return PRESTERA_PORT_TP_AUTO;
+ default:
+ return PRESTERA_PORT_TP_NA;
+ }
+}
+
+int prestera_hw_port_mdix_get(const struct prestera_port *port, u8 *status,
+ u8 *admin_mode)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_MDIX,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *status = prestera_hw_mdix_to_eth(resp.param.mdix.status);
+ *admin_mode = prestera_hw_mdix_to_eth(resp.param.mdix.admin_mode);
+
+ return 0;
+}
+
+int prestera_hw_port_mdix_set(const struct prestera_port *port, u8 mode)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_MDIX,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+
+ req.param.mdix.admin_mode = prestera_hw_mdix_from_eth(mode);
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_link_mode_set(const struct prestera_port *port, u32 mode)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_LINK_MODE,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .param = {
+ .link_mode = mode,
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_link_mode_get(const struct prestera_port *port, u32 *mode)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_LINK_MODE,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *mode = resp.param.link_mode;
+
+ return 0;
+}
+
+int prestera_hw_port_speed_get(const struct prestera_port *port, u32 *speed)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_SPEED,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *speed = resp.param.speed;
+
+ return 0;
+}
+
+int prestera_hw_port_autoneg_set(const struct prestera_port *port,
+ bool autoneg, u64 link_modes, u8 fec)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_AUTONEG,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .param = {
+ .autoneg = {
+ .link_mode = link_modes,
+ .enable = autoneg,
+ .fec = fec,
+ }
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_autoneg_restart(struct prestera_port *port)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_AUTONEG_RESTART,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_duplex_get(const struct prestera_port *port, u8 *duplex)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_DUPLEX,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *duplex = resp.param.duplex;
+
+ return 0;
+}
+
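+/* The FW returns port statistics as a flat array of u64 counters indexed by
+ * the PRESTERA_PORT_*_CNT constants; map them into the driver's stats
+ * structure field by field.
+ */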
+int prestera_hw_port_stats_get(const struct prestera_port *port,
+ struct prestera_port_stats *st)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_STATS,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_stats_resp resp;
+ u64 *hw = resp.stats;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ st->good_octets_received = hw[PRESTERA_PORT_GOOD_OCTETS_RCV_CNT];
+ st->bad_octets_received = hw[PRESTERA_PORT_BAD_OCTETS_RCV_CNT];
+ st->mac_trans_error = hw[PRESTERA_PORT_MAC_TRANSMIT_ERR_CNT];
+ st->broadcast_frames_received = hw[PRESTERA_PORT_BRDC_PKTS_RCV_CNT];
+ st->multicast_frames_received = hw[PRESTERA_PORT_MC_PKTS_RCV_CNT];
+ st->frames_64_octets = hw[PRESTERA_PORT_PKTS_64L_CNT];
+ st->frames_65_to_127_octets = hw[PRESTERA_PORT_PKTS_65TO127L_CNT];
+ st->frames_128_to_255_octets = hw[PRESTERA_PORT_PKTS_128TO255L_CNT];
+ st->frames_256_to_511_octets = hw[PRESTERA_PORT_PKTS_256TO511L_CNT];
+ st->frames_512_to_1023_octets = hw[PRESTERA_PORT_PKTS_512TO1023L_CNT];
+ st->frames_1024_to_max_octets = hw[PRESTERA_PORT_PKTS_1024TOMAXL_CNT];
+ st->excessive_collision = hw[PRESTERA_PORT_EXCESSIVE_COLLISIONS_CNT];
+ st->multicast_frames_sent = hw[PRESTERA_PORT_MC_PKTS_SENT_CNT];
+ st->broadcast_frames_sent = hw[PRESTERA_PORT_BRDC_PKTS_SENT_CNT];
+ st->fc_sent = hw[PRESTERA_PORT_FC_SENT_CNT];
+ st->fc_received = hw[PRESTERA_PORT_GOOD_FC_RCV_CNT];
+ st->buffer_overrun = hw[PRESTERA_PORT_DROP_EVENTS_CNT];
+ st->undersize = hw[PRESTERA_PORT_UNDERSIZE_PKTS_CNT];
+ st->fragments = hw[PRESTERA_PORT_FRAGMENTS_PKTS_CNT];
+ st->oversize = hw[PRESTERA_PORT_OVERSIZE_PKTS_CNT];
+ st->jabber = hw[PRESTERA_PORT_JABBER_PKTS_CNT];
+ st->rx_error_frame_received = hw[PRESTERA_PORT_MAC_RCV_ERROR_CNT];
+ st->bad_crc = hw[PRESTERA_PORT_BAD_CRC_CNT];
+ st->collisions = hw[PRESTERA_PORT_COLLISIONS_CNT];
+ st->late_collision = hw[PRESTERA_PORT_LATE_COLLISIONS_CNT];
+ st->unicast_frames_received = hw[PRESTERA_PORT_GOOD_UC_PKTS_RCV_CNT];
+ st->unicast_frames_sent = hw[PRESTERA_PORT_GOOD_UC_PKTS_SENT_CNT];
+ st->sent_multiple = hw[PRESTERA_PORT_MULTIPLE_PKTS_SENT_CNT];
+ st->sent_deferred = hw[PRESTERA_PORT_DEFERRED_PKTS_SENT_CNT];
+ st->good_octets_sent = hw[PRESTERA_PORT_GOOD_OCTETS_SENT_CNT];
+
+ return 0;
+}
+
+int prestera_hw_port_learning_set(struct prestera_port *port, bool enable)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_LEARNING,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .param = {
+ .learning = enable,
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_flood_set(struct prestera_port *port, bool flood)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_FLOOD,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .param = {
+ .flood = flood,
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_vlan_create(struct prestera_switch *sw, u16 vid)
+{
+ struct prestera_msg_vlan_req req = {
+ .vid = vid,
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_VLAN_CREATE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_vlan_delete(struct prestera_switch *sw, u16 vid)
+{
+ struct prestera_msg_vlan_req req = {
+ .vid = vid,
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_VLAN_DELETE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_vlan_port_set(struct prestera_port *port, u16 vid,
+ bool is_member, bool untagged)
+{
+ struct prestera_msg_vlan_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .vid = vid,
+ .is_member = is_member,
+ .is_tagged = !untagged,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_VLAN_PORT_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_vlan_port_vid_set(struct prestera_port *port, u16 vid)
+{
+ struct prestera_msg_vlan_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .vid = vid,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_VLAN_PVID_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_vlan_port_stp_set(struct prestera_port *port, u16 vid, u8 state)
+{
+ struct prestera_msg_stp_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .vid = vid,
+ .state = state,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_STP_PORT_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_fdb_add(struct prestera_port *port, const unsigned char *mac,
+ u16 vid, bool dynamic)
+{
+ struct prestera_msg_fdb_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .vid = vid,
+ .dynamic = dynamic,
+ };
+
+ ether_addr_copy(req.mac, mac);
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_ADD,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_fdb_del(struct prestera_port *port, const unsigned char *mac,
+ u16 vid)
+{
+ struct prestera_msg_fdb_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .vid = vid,
+ };
+
+ ether_addr_copy(req.mac, mac);
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_DELETE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_fdb_flush_port(struct prestera_port *port, u32 mode)
+{
+ struct prestera_msg_fdb_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .flush_mode = mode,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_fdb_flush_vlan(struct prestera_switch *sw, u16 vid, u32 mode)
+{
+ struct prestera_msg_fdb_req req = {
+ .vid = vid,
+ .flush_mode = mode,
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_FLUSH_VLAN,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_fdb_flush_port_vlan(struct prestera_port *port, u16 vid,
+ u32 mode)
+{
+ struct prestera_msg_fdb_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .vid = vid,
+ .flush_mode = mode,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT_VLAN,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_bridge_create(struct prestera_switch *sw, u16 *bridge_id)
+{
+ struct prestera_msg_bridge_resp resp;
+ struct prestera_msg_bridge_req req;
+ int err;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_BRIDGE_CREATE,
+ &req.cmd, sizeof(req),
+ &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *bridge_id = resp.bridge;
+
+ return 0;
+}
+
+int prestera_hw_bridge_delete(struct prestera_switch *sw, u16 bridge_id)
+{
+ struct prestera_msg_bridge_req req = {
+ .bridge = bridge_id,
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_BRIDGE_DELETE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_bridge_port_add(struct prestera_port *port, u16 bridge_id)
+{
+ struct prestera_msg_bridge_req req = {
+ .bridge = bridge_id,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_BRIDGE_PORT_ADD,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_bridge_port_delete(struct prestera_port *port, u16 bridge_id)
+{
+ struct prestera_msg_bridge_req req = {
+ .bridge = bridge_id,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_BRIDGE_PORT_DELETE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_rxtx_init(struct prestera_switch *sw,
+ struct prestera_rxtx_params *params)
+{
+ struct prestera_msg_rxtx_resp resp;
+ struct prestera_msg_rxtx_req req;
+ int err;
+
+ req.use_sdma = params->use_sdma;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_RXTX_INIT,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ params->map_addr = resp.map_addr;
+
+ return 0;
+}
+
+int prestera_hw_rxtx_port_init(struct prestera_port *port)
+{
+ struct prestera_msg_rxtx_port_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_RXTX_PORT_INIT,
+ &req.cmd, sizeof(req));
+}
+
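+/* Event handlers are kept on an RCU-protected list so the event path can
+ * walk it locklessly; at most one handler may be registered per event type.
+ */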
+int prestera_hw_event_handler_register(struct prestera_switch *sw,
+ enum prestera_event_type type,
+ prestera_event_cb_t fn,
+ void *arg)
+{
+ struct prestera_fw_event_handler *eh;
+
+ eh = __find_event_handler(sw, type);
+ if (eh)
+ return -EEXIST;
+
+ eh = kmalloc(sizeof(*eh), GFP_KERNEL);
+ if (!eh)
+ return -ENOMEM;
+
+ eh->type = type;
+ eh->func = fn;
+ eh->arg = arg;
+
+ INIT_LIST_HEAD(&eh->list);
+
+ list_add_rcu(&eh->list, &sw->event_handlers);
+
+ return 0;
+}
+
+void prestera_hw_event_handler_unregister(struct prestera_switch *sw,
+ enum prestera_event_type type,
+ prestera_event_cb_t fn)
+{
+ struct prestera_fw_event_handler *eh;
+
+ eh = __find_event_handler(sw, type);
+ if (!eh)
+ return;
+
+ list_del_rcu(&eh->list);
+ kfree_rcu(eh, rcu);
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
new file mode 100644
index 000000000000..b2b5ac95b4e3
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_HW_H_
+#define _PRESTERA_HW_H_
+
+#include <linux/types.h>
+
+enum prestera_accept_frm_type {
+ PRESTERA_ACCEPT_FRAME_TYPE_TAGGED,
+ PRESTERA_ACCEPT_FRAME_TYPE_UNTAGGED,
+ PRESTERA_ACCEPT_FRAME_TYPE_ALL,
+};
+
+enum prestera_fdb_flush_mode {
+ PRESTERA_FDB_FLUSH_MODE_DYNAMIC = BIT(0),
+ PRESTERA_FDB_FLUSH_MODE_STATIC = BIT(1),
+ PRESTERA_FDB_FLUSH_MODE_ALL = PRESTERA_FDB_FLUSH_MODE_DYNAMIC
+ | PRESTERA_FDB_FLUSH_MODE_STATIC,
+};
+
+enum {
+ PRESTERA_LINK_MODE_10baseT_Half,
+ PRESTERA_LINK_MODE_10baseT_Full,
+ PRESTERA_LINK_MODE_100baseT_Half,
+ PRESTERA_LINK_MODE_100baseT_Full,
+ PRESTERA_LINK_MODE_1000baseT_Half,
+ PRESTERA_LINK_MODE_1000baseT_Full,
+ PRESTERA_LINK_MODE_1000baseX_Full,
+ PRESTERA_LINK_MODE_1000baseKX_Full,
+ PRESTERA_LINK_MODE_2500baseX_Full,
+ PRESTERA_LINK_MODE_10GbaseKR_Full,
+ PRESTERA_LINK_MODE_10GbaseSR_Full,
+ PRESTERA_LINK_MODE_10GbaseLR_Full,
+ PRESTERA_LINK_MODE_20GbaseKR2_Full,
+ PRESTERA_LINK_MODE_25GbaseCR_Full,
+ PRESTERA_LINK_MODE_25GbaseKR_Full,
+ PRESTERA_LINK_MODE_25GbaseSR_Full,
+ PRESTERA_LINK_MODE_40GbaseKR4_Full,
+ PRESTERA_LINK_MODE_40GbaseCR4_Full,
+ PRESTERA_LINK_MODE_40GbaseSR4_Full,
+ PRESTERA_LINK_MODE_50GbaseCR2_Full,
+ PRESTERA_LINK_MODE_50GbaseKR2_Full,
+ PRESTERA_LINK_MODE_50GbaseSR2_Full,
+ PRESTERA_LINK_MODE_100GbaseKR4_Full,
+ PRESTERA_LINK_MODE_100GbaseSR4_Full,
+ PRESTERA_LINK_MODE_100GbaseCR4_Full,
+
+ PRESTERA_LINK_MODE_MAX
+};
+
+enum {
+ PRESTERA_PORT_TYPE_NONE,
+ PRESTERA_PORT_TYPE_TP,
+ PRESTERA_PORT_TYPE_AUI,
+ PRESTERA_PORT_TYPE_MII,
+ PRESTERA_PORT_TYPE_FIBRE,
+ PRESTERA_PORT_TYPE_BNC,
+ PRESTERA_PORT_TYPE_DA,
+ PRESTERA_PORT_TYPE_OTHER,
+
+ PRESTERA_PORT_TYPE_MAX
+};
+
+enum {
+ PRESTERA_PORT_TCVR_COPPER,
+ PRESTERA_PORT_TCVR_SFP,
+
+ PRESTERA_PORT_TCVR_MAX
+};
+
+enum {
+ PRESTERA_PORT_FEC_OFF,
+ PRESTERA_PORT_FEC_BASER,
+ PRESTERA_PORT_FEC_RS,
+
+ PRESTERA_PORT_FEC_MAX
+};
+
+enum {
+ PRESTERA_PORT_DUPLEX_HALF,
+ PRESTERA_PORT_DUPLEX_FULL,
+};
+
+enum {
+ PRESTERA_STP_DISABLED,
+ PRESTERA_STP_BLOCK_LISTEN,
+ PRESTERA_STP_LEARN,
+ PRESTERA_STP_FORWARD,
+};
+
+struct prestera_switch;
+struct prestera_port;
+struct prestera_port_stats;
+struct prestera_port_caps;
+enum prestera_event_type;
+struct prestera_event;
+
+typedef void (*prestera_event_cb_t)
+ (struct prestera_switch *sw, struct prestera_event *evt, void *arg);
+
+struct prestera_rxtx_params;
+
+/* Switch API */
+int prestera_hw_switch_init(struct prestera_switch *sw);
+void prestera_hw_switch_fini(struct prestera_switch *sw);
+int prestera_hw_switch_ageing_set(struct prestera_switch *sw, u32 ageing_ms);
+int prestera_hw_switch_mac_set(struct prestera_switch *sw, const char *mac);
+
+/* Port API */
+int prestera_hw_port_info_get(const struct prestera_port *port,
+ u32 *dev_id, u32 *hw_id, u16 *fp_id);
+int prestera_hw_port_state_set(const struct prestera_port *port,
+ bool admin_state);
+int prestera_hw_port_mtu_set(const struct prestera_port *port, u32 mtu);
+int prestera_hw_port_mtu_get(const struct prestera_port *port, u32 *mtu);
+int prestera_hw_port_mac_set(const struct prestera_port *port, const char *mac);
+int prestera_hw_port_mac_get(const struct prestera_port *port, char *mac);
+int prestera_hw_port_cap_get(const struct prestera_port *port,
+ struct prestera_port_caps *caps);
+int prestera_hw_port_remote_cap_get(const struct prestera_port *port,
+ u64 *link_mode_bitmap);
+int prestera_hw_port_remote_fc_get(const struct prestera_port *port,
+ bool *pause, bool *asym_pause);
+int prestera_hw_port_type_get(const struct prestera_port *port, u8 *type);
+int prestera_hw_port_fec_get(const struct prestera_port *port, u8 *fec);
+int prestera_hw_port_fec_set(const struct prestera_port *port, u8 fec);
+int prestera_hw_port_autoneg_set(const struct prestera_port *port,
+ bool autoneg, u64 link_modes, u8 fec);
+int prestera_hw_port_autoneg_restart(struct prestera_port *port);
+int prestera_hw_port_duplex_get(const struct prestera_port *port, u8 *duplex);
+int prestera_hw_port_stats_get(const struct prestera_port *port,
+ struct prestera_port_stats *stats);
+int prestera_hw_port_link_mode_set(const struct prestera_port *port, u32 mode);
+int prestera_hw_port_link_mode_get(const struct prestera_port *port, u32 *mode);
+int prestera_hw_port_mdix_get(const struct prestera_port *port, u8 *status,
+ u8 *admin_mode);
+int prestera_hw_port_mdix_set(const struct prestera_port *port, u8 mode);
+int prestera_hw_port_speed_get(const struct prestera_port *port, u32 *speed);
+int prestera_hw_port_learning_set(struct prestera_port *port, bool enable);
+int prestera_hw_port_flood_set(struct prestera_port *port, bool flood);
+int prestera_hw_port_accept_frm_type(struct prestera_port *port,
+ enum prestera_accept_frm_type type);
+/* Vlan API */
+int prestera_hw_vlan_create(struct prestera_switch *sw, u16 vid);
+int prestera_hw_vlan_delete(struct prestera_switch *sw, u16 vid);
+int prestera_hw_vlan_port_set(struct prestera_port *port, u16 vid,
+ bool is_member, bool untagged);
+int prestera_hw_vlan_port_vid_set(struct prestera_port *port, u16 vid);
+int prestera_hw_vlan_port_stp_set(struct prestera_port *port, u16 vid, u8 state);
+
+/* FDB API */
+int prestera_hw_fdb_add(struct prestera_port *port, const unsigned char *mac,
+ u16 vid, bool dynamic);
+int prestera_hw_fdb_del(struct prestera_port *port, const unsigned char *mac,
+ u16 vid);
+int prestera_hw_fdb_flush_port(struct prestera_port *port, u32 mode);
+int prestera_hw_fdb_flush_vlan(struct prestera_switch *sw, u16 vid, u32 mode);
+int prestera_hw_fdb_flush_port_vlan(struct prestera_port *port, u16 vid,
+ u32 mode);
+
+/* Bridge API */
+int prestera_hw_bridge_create(struct prestera_switch *sw, u16 *bridge_id);
+int prestera_hw_bridge_delete(struct prestera_switch *sw, u16 bridge_id);
+int prestera_hw_bridge_port_add(struct prestera_port *port, u16 bridge_id);
+int prestera_hw_bridge_port_delete(struct prestera_port *port, u16 bridge_id);
+
+/* Event handlers */
+int prestera_hw_event_handler_register(struct prestera_switch *sw,
+ enum prestera_event_type type,
+ prestera_event_cb_t fn,
+ void *arg);
+void prestera_hw_event_handler_unregister(struct prestera_switch *sw,
+ enum prestera_event_type type,
+ prestera_event_cb_t fn);
+
+/* RX/TX */
+int prestera_hw_rxtx_init(struct prestera_switch *sw,
+ struct prestera_rxtx_params *params);
+int prestera_hw_rxtx_port_init(struct prestera_port *port);
+
+#endif /* _PRESTERA_HW_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
new file mode 100644
index 000000000000..0f20e0788cce
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -0,0 +1,667 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/etherdevice.h>
+#include <linux/jiffies.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/netdev_features.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+
+#include "prestera.h"
+#include "prestera_hw.h"
+#include "prestera_rxtx.h"
+#include "prestera_devlink.h"
+#include "prestera_ethtool.h"
+#include "prestera_switchdev.h"
+
+#define PRESTERA_MTU_DEFAULT 1536
+
+#define PRESTERA_STATS_DELAY_MS 1000
+
+#define PRESTERA_MAC_ADDR_NUM_MAX 255
+
+static struct workqueue_struct *prestera_wq;
+
+int prestera_port_pvid_set(struct prestera_port *port, u16 vid)
+{
+ enum prestera_accept_frm_type frm_type;
+ int err;
+
+ frm_type = PRESTERA_ACCEPT_FRAME_TYPE_TAGGED;
+
+ if (vid) {
+ err = prestera_hw_vlan_port_vid_set(port, vid);
+ if (err)
+ return err;
+
+ frm_type = PRESTERA_ACCEPT_FRAME_TYPE_ALL;
+ }
+
+ err = prestera_hw_port_accept_frm_type(port, frm_type);
+ if (err) {
+ if (frm_type == PRESTERA_ACCEPT_FRAME_TYPE_ALL)
+ prestera_hw_vlan_port_vid_set(port, port->pvid);
+ return err;
+ }
+
+ port->pvid = vid;
+
+ return 0;
+}
+
+struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw,
+ u32 dev_id, u32 hw_id)
+{
+ struct prestera_port *port = NULL, *tmp;
+
+ read_lock(&sw->port_list_lock);
+ list_for_each_entry(tmp, &sw->port_list, list) {
+ if (tmp->dev_id == dev_id && tmp->hw_id == hw_id) {
+ port = tmp;
+ break;
+ }
+ }
+ read_unlock(&sw->port_list_lock);
+
+ return port;
+}
+
+struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id)
+{
+ struct prestera_port *port = NULL, *tmp;
+
+ read_lock(&sw->port_list_lock);
+ list_for_each_entry(tmp, &sw->port_list, list) {
+ if (tmp->id == id) {
+ port = tmp;
+ break;
+ }
+ }
+ read_unlock(&sw->port_list_lock);
+
+ return port;
+}
+
+static int prestera_port_open(struct net_device *dev)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ int err;
+
+ err = prestera_hw_port_state_set(port, true);
+ if (err)
+ return err;
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static int prestera_port_close(struct net_device *dev)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ int err;
+
+ netif_stop_queue(dev);
+
+ err = prestera_hw_port_state_set(port, false);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static netdev_tx_t prestera_port_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ return prestera_rxtx_xmit(netdev_priv(dev), skb);
+}
+
+static int prestera_is_valid_mac_addr(struct prestera_port *port, u8 *addr)
+{
+ if (!is_valid_ether_addr(addr))
+ return -EADDRNOTAVAIL;
+
+ /* the firmware requires that a port's MAC address share the first
+ * 5 bytes with the switch base MAC address
+ */
+ if (memcmp(port->sw->base_mac, addr, ETH_ALEN - 1))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int prestera_port_set_mac_address(struct net_device *dev, void *p)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ struct sockaddr *addr = p;
+ int err;
+
+ err = prestera_is_valid_mac_addr(port, addr->sa_data);
+ if (err)
+ return err;
+
+ err = prestera_hw_port_mac_set(port, addr->sa_data);
+ if (err)
+ return err;
+
+ ether_addr_copy(dev->dev_addr, addr->sa_data);
+
+ return 0;
+}
+
+static int prestera_port_change_mtu(struct net_device *dev, int mtu)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ int err;
+
+ err = prestera_hw_port_mtu_set(port, mtu);
+ if (err)
+ return err;
+
+ dev->mtu = mtu;
+
+ return 0;
+}
+
+static void prestera_port_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ struct prestera_port_stats *port_stats = &port->cached_hw_stats.stats;
+
+ stats->rx_packets = port_stats->broadcast_frames_received +
+ port_stats->multicast_frames_received +
+ port_stats->unicast_frames_received;
+
+ stats->tx_packets = port_stats->broadcast_frames_sent +
+ port_stats->multicast_frames_sent +
+ port_stats->unicast_frames_sent;
+
+ stats->rx_bytes = port_stats->good_octets_received;
+
+ stats->tx_bytes = port_stats->good_octets_sent;
+
+ stats->rx_errors = port_stats->rx_error_frame_received;
+ stats->tx_errors = port_stats->mac_trans_error;
+
+ stats->rx_dropped = port_stats->buffer_overrun;
+ stats->tx_dropped = 0;
+
+ stats->multicast = port_stats->multicast_frames_received;
+ stats->collisions = port_stats->excessive_collision;
+
+ stats->rx_crc_errors = port_stats->bad_crc;
+}
+
+static void prestera_port_get_hw_stats(struct prestera_port *port)
+{
+ prestera_hw_port_stats_get(port, &port->cached_hw_stats.stats);
+}
+
+static void prestera_port_stats_update(struct work_struct *work)
+{
+ struct prestera_port *port =
+ container_of(work, struct prestera_port,
+ cached_hw_stats.caching_dw.work);
+
+ prestera_port_get_hw_stats(port);
+
+ queue_delayed_work(prestera_wq, &port->cached_hw_stats.caching_dw,
+ msecs_to_jiffies(PRESTERA_STATS_DELAY_MS));
+}
+
+static const struct net_device_ops prestera_netdev_ops = {
+ .ndo_open = prestera_port_open,
+ .ndo_stop = prestera_port_close,
+ .ndo_start_xmit = prestera_port_xmit,
+ .ndo_change_mtu = prestera_port_change_mtu,
+ .ndo_get_stats64 = prestera_port_get_stats64,
+ .ndo_set_mac_address = prestera_port_set_mac_address,
+ .ndo_get_devlink_port = prestera_devlink_get_port,
+};
+
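+/* Apply autoneg configuration: the advertised link modes and FEC are first
+ * masked by the port capabilities, and the HW call is skipped when the
+ * effective configuration does not change.
+ */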
+int prestera_port_autoneg_set(struct prestera_port *port, bool enable,
+ u64 adver_link_modes, u8 adver_fec)
+{
+ bool refresh = false;
+ u64 link_modes;
+ int err;
+ u8 fec;
+
+ if (port->caps.type != PRESTERA_PORT_TYPE_TP)
+ return enable ? -EINVAL : 0;
+
+ if (!enable)
+ goto set_autoneg;
+
+ link_modes = port->caps.supp_link_modes & adver_link_modes;
+ fec = port->caps.supp_fec & adver_fec;
+
+ if (!link_modes && !fec)
+ return -EOPNOTSUPP;
+
+ if (link_modes && port->adver_link_modes != link_modes) {
+ port->adver_link_modes = link_modes;
+ refresh = true;
+ }
+
+ if (fec && port->adver_fec != fec) {
+ port->adver_fec = fec;
+ refresh = true;
+ }
+
+set_autoneg:
+ if (port->autoneg == enable && !refresh)
+ return 0;
+
+ err = prestera_hw_port_autoneg_set(port, enable, port->adver_link_modes,
+ port->adver_fec);
+ if (err)
+ return err;
+
+ port->autoneg = enable;
+
+ return 0;
+}
+
+static void prestera_port_list_add(struct prestera_port *port)
+{
+ write_lock(&port->sw->port_list_lock);
+ list_add(&port->list, &port->sw->port_list);
+ write_unlock(&port->sw->port_list_lock);
+}
+
+static void prestera_port_list_del(struct prestera_port *port)
+{
+ write_lock(&port->sw->port_list_lock);
+ list_del(&port->list);
+ write_unlock(&port->sw->port_list_lock);
+}
+
+static int prestera_port_create(struct prestera_switch *sw, u32 id)
+{
+ struct prestera_port *port;
+ struct net_device *dev;
+ int err;
+
+ dev = alloc_etherdev(sizeof(*port));
+ if (!dev)
+ return -ENOMEM;
+
+ port = netdev_priv(dev);
+
+ INIT_LIST_HEAD(&port->vlans_list);
+ port->pvid = PRESTERA_DEFAULT_VID;
+ port->dev = dev;
+ port->id = id;
+ port->sw = sw;
+
+ err = prestera_hw_port_info_get(port, &port->dev_id, &port->hw_id,
+ &port->fp_id);
+ if (err) {
+ dev_err(prestera_dev(sw), "Failed to get port(%u) info\n", id);
+ goto err_port_info_get;
+ }
+
+ err = prestera_devlink_port_register(port);
+ if (err)
+ goto err_dl_port_register;
+
+ dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->netdev_ops = &prestera_netdev_ops;
+ dev->ethtool_ops = &prestera_ethtool_ops;
+
+ netif_carrier_off(dev);
+
+ dev->mtu = min_t(unsigned int, sw->mtu_max, PRESTERA_MTU_DEFAULT);
+ dev->min_mtu = sw->mtu_min;
+ dev->max_mtu = sw->mtu_max;
+
+ err = prestera_hw_port_mtu_set(port, dev->mtu);
+ if (err) {
+ dev_err(prestera_dev(sw), "Failed to set port(%u) mtu(%d)\n",
+ id, dev->mtu);
+ goto err_port_init;
+ }
+
+ if (port->fp_id >= PRESTERA_MAC_ADDR_NUM_MAX) {
+ err = -EINVAL;
+ goto err_port_init;
+ }
+
+ /* the firmware requires that a port's MAC address consist of the
+ * first 5 bytes of the switch base MAC address
+ */
+ memcpy(dev->dev_addr, sw->base_mac, dev->addr_len - 1);
+ dev->dev_addr[dev->addr_len - 1] = port->fp_id;
+
+ err = prestera_hw_port_mac_set(port, dev->dev_addr);
+ if (err) {
+ dev_err(prestera_dev(sw), "Failed to set port(%u) mac addr\n", id);
+ goto err_port_init;
+ }
+
+ err = prestera_hw_port_cap_get(port, &port->caps);
+ if (err) {
+ dev_err(prestera_dev(sw), "Failed to get port(%u) caps\n", id);
+ goto err_port_init;
+ }
+
+ port->adver_fec = BIT(PRESTERA_PORT_FEC_OFF);
+ prestera_port_autoneg_set(port, true, port->caps.supp_link_modes,
+ port->caps.supp_fec);
+
+ err = prestera_hw_port_state_set(port, false);
+ if (err) {
+ dev_err(prestera_dev(sw), "Failed to set port(%u) down\n", id);
+ goto err_port_init;
+ }
+
+ err = prestera_rxtx_port_init(port);
+ if (err)
+ goto err_port_init;
+
+ INIT_DELAYED_WORK(&port->cached_hw_stats.caching_dw,
+ &prestera_port_stats_update);
+
+ prestera_port_list_add(port);
+
+ err = register_netdev(dev);
+ if (err)
+ goto err_register_netdev;
+
+ prestera_devlink_port_set(port);
+
+ return 0;
+
+err_register_netdev:
+ prestera_port_list_del(port);
+err_port_init:
+ prestera_devlink_port_unregister(port);
+err_dl_port_register:
+err_port_info_get:
+ free_netdev(dev);
+ return err;
+}
+
+static void prestera_port_destroy(struct prestera_port *port)
+{
+ struct net_device *dev = port->dev;
+
+ cancel_delayed_work_sync(&port->cached_hw_stats.caching_dw);
+ prestera_devlink_port_clear(port);
+ unregister_netdev(dev);
+ prestera_port_list_del(port);
+ prestera_devlink_port_unregister(port);
+ free_netdev(dev);
+}
+
+static void prestera_destroy_ports(struct prestera_switch *sw)
+{
+ struct prestera_port *port, *tmp;
+
+ list_for_each_entry_safe(port, tmp, &sw->port_list, list)
+ prestera_port_destroy(port);
+}
+
+static int prestera_create_ports(struct prestera_switch *sw)
+{
+ struct prestera_port *port, *tmp;
+ u32 port_idx;
+ int err;
+
+ for (port_idx = 0; port_idx < sw->port_count; port_idx++) {
+ err = prestera_port_create(sw, port_idx);
+ if (err)
+ goto err_port_create;
+ }
+
+ return 0;
+
+err_port_create:
+ list_for_each_entry_safe(port, tmp, &sw->port_list, list)
+ prestera_port_destroy(port);
+
+ return err;
+}
+
+static void prestera_port_handle_event(struct prestera_switch *sw,
+ struct prestera_event *evt, void *arg)
+{
+ struct delayed_work *caching_dw;
+ struct prestera_port *port;
+
+ port = prestera_find_port(sw, evt->port_evt.port_id);
+ if (!port || !port->dev)
+ return;
+
+ caching_dw = &port->cached_hw_stats.caching_dw;
+
+ if (evt->id == PRESTERA_PORT_EVENT_STATE_CHANGED) {
+ if (evt->port_evt.data.oper_state) {
+ netif_carrier_on(port->dev);
+ if (!delayed_work_pending(caching_dw))
+ queue_delayed_work(prestera_wq, caching_dw, 0);
+ } else {
+ netif_carrier_off(port->dev);
+ if (delayed_work_pending(caching_dw))
+ cancel_delayed_work(caching_dw);
+ }
+ }
+}
+
+static int prestera_event_handlers_register(struct prestera_switch *sw)
+{
+ return prestera_hw_event_handler_register(sw, PRESTERA_EVENT_TYPE_PORT,
+ prestera_port_handle_event,
+ NULL);
+}
+
+static void prestera_event_handlers_unregister(struct prestera_switch *sw)
+{
+ prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_PORT,
+ prestera_port_handle_event);
+}
+
+static int prestera_switch_set_base_mac_addr(struct prestera_switch *sw)
+{
+ struct device_node *base_mac_np;
+ struct device_node *np;
+ const char *base_mac;
+
+ np = of_find_compatible_node(NULL, NULL, "marvell,prestera");
+ base_mac_np = of_parse_phandle(np, "base-mac-provider", 0);
+ of_node_put(np);
+
+ base_mac = of_get_mac_address(base_mac_np);
+ of_node_put(base_mac_np);
+ if (!IS_ERR(base_mac))
+ ether_addr_copy(sw->base_mac, base_mac);
+
+ if (!is_valid_ether_addr(sw->base_mac)) {
+ eth_random_addr(sw->base_mac);
+ dev_info(prestera_dev(sw), "using random base mac address\n");
+ }
+
+ return prestera_hw_switch_mac_set(sw, sw->base_mac);
+}
+
+bool prestera_netdev_check(const struct net_device *dev)
+{
+ return dev->netdev_ops == &prestera_netdev_ops;
+}
+
+static int prestera_lower_dev_walk(struct net_device *dev,
+ struct netdev_nested_priv *priv)
+{
+ struct prestera_port **pport = (struct prestera_port **)priv->data;
+
+ if (prestera_netdev_check(dev)) {
+ *pport = netdev_priv(dev);
+ return 1;
+ }
+
+ return 0;
+}
+
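+/* Resolve a prestera port from an arbitrary netdev: either the netdev is
+ * one of ours, or its lower devices (e.g. under a bridge) are walked to
+ * find one.
+ */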
+struct prestera_port *prestera_port_dev_lower_find(struct net_device *dev)
+{
+ struct prestera_port *port = NULL;
+ struct netdev_nested_priv priv = {
+ .data = (void *)&port,
+ };
+
+ if (prestera_netdev_check(dev))
+ return netdev_priv(dev);
+
+ netdev_walk_all_lower_dev(dev, prestera_lower_dev_walk, &priv);
+
+ return port;
+}
+
+static int prestera_netdev_port_event(struct net_device *dev,
+ unsigned long event, void *ptr)
+{
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ case NETDEV_CHANGEUPPER:
+ return prestera_bridge_port_event(dev, event, ptr);
+ default:
+ return 0;
+ }
+}
+
+static int prestera_netdev_event_handler(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ int err = 0;
+
+ if (prestera_netdev_check(dev))
+ err = prestera_netdev_port_event(dev, event, ptr);
+
+ return notifier_from_errno(err);
+}
+
+static int prestera_netdev_event_handler_register(struct prestera_switch *sw)
+{
+ sw->netdev_nb.notifier_call = prestera_netdev_event_handler;
+
+ return register_netdevice_notifier(&sw->netdev_nb);
+}
+
+static void prestera_netdev_event_handler_unregister(struct prestera_switch *sw)
+{
+ unregister_netdevice_notifier(&sw->netdev_nb);
+}
+
+static int prestera_switch_init(struct prestera_switch *sw)
+{
+ int err;
+
+ err = prestera_hw_switch_init(sw);
+ if (err) {
+ dev_err(prestera_dev(sw), "Failed to init Switch device\n");
+ return err;
+ }
+
+ rwlock_init(&sw->port_list_lock);
+ INIT_LIST_HEAD(&sw->port_list);
+
+ err = prestera_switch_set_base_mac_addr(sw);
+ if (err)
+ return err;
+
+ err = prestera_netdev_event_handler_register(sw);
+ if (err)
+ return err;
+
+ err = prestera_switchdev_init(sw);
+ if (err)
+ goto err_swdev_register;
+
+ err = prestera_rxtx_switch_init(sw);
+ if (err)
+ goto err_rxtx_register;
+
+ err = prestera_event_handlers_register(sw);
+ if (err)
+ goto err_handlers_register;
+
+ err = prestera_devlink_register(sw);
+ if (err)
+ goto err_dl_register;
+
+ err = prestera_create_ports(sw);
+ if (err)
+ goto err_ports_create;
+
+ return 0;
+
+err_ports_create:
+ prestera_devlink_unregister(sw);
+err_dl_register:
+ prestera_event_handlers_unregister(sw);
+err_handlers_register:
+ prestera_rxtx_switch_fini(sw);
+err_rxtx_register:
+ prestera_switchdev_fini(sw);
+err_swdev_register:
+ prestera_netdev_event_handler_unregister(sw);
+ prestera_hw_switch_fini(sw);
+
+ return err;
+}
+
+static void prestera_switch_fini(struct prestera_switch *sw)
+{
+ prestera_destroy_ports(sw);
+ prestera_devlink_unregister(sw);
+ prestera_event_handlers_unregister(sw);
+ prestera_rxtx_switch_fini(sw);
+ prestera_switchdev_fini(sw);
+ prestera_netdev_event_handler_unregister(sw);
+ prestera_hw_switch_fini(sw);
+}
+
+int prestera_device_register(struct prestera_device *dev)
+{
+ struct prestera_switch *sw;
+ int err;
+
+ sw = prestera_devlink_alloc();
+ if (!sw)
+ return -ENOMEM;
+
+ dev->priv = sw;
+ sw->dev = dev;
+
+ err = prestera_switch_init(sw);
+ if (err) {
+ prestera_devlink_free(sw);
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(prestera_device_register);
+
+void prestera_device_unregister(struct prestera_device *dev)
+{
+ struct prestera_switch *sw = dev->priv;
+
+ prestera_switch_fini(sw);
+ prestera_devlink_free(sw);
+}
+EXPORT_SYMBOL(prestera_device_unregister);
+
+static int __init prestera_module_init(void)
+{
+ prestera_wq = alloc_workqueue("prestera", 0, 0);
+ if (!prestera_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void __exit prestera_module_exit(void)
+{
+ destroy_workqueue(prestera_wq);
+}
+
+module_init(prestera_module_init);
+module_exit(prestera_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Marvell Prestera switch driver");
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
new file mode 100644
index 000000000000..1b97adae542e
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
@@ -0,0 +1,769 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/circ_buf.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "prestera.h"
+
+#define PRESTERA_MSG_MAX_SIZE 1500
+
+#define PRESTERA_SUPP_FW_MAJ_VER 2
+#define PRESTERA_SUPP_FW_MIN_VER 0
+
+#define PRESTERA_FW_PATH_FMT "mrvl/prestera/mvsw_prestera_fw-v%u.%u.img"
+
+#define PRESTERA_FW_HDR_MAGIC 0x351D9D06
+#define PRESTERA_FW_DL_TIMEOUT_MS 50000
+#define PRESTERA_FW_BLK_SZ 1024
+
+#define PRESTERA_FW_VER_MAJ_MUL 1000000
+#define PRESTERA_FW_VER_MIN_MUL 1000
+
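+/* The FW version is packed into a single integer as
+ * maj * 1000000 + min * 1000 + patch, e.g. 2001003 decodes to 2.1.3;
+ * the macros below extract the individual fields from that value.
+ */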
+#define PRESTERA_FW_VER_MAJ(v) ((v) / PRESTERA_FW_VER_MAJ_MUL)
+
+#define PRESTERA_FW_VER_MIN(v) \
+ (((v) - (PRESTERA_FW_VER_MAJ(v) * PRESTERA_FW_VER_MAJ_MUL)) / \
+ PRESTERA_FW_VER_MIN_MUL)
+
+#define PRESTERA_FW_VER_PATCH(v) \
+ ((v) - (PRESTERA_FW_VER_MAJ(v) * PRESTERA_FW_VER_MAJ_MUL) - \
+ (PRESTERA_FW_VER_MIN(v) * PRESTERA_FW_VER_MIN_MUL))
+
+enum prestera_pci_bar_t {
+ PRESTERA_PCI_BAR_FW = 2,
+ PRESTERA_PCI_BAR_PP = 4,
+};
+
+struct prestera_fw_header {
+ __be32 magic_number;
+ __be32 version_value;
+ u8 reserved[8];
+};
+
+struct prestera_ldr_regs {
+ u32 ldr_ready;
+ u32 pad1;
+
+ u32 ldr_img_size;
+ u32 ldr_ctl_flags;
+
+ u32 ldr_buf_offs;
+ u32 ldr_buf_size;
+
+ u32 ldr_buf_rd;
+ u32 pad2;
+ u32 ldr_buf_wr;
+
+ u32 ldr_status;
+};
+
+#define PRESTERA_LDR_REG_OFFSET(f) offsetof(struct prestera_ldr_regs, f)
+
+#define PRESTERA_LDR_READY_MAGIC 0xf00dfeed
+
+#define PRESTERA_LDR_STATUS_IMG_DL BIT(0)
+#define PRESTERA_LDR_STATUS_START_FW BIT(1)
+#define PRESTERA_LDR_STATUS_INVALID_IMG BIT(2)
+#define PRESTERA_LDR_STATUS_NOMEM BIT(3)
+
+#define PRESTERA_LDR_REG_BASE(fw) ((fw)->ldr_regs)
+#define PRESTERA_LDR_REG_ADDR(fw, reg) (PRESTERA_LDR_REG_BASE(fw) + (reg))
+
+/* fw loader registers */
+#define PRESTERA_LDR_READY_REG PRESTERA_LDR_REG_OFFSET(ldr_ready)
+#define PRESTERA_LDR_IMG_SIZE_REG PRESTERA_LDR_REG_OFFSET(ldr_img_size)
+#define PRESTERA_LDR_CTL_REG PRESTERA_LDR_REG_OFFSET(ldr_ctl_flags)
+#define PRESTERA_LDR_BUF_SIZE_REG PRESTERA_LDR_REG_OFFSET(ldr_buf_size)
+#define PRESTERA_LDR_BUF_OFFS_REG PRESTERA_LDR_REG_OFFSET(ldr_buf_offs)
+#define PRESTERA_LDR_BUF_RD_REG PRESTERA_LDR_REG_OFFSET(ldr_buf_rd)
+#define PRESTERA_LDR_BUF_WR_REG PRESTERA_LDR_REG_OFFSET(ldr_buf_wr)
+#define PRESTERA_LDR_STATUS_REG PRESTERA_LDR_REG_OFFSET(ldr_status)
+
+#define PRESTERA_LDR_CTL_DL_START BIT(0)
+
+#define PRESTERA_EVT_QNUM_MAX 4
+
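+/* Per-queue event ring registers: the FW produces events at wr_idx and the
+ * driver consumes them at rd_idx, while offs/len locate the ring inside the
+ * event buffer region advertised by the FW.
+ */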
+struct prestera_fw_evtq_regs {
+ u32 rd_idx;
+ u32 pad1;
+ u32 wr_idx;
+ u32 pad2;
+ u32 offs;
+ u32 len;
+};
+
+struct prestera_fw_regs {
+ u32 fw_ready;
+ u32 pad;
+ u32 cmd_offs;
+ u32 cmd_len;
+ u32 evt_offs;
+ u32 evt_qnum;
+
+ u32 cmd_req_ctl;
+ u32 cmd_req_len;
+ u32 cmd_rcv_ctl;
+ u32 cmd_rcv_len;
+
+ u32 fw_status;
+ u32 rx_status;
+
+ struct prestera_fw_evtq_regs evtq_list[PRESTERA_EVT_QNUM_MAX];
+};
+
+#define PRESTERA_FW_REG_OFFSET(f) offsetof(struct prestera_fw_regs, f)
+
+#define PRESTERA_FW_READY_MAGIC 0xcafebabe
+
+/* fw registers */
+#define PRESTERA_FW_READY_REG PRESTERA_FW_REG_OFFSET(fw_ready)
+
+#define PRESTERA_CMD_BUF_OFFS_REG PRESTERA_FW_REG_OFFSET(cmd_offs)
+#define PRESTERA_CMD_BUF_LEN_REG PRESTERA_FW_REG_OFFSET(cmd_len)
+#define PRESTERA_EVT_BUF_OFFS_REG PRESTERA_FW_REG_OFFSET(evt_offs)
+#define PRESTERA_EVT_QNUM_REG PRESTERA_FW_REG_OFFSET(evt_qnum)
+
+#define PRESTERA_CMD_REQ_CTL_REG PRESTERA_FW_REG_OFFSET(cmd_req_ctl)
+#define PRESTERA_CMD_REQ_LEN_REG PRESTERA_FW_REG_OFFSET(cmd_req_len)
+
+#define PRESTERA_CMD_RCV_CTL_REG PRESTERA_FW_REG_OFFSET(cmd_rcv_ctl)
+#define PRESTERA_CMD_RCV_LEN_REG PRESTERA_FW_REG_OFFSET(cmd_rcv_len)
+#define PRESTERA_FW_STATUS_REG PRESTERA_FW_REG_OFFSET(fw_status)
+#define PRESTERA_RX_STATUS_REG PRESTERA_FW_REG_OFFSET(rx_status)
+
+/* PRESTERA_CMD_REQ_CTL_REG flags */
+#define PRESTERA_CMD_F_REQ_SENT BIT(0)
+#define PRESTERA_CMD_F_REPL_RCVD BIT(1)
+
+/* PRESTERA_CMD_RCV_CTL_REG flags */
+#define PRESTERA_CMD_F_REPL_SENT BIT(0)
+
+#define PRESTERA_EVTQ_REG_OFFSET(q, f) \
+ (PRESTERA_FW_REG_OFFSET(evtq_list) + \
+ (q) * sizeof(struct prestera_fw_evtq_regs) + \
+ offsetof(struct prestera_fw_evtq_regs, f))
+
+#define PRESTERA_EVTQ_RD_IDX_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, rd_idx)
+#define PRESTERA_EVTQ_WR_IDX_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, wr_idx)
+#define PRESTERA_EVTQ_OFFS_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, offs)
+#define PRESTERA_EVTQ_LEN_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, len)
+
+#define PRESTERA_FW_REG_BASE(fw) ((fw)->dev.ctl_regs)
+#define PRESTERA_FW_REG_ADDR(fw, reg) (PRESTERA_FW_REG_BASE(fw) + (reg))
+
+#define PRESTERA_FW_CMD_DEFAULT_WAIT_MS 30000
+#define PRESTERA_FW_READY_WAIT_MS 20000
+
+struct prestera_fw_evtq {
+ u8 __iomem *addr;
+ size_t len;
+};
+
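+/* Driver-side state of the PCI FW interface: the loader ring used to
+ * download the FW image, the command mailbox, and the FW event queues that
+ * are drained from evt_work.
+ */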
+struct prestera_fw {
+ struct workqueue_struct *wq;
+ struct prestera_device dev;
+ u8 __iomem *ldr_regs;
+ u8 __iomem *ldr_ring_buf;
+ u32 ldr_buf_len;
+ u32 ldr_wr_idx;
+ struct mutex cmd_mtx; /* serialize access to dev->send_req */
+ size_t cmd_mbox_len;
+ u8 __iomem *cmd_mbox;
+ struct prestera_fw_evtq evt_queue[PRESTERA_EVT_QNUM_MAX];
+ u8 evt_qnum;
+ struct work_struct evt_work;
+ u8 __iomem *evt_buf;
+ u8 *evt_msg;
+};
+
+static int prestera_fw_load(struct prestera_fw *fw);
+
+static void prestera_fw_write(struct prestera_fw *fw, u32 reg, u32 val)
+{
+ writel(val, PRESTERA_FW_REG_ADDR(fw, reg));
+}
+
+static u32 prestera_fw_read(struct prestera_fw *fw, u32 reg)
+{
+ return readl(PRESTERA_FW_REG_ADDR(fw, reg));
+}
+
+static u32 prestera_fw_evtq_len(struct prestera_fw *fw, u8 qid)
+{
+ return fw->evt_queue[qid].len;
+}
+
+static u32 prestera_fw_evtq_avail(struct prestera_fw *fw, u8 qid)
+{
+ u32 wr_idx = prestera_fw_read(fw, PRESTERA_EVTQ_WR_IDX_REG(qid));
+ u32 rd_idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid));
+
+ return CIRC_CNT(wr_idx, rd_idx, prestera_fw_evtq_len(fw, qid));
+}
+
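+/* Ring indices wrap by masking with (len - 1), which assumes the FW always
+ * reports a power-of-two event queue length.
+ */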
+static void prestera_fw_evtq_rd_set(struct prestera_fw *fw,
+ u8 qid, u32 idx)
+{
+ u32 rd_idx = idx & (prestera_fw_evtq_len(fw, qid) - 1);
+
+ prestera_fw_write(fw, PRESTERA_EVTQ_RD_IDX_REG(qid), rd_idx);
+}
+
+static u8 __iomem *prestera_fw_evtq_buf(struct prestera_fw *fw, u8 qid)
+{
+ return fw->evt_queue[qid].addr;
+}
+
+static u32 prestera_fw_evtq_read32(struct prestera_fw *fw, u8 qid)
+{
+ u32 rd_idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid));
+ u32 val;
+
+ val = readl(prestera_fw_evtq_buf(fw, qid) + rd_idx);
+ prestera_fw_evtq_rd_set(fw, qid, rd_idx + 4);
+ return val;
+}
+
+static ssize_t prestera_fw_evtq_read_buf(struct prestera_fw *fw,
+ u8 qid, void *buf, size_t len)
+{
+ u32 idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid));
+ u8 __iomem *evtq_addr = prestera_fw_evtq_buf(fw, qid);
+ u32 *buf32 = buf;
+ int i;
+
+ for (i = 0; i < len / 4; buf32++, i++) {
+ *buf32 = readl_relaxed(evtq_addr + idx);
+ idx = (idx + 4) & (prestera_fw_evtq_len(fw, qid) - 1);
+ }
+
+ prestera_fw_evtq_rd_set(fw, qid, idx);
+
+ return i;
+}
+
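+/* Pick the first queue holding at least one 32-bit word (the message length
+ * header); a return of PRESTERA_EVT_QNUM_MAX means no queue has data.
+ */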
+static u8 prestera_fw_evtq_pick(struct prestera_fw *fw)
+{
+ int qid;
+
+ for (qid = 0; qid < fw->evt_qnum; qid++) {
+ if (prestera_fw_evtq_avail(fw, qid) >= 4)
+ return qid;
+ }
+
+ return PRESTERA_EVT_QNUM_MAX;
+}
+
+static void prestera_fw_evt_work_fn(struct work_struct *work)
+{
+ struct prestera_fw *fw;
+ void *msg;
+ u8 qid;
+
+ fw = container_of(work, struct prestera_fw, evt_work);
+ msg = fw->evt_msg;
+
+ while ((qid = prestera_fw_evtq_pick(fw)) < PRESTERA_EVT_QNUM_MAX) {
+ u32 idx;
+ u32 len;
+
+ len = prestera_fw_evtq_read32(fw, qid);
+ idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid));
+
+ WARN_ON(prestera_fw_evtq_avail(fw, qid) < len);
+
+ if (WARN_ON(len > PRESTERA_MSG_MAX_SIZE)) {
+ prestera_fw_evtq_rd_set(fw, qid, idx + len);
+ continue;
+ }
+
+ prestera_fw_evtq_read_buf(fw, qid, msg, len);
+
+ if (fw->dev.recv_msg)
+ fw->dev.recv_msg(&fw->dev, msg, len);
+ }
+}
+
+static int prestera_fw_wait_reg32(struct prestera_fw *fw, u32 reg, u32 cmp,
+ unsigned int waitms)
+{
+ u8 __iomem *addr = PRESTERA_FW_REG_ADDR(fw, reg);
+ u32 val;
+
+ return readl_poll_timeout(addr, val, cmp == val,
+ 1 * USEC_PER_MSEC, waitms * USEC_PER_MSEC);
+}
+
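+/* Command mailbox handshake: the request is copied into the shared mailbox
+ * and REQ_SENT is raised, then RCV_CTL is polled until the FW raises
+ * REPL_SENT. The reply is placed in the mailbox right after the request,
+ * and writing REPL_RCVD hands the mailbox back to the FW.
+ */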
+static int prestera_fw_cmd_send(struct prestera_fw *fw,
+ void *in_msg, size_t in_size,
+ void *out_msg, size_t out_size,
+ unsigned int waitms)
+{
+ u32 ret_size;
+ int err;
+
+ if (!waitms)
+ waitms = PRESTERA_FW_CMD_DEFAULT_WAIT_MS;
+
+ if (ALIGN(in_size, 4) > fw->cmd_mbox_len)
+ return -EMSGSIZE;
+
+ /* wait for the previous FW reply to be consumed */
+ err = prestera_fw_wait_reg32(fw, PRESTERA_CMD_RCV_CTL_REG, 0, 30);
+ if (err) {
+ dev_err(fw->dev.dev, "timed out waiting for the previous FW reply\n");
+ return err;
+ }
+
+ prestera_fw_write(fw, PRESTERA_CMD_REQ_LEN_REG, in_size);
+ memcpy_toio(fw->cmd_mbox, in_msg, in_size);
+
+ prestera_fw_write(fw, PRESTERA_CMD_REQ_CTL_REG, PRESTERA_CMD_F_REQ_SENT);
+
+ /* wait for reply from FW */
+ err = prestera_fw_wait_reg32(fw, PRESTERA_CMD_RCV_CTL_REG,
+ PRESTERA_CMD_F_REPL_SENT, waitms);
+ if (err) {
+ dev_err(fw->dev.dev, "reply from FW is timed out\n");
+ goto cmd_exit;
+ }
+
+ ret_size = prestera_fw_read(fw, PRESTERA_CMD_RCV_LEN_REG);
+ if (ret_size > out_size) {
+ dev_err(fw->dev.dev, "ret_size (%u) > out_len(%zu)\n",
+ ret_size, out_size);
+ err = -EMSGSIZE;
+ goto cmd_exit;
+ }
+
+ memcpy_fromio(out_msg, fw->cmd_mbox + in_size, ret_size);
+
+cmd_exit:
+ prestera_fw_write(fw, PRESTERA_CMD_REQ_CTL_REG, PRESTERA_CMD_F_REPL_RCVD);
+ return err;
+}
+
+static int prestera_fw_send_req(struct prestera_device *dev,
+ void *in_msg, size_t in_size, void *out_msg,
+ size_t out_size, unsigned int waitms)
+{
+ struct prestera_fw *fw;
+ int ret;
+
+ fw = container_of(dev, struct prestera_fw, dev);
+
+ mutex_lock(&fw->cmd_mtx);
+ ret = prestera_fw_cmd_send(fw, in_msg, in_size, out_msg, out_size, waitms);
+ mutex_unlock(&fw->cmd_mtx);
+
+ return ret;
+}
+
+static int prestera_fw_init(struct prestera_fw *fw)
+{
+ u8 __iomem *base;
+ int err;
+ u8 qid;
+
+ fw->dev.send_req = prestera_fw_send_req;
+ fw->ldr_regs = fw->dev.ctl_regs;
+
+ err = prestera_fw_load(fw);
+ if (err)
+ return err;
+
+ err = prestera_fw_wait_reg32(fw, PRESTERA_FW_READY_REG,
+ PRESTERA_FW_READY_MAGIC,
+ PRESTERA_FW_READY_WAIT_MS);
+ if (err) {
+ dev_err(fw->dev.dev, "FW failed to start\n");
+ return err;
+ }
+
+ base = fw->dev.ctl_regs;
+
+ fw->cmd_mbox = base + prestera_fw_read(fw, PRESTERA_CMD_BUF_OFFS_REG);
+ fw->cmd_mbox_len = prestera_fw_read(fw, PRESTERA_CMD_BUF_LEN_REG);
+ mutex_init(&fw->cmd_mtx);
+
+ fw->evt_buf = base + prestera_fw_read(fw, PRESTERA_EVT_BUF_OFFS_REG);
+ fw->evt_qnum = prestera_fw_read(fw, PRESTERA_EVT_QNUM_REG);
+ fw->evt_msg = kmalloc(PRESTERA_MSG_MAX_SIZE, GFP_KERNEL);
+ if (!fw->evt_msg)
+ return -ENOMEM;
+
+ for (qid = 0; qid < fw->evt_qnum; qid++) {
+ u32 offs = prestera_fw_read(fw, PRESTERA_EVTQ_OFFS_REG(qid));
+ struct prestera_fw_evtq *evtq = &fw->evt_queue[qid];
+
+ evtq->len = prestera_fw_read(fw, PRESTERA_EVTQ_LEN_REG(qid));
+ evtq->addr = fw->evt_buf + offs;
+ }
+
+ return 0;
+}
+
+static void prestera_fw_uninit(struct prestera_fw *fw)
+{
+ kfree(fw->evt_msg);
+}
+
+static irqreturn_t prestera_pci_irq_handler(int irq, void *dev_id)
+{
+ struct prestera_fw *fw = dev_id;
+
+ if (prestera_fw_read(fw, PRESTERA_RX_STATUS_REG)) {
+ prestera_fw_write(fw, PRESTERA_RX_STATUS_REG, 0);
+
+ if (fw->dev.recv_pkt)
+ fw->dev.recv_pkt(&fw->dev);
+ }
+
+ queue_work(fw->wq, &fw->evt_work);
+
+ return IRQ_HANDLED;
+}
+
+static void prestera_ldr_write(struct prestera_fw *fw, u32 reg, u32 val)
+{
+ writel(val, PRESTERA_LDR_REG_ADDR(fw, reg));
+}
+
+static u32 prestera_ldr_read(struct prestera_fw *fw, u32 reg)
+{
+ return readl(PRESTERA_LDR_REG_ADDR(fw, reg));
+}
+
+static int prestera_ldr_wait_reg32(struct prestera_fw *fw,
+ u32 reg, u32 cmp, unsigned int waitms)
+{
+ u8 __iomem *addr = PRESTERA_LDR_REG_ADDR(fw, reg);
+ u32 val;
+
+ return readl_poll_timeout(addr, val, cmp == val,
+ 10 * USEC_PER_MSEC, waitms * USEC_PER_MSEC);
+}
+
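+/* Wait until the loader ring has at least @len bytes of free space, as seen
+ * from the read index the loader exposes in PRESTERA_LDR_BUF_RD_REG.
+ */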
+static u32 prestera_ldr_wait_buf(struct prestera_fw *fw, size_t len)
+{
+ u8 __iomem *addr = PRESTERA_LDR_REG_ADDR(fw, PRESTERA_LDR_BUF_RD_REG);
+ u32 buf_len = fw->ldr_buf_len;
+ u32 wr_idx = fw->ldr_wr_idx;
+ u32 rd_idx;
+
+ return readl_poll_timeout(addr, rd_idx,
+ CIRC_SPACE(wr_idx, rd_idx, buf_len) >= len,
+ 1 * USEC_PER_MSEC, 100 * USEC_PER_MSEC);
+}
+
+static int prestera_ldr_wait_dl_finish(struct prestera_fw *fw)
+{
+ u8 __iomem *addr = PRESTERA_LDR_REG_ADDR(fw, PRESTERA_LDR_STATUS_REG);
+ unsigned long mask = ~(PRESTERA_LDR_STATUS_IMG_DL);
+ u32 val;
+ int err;
+
+ err = readl_poll_timeout(addr, val, val & mask, 10 * USEC_PER_MSEC,
+ PRESTERA_FW_DL_TIMEOUT_MS * USEC_PER_MSEC);
+ if (err) {
+ dev_err(fw->dev.dev, "Timeout to load FW img [state=%d]",
+ prestera_ldr_read(fw, PRESTERA_LDR_STATUS_REG));
+ return err;
+ }
+
+ return 0;
+}
+
+static void prestera_ldr_wr_idx_move(struct prestera_fw *fw, unsigned int n)
+{
+ fw->ldr_wr_idx = (fw->ldr_wr_idx + (n)) & (fw->ldr_buf_len - 1);
+}
+
+static void prestera_ldr_wr_idx_commit(struct prestera_fw *fw)
+{
+ prestera_ldr_write(fw, PRESTERA_LDR_BUF_WR_REG, fw->ldr_wr_idx);
+}
+
+static u8 __iomem *prestera_ldr_wr_ptr(struct prestera_fw *fw)
+{
+ return fw->ldr_ring_buf + fw->ldr_wr_idx;
+}
+
+static int prestera_ldr_send(struct prestera_fw *fw, const u8 *buf, size_t len)
+{
+ int err;
+ int i;
+
+ err = prestera_ldr_wait_buf(fw, len);
+ if (err) {
+ dev_err(fw->dev.dev, "failed wait for sending firmware\n");
+ return err;
+ }
+
+ for (i = 0; i < len; i += 4) {
+ writel_relaxed(*(u32 *)(buf + i), prestera_ldr_wr_ptr(fw));
+ prestera_ldr_wr_idx_move(fw, 4);
+ }
+
+ prestera_ldr_wr_idx_commit(fw);
+ return 0;
+}
+
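+/* Stream the FW image through the loader ring in PRESTERA_FW_BLK_SZ chunks
+ * (plus a smaller tail, if any), then wait for the loader to finish the
+ * download and report its final status.
+ */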
+static int prestera_ldr_fw_send(struct prestera_fw *fw,
+ const char *img, u32 fw_size)
+{
+ u32 status;
+ u32 pos;
+ int err;
+
+ err = prestera_ldr_wait_reg32(fw, PRESTERA_LDR_STATUS_REG,
+ PRESTERA_LDR_STATUS_IMG_DL,
+ 5 * MSEC_PER_SEC);
+ if (err) {
+ dev_err(fw->dev.dev, "Loader is not ready to load image\n");
+ return err;
+ }
+
+ for (pos = 0; pos < fw_size; pos += PRESTERA_FW_BLK_SZ) {
+ if (pos + PRESTERA_FW_BLK_SZ > fw_size)
+ break;
+
+ err = prestera_ldr_send(fw, img + pos, PRESTERA_FW_BLK_SZ);
+ if (err)
+ return err;
+ }
+
+ if (pos < fw_size) {
+ err = prestera_ldr_send(fw, img + pos, fw_size - pos);
+ if (err)
+ return err;
+ }
+
+ err = prestera_ldr_wait_dl_finish(fw);
+ if (err)
+ return err;
+
+ status = prestera_ldr_read(fw, PRESTERA_LDR_STATUS_REG);
+
+ switch (status) {
+ case PRESTERA_LDR_STATUS_INVALID_IMG:
+ dev_err(fw->dev.dev, "FW img has bad CRC\n");
+ return -EINVAL;
+ case PRESTERA_LDR_STATUS_NOMEM:
+ dev_err(fw->dev.dev, "Loader has no enough mem\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void prestera_fw_rev_parse(const struct prestera_fw_header *hdr,
+ struct prestera_fw_rev *rev)
+{
+ u32 version = be32_to_cpu(hdr->version_value);
+
+ rev->maj = PRESTERA_FW_VER_MAJ(version);
+ rev->min = PRESTERA_FW_VER_MIN(version);
+ rev->sub = PRESTERA_FW_VER_PATCH(version);
+}
+
+static int prestera_fw_rev_check(struct prestera_fw *fw)
+{
+ struct prestera_fw_rev *rev = &fw->dev.fw_rev;
+ u16 maj_supp = PRESTERA_SUPP_FW_MAJ_VER;
+ u16 min_supp = PRESTERA_SUPP_FW_MIN_VER;
+
+ if (rev->maj == maj_supp && rev->min >= min_supp)
+ return 0;
+
+ dev_err(fw->dev.dev, "Driver supports FW version only '%u.%u.x'",
+ PRESTERA_SUPP_FW_MAJ_VER, PRESTERA_SUPP_FW_MIN_VER);
+
+ return -EINVAL;
+}
+
+static int prestera_fw_hdr_parse(struct prestera_fw *fw,
+ const struct firmware *img)
+{
+ struct prestera_fw_header *hdr = (struct prestera_fw_header *)img->data;
+ struct prestera_fw_rev *rev = &fw->dev.fw_rev;
+ u32 magic;
+
+ magic = be32_to_cpu(hdr->magic_number);
+ if (magic != PRESTERA_FW_HDR_MAGIC) {
+ dev_err(fw->dev.dev, "FW img hdr magic is invalid");
+ return -EINVAL;
+ }
+
+ prestera_fw_rev_parse(hdr, rev);
+
+ dev_info(fw->dev.dev, "FW version '%u.%u.%u'\n",
+ rev->maj, rev->min, rev->sub);
+
+ return prestera_fw_rev_check(fw);
+}
+
+static int prestera_fw_load(struct prestera_fw *fw)
+{
+ size_t hlen = sizeof(struct prestera_fw_header);
+ const struct firmware *f;
+ char fw_path[128];
+ int err;
+
+ err = prestera_ldr_wait_reg32(fw, PRESTERA_LDR_READY_REG,
+ PRESTERA_LDR_READY_MAGIC,
+ 5 * MSEC_PER_SEC);
+ if (err) {
+ dev_err(fw->dev.dev, "waiting for FW loader is timed out");
+ return err;
+ }
+
+ fw->ldr_ring_buf = fw->ldr_regs +
+ prestera_ldr_read(fw, PRESTERA_LDR_BUF_OFFS_REG);
+
+ fw->ldr_buf_len =
+ prestera_ldr_read(fw, PRESTERA_LDR_BUF_SIZE_REG);
+
+ fw->ldr_wr_idx = 0;
+
+ snprintf(fw_path, sizeof(fw_path), PRESTERA_FW_PATH_FMT,
+ PRESTERA_SUPP_FW_MAJ_VER, PRESTERA_SUPP_FW_MIN_VER);
+
+ err = request_firmware_direct(&f, fw_path, fw->dev.dev);
+ if (err) {
+ dev_err(fw->dev.dev, "failed to request firmware file\n");
+ return err;
+ }
+
+ err = prestera_fw_hdr_parse(fw, f);
+ if (err) {
+ dev_err(fw->dev.dev, "FW image header is invalid\n");
+ goto out_release;
+ }
+
+ prestera_ldr_write(fw, PRESTERA_LDR_IMG_SIZE_REG, f->size - hlen);
+ prestera_ldr_write(fw, PRESTERA_LDR_CTL_REG, PRESTERA_LDR_CTL_DL_START);
+
+ dev_info(fw->dev.dev, "Loading %s ...", fw_path);
+
+ err = prestera_ldr_fw_send(fw, f->data + hlen, f->size - hlen);
+
+out_release:
+ release_firmware(f);
+ return err;
+}
+
+static int prestera_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ const char *driver_name = pdev->driver->name;
+ struct prestera_fw *fw;
+ int err;
+
+ err = pcim_enable_device(pdev);
+ if (err)
+ return err;
+
+ err = pcim_iomap_regions(pdev, BIT(PRESTERA_PCI_BAR_FW) |
+ BIT(PRESTERA_PCI_BAR_PP),
+ pci_name(pdev));
+ if (err)
+ return err;
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(30));
+ if (err) {
+ dev_err(&pdev->dev, "failed to set DMA mask\n");
+ goto err_dma_mask;
+ }
+
+ pci_set_master(pdev);
+
+ fw = devm_kzalloc(&pdev->dev, sizeof(*fw), GFP_KERNEL);
+ if (!fw) {
+ err = -ENOMEM;
+ goto err_pci_dev_alloc;
+ }
+
+ fw->dev.ctl_regs = pcim_iomap_table(pdev)[PRESTERA_PCI_BAR_FW];
+ fw->dev.pp_regs = pcim_iomap_table(pdev)[PRESTERA_PCI_BAR_PP];
+ fw->dev.dev = &pdev->dev;
+
+ pci_set_drvdata(pdev, fw);
+
+ err = prestera_fw_init(fw);
+ if (err)
+ goto err_prestera_fw_init;
+
+ dev_info(fw->dev.dev, "Prestera FW is ready\n");
+
+ fw->wq = alloc_workqueue("prestera_fw_wq", WQ_HIGHPRI, 1);
+ if (!fw->wq) {
+ err = -ENOMEM;
+ goto err_wq_alloc;
+ }
+
+ INIT_WORK(&fw->evt_work, prestera_fw_evt_work_fn);
+
+ err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (err < 0) {
+ dev_err(&pdev->dev, "MSI IRQ init failed\n");
+ goto err_irq_alloc;
+ }
+
+ err = request_irq(pci_irq_vector(pdev, 0), prestera_pci_irq_handler,
+ 0, driver_name, fw);
+ if (err) {
+ dev_err(&pdev->dev, "fail to request IRQ\n");
+ goto err_request_irq;
+ }
+
+ err = prestera_device_register(&fw->dev);
+ if (err)
+ goto err_prestera_dev_register;
+
+ return 0;
+
+err_prestera_dev_register:
+ free_irq(pci_irq_vector(pdev, 0), fw);
+err_request_irq:
+ pci_free_irq_vectors(pdev);
+err_irq_alloc:
+ destroy_workqueue(fw->wq);
+err_wq_alloc:
+ prestera_fw_uninit(fw);
+err_prestera_fw_init:
+err_pci_dev_alloc:
+err_dma_mask:
+ return err;
+}
+
+static void prestera_pci_remove(struct pci_dev *pdev)
+{
+ struct prestera_fw *fw = pci_get_drvdata(pdev);
+
+ prestera_device_unregister(&fw->dev);
+ free_irq(pci_irq_vector(pdev, 0), fw);
+ pci_free_irq_vectors(pdev);
+ destroy_workqueue(fw->wq);
+ prestera_fw_uninit(fw);
+}
+
+static const struct pci_device_id prestera_pci_devices[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0xC804) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, prestera_pci_devices);
+
+static struct pci_driver prestera_pci_driver = {
+ .name = "Prestera DX",
+ .id_table = prestera_pci_devices,
+ .probe = prestera_pci_probe,
+ .remove = prestera_pci_remove,
+};
+module_pci_driver(prestera_pci_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Marvell Prestera switch PCI interface");
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
new file mode 100644
index 000000000000..2a13c318048c
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
@@ -0,0 +1,820 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/bitfield.h>
+#include <linux/dmapool.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "prestera_dsa.h"
+#include "prestera.h"
+#include "prestera_hw.h"
+#include "prestera_rxtx.h"
+
+#define PRESTERA_SDMA_WAIT_MUL 10
+
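+/* SDMA descriptor layout as consumed by the hardware: word1 carries the
+ * ownership bit (bit 31), word2 the buffer/packet length, and buff/next
+ * hold device-mapped bus addresses.
+ */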
+struct prestera_sdma_desc {
+ __le32 word1;
+ __le32 word2;
+ __le32 buff;
+ __le32 next;
+} __packed __aligned(16);
+
+#define PRESTERA_SDMA_BUFF_SIZE_MAX 1544
+
+#define PRESTERA_SDMA_RX_DESC_PKT_LEN(desc) \
+ ((le32_to_cpu((desc)->word2) >> 16) & GENMASK(13, 0))
+
+#define PRESTERA_SDMA_RX_DESC_OWNER(desc) \
+ ((le32_to_cpu((desc)->word1) & BIT(31)) >> 31)
+
+#define PRESTERA_SDMA_RX_DESC_IS_RCVD(desc) \
+ (PRESTERA_SDMA_RX_DESC_OWNER(desc) == PRESTERA_SDMA_RX_DESC_CPU_OWN)
+
+#define PRESTERA_SDMA_RX_DESC_CPU_OWN 0
+#define PRESTERA_SDMA_RX_DESC_DMA_OWN 1
+
+#define PRESTERA_SDMA_RX_QUEUE_NUM 8
+
+#define PRESTERA_SDMA_RX_DESC_PER_Q 1000
+
+#define PRESTERA_SDMA_TX_DESC_PER_Q 1000
+#define PRESTERA_SDMA_TX_MAX_BURST 64
+
+#define PRESTERA_SDMA_TX_DESC_OWNER(desc) \
+ ((le32_to_cpu((desc)->word1) & BIT(31)) >> 31)
+
+#define PRESTERA_SDMA_TX_DESC_CPU_OWN 0
+#define PRESTERA_SDMA_TX_DESC_DMA_OWN 1U
+
+#define PRESTERA_SDMA_TX_DESC_IS_SENT(desc) \
+ (PRESTERA_SDMA_TX_DESC_OWNER(desc) == PRESTERA_SDMA_TX_DESC_CPU_OWN)
+
+#define PRESTERA_SDMA_TX_DESC_LAST BIT(20)
+#define PRESTERA_SDMA_TX_DESC_FIRST BIT(21)
+#define PRESTERA_SDMA_TX_DESC_CALC_CRC BIT(12)
+
+#define PRESTERA_SDMA_TX_DESC_SINGLE \
+ (PRESTERA_SDMA_TX_DESC_FIRST | PRESTERA_SDMA_TX_DESC_LAST)
+
+#define PRESTERA_SDMA_TX_DESC_INIT \
+ (PRESTERA_SDMA_TX_DESC_SINGLE | PRESTERA_SDMA_TX_DESC_CALC_CRC)
+
+#define PRESTERA_SDMA_RX_INTR_MASK_REG 0x2814
+#define PRESTERA_SDMA_RX_QUEUE_STATUS_REG 0x2680
+#define PRESTERA_SDMA_RX_QUEUE_DESC_REG(n) (0x260C + (n) * 16)
+
+#define PRESTERA_SDMA_TX_QUEUE_DESC_REG 0x26C0
+#define PRESTERA_SDMA_TX_QUEUE_START_REG 0x2868
+
+struct prestera_sdma_buf {
+ struct prestera_sdma_desc *desc;
+ dma_addr_t desc_dma;
+ struct sk_buff *skb;
+ dma_addr_t buf_dma;
+ bool is_used;
+};
+
+struct prestera_rx_ring {
+ struct prestera_sdma_buf *bufs;
+ int next_rx;
+};
+
+struct prestera_tx_ring {
+ struct prestera_sdma_buf *bufs;
+ int next_tx;
+ int max_burst;
+ int burst;
+};
+
+struct prestera_sdma {
+ struct prestera_rx_ring rx_ring[PRESTERA_SDMA_RX_QUEUE_NUM];
+ struct prestera_tx_ring tx_ring;
+ struct prestera_switch *sw;
+ struct dma_pool *desc_pool;
+ struct work_struct tx_work;
+ struct napi_struct rx_napi;
+ struct net_device napi_dev;
+ u32 map_addr;
+ u64 dma_mask;
+ /* protect SDMA against concurrent access from multiple CPUs */
+ spinlock_t tx_lock;
+};
+
+struct prestera_rxtx {
+ struct prestera_sdma sdma;
+};
+
+static int prestera_sdma_buf_init(struct prestera_sdma *sdma,
+ struct prestera_sdma_buf *buf)
+{
+ struct prestera_sdma_desc *desc;
+ dma_addr_t dma;
+
+ desc = dma_pool_alloc(sdma->desc_pool, GFP_DMA | GFP_KERNEL, &dma);
+ if (!desc)
+ return -ENOMEM;
+
+ buf->buf_dma = DMA_MAPPING_ERROR;
+ buf->desc_dma = dma;
+ buf->desc = desc;
+ buf->skb = NULL;
+
+ return 0;
+}
+
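+/* Translate a host DMA address into the address space seen by the SDMA
+ * engine, using the base offset reported by the firmware.
+ */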
+static u32 prestera_sdma_map(struct prestera_sdma *sdma, dma_addr_t pa)
+{
+ return sdma->map_addr + pa;
+}
+
+static void prestera_sdma_rx_desc_init(struct prestera_sdma *sdma,
+ struct prestera_sdma_desc *desc,
+ dma_addr_t buf)
+{
+ u32 word = le32_to_cpu(desc->word2);
+
+ u32p_replace_bits(&word, PRESTERA_SDMA_BUFF_SIZE_MAX, GENMASK(15, 0));
+ desc->word2 = cpu_to_le32(word);
+
+ desc->buff = cpu_to_le32(prestera_sdma_map(sdma, buf));
+
+ /* make sure the buffer address is written before resetting the descriptor */
+ wmb();
+
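+ /* Bit 31 hands the descriptor back to the hardware; bit 29 is set as
+ * well (presumably the per-descriptor interrupt enable).
+ */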
+ desc->word1 = cpu_to_le32(0xA0000000);
+}
+
+static void prestera_sdma_rx_desc_set_next(struct prestera_sdma *sdma,
+ struct prestera_sdma_desc *desc,
+ dma_addr_t next)
+{
+ desc->next = cpu_to_le32(prestera_sdma_map(sdma, next));
+}
+
+static int prestera_sdma_rx_skb_alloc(struct prestera_sdma *sdma,
+ struct prestera_sdma_buf *buf)
+{
+ struct device *dev = sdma->sw->dev->dev;
+ struct sk_buff *skb;
+ dma_addr_t dma;
+
+ skb = alloc_skb(PRESTERA_SDMA_BUFF_SIZE_MAX, GFP_DMA | GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ dma = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, dma))
+ goto err_dma_map;
+
+ if (buf->skb)
+ dma_unmap_single(dev, buf->buf_dma, buf->skb->len,
+ DMA_FROM_DEVICE);
+
+ buf->buf_dma = dma;
+ buf->skb = skb;
+
+ return 0;
+
+err_dma_map:
+ kfree_skb(skb);
+
+ return -ENOMEM;
+}
+
+static struct sk_buff *prestera_sdma_rx_skb_get(struct prestera_sdma *sdma,
+ struct prestera_sdma_buf *buf)
+{
+ dma_addr_t buf_dma = buf->buf_dma;
+ struct sk_buff *skb = buf->skb;
+ u32 len = skb->len;
+ int err;
+
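+ /* Try to replace the filled buffer with a fresh one; on allocation
+ * failure keep the old mapping and return a copy of the frame instead.
+ */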
+ err = prestera_sdma_rx_skb_alloc(sdma, buf);
+ if (err) {
+ buf->buf_dma = buf_dma;
+ buf->skb = skb;
+
+ skb = alloc_skb(skb->len, GFP_ATOMIC);
+ if (skb) {
+ skb_put(skb, len);
+ skb_copy_from_linear_data(buf->skb, skb->data, len);
+ }
+ }
+
+ prestera_sdma_rx_desc_init(sdma, buf->desc, buf->buf_dma);
+
+ return skb;
+}
+
+static int prestera_rxtx_process_skb(struct prestera_sdma *sdma,
+ struct sk_buff *skb)
+{
+ const struct prestera_port *port;
+ struct prestera_dsa dsa;
+ u32 hw_port, dev_id;
+ int err;
+
+ skb_pull(skb, ETH_HLEN);
+
+ /* The EtherType field is part of the DSA header. */
+ err = prestera_dsa_parse(&dsa, skb->data - ETH_TLEN);
+ if (err)
+ return err;
+
+ dev_id = dsa.hw_dev_num;
+ hw_port = dsa.port_num;
+
+ port = prestera_port_find_by_hwid(sdma->sw, dev_id, hw_port);
+ if (unlikely(!port)) {
+ dev_warn_ratelimited(prestera_dev(sdma->sw), "received pkt for non-existent port(%u, %u)\n",
+ dev_id, hw_port);
+ return -ENOENT;
+ }
+
+ if (unlikely(!pskb_may_pull(skb, PRESTERA_DSA_HLEN)))
+ return -EINVAL;
+
+ /* remove DSA tag and update checksum */
+ skb_pull_rcsum(skb, PRESTERA_DSA_HLEN);
+
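+ /* Move DA + SA forward over the stripped DSA tag. */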
+ memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - PRESTERA_DSA_HLEN,
+ ETH_ALEN * 2);
+
+ skb_push(skb, ETH_HLEN);
+
+ skb->protocol = eth_type_trans(skb, port->dev);
+
+ if (dsa.vlan.is_tagged) {
+ u16 tci = dsa.vlan.vid & VLAN_VID_MASK;
+
+ tci |= dsa.vlan.vpt << VLAN_PRIO_SHIFT;
+ if (dsa.vlan.cfi_bit)
+ tci |= VLAN_CFI_MASK;
+
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tci);
+ }
+
+ return 0;
+}
+
+static int prestera_sdma_next_rx_buf_idx(int buf_idx)
+{
+ return (buf_idx + 1) % PRESTERA_SDMA_RX_DESC_PER_Q;
+}
+
+static int prestera_sdma_rx_poll(struct napi_struct *napi, int budget)
+{
+ int qnum = PRESTERA_SDMA_RX_QUEUE_NUM;
+ unsigned int rxq_done_map = 0;
+ struct prestera_sdma *sdma;
+ struct list_head rx_list;
+ unsigned int qmask;
+ int pkts_done = 0;
+ int q;
+
+ qmask = GENMASK(qnum - 1, 0);
+
+ INIT_LIST_HEAD(&rx_list);
+
+ sdma = container_of(napi, struct prestera_sdma, rx_napi);
+
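+ /* Service the RX queues round-robin until the budget is spent or
+ * every queue reports no more received descriptors.
+ */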
+ while (pkts_done < budget && rxq_done_map != qmask) {
+ for (q = 0; q < qnum && pkts_done < budget; q++) {
+ struct prestera_rx_ring *ring = &sdma->rx_ring[q];
+ struct prestera_sdma_desc *desc;
+ struct prestera_sdma_buf *buf;
+ int buf_idx = ring->next_rx;
+ struct sk_buff *skb;
+
+ buf = &ring->bufs[buf_idx];
+ desc = buf->desc;
+
+ if (PRESTERA_SDMA_RX_DESC_IS_RCVD(desc)) {
+ rxq_done_map &= ~BIT(q);
+ } else {
+ rxq_done_map |= BIT(q);
+ continue;
+ }
+
+ pkts_done++;
+
+ __skb_trim(buf->skb, PRESTERA_SDMA_RX_DESC_PKT_LEN(desc));
+
+ skb = prestera_sdma_rx_skb_get(sdma, buf);
+ if (!skb)
+ goto rx_next_buf;
+
+ if (unlikely(prestera_rxtx_process_skb(sdma, skb)))
+ goto rx_next_buf;
+
+ list_add_tail(&skb->list, &rx_list);
+rx_next_buf:
+ ring->next_rx = prestera_sdma_next_rx_buf_idx(buf_idx);
+ }
+ }
+
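+ /* Re-arm RX interrupts for all queues (mask bits 2..9) only once the
+ * poll has completed under budget.
+ */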
+ if (pkts_done < budget && napi_complete_done(napi, pkts_done))
+ prestera_write(sdma->sw, PRESTERA_SDMA_RX_INTR_MASK_REG,
+ GENMASK(9, 2));
+
+ netif_receive_skb_list(&rx_list);
+
+ return pkts_done;
+}
+
+static void prestera_sdma_rx_fini(struct prestera_sdma *sdma)
+{
+ int qnum = PRESTERA_SDMA_RX_QUEUE_NUM;
+ int q, b;
+
+ /* disable all rx queues */
+ prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG,
+ GENMASK(15, 8));
+
+ for (q = 0; q < qnum; q++) {
+ struct prestera_rx_ring *ring = &sdma->rx_ring[q];
+
+ if (!ring->bufs)
+ break;
+
+ for (b = 0; b < PRESTERA_SDMA_RX_DESC_PER_Q; b++) {
+ struct prestera_sdma_buf *buf = &ring->bufs[b];
+
+ if (buf->desc_dma)
+ dma_pool_free(sdma->desc_pool, buf->desc,
+ buf->desc_dma);
+
+ if (!buf->skb)
+ continue;
+
+ if (buf->buf_dma != DMA_MAPPING_ERROR)
+ dma_unmap_single(sdma->sw->dev->dev,
+ buf->buf_dma, buf->skb->len,
+ DMA_FROM_DEVICE);
+ kfree_skb(buf->skb);
+ }
+ }
+}
+
+static int prestera_sdma_rx_init(struct prestera_sdma *sdma)
+{
+ int bnum = PRESTERA_SDMA_RX_DESC_PER_Q;
+ int qnum = PRESTERA_SDMA_RX_QUEUE_NUM;
+ int err;
+ int q;
+
+ /* disable all rx queues */
+ prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG,
+ GENMASK(15, 8));
+
+ for (q = 0; q < qnum; q++) {
+ struct prestera_sdma_buf *head, *tail, *next, *prev;
+ struct prestera_rx_ring *ring = &sdma->rx_ring[q];
+
+ ring->bufs = kmalloc_array(bnum, sizeof(*head), GFP_KERNEL);
+ if (!ring->bufs)
+ return -ENOMEM;
+
+ ring->next_rx = 0;
+
+ tail = &ring->bufs[bnum - 1];
+ head = &ring->bufs[0];
+ next = head;
+ prev = next;
+
+ do {
+ err = prestera_sdma_buf_init(sdma, next);
+ if (err)
+ return err;
+
+ err = prestera_sdma_rx_skb_alloc(sdma, next);
+ if (err)
+ return err;
+
+ prestera_sdma_rx_desc_init(sdma, next->desc,
+ next->buf_dma);
+
+ prestera_sdma_rx_desc_set_next(sdma, prev->desc,
+ next->desc_dma);
+
+ prev = next;
+ next++;
+ } while (prev != tail);
+
+ /* join tail with head to make a circular list */
+ prestera_sdma_rx_desc_set_next(sdma, tail->desc, head->desc_dma);
+
+ prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_DESC_REG(q),
+ prestera_sdma_map(sdma, head->desc_dma));
+ }
+
+ /* make sure all rx descs are filled before enabling all rx queues */
+ wmb();
+
+ prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG,
+ GENMASK(7, 0));
+
+ return 0;
+}
+
+static void prestera_sdma_tx_desc_init(struct prestera_sdma *sdma,
+ struct prestera_sdma_desc *desc)
+{
+ desc->word1 = cpu_to_le32(PRESTERA_SDMA_TX_DESC_INIT);
+ desc->word2 = 0;
+}
+
+static void prestera_sdma_tx_desc_set_next(struct prestera_sdma *sdma,
+ struct prestera_sdma_desc *desc,
+ dma_addr_t next)
+{
+ desc->next = cpu_to_le32(prestera_sdma_map(sdma, next));
+}
+
+static void prestera_sdma_tx_desc_set_buf(struct prestera_sdma *sdma,
+ struct prestera_sdma_desc *desc,
+ dma_addr_t buf, size_t len)
+{
+ u32 word = le32_to_cpu(desc->word2);
+
+ u32p_replace_bits(&word, len + ETH_FCS_LEN, GENMASK(30, 16));
+
+ desc->buff = cpu_to_le32(prestera_sdma_map(sdma, buf));
+ desc->word2 = cpu_to_le32(word);
+}
+
+static void prestera_sdma_tx_desc_xmit(struct prestera_sdma_desc *desc)
+{
+ u32 word = le32_to_cpu(desc->word1);
+
+ word |= PRESTERA_SDMA_TX_DESC_DMA_OWN << 31;
+
+ /* make sure everything is written before enabling xmit */
+ wmb();
+
+ desc->word1 = cpu_to_le32(word);
+}
+
+static int prestera_sdma_tx_buf_map(struct prestera_sdma *sdma,
+ struct prestera_sdma_buf *buf,
+ struct sk_buff *skb)
+{
+ struct device *dma_dev = sdma->sw->dev->dev;
+ dma_addr_t dma;
+
+ dma = dma_map_single(dma_dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dma_dev, dma))
+ return -ENOMEM;
+
+ buf->buf_dma = dma;
+ buf->skb = skb;
+
+ return 0;
+}
+
+static void prestera_sdma_tx_buf_unmap(struct prestera_sdma *sdma,
+ struct prestera_sdma_buf *buf)
+{
+ struct device *dma_dev = sdma->sw->dev->dev;
+
+ dma_unmap_single(dma_dev, buf->buf_dma, buf->skb->len, DMA_TO_DEVICE);
+}
+
+static void prestera_sdma_tx_recycle_work_fn(struct work_struct *work)
+{
+ int bnum = PRESTERA_SDMA_TX_DESC_PER_Q;
+ struct prestera_tx_ring *tx_ring;
+ struct prestera_sdma *sdma;
+ int b;
+
+ sdma = container_of(work, struct prestera_sdma, tx_work);
+
+ tx_ring = &sdma->tx_ring;
+
+ for (b = 0; b < bnum; b++) {
+ struct prestera_sdma_buf *buf = &tx_ring->bufs[b];
+
+ if (!buf->is_used)
+ continue;
+
+ if (!PRESTERA_SDMA_TX_DESC_IS_SENT(buf->desc))
+ continue;
+
+ prestera_sdma_tx_buf_unmap(sdma, buf);
+ dev_consume_skb_any(buf->skb);
+ buf->skb = NULL;
+
+ /* make sure everything is cleaned up */
+ wmb();
+
+ buf->is_used = false;
+ }
+}
+
+static int prestera_sdma_tx_init(struct prestera_sdma *sdma)
+{
+ struct prestera_sdma_buf *head, *tail, *next, *prev;
+ struct prestera_tx_ring *tx_ring = &sdma->tx_ring;
+ int bnum = PRESTERA_SDMA_TX_DESC_PER_Q;
+ int err;
+
+ INIT_WORK(&sdma->tx_work, prestera_sdma_tx_recycle_work_fn);
+ spin_lock_init(&sdma->tx_lock);
+
+ tx_ring->bufs = kmalloc_array(bnum, sizeof(*head), GFP_KERNEL);
+ if (!tx_ring->bufs)
+ return -ENOMEM;
+
+ tail = &tx_ring->bufs[bnum - 1];
+ head = &tx_ring->bufs[0];
+ next = head;
+ prev = next;
+
+ tx_ring->max_burst = PRESTERA_SDMA_TX_MAX_BURST;
+ tx_ring->burst = tx_ring->max_burst;
+ tx_ring->next_tx = 0;
+
+ do {
+ err = prestera_sdma_buf_init(sdma, next);
+ if (err)
+ return err;
+
+ next->is_used = false;
+
+ prestera_sdma_tx_desc_init(sdma, next->desc);
+
+ prestera_sdma_tx_desc_set_next(sdma, prev->desc,
+ next->desc_dma);
+
+ prev = next;
+ next++;
+ } while (prev != tail);
+
+ /* join tail with head to make a circular list */
+ prestera_sdma_tx_desc_set_next(sdma, tail->desc, head->desc_dma);
+
+ /* make sure descriptors are written */
+ wmb();
+
+ prestera_write(sdma->sw, PRESTERA_SDMA_TX_QUEUE_DESC_REG,
+ prestera_sdma_map(sdma, head->desc_dma));
+
+ return 0;
+}
+
+static void prestera_sdma_tx_fini(struct prestera_sdma *sdma)
+{
+ struct prestera_tx_ring *ring = &sdma->tx_ring;
+ int bnum = PRESTERA_SDMA_TX_DESC_PER_Q;
+ int b;
+
+ cancel_work_sync(&sdma->tx_work);
+
+ if (!ring->bufs)
+ return;
+
+ for (b = 0; b < bnum; b++) {
+ struct prestera_sdma_buf *buf = &ring->bufs[b];
+
+ if (buf->desc)
+ dma_pool_free(sdma->desc_pool, buf->desc,
+ buf->desc_dma);
+
+ if (!buf->skb)
+ continue;
+
+ dma_unmap_single(sdma->sw->dev->dev, buf->buf_dma,
+ buf->skb->len, DMA_TO_DEVICE);
+
+ dev_consume_skb_any(buf->skb);
+ }
+}
+
+static void prestera_rxtx_handle_event(struct prestera_switch *sw,
+ struct prestera_event *evt,
+ void *arg)
+{
+ struct prestera_sdma *sdma = arg;
+
+ if (evt->id != PRESTERA_RXTX_EVENT_RCV_PKT)
+ return;
+
+ prestera_write(sdma->sw, PRESTERA_SDMA_RX_INTR_MASK_REG, 0);
+ napi_schedule(&sdma->rx_napi);
+}
+
+static int prestera_sdma_switch_init(struct prestera_switch *sw)
+{
+ struct prestera_sdma *sdma = &sw->rxtx->sdma;
+ struct device *dev = sw->dev->dev;
+ struct prestera_rxtx_params p;
+ int err;
+
+ p.use_sdma = true;
+
+ err = prestera_hw_rxtx_init(sw, &p);
+ if (err) {
+ dev_err(dev, "failed to init rxtx by hw\n");
+ return err;
+ }
+
+ sdma->dma_mask = dma_get_mask(dev);
+ sdma->map_addr = p.map_addr;
+ sdma->sw = sw;
+
+ sdma->desc_pool = dma_pool_create("desc_pool", dev,
+ sizeof(struct prestera_sdma_desc),
+ 16, 0);
+ if (!sdma->desc_pool)
+ return -ENOMEM;
+
+ err = prestera_sdma_rx_init(sdma);
+ if (err) {
+ dev_err(dev, "failed to init rx ring\n");
+ goto err_rx_init;
+ }
+
+ err = prestera_sdma_tx_init(sdma);
+ if (err) {
+ dev_err(dev, "failed to init tx ring\n");
+ goto err_tx_init;
+ }
+
+ err = prestera_hw_event_handler_register(sw, PRESTERA_EVENT_TYPE_RXTX,
+ prestera_rxtx_handle_event,
+ sdma);
+ if (err)
+ goto err_evt_register;
+
+ init_dummy_netdev(&sdma->napi_dev);
+
+ netif_napi_add(&sdma->napi_dev, &sdma->rx_napi, prestera_sdma_rx_poll, 64);
+ napi_enable(&sdma->rx_napi);
+
+ return 0;
+
+err_evt_register:
+err_tx_init:
+ prestera_sdma_tx_fini(sdma);
+err_rx_init:
+ prestera_sdma_rx_fini(sdma);
+
+ dma_pool_destroy(sdma->desc_pool);
+ return err;
+}
+
+static void prestera_sdma_switch_fini(struct prestera_switch *sw)
+{
+ struct prestera_sdma *sdma = &sw->rxtx->sdma;
+
+ napi_disable(&sdma->rx_napi);
+ netif_napi_del(&sdma->rx_napi);
+ prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_RXTX,
+ prestera_rxtx_handle_event);
+ prestera_sdma_tx_fini(sdma);
+ prestera_sdma_rx_fini(sdma);
+ dma_pool_destroy(sdma->desc_pool);
+}
+
+static bool prestera_sdma_is_ready(struct prestera_sdma *sdma)
+{
+ return !(prestera_read(sdma->sw, PRESTERA_SDMA_TX_QUEUE_START_REG) & 1);
+}
+
+static int prestera_sdma_tx_wait(struct prestera_sdma *sdma,
+ struct prestera_tx_ring *tx_ring)
+{
+ int tx_wait_num = PRESTERA_SDMA_WAIT_MUL * tx_ring->max_burst;
+
+ do {
+ if (prestera_sdma_is_ready(sdma))
+ return 0;
+
+ udelay(1);
+ } while (--tx_wait_num);
+
+ return -EBUSY;
+}
+
+static void prestera_sdma_tx_start(struct prestera_sdma *sdma)
+{
+ prestera_write(sdma->sw, PRESTERA_SDMA_TX_QUEUE_START_REG, 1);
+ schedule_work(&sdma->tx_work);
+}
+
+static netdev_tx_t prestera_sdma_xmit(struct prestera_sdma *sdma,
+ struct sk_buff *skb)
+{
+ struct device *dma_dev = sdma->sw->dev->dev;
+ struct net_device *dev = skb->dev;
+ struct prestera_tx_ring *tx_ring;
+ struct prestera_sdma_buf *buf;
+ int err;
+
+ spin_lock(&sdma->tx_lock);
+
+ tx_ring = &sdma->tx_ring;
+
+ buf = &tx_ring->bufs[tx_ring->next_tx];
+ if (buf->is_used) {
+ schedule_work(&sdma->tx_work);
+ goto drop_skb;
+ }
+
+ if (unlikely(eth_skb_pad(skb)))
+ goto drop_skb_nofree;
+
+ err = prestera_sdma_tx_buf_map(sdma, buf, skb);
+ if (err)
+ goto drop_skb;
+
+ prestera_sdma_tx_desc_set_buf(sdma, buf->desc, buf->buf_dma, skb->len);
+
+ dma_sync_single_for_device(dma_dev, buf->buf_dma, skb->len,
+ DMA_TO_DEVICE);
+
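+ /* Check the TX engine readiness only once per max_burst frames to
+ * avoid a register poll on every xmit.
+ */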
+ if (tx_ring->burst) {
+ tx_ring->burst--;
+ } else {
+ tx_ring->burst = tx_ring->max_burst;
+
+ err = prestera_sdma_tx_wait(sdma, tx_ring);
+ if (err)
+ goto drop_skb_unmap;
+ }
+
+ tx_ring->next_tx = (tx_ring->next_tx + 1) % PRESTERA_SDMA_TX_DESC_PER_Q;
+ prestera_sdma_tx_desc_xmit(buf->desc);
+ buf->is_used = true;
+
+ prestera_sdma_tx_start(sdma);
+
+ goto tx_done;
+
+drop_skb_unmap:
+ prestera_sdma_tx_buf_unmap(sdma, buf);
+drop_skb:
+ dev_kfree_skb_any(skb);
+drop_skb_nofree:
+ dev->stats.tx_dropped++;
+tx_done:
+ spin_unlock(&sdma->tx_lock);
+ return NETDEV_TX_OK;
+}
+
+int prestera_rxtx_switch_init(struct prestera_switch *sw)
+{
+ struct prestera_rxtx *rxtx;
+
+ rxtx = kzalloc(sizeof(*rxtx), GFP_KERNEL);
+ if (!rxtx)
+ return -ENOMEM;
+
+ sw->rxtx = rxtx;
+
+ return prestera_sdma_switch_init(sw);
+}
+
+void prestera_rxtx_switch_fini(struct prestera_switch *sw)
+{
+ prestera_sdma_switch_fini(sw);
+ kfree(sw->rxtx);
+}
+
+int prestera_rxtx_port_init(struct prestera_port *port)
+{
+ int err;
+
+ err = prestera_hw_rxtx_port_init(port);
+ if (err)
+ return err;
+
+ port->dev->needed_headroom = PRESTERA_DSA_HLEN;
+
+ return 0;
+}
+
+netdev_tx_t prestera_rxtx_xmit(struct prestera_port *port, struct sk_buff *skb)
+{
+ struct prestera_dsa dsa;
+
+ dsa.hw_dev_num = port->dev_id;
+ dsa.port_num = port->hw_id;
+
+ if (skb_cow_head(skb, PRESTERA_DSA_HLEN) < 0)
+ return NET_XMIT_DROP;
+
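+ /* Make room for the DSA tag: shift DA + SA back by the tag length
+ * and build the tag in the gap, where the EtherType used to be.
+ */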
+ skb_push(skb, PRESTERA_DSA_HLEN);
+ memmove(skb->data, skb->data + PRESTERA_DSA_HLEN, 2 * ETH_ALEN);
+
+ if (prestera_dsa_build(&dsa, skb->data + 2 * ETH_ALEN) != 0)
+ return NET_XMIT_DROP;
+
+ return prestera_sdma_xmit(&port->sw->rxtx->sdma, skb);
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.h b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.h
new file mode 100644
index 000000000000..882a1225c323
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_RXTX_H_
+#define _PRESTERA_RXTX_H_
+
+#include <linux/netdevice.h>
+
+struct prestera_switch;
+struct prestera_port;
+
+int prestera_rxtx_switch_init(struct prestera_switch *sw);
+void prestera_rxtx_switch_fini(struct prestera_switch *sw);
+
+int prestera_rxtx_port_init(struct prestera_port *port);
+
+netdev_tx_t prestera_rxtx_xmit(struct prestera_port *port, struct sk_buff *skb);
+
+#endif /* _PRESTERA_RXTX_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
new file mode 100644
index 000000000000..7d83e1f91ef1
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
@@ -0,0 +1,1277 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <net/netevent.h>
+#include <net/switchdev.h>
+
+#include "prestera.h"
+#include "prestera_hw.h"
+#include "prestera_switchdev.h"
+
+#define PRESTERA_VID_ALL (0xffff)
+
+#define PRESTERA_DEFAULT_AGEING_TIME_MS 300000
+#define PRESTERA_MAX_AGEING_TIME_MS 1000000000
+#define PRESTERA_MIN_AGEING_TIME_MS 32000
+
+struct prestera_fdb_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct net_device *dev;
+ unsigned long event;
+};
+
+struct prestera_switchdev {
+ struct prestera_switch *sw;
+ struct list_head bridge_list;
+ bool bridge_8021q_exists;
+ struct notifier_block swdev_nb_blk;
+ struct notifier_block swdev_nb;
+};
+
+struct prestera_bridge {
+ struct list_head head;
+ struct net_device *dev;
+ struct prestera_switchdev *swdev;
+ struct list_head port_list;
+ bool vlan_enabled;
+ u16 bridge_id;
+};
+
+struct prestera_bridge_port {
+ struct list_head head;
+ struct net_device *dev;
+ struct prestera_bridge *bridge;
+ struct list_head vlan_list;
+ refcount_t ref_count;
+ unsigned long flags;
+ u8 stp_state;
+};
+
+struct prestera_bridge_vlan {
+ struct list_head head;
+ struct list_head port_vlan_list;
+ u16 vid;
+};
+
+struct prestera_port_vlan {
+ struct list_head br_vlan_head;
+ struct list_head port_head;
+ struct prestera_port *port;
+ struct prestera_bridge_port *br_port;
+ u16 vid;
+};
+
+static struct workqueue_struct *swdev_wq;
+
+static void prestera_bridge_port_put(struct prestera_bridge_port *br_port);
+
+static int prestera_port_vid_stp_set(struct prestera_port *port, u16 vid,
+ u8 state);
+
+static struct prestera_bridge_vlan *
+prestera_bridge_vlan_create(struct prestera_bridge_port *br_port, u16 vid)
+{
+ struct prestera_bridge_vlan *br_vlan;
+
+ br_vlan = kzalloc(sizeof(*br_vlan), GFP_KERNEL);
+ if (!br_vlan)
+ return NULL;
+
+ INIT_LIST_HEAD(&br_vlan->port_vlan_list);
+ br_vlan->vid = vid;
+ list_add(&br_vlan->head, &br_port->vlan_list);
+
+ return br_vlan;
+}
+
+static void prestera_bridge_vlan_destroy(struct prestera_bridge_vlan *br_vlan)
+{
+ list_del(&br_vlan->head);
+ WARN_ON(!list_empty(&br_vlan->port_vlan_list));
+ kfree(br_vlan);
+}
+
+static struct prestera_bridge_vlan *
+prestera_bridge_vlan_by_vid(struct prestera_bridge_port *br_port, u16 vid)
+{
+ struct prestera_bridge_vlan *br_vlan;
+
+ list_for_each_entry(br_vlan, &br_port->vlan_list, head) {
+ if (br_vlan->vid == vid)
+ return br_vlan;
+ }
+
+ return NULL;
+}
+
+static int prestera_bridge_vlan_port_count(struct prestera_bridge *bridge,
+ u16 vid)
+{
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge_vlan *br_vlan;
+ int count = 0;
+
+ list_for_each_entry(br_port, &bridge->port_list, head) {
+ list_for_each_entry(br_vlan, &br_port->vlan_list, head) {
+ if (br_vlan->vid == vid) {
+ count += 1;
+ break;
+ }
+ }
+ }
+
+ return count;
+}
+
+static void prestera_bridge_vlan_put(struct prestera_bridge_vlan *br_vlan)
+{
+ if (list_empty(&br_vlan->port_vlan_list))
+ prestera_bridge_vlan_destroy(br_vlan);
+}
+
+static struct prestera_port_vlan *
+prestera_port_vlan_by_vid(struct prestera_port *port, u16 vid)
+{
+ struct prestera_port_vlan *port_vlan;
+
+ list_for_each_entry(port_vlan, &port->vlans_list, port_head) {
+ if (port_vlan->vid == vid)
+ return port_vlan;
+ }
+
+ return NULL;
+}
+
+static struct prestera_port_vlan *
+prestera_port_vlan_create(struct prestera_port *port, u16 vid, bool untagged)
+{
+ struct prestera_port_vlan *port_vlan;
+ int err;
+
+ port_vlan = prestera_port_vlan_by_vid(port, vid);
+ if (port_vlan)
+ return ERR_PTR(-EEXIST);
+
+ err = prestera_hw_vlan_port_set(port, vid, true, untagged);
+ if (err)
+ return ERR_PTR(err);
+
+ port_vlan = kzalloc(sizeof(*port_vlan), GFP_KERNEL);
+ if (!port_vlan) {
+ err = -ENOMEM;
+ goto err_port_vlan_alloc;
+ }
+
+ port_vlan->port = port;
+ port_vlan->vid = vid;
+
+ list_add(&port_vlan->port_head, &port->vlans_list);
+
+ return port_vlan;
+
+err_port_vlan_alloc:
+ prestera_hw_vlan_port_set(port, vid, false, false);
+ return ERR_PTR(err);
+}
+
+static void
+prestera_port_vlan_bridge_leave(struct prestera_port_vlan *port_vlan)
+{
+ u32 fdb_flush_mode = PRESTERA_FDB_FLUSH_MODE_DYNAMIC;
+ struct prestera_port *port = port_vlan->port;
+ struct prestera_bridge_vlan *br_vlan;
+ struct prestera_bridge_port *br_port;
+ bool last_port, last_vlan;
+ u16 vid = port_vlan->vid;
+ int port_count;
+
+ br_port = port_vlan->br_port;
+ port_count = prestera_bridge_vlan_port_count(br_port->bridge, vid);
+ br_vlan = prestera_bridge_vlan_by_vid(br_port, vid);
+
+ last_vlan = list_is_singular(&br_port->vlan_list);
+ last_port = port_count == 1;
+
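+ /* Flush the narrowest FDB scope that still covers the stale entries:
+ * the whole port if this was its last VLAN, the whole VLAN if this
+ * was its last member port, otherwise just this port/VLAN pair.
+ */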
+ if (last_vlan)
+ prestera_hw_fdb_flush_port(port, fdb_flush_mode);
+ else if (last_port)
+ prestera_hw_fdb_flush_vlan(port->sw, vid, fdb_flush_mode);
+ else
+ prestera_hw_fdb_flush_port_vlan(port, vid, fdb_flush_mode);
+
+ list_del(&port_vlan->br_vlan_head);
+ prestera_bridge_vlan_put(br_vlan);
+ prestera_bridge_port_put(br_port);
+ port_vlan->br_port = NULL;
+}
+
+static void prestera_port_vlan_destroy(struct prestera_port_vlan *port_vlan)
+{
+ struct prestera_port *port = port_vlan->port;
+ u16 vid = port_vlan->vid;
+
+ if (port_vlan->br_port)
+ prestera_port_vlan_bridge_leave(port_vlan);
+
+ prestera_hw_vlan_port_set(port, vid, false, false);
+ list_del(&port_vlan->port_head);
+ kfree(port_vlan);
+}
+
+static struct prestera_bridge *
+prestera_bridge_create(struct prestera_switchdev *swdev, struct net_device *dev)
+{
+ bool vlan_enabled = br_vlan_enabled(dev);
+ struct prestera_bridge *bridge;
+ u16 bridge_id;
+ int err;
+
+ if (vlan_enabled && swdev->bridge_8021q_exists) {
+ netdev_err(dev, "Only one VLAN-aware bridge is supported\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+ if (!bridge)
+ return ERR_PTR(-ENOMEM);
+
+ if (vlan_enabled) {
+ swdev->bridge_8021q_exists = true;
+ } else {
+ err = prestera_hw_bridge_create(swdev->sw, &bridge_id);
+ if (err) {
+ kfree(bridge);
+ return ERR_PTR(err);
+ }
+
+ bridge->bridge_id = bridge_id;
+ }
+
+ bridge->vlan_enabled = vlan_enabled;
+ bridge->swdev = swdev;
+ bridge->dev = dev;
+
+ INIT_LIST_HEAD(&bridge->port_list);
+
+ list_add(&bridge->head, &swdev->bridge_list);
+
+ return bridge;
+}
+
+static void prestera_bridge_destroy(struct prestera_bridge *bridge)
+{
+ struct prestera_switchdev *swdev = bridge->swdev;
+
+ list_del(&bridge->head);
+
+ if (bridge->vlan_enabled)
+ swdev->bridge_8021q_exists = false;
+ else
+ prestera_hw_bridge_delete(swdev->sw, bridge->bridge_id);
+
+ WARN_ON(!list_empty(&bridge->port_list));
+ kfree(bridge);
+}
+
+static void prestera_bridge_put(struct prestera_bridge *bridge)
+{
+ if (list_empty(&bridge->port_list))
+ prestera_bridge_destroy(bridge);
+}
+
+static
+struct prestera_bridge *prestera_bridge_by_dev(struct prestera_switchdev *swdev,
+ const struct net_device *dev)
+{
+ struct prestera_bridge *bridge;
+
+ list_for_each_entry(bridge, &swdev->bridge_list, head)
+ if (bridge->dev == dev)
+ return bridge;
+
+ return NULL;
+}
+
+static struct prestera_bridge_port *
+__prestera_bridge_port_by_dev(struct prestera_bridge *bridge,
+ struct net_device *dev)
+{
+ struct prestera_bridge_port *br_port;
+
+ list_for_each_entry(br_port, &bridge->port_list, head) {
+ if (br_port->dev == dev)
+ return br_port;
+ }
+
+ return NULL;
+}
+
+static struct prestera_bridge_port *
+prestera_bridge_port_by_dev(struct prestera_switchdev *swdev,
+ struct net_device *dev)
+{
+ struct net_device *br_dev = netdev_master_upper_dev_get(dev);
+ struct prestera_bridge *bridge;
+
+ if (!br_dev)
+ return NULL;
+
+ bridge = prestera_bridge_by_dev(swdev, br_dev);
+ if (!bridge)
+ return NULL;
+
+ return __prestera_bridge_port_by_dev(bridge, dev);
+}
+
+static struct prestera_bridge_port *
+prestera_bridge_port_create(struct prestera_bridge *bridge,
+ struct net_device *dev)
+{
+ struct prestera_bridge_port *br_port;
+
+ br_port = kzalloc(sizeof(*br_port), GFP_KERNEL);
+ if (!br_port)
+ return NULL;
+
+ br_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
+ BR_MCAST_FLOOD;
+ br_port->stp_state = BR_STATE_DISABLED;
+ refcount_set(&br_port->ref_count, 1);
+ br_port->bridge = bridge;
+ br_port->dev = dev;
+
+ INIT_LIST_HEAD(&br_port->vlan_list);
+ list_add(&br_port->head, &bridge->port_list);
+
+ return br_port;
+}
+
+static void
+prestera_bridge_port_destroy(struct prestera_bridge_port *br_port)
+{
+ list_del(&br_port->head);
+ WARN_ON(!list_empty(&br_port->vlan_list));
+ kfree(br_port);
+}
+
+static void prestera_bridge_port_get(struct prestera_bridge_port *br_port)
+{
+ refcount_inc(&br_port->ref_count);
+}
+
+static void prestera_bridge_port_put(struct prestera_bridge_port *br_port)
+{
+ struct prestera_bridge *bridge = br_port->bridge;
+
+ if (refcount_dec_and_test(&br_port->ref_count)) {
+ prestera_bridge_port_destroy(br_port);
+ prestera_bridge_put(bridge);
+ }
+}
+
+static struct prestera_bridge_port *
+prestera_bridge_port_add(struct prestera_bridge *bridge, struct net_device *dev)
+{
+ struct prestera_bridge_port *br_port;
+
+ br_port = __prestera_bridge_port_by_dev(bridge, dev);
+ if (br_port) {
+ prestera_bridge_port_get(br_port);
+ return br_port;
+ }
+
+ br_port = prestera_bridge_port_create(bridge, dev);
+ if (!br_port)
+ return ERR_PTR(-ENOMEM);
+
+ return br_port;
+}
+
+static int
+prestera_bridge_1d_port_join(struct prestera_bridge_port *br_port)
+{
+ struct prestera_port *port = netdev_priv(br_port->dev);
+ struct prestera_bridge *bridge = br_port->bridge;
+ int err;
+
+ err = prestera_hw_bridge_port_add(port, bridge->bridge_id);
+ if (err)
+ return err;
+
+ err = prestera_hw_port_flood_set(port, br_port->flags & BR_FLOOD);
+ if (err)
+ goto err_port_flood_set;
+
+ err = prestera_hw_port_learning_set(port, br_port->flags & BR_LEARNING);
+ if (err)
+ goto err_port_learning_set;
+
+ return 0;
+
+err_port_learning_set:
+ prestera_hw_port_flood_set(port, false);
+err_port_flood_set:
+ prestera_hw_bridge_port_delete(port, bridge->bridge_id);
+
+ return err;
+}
+
+static int prestera_port_bridge_join(struct prestera_port *port,
+ struct net_device *upper)
+{
+ struct prestera_switchdev *swdev = port->sw->swdev;
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge *bridge;
+ int err;
+
+ bridge = prestera_bridge_by_dev(swdev, upper);
+ if (!bridge) {
+ bridge = prestera_bridge_create(swdev, upper);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+ }
+
+ br_port = prestera_bridge_port_add(bridge, port->dev);
+ if (IS_ERR(br_port)) {
+ err = PTR_ERR(br_port);
+ goto err_brport_create;
+ }
+
+ if (bridge->vlan_enabled)
+ return 0;
+
+ err = prestera_bridge_1d_port_join(br_port);
+ if (err)
+ goto err_port_join;
+
+ return 0;
+
+err_port_join:
+ prestera_bridge_port_put(br_port);
+err_brport_create:
+ prestera_bridge_put(bridge);
+ return err;
+}
+
+static void prestera_bridge_1q_port_leave(struct prestera_bridge_port *br_port)
+{
+ struct prestera_port *port = netdev_priv(br_port->dev);
+
+ prestera_hw_fdb_flush_port(port, PRESTERA_FDB_FLUSH_MODE_ALL);
+ prestera_port_pvid_set(port, PRESTERA_DEFAULT_VID);
+}
+
+static void prestera_bridge_1d_port_leave(struct prestera_bridge_port *br_port)
+{
+ struct prestera_port *port = netdev_priv(br_port->dev);
+
+ prestera_hw_fdb_flush_port(port, PRESTERA_FDB_FLUSH_MODE_ALL);
+ prestera_hw_bridge_port_delete(port, br_port->bridge->bridge_id);
+}
+
+static int prestera_port_vid_stp_set(struct prestera_port *port, u16 vid,
+ u8 state)
+{
+ u8 hw_state = state;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ hw_state = PRESTERA_STP_DISABLED;
+ break;
+
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ hw_state = PRESTERA_STP_BLOCK_LISTEN;
+ break;
+
+ case BR_STATE_LEARNING:
+ hw_state = PRESTERA_STP_LEARN;
+ break;
+
+ case BR_STATE_FORWARDING:
+ hw_state = PRESTERA_STP_FORWARD;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return prestera_hw_vlan_port_stp_set(port, vid, hw_state);
+}
+
+static void prestera_port_bridge_leave(struct prestera_port *port,
+ struct net_device *upper)
+{
+ struct prestera_switchdev *swdev = port->sw->swdev;
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge *bridge;
+
+ bridge = prestera_bridge_by_dev(swdev, upper);
+ if (!bridge)
+ return;
+
+ br_port = __prestera_bridge_port_by_dev(bridge, port->dev);
+ if (!br_port)
+ return;
+
+ bridge = br_port->bridge;
+
+ if (bridge->vlan_enabled)
+ prestera_bridge_1q_port_leave(br_port);
+ else
+ prestera_bridge_1d_port_leave(br_port);
+
+ prestera_hw_port_learning_set(port, false);
+ prestera_hw_port_flood_set(port, false);
+ prestera_port_vid_stp_set(port, PRESTERA_VID_ALL, BR_STATE_FORWARDING);
+ prestera_bridge_port_put(br_port);
+}
+
+int prestera_bridge_port_event(struct net_device *dev, unsigned long event,
+ void *ptr)
+{
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct netlink_ext_ack *extack;
+ struct prestera_port *port;
+ struct net_device *upper;
+ int err;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+ port = netdev_priv(dev);
+ upper = info->upper_dev;
+
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ if (!netif_is_bridge_master(upper)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
+ return -EINVAL;
+ }
+
+ if (!info->linking)
+ break;
+
+ if (netdev_has_any_upper_dev(upper)) {
+ NL_SET_ERR_MSG_MOD(extack, "Upper device is already enslaved");
+ return -EINVAL;
+ }
+ break;
+
+ case NETDEV_CHANGEUPPER:
+ if (!netif_is_bridge_master(upper))
+ break;
+
+ if (info->linking) {
+ err = prestera_port_bridge_join(port, upper);
+ if (err)
+ return err;
+ } else {
+ prestera_port_bridge_leave(port, upper);
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static int prestera_port_attr_br_flags_set(struct prestera_port *port,
+ struct switchdev_trans *trans,
+ struct net_device *dev,
+ unsigned long flags)
+{
+ struct prestera_bridge_port *br_port;
+ int err;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ br_port = prestera_bridge_port_by_dev(port->sw->swdev, dev);
+ if (!br_port)
+ return 0;
+
+ err = prestera_hw_port_flood_set(port, flags & BR_FLOOD);
+ if (err)
+ return err;
+
+ err = prestera_hw_port_learning_set(port, flags & BR_LEARNING);
+ if (err)
+ return err;
+
+ br_port->flags = flags;
+
+ return 0;
+}
+
+static int prestera_port_attr_br_ageing_set(struct prestera_port *port,
+ struct switchdev_trans *trans,
+ unsigned long ageing_clock_t)
+{
+ unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
+ u32 ageing_time_ms = jiffies_to_msecs(ageing_jiffies);
+ struct prestera_switch *sw = port->sw;
+
+ if (switchdev_trans_ph_prepare(trans)) {
+ if (ageing_time_ms < PRESTERA_MIN_AGEING_TIME_MS ||
+ ageing_time_ms > PRESTERA_MAX_AGEING_TIME_MS)
+ return -ERANGE;
+
+ return 0;
+ }
+
+ return prestera_hw_switch_ageing_set(sw, ageing_time_ms);
+}
+
+static int prestera_port_attr_br_vlan_set(struct prestera_port *port,
+ struct switchdev_trans *trans,
+ struct net_device *dev,
+ bool vlan_enabled)
+{
+ struct prestera_switch *sw = port->sw;
+ struct prestera_bridge *bridge;
+
+ if (!switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ bridge = prestera_bridge_by_dev(sw->swdev, dev);
+ if (WARN_ON(!bridge))
+ return -EINVAL;
+
+ if (bridge->vlan_enabled == vlan_enabled)
+ return 0;
+
+ netdev_err(bridge->dev, "VLAN filtering can't be changed for existing bridge\n");
+
+ return -EINVAL;
+}
+
+static int prestera_port_bridge_vlan_stp_set(struct prestera_port *port,
+ struct prestera_bridge_vlan *br_vlan,
+ u8 state)
+{
+ struct prestera_port_vlan *port_vlan;
+
+ list_for_each_entry(port_vlan, &br_vlan->port_vlan_list, br_vlan_head) {
+ if (port_vlan->port != port)
+ continue;
+
+ return prestera_port_vid_stp_set(port, br_vlan->vid, state);
+ }
+
+ return 0;
+}
+
+static int prestera_port_attr_stp_state_set(struct prestera_port *port,
+ struct switchdev_trans *trans,
+ struct net_device *dev,
+ u8 state)
+{
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge_vlan *br_vlan;
+ int err;
+ u16 vid;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ br_port = prestera_bridge_port_by_dev(port->sw->swdev, dev);
+ if (!br_port)
+ return 0;
+
+ if (!br_port->bridge->vlan_enabled) {
+ vid = br_port->bridge->bridge_id;
+ err = prestera_port_vid_stp_set(port, vid, state);
+ if (err)
+ goto err_port_stp_set;
+ } else {
+ list_for_each_entry(br_vlan, &br_port->vlan_list, head) {
+ err = prestera_port_bridge_vlan_stp_set(port, br_vlan,
+ state);
+ if (err)
+ goto err_port_vlan_stp_set;
+ }
+ }
+
+ br_port->stp_state = state;
+
+ return 0;
+
+err_port_vlan_stp_set:
+ list_for_each_entry_continue_reverse(br_vlan, &br_port->vlan_list, head)
+ prestera_port_bridge_vlan_stp_set(port, br_vlan, br_port->stp_state);
+ return err;
+
+err_port_stp_set:
+ prestera_port_vid_stp_set(port, vid, br_port->stp_state);
+
+ return err;
+}
+
+static int prestera_port_obj_attr_set(struct net_device *dev,
+ const struct switchdev_attr *attr,
+ struct switchdev_trans *trans)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ int err = 0;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ err = prestera_port_attr_stp_state_set(port, trans,
+ attr->orig_dev,
+ attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ if (attr->u.brport_flags &
+ ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD))
+ err = -EINVAL;
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ err = prestera_port_attr_br_flags_set(port, trans,
+ attr->orig_dev,
+ attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+ err = prestera_port_attr_br_ageing_set(port, trans,
+ attr->u.ageing_time);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+ err = prestera_port_attr_br_vlan_set(port, trans,
+ attr->orig_dev,
+ attr->u.vlan_filtering);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static void
+prestera_fdb_offload_notify(struct prestera_port *port,
+ struct switchdev_notifier_fdb_info *info)
+{
+ struct switchdev_notifier_fdb_info send_info;
+
+ send_info.addr = info->addr;
+ send_info.vid = info->vid;
+ send_info.offloaded = true;
+
+ call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, port->dev,
+ &send_info.info, NULL);
+}
+
+static int prestera_port_fdb_set(struct prestera_port *port,
+ struct switchdev_notifier_fdb_info *fdb_info,
+ bool adding)
+{
+ struct prestera_switch *sw = port->sw;
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge *bridge;
+ int err;
+ u16 vid;
+
+ br_port = prestera_bridge_port_by_dev(sw->swdev, port->dev);
+ if (!br_port)
+ return -EINVAL;
+
+ bridge = br_port->bridge;
+
+ if (bridge->vlan_enabled)
+ vid = fdb_info->vid;
+ else
+ vid = bridge->bridge_id;
+
+ if (adding)
+ err = prestera_hw_fdb_add(port, fdb_info->addr, vid, false);
+ else
+ err = prestera_hw_fdb_del(port, fdb_info->addr, vid);
+
+ return err;
+}
+
+static void prestera_fdb_event_work(struct work_struct *work)
+{
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct prestera_fdb_event_work *swdev_work;
+ struct prestera_port *port;
+ struct net_device *dev;
+ int err;
+
+ swdev_work = container_of(work, struct prestera_fdb_event_work, work);
+ dev = swdev_work->dev;
+
+ rtnl_lock();
+
+ port = prestera_port_dev_lower_find(dev);
+ if (!port)
+ goto out_unlock;
+
+ switch (swdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ fdb_info = &swdev_work->fdb_info;
+ if (!fdb_info->added_by_user)
+ break;
+
+ err = prestera_port_fdb_set(port, fdb_info, true);
+ if (err)
+ break;
+
+ prestera_fdb_offload_notify(port, fdb_info);
+ break;
+
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ fdb_info = &swdev_work->fdb_info;
+ prestera_port_fdb_set(port, fdb_info, false);
+ break;
+ }
+
+out_unlock:
+ rtnl_unlock();
+
+ kfree(swdev_work->fdb_info.addr);
+ kfree(swdev_work);
+ dev_put(dev);
+}
+
+static int prestera_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct switchdev_notifier_info *info = ptr;
+ struct prestera_fdb_event_work *swdev_work;
+ struct net_device *upper;
+ int err;
+
+ if (event == SWITCHDEV_PORT_ATTR_SET) {
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ prestera_netdev_check,
+ prestera_port_obj_attr_set);
+ return notifier_from_errno(err);
+ }
+
+ if (!prestera_netdev_check(dev))
+ return NOTIFY_DONE;
+
+ upper = netdev_master_upper_dev_get_rcu(dev);
+ if (!upper)
+ return NOTIFY_DONE;
+
+ if (!netif_is_bridge_master(upper))
+ return NOTIFY_DONE;
+
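+ /* FDB updates arrive in atomic context; defer the HW programming to
+ * the ordered workqueue.
+ */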
+ swdev_work = kzalloc(sizeof(*swdev_work), GFP_ATOMIC);
+ if (!swdev_work)
+ return NOTIFY_BAD;
+
+ swdev_work->event = event;
+ swdev_work->dev = dev;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ fdb_info = container_of(info,
+ struct switchdev_notifier_fdb_info,
+ info);
+
+ INIT_WORK(&swdev_work->work, prestera_fdb_event_work);
+ memcpy(&swdev_work->fdb_info, ptr,
+ sizeof(swdev_work->fdb_info));
+
+ swdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!swdev_work->fdb_info.addr)
+ goto out_bad;
+
+ ether_addr_copy((u8 *)swdev_work->fdb_info.addr,
+ fdb_info->addr);
+ dev_hold(dev);
+ break;
+
+ default:
+ kfree(swdev_work);
+ return NOTIFY_DONE;
+ }
+
+ queue_work(swdev_wq, &swdev_work->work);
+ return NOTIFY_DONE;
+
+out_bad:
+ kfree(swdev_work);
+ return NOTIFY_BAD;
+}
+
+static int
+prestera_port_vlan_bridge_join(struct prestera_port_vlan *port_vlan,
+ struct prestera_bridge_port *br_port)
+{
+ struct prestera_port *port = port_vlan->port;
+ struct prestera_bridge_vlan *br_vlan;
+ u16 vid = port_vlan->vid;
+ int err;
+
+ if (port_vlan->br_port)
+ return 0;
+
+ err = prestera_hw_port_flood_set(port, br_port->flags & BR_FLOOD);
+ if (err)
+ return err;
+
+ err = prestera_hw_port_learning_set(port, br_port->flags & BR_LEARNING);
+ if (err)
+ goto err_port_learning_set;
+
+ err = prestera_port_vid_stp_set(port, vid, br_port->stp_state);
+ if (err)
+ goto err_port_vid_stp_set;
+
+ br_vlan = prestera_bridge_vlan_by_vid(br_port, vid);
+ if (!br_vlan) {
+ br_vlan = prestera_bridge_vlan_create(br_port, vid);
+ if (!br_vlan) {
+ err = -ENOMEM;
+ goto err_bridge_vlan_get;
+ }
+ }
+
+ list_add(&port_vlan->br_vlan_head, &br_vlan->port_vlan_list);
+
+ prestera_bridge_port_get(br_port);
+ port_vlan->br_port = br_port;
+
+ return 0;
+
+err_bridge_vlan_get:
+ prestera_port_vid_stp_set(port, vid, BR_STATE_FORWARDING);
+err_port_vid_stp_set:
+ prestera_hw_port_learning_set(port, false);
+err_port_learning_set:
+ return err;
+}
+
+static int
+prestera_bridge_port_vlan_add(struct prestera_port *port,
+ struct prestera_bridge_port *br_port,
+ u16 vid, bool is_untagged, bool is_pvid,
+ struct netlink_ext_ack *extack)
+{
+ struct prestera_port_vlan *port_vlan;
+ u16 old_pvid = port->pvid;
+ u16 pvid;
+ int err;
+
+ if (is_pvid)
+ pvid = vid;
+ else
+ pvid = port->pvid == vid ? 0 : port->pvid;
+
+ port_vlan = prestera_port_vlan_by_vid(port, vid);
+ if (port_vlan && port_vlan->br_port != br_port)
+ return -EEXIST;
+
+ if (!port_vlan) {
+ port_vlan = prestera_port_vlan_create(port, vid, is_untagged);
+ if (IS_ERR(port_vlan))
+ return PTR_ERR(port_vlan);
+ } else {
+ err = prestera_hw_vlan_port_set(port, vid, true, is_untagged);
+ if (err)
+ goto err_port_vlan_set;
+ }
+
+ err = prestera_port_pvid_set(port, pvid);
+ if (err)
+ goto err_port_pvid_set;
+
+ err = prestera_port_vlan_bridge_join(port_vlan, br_port);
+ if (err)
+ goto err_port_vlan_bridge_join;
+
+ return 0;
+
+err_port_vlan_bridge_join:
+ prestera_port_pvid_set(port, old_pvid);
+err_port_pvid_set:
+ prestera_hw_vlan_port_set(port, vid, false, false);
+err_port_vlan_set:
+ prestera_port_vlan_destroy(port_vlan);
+
+ return err;
+}
+
+static void
+prestera_bridge_port_vlan_del(struct prestera_port *port,
+ struct prestera_bridge_port *br_port, u16 vid)
+{
+ u16 pvid = port->pvid == vid ? 0 : port->pvid;
+ struct prestera_port_vlan *port_vlan;
+
+ port_vlan = prestera_port_vlan_by_vid(port, vid);
+ if (WARN_ON(!port_vlan))
+ return;
+
+ prestera_port_vlan_bridge_leave(port_vlan);
+ prestera_port_pvid_set(port, pvid);
+ prestera_port_vlan_destroy(port_vlan);
+}
+
+static int prestera_port_vlans_add(struct prestera_port *port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans,
+ struct netlink_ext_ack *extack)
+{
+ bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct net_device *dev = vlan->obj.orig_dev;
+ struct prestera_bridge_port *br_port;
+ struct prestera_switch *sw = port->sw;
+ struct prestera_bridge *bridge;
+ u16 vid;
+
+ if (netif_is_bridge_master(dev))
+ return 0;
+
+ if (switchdev_trans_ph_commit(trans))
+ return 0;
+
+ br_port = prestera_bridge_port_by_dev(sw->swdev, dev);
+ if (WARN_ON(!br_port))
+ return -EINVAL;
+
+ bridge = br_port->bridge;
+ if (!bridge->vlan_enabled)
+ return 0;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ int err;
+
+ err = prestera_bridge_port_vlan_add(port, br_port,
+ vid, flag_untagged,
+ flag_pvid, extack);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int prestera_port_obj_add(struct net_device *dev,
+ const struct switchdev_obj *obj,
+ struct switchdev_trans *trans,
+ struct netlink_ext_ack *extack)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ const struct switchdev_obj_port_vlan *vlan;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ return prestera_port_vlans_add(port, vlan, trans, extack);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int prestera_port_vlans_del(struct prestera_port *port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct net_device *dev = vlan->obj.orig_dev;
+ struct prestera_bridge_port *br_port;
+ struct prestera_switch *sw = port->sw;
+ u16 vid;
+
+ if (netif_is_bridge_master(dev))
+ return -EOPNOTSUPP;
+
+ br_port = prestera_bridge_port_by_dev(sw->swdev, dev);
+ if (WARN_ON(!br_port))
+ return -EINVAL;
+
+ if (!br_port->bridge->vlan_enabled)
+ return 0;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
+ prestera_bridge_port_vlan_del(port, br_port, vid);
+
+ return 0;
+}
+
+static int prestera_port_obj_del(struct net_device *dev,
+ const struct switchdev_obj *obj)
+{
+ struct prestera_port *port = netdev_priv(dev);
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ return prestera_port_vlans_del(port, SWITCHDEV_OBJ_PORT_VLAN(obj));
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int prestera_switchdev_blk_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = switchdev_handle_port_obj_add(dev, ptr,
+ prestera_netdev_check,
+ prestera_port_obj_add);
+ break;
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = switchdev_handle_port_obj_del(dev, ptr,
+ prestera_netdev_check,
+ prestera_port_obj_del);
+ break;
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ prestera_netdev_check,
+ prestera_port_obj_attr_set);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ return notifier_from_errno(err);
+}
+
+static void prestera_fdb_event(struct prestera_switch *sw,
+ struct prestera_event *evt, void *arg)
+{
+ struct switchdev_notifier_fdb_info info;
+ struct prestera_port *port;
+
+ port = prestera_find_port(sw, evt->fdb_evt.port_id);
+ if (!port)
+ return;
+
+ info.addr = evt->fdb_evt.data.mac;
+ info.vid = evt->fdb_evt.vid;
+ info.offloaded = true;
+
+ rtnl_lock();
+
+ switch (evt->id) {
+ case PRESTERA_FDB_EVENT_LEARNED:
+ call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
+ port->dev, &info.info, NULL);
+ break;
+ case PRESTERA_FDB_EVENT_AGED:
+ call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
+ port->dev, &info.info, NULL);
+ break;
+ }
+
+ rtnl_unlock();
+}
+
+static int prestera_fdb_init(struct prestera_switch *sw)
+{
+ int err;
+
+ err = prestera_hw_event_handler_register(sw, PRESTERA_EVENT_TYPE_FDB,
+ prestera_fdb_event, NULL);
+ if (err)
+ return err;
+
+ err = prestera_hw_switch_ageing_set(sw, PRESTERA_DEFAULT_AGEING_TIME_MS);
+ if (err)
+ goto err_ageing_set;
+
+ return 0;
+
+err_ageing_set:
+ prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_FDB,
+ prestera_fdb_event);
+ return err;
+}
+
+static void prestera_fdb_fini(struct prestera_switch *sw)
+{
+ prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_FDB,
+ prestera_fdb_event);
+}
+
+static int prestera_switchdev_handler_init(struct prestera_switchdev *swdev)
+{
+ int err;
+
+ swdev->swdev_nb.notifier_call = prestera_switchdev_event;
+ err = register_switchdev_notifier(&swdev->swdev_nb);
+ if (err)
+ goto err_register_swdev_notifier;
+
+ swdev->swdev_nb_blk.notifier_call = prestera_switchdev_blk_event;
+ err = register_switchdev_blocking_notifier(&swdev->swdev_nb_blk);
+ if (err)
+ goto err_register_blk_swdev_notifier;
+
+ return 0;
+
+err_register_blk_swdev_notifier:
+ unregister_switchdev_notifier(&swdev->swdev_nb);
+err_register_swdev_notifier:
+ return err;
+}
+
+static void prestera_switchdev_handler_fini(struct prestera_switchdev *swdev)
+{
+ unregister_switchdev_blocking_notifier(&swdev->swdev_nb_blk);
+ unregister_switchdev_notifier(&swdev->swdev_nb);
+}
+
+int prestera_switchdev_init(struct prestera_switch *sw)
+{
+ struct prestera_switchdev *swdev;
+ int err;
+
+ swdev = kzalloc(sizeof(*swdev), GFP_KERNEL);
+ if (!swdev)
+ return -ENOMEM;
+
+ sw->swdev = swdev;
+ swdev->sw = sw;
+
+ INIT_LIST_HEAD(&swdev->bridge_list);
+
+ swdev_wq = alloc_ordered_workqueue("%s_ordered", 0, "prestera_br");
+ if (!swdev_wq) {
+ err = -ENOMEM;
+ goto err_alloc_wq;
+ }
+
+ err = prestera_switchdev_handler_init(swdev);
+ if (err)
+ goto err_swdev_init;
+
+ err = prestera_fdb_init(sw);
+ if (err)
+ goto err_fdb_init;
+
+ return 0;
+
+err_fdb_init:
+ prestera_switchdev_handler_fini(swdev);
+err_swdev_init:
+ destroy_workqueue(swdev_wq);
+err_alloc_wq:
+ kfree(swdev);
+
+ return err;
+}
+
+void prestera_switchdev_fini(struct prestera_switch *sw)
+{
+ struct prestera_switchdev *swdev = sw->swdev;
+
+ prestera_fdb_fini(sw);
+ prestera_switchdev_handler_fini(swdev);
+ destroy_workqueue(swdev_wq);
+ kfree(swdev);
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h
new file mode 100644
index 000000000000..606e21d2355b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_SWITCHDEV_H_
+#define _PRESTERA_SWITCHDEV_H_
+
+int prestera_switchdev_init(struct prestera_switch *sw);
+void prestera_switchdev_fini(struct prestera_switch *sw);
+
+int prestera_bridge_port_event(struct net_device *dev, unsigned long event,
+ void *ptr);
+
+#endif /* _PRESTERA_SWITCHDEV_H_ */
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index eb8cf60ecf12..d1e4d42e497d 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1187,11 +1187,10 @@ static int pxa168_eth_stop(struct net_device *dev)
static int pxa168_eth_change_mtu(struct net_device *dev, int mtu)
{
- int retval;
struct pxa168_eth_private *pep = netdev_priv(dev);
dev->mtu = mtu;
- retval = set_port_config_ext(pep);
+ set_port_config_ext(pep);
if (!netif_running(dev))
return 0;
@@ -1541,10 +1540,8 @@ static int pxa168_eth_remove(struct platform_device *pdev)
}
if (dev->phydev)
phy_disconnect(dev->phydev);
- if (pep->clk) {
- clk_disable_unprepare(pep->clk);
- }
+ clk_disable_unprepare(pep->clk);
mdiobus_unregister(pep->smi_bus);
mdiobus_free(pep->smi_bus);
unregister_netdev(dev);
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 6a930351cb23..8a9c0f490bfb 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3338,9 +3338,9 @@ static void skge_error_irq(struct skge_hw *hw)
* because accessing phy registers requires spin wait which might
* cause excess interrupt latency.
*/
-static void skge_extirq(unsigned long arg)
+static void skge_extirq(struct tasklet_struct *t)
{
- struct skge_hw *hw = (struct skge_hw *) arg;
+ struct skge_hw *hw = from_tasklet(hw, t, phy_task);
int port;
for (port = 0; port < hw->ports; port++) {
@@ -3927,7 +3927,7 @@ static int skge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->pdev = pdev;
spin_lock_init(&hw->hw_lock);
spin_lock_init(&hw->phy_lock);
- tasklet_init(&hw->phy_task, skge_extirq, (unsigned long) hw);
+ tasklet_setup(&hw->phy_task, skge_extirq);
hw->regs = ioremap(pci_resource_start(pdev, 0), 0x4000);
if (!hw->regs) {