Diffstat (limited to 'drivers/net/ethernet/freescale/fec_main.c')
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 397
1 file changed, 324 insertions(+), 73 deletions(-)

diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 23e1a94b9ce4..5528b0af82ae 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -89,6 +89,11 @@ static const u16 fec_enet_vlan_pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2};
 #define FEC_ENET_OPD_V	0xFFF0
 #define FEC_MDIO_PM_TIMEOUT  100 /* ms */
 
+#define FEC_ENET_XDP_PASS          0
+#define FEC_ENET_XDP_CONSUMED      BIT(0)
+#define FEC_ENET_XDP_TX            BIT(1)
+#define FEC_ENET_XDP_REDIR         BIT(2)
+
 struct fec_devinfo {
 	u32 quirks;
 };
@@ -365,16 +370,6 @@ static void swap_buffer(void *bufaddr, int len)
 		swab32s(buf);
 }
 
-static void swap_buffer2(void *dst_buf, void *src_buf, int len)
-{
-	int i;
-	unsigned int *src = src_buf;
-	unsigned int *dst = dst_buf;
-
-	for (i = 0; i < len; i += 4, src++, dst++)
-		*dst = swab32p(src);
-}
-
 static void fec_dump(struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
@@ -428,13 +423,14 @@ static int
 fec_enet_create_page_pool(struct fec_enet_private *fep,
 			  struct fec_enet_priv_rx_q *rxq, int size)
 {
+	struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
 	struct page_pool_params pp_params = {
 		.order = 0,
 		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 		.pool_size = size,
 		.nid = dev_to_node(&fep->pdev->dev),
 		.dev = &fep->pdev->dev,
-		.dma_dir = DMA_FROM_DEVICE,
+		.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
 		.offset = FEC_ENET_XDP_HEADROOM,
 		.max_len = FEC_ENET_RX_FRSIZE,
 	};
@@ -1494,53 +1490,6 @@ static void fec_enet_tx(struct net_device *ndev)
 		fec_enet_tx_queue(ndev, i);
 }
 
-static int __maybe_unused
-fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb)
-{
-	struct  fec_enet_private *fep = netdev_priv(ndev);
-	int off;
-
-	off = ((unsigned long)skb->data) & fep->rx_align;
-	if (off)
-		skb_reserve(skb, fep->rx_align + 1 - off);
-
-	bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE));
-	if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) {
-		if (net_ratelimit())
-			netdev_err(ndev, "Rx DMA memory map failed\n");
-		return -ENOMEM;
-	}
-
-	return 0;
-}
-
-static bool __maybe_unused
-fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
-		   struct bufdesc *bdp, u32 length, bool swap)
-{
-	struct  fec_enet_private *fep = netdev_priv(ndev);
-	struct sk_buff *new_skb;
-
-	if (length > fep->rx_copybreak)
-		return false;
-
-	new_skb = netdev_alloc_skb(ndev, length);
-	if (!new_skb)
-		return false;
-
-	dma_sync_single_for_cpu(&fep->pdev->dev,
-				fec32_to_cpu(bdp->cbd_bufaddr),
-				FEC_ENET_RX_FRSIZE - fep->rx_align,
-				DMA_FROM_DEVICE);
-	if (!swap)
-		memcpy(new_skb->data, (*skb)->data, length);
-	else
-		swap_buffer2(new_skb->data, (*skb)->data, length);
-	*skb = new_skb;
-
-	return true;
-}
-
 static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
 				struct bufdesc *bdp, int index)
 {
@@ -1556,6 +1505,62 @@ static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
 	bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
 }
 
+static u32
+fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
+		 struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int index)
+{
+	unsigned int sync, len = xdp->data_end - xdp->data;
+	u32 ret = FEC_ENET_XDP_PASS;
+	struct page *page;
+	int err;
+	u32 act;
+
+	act = bpf_prog_run_xdp(prog, xdp);
+
+	/* Due to xdp_adjust_tail: the DMA sync for_device must cover the max len the CPU touched */
+	sync = xdp->data_end - xdp->data_hard_start - FEC_ENET_XDP_HEADROOM;
+	sync = max(sync, len);
+
+	switch (act) {
+	case XDP_PASS:
+		rxq->stats[RX_XDP_PASS]++;
+		ret = FEC_ENET_XDP_PASS;
+		break;
+
+	case XDP_REDIRECT:
+		rxq->stats[RX_XDP_REDIRECT]++;
+		err = xdp_do_redirect(fep->netdev, xdp, prog);
+		if (!err) {
+			ret = FEC_ENET_XDP_REDIR;
+		} else {
+			ret = FEC_ENET_XDP_CONSUMED;
+			page = virt_to_head_page(xdp->data);
+			page_pool_put_page(rxq->page_pool, page, sync, true);
+		}
+		break;
+
+	default:
+		bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
+		fallthrough;
+
+	case XDP_TX:
+		bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
+		fallthrough;
+
+	case XDP_ABORTED:
+		fallthrough;    /* handle aborts by dropping packet */
+
+	case XDP_DROP:
+		rxq->stats[RX_XDP_DROP]++;
+		ret = FEC_ENET_XDP_CONSUMED;
+		page = virt_to_head_page(xdp->data);
+		page_pool_put_page(rxq->page_pool, page, sync, true);
+		break;
+	}
+
+	return ret;
+}
+
 /* During a receive, the bd_rx.cur points to the current incoming buffer.
  * When we update through the ring, if the next incoming buffer has
  * not been given to the system, we just set the empty indicator,
@@ -1577,7 +1582,22 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 	u16	vlan_tag;
 	int	index = 0;
 	bool	need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
+	struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
+	u32 ret, xdp_result = FEC_ENET_XDP_PASS;
+	u32 data_start = FEC_ENET_XDP_HEADROOM;
+	struct xdp_buff xdp;
 	struct page *page;
+	u32 sub_len = 4;
+
+#if !defined(CONFIG_M5272)
+	/* If it has the FEC_QUIRK_HAS_RACC quirk property, the
+	 * FEC_RACC_SHIFT16 bit is set by default in the probe function.
+	 */
+	if (fep->quirks & FEC_QUIRK_HAS_RACC) {
+		data_start += 2;
+		sub_len += 2;
+	}
+#endif
 
 #ifdef CONFIG_M532x
 	flush_cache_all();
@@ -1588,6 +1608,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 	 * These get messed up if we get called due to a busy condition.
 	 */
 	bdp = rxq->bd.cur;
+	xdp_init_buff(&xdp, PAGE_SIZE, &rxq->xdp_rxq);
 
 	while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
 
@@ -1637,23 +1658,31 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 		prefetch(page_address(page));
 		fec_enet_update_cbd(rxq, bdp, index);
 
+		if (xdp_prog) {
+			xdp_buff_clear_frags_flag(&xdp);
+			/* subtract 16bit shift and FCS */
+			xdp_prepare_buff(&xdp, page_address(page),
+					 data_start, pkt_len - sub_len, false);
+			ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, index);
+			xdp_result |= ret;
+			if (ret != FEC_ENET_XDP_PASS)
+				goto rx_processing_done;
+		}
+
 		/* The packet length includes FCS, but we don't want to
 		 * include that when passing upstream as it messes up
 		 * bridging applications.
 		 */
 		skb = build_skb(page_address(page), PAGE_SIZE);
-		skb_reserve(skb, FEC_ENET_XDP_HEADROOM);
-		skb_put(skb, pkt_len - 4);
+		skb_reserve(skb, data_start);
+		skb_put(skb, pkt_len - sub_len);
 		skb_mark_for_recycle(skb);
 
-		data = skb->data;
-		if (need_swap)
+		if (unlikely(need_swap)) {
+			data = page_address(page) + FEC_ENET_XDP_HEADROOM;
 			swap_buffer(data, pkt_len);
-
-#if !defined(CONFIG_M5272)
-		if (fep->quirks & FEC_QUIRK_HAS_RACC)
-			data = skb_pull_inline(skb, 2);
-#endif
+		}
+		data = skb->data;
 
 		/* Extract the enhanced buffer descriptor */
 		ebdp = NULL;
@@ -1732,6 +1761,10 @@ rx_processing_done:
 		writel(0, rxq->bd.reg_desc_active);
 	}
 	rxq->bd.cur = bdp;
+
+	if (xdp_result & FEC_ENET_XDP_REDIR)
+		xdp_do_flush_map();
+
 	return pkt_received;
 }
 
@@ -2226,7 +2259,7 @@ static int fec_enet_mii_probe(struct net_device *ndev)
 	fep->link = 0;
 	fep->full_duplex = 0;
 
-	phy_dev->mac_managed_pm = 1;
+	phy_dev->mac_managed_pm = true;
 
 	phy_attached_info(phy_dev);
 
@@ -2671,6 +2704,16 @@ static const struct fec_stat {
 
 #define FEC_STATS_SIZE		(ARRAY_SIZE(fec_stats) * sizeof(u64))
 
+static const char *fec_xdp_stat_strs[XDP_STATS_TOTAL] = {
+	"rx_xdp_redirect",           /* RX_XDP_REDIRECT = 0, */
+	"rx_xdp_pass",               /* RX_XDP_PASS, */
+	"rx_xdp_drop",               /* RX_XDP_DROP, */
+	"rx_xdp_tx",                 /* RX_XDP_TX, */
+	"rx_xdp_tx_errors",          /* RX_XDP_TX_ERRORS, */
+	"tx_xdp_xmit",               /* TX_XDP_XMIT, */
+	"tx_xdp_xmit_errors",        /* TX_XDP_XMIT_ERRORS, */
+};
+
 static void fec_enet_update_ethtool_stats(struct net_device *dev)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
@@ -2680,6 +2723,40 @@ static void fec_enet_update_ethtool_stats(struct net_device *dev)
 		fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
 }
 
+static void fec_enet_get_xdp_stats(struct fec_enet_private *fep, u64 *data)
+{
+	u64 xdp_stats[XDP_STATS_TOTAL] = { 0 };
+	struct fec_enet_priv_rx_q *rxq;
+	int i, j;
+
+	for (i = fep->num_rx_queues - 1; i >= 0; i--) {
+		rxq = fep->rx_queue[i];
+
+		for (j = 0; j < XDP_STATS_TOTAL; j++)
+			xdp_stats[j] += rxq->stats[j];
+	}
+
+	memcpy(data, xdp_stats, sizeof(xdp_stats));
+}
+
+static void fec_enet_page_pool_stats(struct fec_enet_private *fep, u64 *data)
+{
+	struct page_pool_stats stats = {};
+	struct fec_enet_priv_rx_q *rxq;
+	int i;
+
+	for (i = fep->num_rx_queues - 1; i >= 0; i--) {
+		rxq = fep->rx_queue[i];
+
+		if (!rxq->page_pool)
+			continue;
+
+		page_pool_get_stats(rxq->page_pool, &stats);
+	}
+
+	page_pool_ethtool_stats_get(data, &stats);
+}
+
 static void fec_enet_get_ethtool_stats(struct net_device *dev,
 				       struct ethtool_stats *stats, u64 *data)
 {
@@ -2689,6 +2766,12 @@ static void fec_enet_get_ethtool_stats(struct net_device *dev,
 		fec_enet_update_ethtool_stats(dev);
 
 	memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
+	data += FEC_STATS_SIZE / sizeof(u64);
+
+	fec_enet_get_xdp_stats(fep, data);
+	data += XDP_STATS_TOTAL;
+
+	fec_enet_page_pool_stats(fep, data);
 }
 
 static void fec_enet_get_strings(struct net_device *netdev,
@@ -2697,9 +2780,16 @@ static void fec_enet_get_strings(struct net_device *netdev,
 	int i;
 	switch (stringset) {
 	case ETH_SS_STATS:
-		for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
-			memcpy(data + i * ETH_GSTRING_LEN,
-				fec_stats[i].name, ETH_GSTRING_LEN);
+		for (i = 0; i < ARRAY_SIZE(fec_stats); i++) {
+			memcpy(data, fec_stats[i].name, ETH_GSTRING_LEN);
+			data += ETH_GSTRING_LEN;
+		}
+		for (i = 0; i < ARRAY_SIZE(fec_xdp_stat_strs); i++) {
+			strncpy(data, fec_xdp_stat_strs[i], ETH_GSTRING_LEN);
+			data += ETH_GSTRING_LEN;
+		}
+		page_pool_ethtool_stats_get_strings(data);
+
 		break;
 	case ETH_SS_TEST:
 		net_selftest_get_strings(data);
@@ -2709,9 +2799,14 @@
 static int fec_enet_get_sset_count(struct net_device *dev, int sset)
 {
+	int count;
+
 	switch (sset) {
 	case ETH_SS_STATS:
-		return ARRAY_SIZE(fec_stats);
+		count = ARRAY_SIZE(fec_stats) + XDP_STATS_TOTAL;
+		count += page_pool_ethtool_stats_get_count();
+		return count;
+
 	case ETH_SS_TEST:
 		return net_selftest_get_count();
 	default:
@@ -2722,7 +2817,8 @@
 static void fec_enet_clear_ethtool_stats(struct net_device *dev)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
-	int i;
+	struct fec_enet_priv_rx_q *rxq;
+	int i, j;
 
 	/* Disable MIB statistics counters */
 	writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT);
@@ -2730,6 +2826,12 @@ static void fec_enet_clear_ethtool_stats(struct net_device *dev)
 	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
 		writel(0, fep->hwp + fec_stats[i].offset);
 
+	for (i = fep->num_rx_queues - 1; i >= 0; i--) {
+		rxq = fep->rx_queue[i];
+		for (j = 0; j < XDP_STATS_TOTAL; j++)
+			rxq->stats[j] = 0;
+	}
+
 	/* Don't disable MIB statistics counters */
 	writel(0, fep->hwp + FEC_MIB_CTRLSTAT);
 }
@@ -3083,6 +3185,9 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 		for (i = 0; i < rxq->bd.ring_size; i++)
 			page_pool_release_page(rxq->page_pool, rxq->rx_skb_info[i].page);
 
+		for (i = 0; i < XDP_STATS_TOTAL; i++)
+			rxq->stats[i] = 0;
+
 		if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
 			xdp_rxq_info_unreg(&rxq->xdp_rxq);
 		page_pool_destroy(rxq->page_pool);
@@ -3562,6 +3667,150 @@ static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb,
 	return fec_enet_vlan_pri_to_queue[vlan_tag >> 13];
 }
 
+static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf)
+{
+	struct fec_enet_private *fep = netdev_priv(dev);
+	bool is_run = netif_running(dev);
+	struct bpf_prog *old_prog;
+
+	switch (bpf->command) {
+	case XDP_SETUP_PROG:
+		/* There is no need to support the SoCs that require the
+		 * frame swap, because the performance would be no better
+		 * than that of skb mode.
+		 */
+		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
+			return -EOPNOTSUPP;
+
+		if (is_run) {
+			napi_disable(&fep->napi);
+			netif_tx_disable(dev);
+		}
+
+		old_prog = xchg(&fep->xdp_prog, bpf->prog);
+		fec_restart(dev);
+
+		if (is_run) {
+			napi_enable(&fep->napi);
+			netif_tx_start_all_queues(dev);
+		}
+
+		if (old_prog)
+			bpf_prog_put(old_prog);
+
+		return 0;
+
+	case XDP_SETUP_XSK_POOL:
+		return -EOPNOTSUPP;
+
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int
+fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index)
+{
+	if (unlikely(index < 0))
+		return 0;
+
+	return (index % fep->num_tx_queues);
+}
+
+static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
+				   struct fec_enet_priv_tx_q *txq,
+				   struct xdp_frame *frame)
+{
+	unsigned int index, status, estatus;
+	struct bufdesc *bdp, *last_bdp;
+	dma_addr_t dma_addr;
+	int entries_free;
+
+	entries_free = fec_enet_get_free_txdesc_num(txq);
+	if (entries_free < MAX_SKB_FRAGS + 1) {
+		netdev_err(fep->netdev, "NOT enough BD for SG!\n");
+		return NETDEV_TX_OK;
+	}
+
+	/* Fill in a Tx ring entry */
+	bdp = txq->bd.cur;
+	last_bdp = bdp;
+	status = fec16_to_cpu(bdp->cbd_sc);
+	status &= ~BD_ENET_TX_STATS;
+
+	index = fec_enet_get_bd_index(bdp, &txq->bd);
+
+	dma_addr = dma_map_single(&fep->pdev->dev, frame->data,
+				  frame->len, DMA_TO_DEVICE);
+	if (dma_mapping_error(&fep->pdev->dev, dma_addr))
+		return FEC_ENET_XDP_CONSUMED;
+
+	status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
+	if (fep->bufdesc_ex)
+		estatus = BD_ENET_TX_INT;
+
+	bdp->cbd_bufaddr = cpu_to_fec32(dma_addr);
+	bdp->cbd_datlen = cpu_to_fec16(frame->len);
+
+	if (fep->bufdesc_ex) {
+		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+		if (fep->quirks & FEC_QUIRK_HAS_AVB)
+			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
+
+		ebdp->cbd_bdu = 0;
+		ebdp->cbd_esc = cpu_to_fec32(estatus);
+	}
+
+	index = fec_enet_get_bd_index(last_bdp, &txq->bd);
+	txq->tx_skbuff[index] = NULL;
+
+	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
+	 * it's the last BD of the frame, and to put the CRC on the end.
+	 */
+	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
+	bdp->cbd_sc = cpu_to_fec16(status);
+
+	/* If this was the last BD in the ring, start at the beginning again. */
+	bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);
+
+	txq->bd.cur = bdp;
+
+	return 0;
+}
+
+static int fec_enet_xdp_xmit(struct net_device *dev,
+			     int num_frames,
+			     struct xdp_frame **frames,
+			     u32 flags)
+{
+	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_enet_priv_tx_q *txq;
+	int cpu = smp_processor_id();
+	struct netdev_queue *nq;
+	unsigned int queue;
+	int i;
+
+	queue = fec_enet_xdp_get_tx_queue(fep, cpu);
+	txq = fep->tx_queue[queue];
+	nq = netdev_get_tx_queue(fep->netdev, queue);
+
+	__netif_tx_lock(nq, cpu);
+
+	for (i = 0; i < num_frames; i++)
+		fec_enet_txq_xmit_frame(fep, txq, frames[i]);
+
+	/* Make sure the updates to bdp and tx_skbuff are performed. */
+	wmb();
+
+	/* Trigger transmission start */
+	writel(0, txq->bd.reg_desc_active);
+
+	__netif_tx_unlock(nq);
+
+	return num_frames;
+}
+
 static const struct net_device_ops fec_netdev_ops = {
 	.ndo_open		= fec_enet_open,
 	.ndo_stop		= fec_enet_close,
@@ -3576,6 +3825,8 @@ static const struct net_device_ops fec_netdev_ops = {
 	.ndo_poll_controller	= fec_poll_controller,
 #endif
 	.ndo_set_features	= fec_set_features,
+	.ndo_bpf		= fec_enet_bpf,
+	.ndo_xdp_xmit		= fec_enet_xdp_xmit,
 };
 
 static const unsigned short offset_des_active_rxq[] = {
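The verdict handling in fec_enet_run_xdp() above maps the standard XDP return codes onto the driver-private FEC_ENET_XDP_* flags: XDP_PASS lets the frame continue into build_skb(), while XDP_DROP (and, in this revision, XDP_TX and any unknown action) bumps the matching counter and recycles the page straight back into the page_pool. A minimal sketch of a program that exercises the pass/drop paths follows; the program name and the IPv4-only policy are illustrative assumptions, not part of this commit.

// SPDX-License-Identifier: GPL-2.0
/* Illustrative sketch only: pass IPv4 frames, drop everything else.
 * XDP_DROP returns show up in the new rx_xdp_drop counter,
 * XDP_PASS returns in rx_xdp_pass.
 */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("xdp")
int fec_xdp_filter(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	struct ethhdr *eth = data;

	/* The verifier requires a bounds check before the header is read. */
	if ((void *)(eth + 1) > data_end)
		return XDP_DROP;

	if (eth->h_proto != bpf_htons(ETH_P_IP))
		return XDP_DROP;

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";

Note that this version of the driver warns on and drops XDP_TX, so programs targeting this NIC should restrict themselves to PASS, DROP, ABORTED and REDIRECT.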
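Attaching such a program in native mode reaches fec_enet_bpf() through the new .ndo_bpf hook: the driver quiesces NAPI and the TX queues, swaps fep->xdp_prog with xchg(), and calls fec_restart() so the page_pool is rebuilt with DMA_BIDIRECTIONAL mapping. A user-space sketch with libbpf, assuming libbpf >= 0.8 for bpf_xdp_attach(); the object file and interface name are placeholders:

/* Illustrative sketch only: load an XDP object and attach it in
 * driver (native) mode. On FEC_QUIRK_SWAP_FRAME SoCs the driver
 * rejects this with -EOPNOTSUPP, per the XDP_SETUP_PROG branch above.
 */
#include <net/if.h>
#include <linux/if_link.h>
#include <bpf/libbpf.h>

int main(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	int ifindex = if_nametoindex("eth0");	/* placeholder interface */

	obj = bpf_object__open_file("fec_xdp_filter.bpf.o", NULL);
	if (!obj || bpf_object__load(obj))
		return 1;

	prog = bpf_object__find_program_by_name(obj, "fec_xdp_filter");
	if (!prog)
		return 1;

	/* XDP_FLAGS_DRV_MODE requests the driver path added by this commit */
	if (bpf_xdp_attach(ifindex, bpf_program__fd(prog),
			   XDP_FLAGS_DRV_MODE, NULL))
		return 1;

	return 0;
}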
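The new counters are appended to the ETH_SS_STATS string set behind the hardware MIB counters and ahead of the page_pool strings, so a stock `ethtool -S eth0` already shows them. For reference, a sketch of the raw ioctl sequence that fec_enet_get_strings() and fec_enet_get_ethtool_stats() serve; the interface name is a placeholder and allocation error handling is trimmed:

/* Illustrative sketch only: dump all ETH_SS_STATS counters,
 * including the rx_xdp_* / tx_xdp_* strings registered above.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_drvinfo drv = { .cmd = ETHTOOL_GDRVINFO };
	struct ethtool_gstrings *strs;
	struct ethtool_stats *stats;
	struct ifreq ifr = { 0 };
	unsigned int i;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder */

	ifr.ifr_data = (void *)&drv;
	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	/* drv.n_stats covers fec_stats + XDP_STATS_TOTAL + page_pool
	 * stats, matching fec_enet_get_sset_count() above.
	 */
	strs = calloc(1, sizeof(*strs) + drv.n_stats * ETH_GSTRING_LEN);
	strs->cmd = ETHTOOL_GSTRINGS;
	strs->string_set = ETH_SS_STATS;
	strs->len = drv.n_stats;
	ifr.ifr_data = (void *)strs;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	stats = calloc(1, sizeof(*stats) + drv.n_stats * sizeof(__u64));
	stats->cmd = ETHTOOL_GSTATS;
	stats->n_stats = drv.n_stats;
	ifr.ifr_data = (void *)stats;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	for (i = 0; i < drv.n_stats; i++)
		printf("%.*s: %llu\n", ETH_GSTRING_LEN,
		       (char *)strs->data + i * ETH_GSTRING_LEN,
		       (unsigned long long)stats->data[i]);
	return 0;
}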
