Diffstat (limited to 'drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c')
-rw-r--r-- | drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 147
1 file changed, 106 insertions(+), 41 deletions(-)
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 7d2390e3df77..0acb11557ed1 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -555,7 +555,7 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv,
 	/* Prepare the HW SGT structure */
 	sgt_buf_size = priv->tx_data_offset +
 		       sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
-	sgt_buf = netdev_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
+	sgt_buf = napi_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
 	if (unlikely(!sgt_buf)) {
 		err = -ENOMEM;
 		goto sgt_buf_alloc_failed;
@@ -757,6 +757,7 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
 	u16 queue_mapping;
 	unsigned int needed_headroom;
 	u32 fd_len;
+	u8 prio = 0;
 	int err, i;
 
 	percpu_stats = this_cpu_ptr(priv->percpu_stats);
@@ -814,6 +815,18 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
 	 * a queue affined to the same core that processed the Rx frame
 	 */
 	queue_mapping = skb_get_queue_mapping(skb);
+
+	if (net_dev->num_tc) {
+		prio = netdev_txq_to_tc(net_dev, queue_mapping);
+		/* Hardware interprets priority level 0 as being the highest,
+		 * so we need to do a reverse mapping to the netdev tc index
+		 */
+		prio = net_dev->num_tc - prio - 1;
+		/* We have only one FQ array entry for all Tx hardware queues
+		 * with the same flow id (but different priority levels)
+		 */
+		queue_mapping %= dpaa2_eth_queue_count(priv);
+	}
 	fq = &priv->fq[queue_mapping];
 
 	fd_len = dpaa2_fd_get_len(&fd);
@@ -824,7 +837,7 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
 	 * the Tx confirmation callback for this frame
 	 */
 	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
-		err = priv->enqueue(priv, fq, &fd, 0);
+		err = priv->enqueue(priv, fq, &fd, prio);
 		if (err != -EBUSY)
 			break;
 	}
@@ -997,13 +1010,6 @@ static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
 	int i, j;
 	int new_count;
 
-	/* This is the lazy seeding of Rx buffer pools.
-	 * dpaa2_add_bufs() is also used on the Rx hotpath and calls
-	 * napi_alloc_frag(). The trouble with that is that it in turn ends up
-	 * calling this_cpu_ptr(), which mandates execution in atomic context.
-	 * Rather than splitting up the code, do a one-off preempt disable.
-	 */
-	preempt_disable();
 	for (j = 0; j < priv->num_channels; j++) {
 		for (i = 0; i < DPAA2_ETH_NUM_BUFS;
 		     i += DPAA2_ETH_BUFS_PER_CMD) {
@@ -1011,12 +1017,10 @@ static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
 			priv->channel[j]->buf_count += new_count;
 
 			if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
-				preempt_enable();
 				return -ENOMEM;
 			}
 		}
 	}
-	preempt_enable();
 
 	return 0;
 }
@@ -1872,6 +1876,78 @@ static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
 	return n - drops;
 }
 
+static int update_xps(struct dpaa2_eth_priv *priv)
+{
+	struct net_device *net_dev = priv->net_dev;
+	struct cpumask xps_mask;
+	struct dpaa2_eth_fq *fq;
+	int i, num_queues, netdev_queues;
+	int err = 0;
+
+	num_queues = dpaa2_eth_queue_count(priv);
+	netdev_queues = (net_dev->num_tc ? : 1) * num_queues;
+
+	/* The first <num_queues> entries in priv->fq array are Tx/Tx conf
+	 * queues, so only process those
+	 */
+	for (i = 0; i < netdev_queues; i++) {
+		fq = &priv->fq[i % num_queues];
+
+		cpumask_clear(&xps_mask);
+		cpumask_set_cpu(fq->target_cpu, &xps_mask);
+
+		err = netif_set_xps_queue(net_dev, &xps_mask, i);
+		if (err) {
+			netdev_warn_once(net_dev, "Error setting XPS queue\n");
+			break;
+		}
+	}
+
+	return err;
+}
+
+static int dpaa2_eth_setup_tc(struct net_device *net_dev,
+			      enum tc_setup_type type, void *type_data)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	struct tc_mqprio_qopt *mqprio = type_data;
+	u8 num_tc, num_queues;
+	int i;
+
+	if (type != TC_SETUP_QDISC_MQPRIO)
+		return -EINVAL;
+
+	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+	num_queues = dpaa2_eth_queue_count(priv);
+	num_tc = mqprio->num_tc;
+
+	if (num_tc == net_dev->num_tc)
+		return 0;
+
+	if (num_tc > dpaa2_eth_tc_count(priv)) {
+		netdev_err(net_dev, "Max %d traffic classes supported\n",
+			   dpaa2_eth_tc_count(priv));
+		return -EINVAL;
+	}
+
+	if (!num_tc) {
+		netdev_reset_tc(net_dev);
+		netif_set_real_num_tx_queues(net_dev, num_queues);
+		goto out;
+	}
+
+	netdev_set_num_tc(net_dev, num_tc);
+	netif_set_real_num_tx_queues(net_dev, num_tc * num_queues);
+
+	for (i = 0; i < num_tc; i++)
+		netdev_set_tc_queue(net_dev, i, num_queues, i * num_queues);
+
+out:
+	update_xps(priv);
+
+	return 0;
+}
+
 static const struct net_device_ops dpaa2_eth_ops = {
 	.ndo_open = dpaa2_eth_open,
 	.ndo_start_xmit = dpaa2_eth_tx,
@@ -1884,6 +1960,7 @@ static const struct net_device_ops dpaa2_eth_ops = {
 	.ndo_change_mtu = dpaa2_eth_change_mtu,
 	.ndo_bpf = dpaa2_eth_xdp,
 	.ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
+	.ndo_setup_tc = dpaa2_eth_setup_tc,
 };
 
 static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
@@ -2138,10 +2215,9 @@ static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
 static void set_fq_affinity(struct dpaa2_eth_priv *priv)
 {
 	struct device *dev = priv->net_dev->dev.parent;
-	struct cpumask xps_mask;
 	struct dpaa2_eth_fq *fq;
 	int rx_cpu, txc_cpu;
-	int i, err;
+	int i;
 
 	/* For each FQ, pick one channel/CPU to deliver frames to.
 	 * This may well change at runtime, either through irqbalance or
@@ -2160,17 +2236,6 @@ static void set_fq_affinity(struct dpaa2_eth_priv *priv)
 			break;
 		case DPAA2_TX_CONF_FQ:
 			fq->target_cpu = txc_cpu;
-
-			/* Tell the stack to affine to txc_cpu the Tx queue
-			 * associated with the confirmation one
-			 */
-			cpumask_clear(&xps_mask);
-			cpumask_set_cpu(txc_cpu, &xps_mask);
-			err = netif_set_xps_queue(priv->net_dev, &xps_mask,
-						  fq->flowid);
-			if (err)
-				dev_err(dev, "Error setting XPS queue\n");
-
 			txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
 			if (txc_cpu >= nr_cpu_ids)
 				txc_cpu = cpumask_first(&priv->dpio_cpumask);
@@ -2180,6 +2245,8 @@ static void set_fq_affinity(struct dpaa2_eth_priv *priv)
 		}
 		fq->channel = get_affine_channel(priv, fq->target_cpu);
 	}
+
+	update_xps(priv);
 }
 
 static void setup_fqs(struct dpaa2_eth_priv *priv)
@@ -2361,11 +2428,10 @@ static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
 
 static inline int dpaa2_eth_enqueue_fq(struct dpaa2_eth_priv *priv,
 				       struct dpaa2_eth_fq *fq,
-				       struct dpaa2_fd *fd,
-				       u8 prio __always_unused)
+				       struct dpaa2_fd *fd, u8 prio)
 {
 	return dpaa2_io_service_enqueue_fq(fq->channel->dpio,
-					   fq->tx_fqid, fd);
+					   fq->tx_fqid[prio], fd);
 }
 
 static void set_enqueue_mode(struct dpaa2_eth_priv *priv)
@@ -2479,14 +2545,9 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv,
 	queue.destination.type = DPNI_DEST_DPCON;
 	queue.destination.priority = 1;
 	queue.user_context = (u64)(uintptr_t)fq;
-	queue.flc.stash_control = 1;
-	queue.flc.value &= 0xFFFFFFFFFFFFFFC0;
-	/* 01 01 00 - data, annotation, flow context */
-	queue.flc.value |= 0x14;
 	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
 			     DPNI_QUEUE_RX, 0, fq->flowid,
-			     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST |
-			     DPNI_QUEUE_OPT_FLC,
+			     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
 			     &queue);
 	if (err) {
 		dev_err(dev, "dpni_set_queue(RX) failed\n");
@@ -2526,17 +2587,21 @@ static int setup_tx_flow(struct dpaa2_eth_priv *priv,
 	struct device *dev = priv->net_dev->dev.parent;
 	struct dpni_queue queue;
 	struct dpni_queue_id qid;
-	int err;
+	int i, err;
 
-	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
-			     DPNI_QUEUE_TX, 0, fq->flowid, &queue, &qid);
-	if (err) {
-		dev_err(dev, "dpni_get_queue(TX) failed\n");
-		return err;
+	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+		err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
+				     DPNI_QUEUE_TX, i, fq->flowid,
+				     &queue, &qid);
+		if (err) {
+			dev_err(dev, "dpni_get_queue(TX) failed\n");
+			return err;
+		}
+		fq->tx_fqid[i] = qid.fqid;
 	}
 
+	/* All Tx queues belonging to the same flowid have the same qdbin */
 	fq->tx_qdbin = qid.qdbin;
-	fq->tx_fqid = qid.fqid;
 
 	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
			     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
@@ -3236,7 +3301,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
 	dev = &dpni_dev->dev;
 
 	/* Net device */
-	net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES);
+	net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES);
 	if (!net_dev) {
 		dev_err(dev, "alloc_etherdev_mq() failed\n");
 		return -ENOMEM;
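
Note: the Tx hunks above turn an skb's queue mapping into a hardware priority (reversed, since the hardware treats level 0 as highest) and a frame-queue array index. The following standalone sketch of that arithmetic is illustrative only, not part of the patch; it assumes the linear txq layout programmed by netdev_set_tc_queue() in dpaa2_eth_setup_tc(), with hypothetical sizes of 2 traffic classes and 8 queues per class:

	#include <stdio.h>

	int main(void)
	{
		int num_tc = 2;      /* hypothetical, as configured via mqprio */
		int num_queues = 8;  /* hypothetical dpaa2_eth_queue_count() result */
		int txq;

		for (txq = 0; txq < num_tc * num_queues; txq++) {
			/* netdev_txq_to_tc() equivalent for the offsets passed
			 * to netdev_set_tc_queue() in dpaa2_eth_setup_tc()
			 */
			int tc = txq / num_queues;
			/* hardware sees priority 0 as highest, so reverse */
			int prio = num_tc - tc - 1;
			/* Tx queues with the same flow id share one FQ entry */
			int fq_idx = txq % num_queues;

			printf("txq %2d -> tc %d, hw prio %d, fq[%d]\n",
			       txq, tc, prio, fq_idx);
		}
		return 0;
	}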
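
Once the patch is applied, the new ndo_setup_tc hook is exercised through the mqprio qdisc; a configuration along these lines (interface name and priority map are hypothetical) would create two hardware traffic classes:

	tc qdisc add dev eth0 root handle 1: mqprio num_tc 2 map 0 0 0 0 1 1 1 1 hw 1

Deleting the root qdisc takes the num_tc == 0 branch of dpaa2_eth_setup_tc(), which resets the netdev tc configuration and restores the single-class queue layout.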