author    David S. Miller <davem@davemloft.net>  2023-10-11 07:13:51 +0100
committer David S. Miller <davem@davemloft.net>  2023-10-11 07:13:51 +0100
commit    0e594c1f5d0f0f916f7512cac92ef3ba8c4ace1c (patch)
tree      94db9829e6d16961643509bfdf0548ff3096bf34
parent    e5f061d5e340fefc663cacfe8f42f149d55bdb53 (diff)
parent    c4e33cf2611b2f73098f7413176d428d1fb62304 (diff)
Merge branch 'virtio-net-interrupt-moderation'
Heng Qi says:

====================
virtio-net: Fix and update interrupt moderation

The setting of virtio coalescing parameters involves both all-queues and
per-queue commands, so we must be careful to keep the two in sync.

Regarding napi_tx switching, this patch set is not only compatible with the
previous way of using tx-frames to switch napi_tx, but also improves the
user experience when setting interrupt parameters.

This patch set has been tested and was part of the previous netdim patch
set [1]; it is now being split out so it can be rolled out in steps.

[1] https://lore.kernel.org/all/20230811065512.22190-1-hengqi@linux.alibaba.com/

---
v2->v3:
1. Fix a tiny comment.

v1->v2:
1. Fix some minor comments and add ack tags.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/net/virtio_net.c  104
1 file changed, 75 insertions(+), 29 deletions(-)
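
For context (illustration only, not part of the patch): the all-queues parameters programmed by virtnet_send_notf_coal_cmds() are the ones userspace sets through the device-wide ethtool coalescing interface (ETHTOOL_GCOALESCE/ETHTOOL_SCOALESCE, i.e. "ethtool -c/-C"), while the per-queue path serviced by virtnet_send_notf_coal_vq_cmds() is reached through ethtool's per-queue sub-commands (roughly "ethtool --per-queue <dev> queue_mask <mask> --coalesce ..."). The sketch below is a minimal userspace program against the standard UAPI in <linux/ethtool.h>, using the legacy SIOCETHTOOL ioctl to read and set the all-queues TX coalescing parameters that this series keeps in sync with the per-queue state; it is not derived from the series itself.

/* Minimal, illustrative sketch: read and set device-wide coalescing
 * parameters via the classic SIOCETHTOOL ioctl. Not part of this series. */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	struct ethtool_coalesce ecoal = { .cmd = ETHTOOL_GCOALESCE };
	struct ifreq ifr;
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <ifname>\n", argv[0]);
		return 1;
	}

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, argv[1], IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ecoal;

	/* Read the current device-wide (all-queues) coalescing parameters. */
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GCOALESCE");
		return 1;
	}

	/* Request tx-frames=1: per the cover letter, tx-frames is also the
	 * knob virtio-net has used to switch napi_tx, which is why the
	 * series keeps the global and per-queue copies consistent. */
	ecoal.cmd = ETHTOOL_SCOALESCE;
	ecoal.tx_max_coalesced_frames = 1;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_SCOALESCE");
		return 1;
	}

	printf("tx-usecs=%u tx-frames=%u rx-usecs=%u rx-frames=%u\n",
	       ecoal.tx_coalesce_usecs, ecoal.tx_max_coalesced_frames,
	       ecoal.rx_coalesce_usecs, ecoal.rx_max_coalesced_frames);
	return 0;
}

In practice "ethtool -C <dev> tx-frames 1" does the same thing; the point of the sketch is only to show which kernel path (all-queues vs. per-queue) each form of the request ends up in.
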
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 1ed6bcc4cbb1..a52fd743504a 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2855,6 +2855,9 @@ static void virtnet_get_ringparam(struct net_device *dev,
ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
}
+static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
+ u16 vqn, u32 max_usecs, u32 max_packets);
+
static int virtnet_set_ringparam(struct net_device *dev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
@@ -2890,12 +2893,36 @@ static int virtnet_set_ringparam(struct net_device *dev,
err = virtnet_tx_resize(vi, sq, ring->tx_pending);
if (err)
return err;
+
+ /* Upon disabling and re-enabling a transmit virtqueue, the device must
+ * set the coalescing parameters of the virtqueue to those configured
+ * through the VIRTIO_NET_CTRL_NOTF_COAL_TX_SET command, or, if the driver
+ * did not set any TX coalescing parameters, to 0.
+ */
+ err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(i),
+ vi->intr_coal_tx.max_usecs,
+ vi->intr_coal_tx.max_packets);
+ if (err)
+ return err;
+
+ vi->sq[i].intr_coal.max_usecs = vi->intr_coal_tx.max_usecs;
+ vi->sq[i].intr_coal.max_packets = vi->intr_coal_tx.max_packets;
}
if (ring->rx_pending != rx_pending) {
err = virtnet_rx_resize(vi, rq, ring->rx_pending);
if (err)
return err;
+
+ /* The reason is the same as for the transmit virtqueue reset */
+ err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(i),
+ vi->intr_coal_rx.max_usecs,
+ vi->intr_coal_rx.max_packets);
+ if (err)
+ return err;
+
+ vi->rq[i].intr_coal.max_usecs = vi->intr_coal_rx.max_usecs;
+ vi->rq[i].intr_coal.max_packets = vi->intr_coal_rx.max_packets;
}
}
@@ -3233,6 +3260,7 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
struct ethtool_coalesce *ec)
{
struct scatterlist sgs_tx, sgs_rx;
+ int i;
vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
@@ -3246,6 +3274,10 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
/* Save parameters */
vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs;
vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames;
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ vi->sq[i].intr_coal.max_usecs = ec->tx_coalesce_usecs;
+ vi->sq[i].intr_coal.max_packets = ec->tx_max_coalesced_frames;
+ }
vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
@@ -3259,6 +3291,10 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
/* Save parameters */
vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
+ vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
+ }
return 0;
}
@@ -3287,27 +3323,23 @@ static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi,
{
int err;
- if (ec->rx_coalesce_usecs || ec->rx_max_coalesced_frames) {
- err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue),
- ec->rx_coalesce_usecs,
- ec->rx_max_coalesced_frames);
- if (err)
- return err;
- /* Save parameters */
- vi->rq[queue].intr_coal.max_usecs = ec->rx_coalesce_usecs;
- vi->rq[queue].intr_coal.max_packets = ec->rx_max_coalesced_frames;
- }
+ err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue),
+ ec->rx_coalesce_usecs,
+ ec->rx_max_coalesced_frames);
+ if (err)
+ return err;
- if (ec->tx_coalesce_usecs || ec->tx_max_coalesced_frames) {
- err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue),
- ec->tx_coalesce_usecs,
- ec->tx_max_coalesced_frames);
- if (err)
- return err;
- /* Save parameters */
- vi->sq[queue].intr_coal.max_usecs = ec->tx_coalesce_usecs;
- vi->sq[queue].intr_coal.max_packets = ec->tx_max_coalesced_frames;
- }
+ vi->rq[queue].intr_coal.max_usecs = ec->rx_coalesce_usecs;
+ vi->rq[queue].intr_coal.max_packets = ec->rx_max_coalesced_frames;
+
+ err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue),
+ ec->tx_coalesce_usecs,
+ ec->tx_max_coalesced_frames);
+ if (err)
+ return err;
+
+ vi->sq[queue].intr_coal.max_usecs = ec->tx_coalesce_usecs;
+ vi->sq[queue].intr_coal.max_packets = ec->tx_max_coalesced_frames;
return 0;
}
@@ -3315,7 +3347,7 @@ static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi,
static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
{
/* usecs coalescing is supported only if VIRTIO_NET_F_NOTF_COAL
- * feature is negotiated.
+ * or VIRTIO_NET_F_VQ_NOTF_COAL feature is negotiated.
*/
if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs)
return -EOPNOTSUPP;
@@ -3453,7 +3485,7 @@ static int virtnet_get_per_queue_coalesce(struct net_device *dev,
} else {
ec->rx_max_coalesced_frames = 1;
- if (vi->sq[0].napi.weight)
+ if (vi->sq[queue].napi.weight)
ec->tx_max_coalesced_frames = 1;
}
@@ -4442,13 +4474,6 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->xdp_features |= NETDEV_XDP_ACT_RX_SG;
}
- if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
- vi->intr_coal_rx.max_usecs = 0;
- vi->intr_coal_tx.max_usecs = 0;
- vi->intr_coal_tx.max_packets = 0;
- vi->intr_coal_rx.max_packets = 0;
- }
-
if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
vi->has_rss_hash_report = true;
@@ -4523,6 +4548,27 @@ static int virtnet_probe(struct virtio_device *vdev)
if (err)
goto free;
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
+ vi->intr_coal_rx.max_usecs = 0;
+ vi->intr_coal_tx.max_usecs = 0;
+ vi->intr_coal_rx.max_packets = 0;
+
+ /* Keep the default values of the coalescing parameters
+ * aligned with the default napi_tx state.
+ */
+ if (vi->sq[0].napi.weight)
+ vi->intr_coal_tx.max_packets = 1;
+ else
+ vi->intr_coal_tx.max_packets = 0;
+ }
+
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
+ /* The reason is the same as VIRTIO_NET_F_NOTF_COAL. */
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ if (vi->sq[i].napi.weight)
+ vi->sq[i].intr_coal.max_packets = 1;
+ }
+
#ifdef CONFIG_SYSFS
if (vi->mergeable_rx_bufs)
dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;