author     David S. Miller <davem@davemloft.net>  2021-11-16 13:10:35 +0000
committer  David S. Miller <davem@davemloft.net>  2021-11-16 13:10:35 +0000
commit     6fcc06205c15bf1bb90896efdf5967028c154aba (patch)
tree       9485fbb54c44a7933e5e7904aaebaedbe0f82907  /net/ipv6/tcp_ipv6.c
parent     3ad4b7c81a992463c29ae130332c217607fe4452 (diff)
parent     43f51df4172955971ef5498f09308a9dc0291766 (diff)
Merge branch 'tcp-optimizations'
Eric Dumazet says:
====================
tcp: optimizations for linux-5.17
Mostly small improvements in this series.
The notable change is "defer skb freeing after
socket lock is released" in recvmsg() (and RX zerocopy).
The idea is to let the BH handler free skbs whenever
possible, or at least to perform the freeing outside
of the socket lock section, for much improved
performance. This idea can probably be extended
to other protocols.
Tests on a 100Gbit NIC
Max throughput for one TCP_STREAM flow, over 10 runs.
MTU : 1500 (1428 bytes of TCP payload per MSS)
Before: 55 Gbit
After: 66 Gbit
MTU : 4096+ (4096 bytes of TCP payload, plus TCP/IPv6 headers)
Before: 82 Gbit
After: 95 Gbit
====================
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
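The deferral idea described in the merge message can be pictured as queueing buffers on a per-socket list while the socket lock is held, and only walking and freeing that list once the lock has been dropped; the sk_defer_free_flush() call visible in the diff below is the flush side of that pattern. What follows is a minimal userspace sketch of the idea, not kernel code: struct buf, struct fake_sock, defer_free() and defer_free_flush() are invented stand-ins for struct sk_buff, struct sock and the series' helpers.

/*
 * Minimal userspace sketch of "defer freeing until after the lock is
 * released".  All names here (buf, fake_sock, defer_free, defer_free_flush)
 * are illustrative stand-ins, not the kernel's types or helpers.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct buf {			/* stand-in for struct sk_buff */
	struct buf *next;
	char payload[64];
};

struct fake_sock {		/* stand-in for struct sock */
	pthread_mutex_t lock;	/* stand-in for the socket lock */
	struct buf *defer_list;	/* buffers queued for deferred freeing */
};

/* Called with the lock held: queue the buffer instead of freeing it now. */
static void defer_free(struct fake_sock *sk, struct buf *b)
{
	b->next = sk->defer_list;
	sk->defer_list = b;
}

/* Called after the lock is released: free everything that was queued. */
static void defer_free_flush(struct fake_sock *sk)
{
	struct buf *b = sk->defer_list;

	sk->defer_list = NULL;
	while (b) {
		struct buf *next = b->next;

		free(b);	/* the expensive part now runs outside the lock */
		b = next;
	}
}

int main(void)
{
	struct fake_sock sk = { .lock = PTHREAD_MUTEX_INITIALIZER };

	pthread_mutex_lock(&sk.lock);		/* "socket lock" section */
	for (int i = 0; i < 4; i++)
		defer_free(&sk, calloc(1, sizeof(struct buf)));
	pthread_mutex_unlock(&sk.lock);

	defer_free_flush(&sk);			/* freeing deferred past the lock */
	printf("freed deferred buffers outside the lock section\n");
	return 0;
}

The sketch only illustrates why moving the free out of the locked section shortens the critical section; in the actual hunk below, tcp_v6_rcv() flushes any deferred skbs just before taking the socket lock again.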
Diffstat (limited to 'net/ipv6/tcp_ipv6.c')
-rw-r--r--   net/ipv6/tcp_ipv6.c   10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 551fce49841d..3b7d6ede1364 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -72,7 +72,7 @@
 static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
 static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req);
-static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
+INDIRECT_CALLABLE_SCOPE int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
 
 static const struct inet_connection_sock_af_ops ipv6_mapped;
 const struct inet_connection_sock_af_ops ipv6_specific;
@@ -1466,7 +1466,8 @@ INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
  * This is because we cannot sleep with the original spinlock
  * held.
  */
-static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
+INDIRECT_CALLABLE_SCOPE
+int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 {
	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
	struct sk_buff *opt_skb = NULL;
@@ -1757,6 +1758,7 @@ process:
 
	sk_incoming_cpu_update(sk);
 
+	sk_defer_free_flush(sk);
	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
@@ -1893,9 +1895,7 @@ static struct timewait_sock_ops tcp6_timewait_sock_ops = {
 
 INDIRECT_CALLABLE_SCOPE void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
 {
-	struct ipv6_pinfo *np = inet6_sk(sk);
-
-	__tcp_v6_send_check(skb, &np->saddr, &sk->sk_v6_daddr);
+	__tcp_v6_send_check(skb, &sk->sk_v6_rcv_saddr, &sk->sk_v6_daddr);
 }
 
 const struct inet_connection_sock_af_ops ipv6_specific = {
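On the INDIRECT_CALLABLE_SCOPE hunks above: the macro expands to nothing on retpoline builds (giving the function external linkage) and to `static` otherwise, so callers can compare a function pointer against tcp_v6_do_rcv and make a direct call instead of a retpolined indirect one. Below is a userspace sketch of that compare-then-call shape; handler_t, tcp_v6_like_rcv() and dispatch() are invented names, and only the macro mirrors the shape of the kernel's INDIRECT_CALL_1() helper.

/*
 * Userspace sketch of the compare-then-direct-call trick behind the
 * INDIRECT_CALLABLE_SCOPE change.  handler_t, tcp_v6_like_rcv() and
 * dispatch() are invented for illustration.
 */
#include <stdio.h>

typedef int (*handler_t)(int);

/* If the pointer matches the likely target, call that target directly. */
#define INDIRECT_CALL_1(f, f1, ...) \
	((f) == (f1) ? f1(__VA_ARGS__) : (f)(__VA_ARGS__))

/* Deliberately not static: callers must be able to name it for the compare. */
int tcp_v6_like_rcv(int x)
{
	return x + 6;
}

static int other_rcv(int x)
{
	return x + 4;
}

static int dispatch(handler_t h, int x)
{
	/* Common case compiles to a compare plus a direct call. */
	return INDIRECT_CALL_1(h, tcp_v6_like_rcv, x);
}

int main(void)
{
	printf("%d %d\n", dispatch(tcp_v6_like_rcv, 1), dispatch(other_rcv, 1));
	return 0;
}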