author     David S. Miller <davem@davemloft.net>  2022-05-01 12:19:01 +0100
committer  David S. Miller <davem@davemloft.net>  2022-05-01 12:19:01 +0100
commit     b97af72209eedccb79a146b7b6243cffb20739b2 (patch)
tree       e64a468c235a58d43c0433f99e8e233e03a211c2
parent     0ed99ecc95b9c1d3c46d3bc34459088e82caef32 (diff)
parent     0a8afd9f026a7f6c835be0fed2ab709d4133797f (diff)
Merge branch 'UDP-sock_wfree-opts'
Pavel Begunkov says:

====================
UDP sock_wfree optimisations

The series is not UDP specific, but that's the main beneficiary. 2/3
saves one atomic in sock_wfree(), and on top of that 3/3 removes an
extra barrier. Tested with UDP over a dummy netdev: 2038491 -> 2099071
req/s (around +3%).

Note: regarding 1/3, there is a "Should agree with poll..." comment
that I don't completely get, and there is no git history to explain it.
In any case, I can't see how anything could rely on that second check
without racing with tasks woken by wake_up*().

The series was split from a larger patchset, see
https://lore.kernel.org/netdev/cover.1648981570.git.asml.silence@gmail.com/
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
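For context, the slow path that 2/3 targets is the generic sock_wfree()
destructor, which pays two atomics on sk_wmem_alloc: one to drop all but
the last reference before calling sk_write_space(), and a second to drop
that last reference afterwards. A simplified sketch of the pre-patch
logic (close to net/core/sock.c, but illustrative rather than verbatim):

/* Simplified sketch of the pre-patch sock_wfree() slow path; mirrors
 * net/core/sock.c but is illustrative, not verbatim.
 */
void sock_wfree_sketch(struct sk_buff *skb)
{
        struct sock *sk = skb->sk;
        unsigned int len = skb->truesize;

        if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
                /* First atomic: drop len - 1, keeping one reference so
                 * the socket cannot be freed while sk_write_space() runs.
                 */
                WARN_ON(refcount_sub_and_test(len - 1, &sk->sk_wmem_alloc));
                sk->sk_write_space(sk);
                len = 1;
        }
        /* Second atomic: drop the remaining reference; if it was the
         * last one, finish the freeing that sk_free() deferred because
         * of in-flight packets.
         */
        if (refcount_sub_and_test(len, &sk->sk_wmem_alloc))
                __sk_free(sk);
}

The fast path added below folds both updates into a single
refcount_sub_and_test(): for SOCK_RCU_FREE sockets the RCU read section,
not an extra reference, keeps the socket alive across the wakeup, which
is the atomic that 2/3 saves.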
-rw-r--r--  net/core/sock.c | 43
1 file changed, 40 insertions(+), 3 deletions(-)
diff --git a/net/core/sock.c b/net/core/sock.c
index b164618f2cb6..be20a1af20e5 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -146,6 +146,9 @@
 static DEFINE_MUTEX(proto_list_mutex);
 static LIST_HEAD(proto_list);
 
+static void sock_def_write_space_wfree(struct sock *sk);
+static void sock_def_write_space(struct sock *sk);
+
 /**
  * sk_ns_capable - General socket capability test
  * @sk: Socket to use a capability on or through
@@ -2324,8 +2327,20 @@ void sock_wfree(struct sk_buff *skb)
 {
        struct sock *sk = skb->sk;
        unsigned int len = skb->truesize;
+       bool free;
 
        if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
+               if (sock_flag(sk, SOCK_RCU_FREE) &&
+                   sk->sk_write_space == sock_def_write_space) {
+                       rcu_read_lock();
+                       free = refcount_sub_and_test(len, &sk->sk_wmem_alloc);
+                       sock_def_write_space_wfree(sk);
+                       rcu_read_unlock();
+                       if (unlikely(free))
+                               __sk_free(sk);
+                       return;
+               }
+
                /*
                 * Keep a reference on sk_wmem_alloc, this will be released
                 * after sk_write_space() call
@@ -3191,20 +3206,42 @@ static void sock_def_write_space(struct sock *sk)
        /* Do not wake up a writer until he can make "significant"
         * progress.  --DaveM
         */
-       if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= READ_ONCE(sk->sk_sndbuf)) {
+       if (sock_writeable(sk)) {
                wq = rcu_dereference(sk->sk_wq);
                if (skwq_has_sleeper(wq))
                        wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
                                                EPOLLWRNORM | EPOLLWRBAND);
 
                /* Should agree with poll, otherwise some programs break */
-               if (sock_writeable(sk))
-                       sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
+               sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        }
 
        rcu_read_unlock();
 }
 
+/* An optimised version of sock_def_write_space(), should only be called
+ * for SOCK_RCU_FREE sockets under RCU read section and after putting
+ * ->sk_wmem_alloc.
+ */
+static void sock_def_write_space_wfree(struct sock *sk)
+{
+       /* Do not wake up a writer until he can make "significant"
+        * progress.  --DaveM
+        */
+       if (sock_writeable(sk)) {
+               struct socket_wq *wq = rcu_dereference(sk->sk_wq);
+
+               /* rely on refcount_sub from sock_wfree() */
+               smp_mb__after_atomic();
+               if (wq && waitqueue_active(&wq->wait))
+                       wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
+                                               EPOLLWRNORM | EPOLLWRBAND);
+
+               /* Should agree with poll, otherwise some programs break */
+               sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
+       }
+}
+
 static void sock_def_destruct(struct sock *sk)
 {
 }
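
A note on the barrier: the smp_mb__after_atomic() in
sock_def_write_space_wfree() pairs with the refcount_sub_and_test() done
in sock_wfree() to order the sk_wmem_alloc update before the
waitqueue_active() check, the classic lockless waker/sleeper pairing
documented above waitqueue_active() in include/linux/wait.h. The waker
must publish its state change before looking for sleepers, and the
sleeper must queue itself before re-checking the condition; with both
barriers in place at least one side observes the other, so the wakeup
cannot be lost. A userspace C11 analogue of that pairing (all names here
are invented for illustration):

/* Userspace analogue of the waker/sleeper ordering relied on above.
 * All names are invented for illustration; none exist in the kernel.
 */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_uint wmem_alloc;      /* stands in for sk->sk_wmem_alloc */
static atomic_bool sleeper_queued;  /* stands in for waitqueue_active() */

/* Waker side (cf. sock_wfree): publish the refcount drop, issue a full
 * barrier, then check for sleepers without taking the waitqueue lock.
 */
static void waker(unsigned int len)
{
        atomic_fetch_sub_explicit(&wmem_alloc, len, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst); /* smp_mb__after_atomic() */
        if (atomic_load_explicit(&sleeper_queued, memory_order_relaxed)) {
                /* wake_up_interruptible_sync_poll() would go here */
        }
}

/* Sleeper side (cf. poll()): queue first, barrier, then re-check the
 * condition before deciding to sleep, as prepare_to_wait() does.
 */
static bool should_sleep(unsigned int sndbuf)
{
        atomic_store_explicit(&sleeper_queued, true, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst); /* prepare_to_wait() barrier */
        return atomic_load_explicit(&wmem_alloc,
                                    memory_order_relaxed) >= sndbuf / 2;
}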