| author | Kuniyuki Iwashima <kuniyu@amazon.com> | 2024-08-16 16:39:21 -0700 |
|---|---|---|
| committer | Jakub Kicinski <kuba@kernel.org> | 2024-08-20 15:48:00 -0700 |
| commit | 8594d9b85c07f05e431bd07e895c2a3ad9b85d6f (patch) | |
| tree | 57b946461ef2896b053703d6106a200e7b33970e | |
| parent | 2862c9349d5d8667d897aa5cecf0f3886979ecb1 (diff) | |
af_unix: Don't call skb_get() for OOB skb.
Since it was introduced, the OOB skb has held an additional reference
count for no particular reason, and this has caused many issues.

Also, kfree_skb() and consume_skb() are both used to decrement the
count, which is confusing.

Let's drop the unnecessary skb_get() in queue_oob() and the
corresponding kfree_skb(), consume_skb(), and skb_unref() calls.

Now unix_sk(sk)->oob_skb is just a pointer to an skb in the receive
queue, so no special handling is needed in the GC.
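For reference, the send path after this change reduces to the following
condensed excerpt of the queue_oob() hunk in the diff below (error handling
and the rest of the function omitted; comments added here for illustration):

	/* The reference held by the peer's receive queue is now the only
	 * one; ousk->oob_skb is a plain pointer into that queue.
	 */
	spin_lock(&other->sk_receive_queue.lock);
	WRITE_ONCE(ousk->oob_skb, skb);
	__skb_queue_tail(&other->sk_receive_queue, skb);
	spin_unlock(&other->sk_receive_queue.lock);

Readers and the GC only clear the pointer; the skb itself is freed when it
is unlinked from the receive queue in manage_oob() or when the whole queue
is purged.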
Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Link: https://patch.msgid.link/20240816233921.57800-1-kuniyu@amazon.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-rw-r--r-- | net/unix/af_unix.c | 27
-rw-r--r-- | net/unix/garbage.c | 16
2 files changed, 7 insertions, 36 deletions
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 0be0dcb07f7b..a1894019ebd5 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -693,10 +693,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
 	unix_state_unlock(sk);
 
 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
-	if (u->oob_skb) {
-		kfree_skb(u->oob_skb);
-		u->oob_skb = NULL;
-	}
+	u->oob_skb = NULL;
 #endif
 
 	wake_up_interruptible_all(&u->peer_wait);
@@ -2226,13 +2223,9 @@ static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other
 	}
 
 	maybe_add_creds(skb, sock, other);
-	skb_get(skb);
-
 	scm_stat_add(other, skb);
 
 	spin_lock(&other->sk_receive_queue.lock);
-	if (ousk->oob_skb)
-		consume_skb(ousk->oob_skb);
 	WRITE_ONCE(ousk->oob_skb, skb);
 	__skb_queue_tail(&other->sk_receive_queue, skb);
 	spin_unlock(&other->sk_receive_queue.lock);
@@ -2640,8 +2633,6 @@ static int unix_stream_recv_urg(struct unix_stream_read_state *state)
 
 	if (!(state->flags & MSG_PEEK))
 		WRITE_ONCE(u->oob_skb, NULL);
-	else
-		skb_get(oob_skb);
 
 	spin_unlock(&sk->sk_receive_queue.lock);
 	unix_state_unlock(sk);
@@ -2651,8 +2642,6 @@ static int unix_stream_recv_urg(struct unix_stream_read_state *state)
 	if (!(state->flags & MSG_PEEK))
 		UNIXCB(oob_skb).consumed += 1;
 
-	consume_skb(oob_skb);
-
 	mutex_unlock(&u->iolock);
 
 	if (chunk < 0)
@@ -2694,12 +2683,10 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
 		if (copied) {
 			skb = NULL;
 		} else if (!(flags & MSG_PEEK)) {
-			if (sock_flag(sk, SOCK_URGINLINE)) {
-				WRITE_ONCE(u->oob_skb, NULL);
-				consume_skb(skb);
-			} else {
+			WRITE_ONCE(u->oob_skb, NULL);
+
+			if (!sock_flag(sk, SOCK_URGINLINE)) {
 				__skb_unlink(skb, &sk->sk_receive_queue);
-				WRITE_ONCE(u->oob_skb, NULL);
 				unlinked_skb = skb;
 				skb = skb_peek(&sk->sk_receive_queue);
 			}
@@ -2710,10 +2697,7 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
 
 		spin_unlock(&sk->sk_receive_queue.lock);
 
-		if (unlinked_skb) {
-			WARN_ON_ONCE(skb_unref(unlinked_skb));
-			kfree_skb(unlinked_skb);
-		}
+		kfree_skb(unlinked_skb);
 	}
 	return skb;
 }
@@ -2756,7 +2740,6 @@ static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
 	unix_state_unlock(sk);
 
 	if (drop) {
-		WARN_ON_ONCE(skb_unref(skb));
 		kfree_skb(skb);
 		return -EAGAIN;
 	}
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 06d94ad999e9..0068e758be4d 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -337,18 +337,6 @@ static bool unix_vertex_dead(struct unix_vertex *vertex)
 	return true;
 }
 
-static void unix_collect_queue(struct unix_sock *u, struct sk_buff_head *hitlist)
-{
-	skb_queue_splice_init(&u->sk.sk_receive_queue, hitlist);
-
-#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
-	if (u->oob_skb) {
-		WARN_ON_ONCE(skb_unref(u->oob_skb));
-		u->oob_skb = NULL;
-	}
-#endif
-}
-
 static void unix_collect_skb(struct list_head *scc, struct sk_buff_head *hitlist)
 {
 	struct unix_vertex *vertex;
@@ -371,11 +359,11 @@ static void unix_collect_skb(struct list_head *scc, struct sk_buff_head *hitlist
 				struct sk_buff_head *embryo_queue = &skb->sk->sk_receive_queue;
 
 				spin_lock(&embryo_queue->lock);
-				unix_collect_queue(unix_sk(skb->sk), hitlist);
+				skb_queue_splice_init(embryo_queue, hitlist);
 				spin_unlock(&embryo_queue->lock);
 			}
 		} else {
-			unix_collect_queue(u, hitlist);
+			skb_queue_splice_init(queue, hitlist);
 		}
 
 		spin_unlock(&queue->lock);