path: root/net/core/netpoll.c
author    Jakub Kicinski <kuba@kernel.org>  2024-11-15 16:25:39 -0800
committer Jakub Kicinski <kuba@kernel.org>  2024-11-15 16:25:40 -0800
commit    bf3c76b4c46a532c78e2b26e0dfde1f58231261f (patch)
tree      34a990262e956e5aabfeee929fb8ebec9df28689 /net/core/netpoll.c
parent    11ee317d883ef111b8c36228437eaffea7b49bbc (diff)
parent    6c59f16f1770481a6ee684720ec55b1e38b3a4b2 (diff)
Merge branch 'net-netpoll-improve-skb-pool-management'
Breno Leitao says:

====================
net: netpoll: Improve SKB pool management

The netpoll subsystem pre-allocates 32 SKBs in a pool for emergency use
during out-of-memory conditions. However, the current implementation has
several inefficiencies:

* The SKB pool, once allocated, is never freed:
  * Resources remain allocated even after netpoll users are removed
  * Failed initialization can leave pool populated forever
* The global pool design makes resource tracking difficult

This series addresses these issues through three patches:

Patch 1 ("net: netpoll: Individualize the skb pool"):
- Replace global pool with per-user pools in netpoll struct

Patch 2 ("net: netpoll: flush skb pool during cleanup"):
- Properly free pool resources during netconsole cleanup

These changes improve resource management and make the code more
maintainable. As a side benefit, the improved structure would allow
netpoll to be modularized if desired in the future.

v2: https://lore.kernel.org/20241107-skb_buffers_v2-v2-0-288c6264ba4f@debian.org
v1: https://lore.kernel.org/20241025142025.3558051-1-leitao@debian.org
====================

Link: https://patch.msgid.link/20241114-skb_buffers_v2-v3-0-9be9f52a8b69@debian.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
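This page is limited to net/core/netpoll.c, so the companion hunk of patch 1
that moves the pool into struct netpoll (in include/linux/netpoll.h) is not
shown. An abridged sketch of the resulting struct, inferred from the
np->skb_pool usage in the diff below; surrounding members are elided:

/* include/linux/netpoll.h (abridged sketch) */
struct netpoll {
        struct net_device *dev;
        char dev_name[IFNAMSIZ];
        /* ... addressing fields: local/remote IP, ports, remote MAC ... */

        struct sk_buff_head skb_pool;   /* was a single file-scope pool */
};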
Diffstat (limited to 'net/core/netpoll.c')
-rw-r--r--  net/core/netpoll.c | 45
1 file changed, 26 insertions(+), 19 deletions(-)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 94b7f07a952f..00e1e4a32902 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -45,9 +45,6 @@
 #define MAX_UDP_CHUNK 1460
 #define MAX_SKBS 32
-
-static struct sk_buff_head skb_pool;
-
 #define USEC_PER_POLL 50
 
 #define MAX_SKB_SIZE \
@@ -234,20 +231,23 @@ void netpoll_poll_enable(struct net_device *dev)
 	up(&ni->dev_lock);
 }
 
-static void refill_skbs(void)
+static void refill_skbs(struct netpoll *np)
 {
+	struct sk_buff_head *skb_pool;
 	struct sk_buff *skb;
 	unsigned long flags;
 
-	spin_lock_irqsave(&skb_pool.lock, flags);
-	while (skb_pool.qlen < MAX_SKBS) {
+	skb_pool = &np->skb_pool;
+
+	spin_lock_irqsave(&skb_pool->lock, flags);
+	while (skb_pool->qlen < MAX_SKBS) {
 		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
 		if (!skb)
 			break;
 
-		__skb_queue_tail(&skb_pool, skb);
+		__skb_queue_tail(skb_pool, skb);
 	}
-	spin_unlock_irqrestore(&skb_pool.lock, flags);
+	spin_unlock_irqrestore(&skb_pool->lock, flags);
 }
 
 static void zap_completion_queue(void)
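An aside on the data structure used above: the queue head embeds its own
spinlock and length counter, which is why refill_skbs() can take
skb_pool->lock and test skb_pool->qlen with nothing beyond the sk_buff_head
itself. Abridged sketch from include/linux/skbuff.h:

/* include/linux/skbuff.h (abridged): the queue head carries its own
 * lock and length counter, so the per-netpoll pool needs no extra
 * locking state beyond the embedded sk_buff_head.
 */
struct sk_buff_head {
        struct sk_buff *next;
        struct sk_buff *prev;
        __u32           qlen;   /* bounded at MAX_SKBS by refill_skbs() */
        spinlock_t      lock;
};

Because __skb_queue_tail() is the unlocked variant, holding the lock across
the whole loop keeps the qlen check and the enqueue atomic, so concurrent
refills cannot overshoot MAX_SKBS; the irqsave form matters because netpoll
can be invoked from hard-IRQ context.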
@@ -284,12 +284,12 @@ static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
 	struct sk_buff *skb;
 
 	zap_completion_queue();
-	refill_skbs();
+	refill_skbs(np);
 repeat:
 
 	skb = alloc_skb(len, GFP_ATOMIC);
 	if (!skb)
-		skb = skb_dequeue(&skb_pool);
+		skb = skb_dequeue(&np->skb_pool);
 
 	if (!skb) {
 		if (++count < 10) {
@@ -531,6 +531,14 @@ static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
 	return -1;
 }
 
+static void skb_pool_flush(struct netpoll *np)
+{
+	struct sk_buff_head *skb_pool;
+
+	skb_pool = &np->skb_pool;
+	skb_queue_purge_reason(skb_pool, SKB_CONSUMED);
+}
+
 int netpoll_parse_options(struct netpoll *np, char *opt)
 {
 	char *cur=opt, *delim;
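The new helper leans on skb_queue_purge_reason(), which drains the queue and
frees every buffer with the given reason; SKB_CONSUMED records this as a
normal consume rather than a drop for tracing purposes. A rough, semantically
equivalent open-coded version (hypothetical name, for illustration only; the
real code uses the generic helper):

/* Hypothetical open-coded equivalent of skb_pool_flush(), showing
 * what the purge amounts to. skb_dequeue() takes the queue lock
 * itself, so this is safe against a concurrent refill_skbs() on the
 * same netpoll instance.
 */
static void skb_pool_flush_open_coded(struct netpoll *np)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&np->skb_pool)) != NULL)
                kfree_skb_reason(skb, SKB_CONSUMED);
}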
@@ -673,6 +681,8 @@ int netpoll_setup(struct netpoll *np)
 	struct in_device *in_dev;
 	int err;
 
+	skb_queue_head_init(&np->skb_pool);
+
 	rtnl_lock();
 	if (np->dev_name[0]) {
 		struct net *net = current->nsproxy->net_ns;
@@ -773,14 +783,16 @@ put_noaddr:
 	}
 
 	/* fill up the skb queue */
-	refill_skbs();
+	refill_skbs(np);
 
 	err = __netpoll_setup(np, ndev);
 	if (err)
-		goto put;
+		goto flush;
 	rtnl_unlock();
 	return 0;
 
+flush:
+	skb_pool_flush(np);
 put:
 	DEBUG_NET_WARN_ON_ONCE(np->dev);
 	if (ip_overwritten)
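Worth noting in the hunk above: the new flush: label falls straight through
into put:, so a failed __netpoll_setup() both drains the freshly filled pool
and still releases the device reference. A condensed view of the unwind
ladder, with the put/unlock bodies abridged from the surrounding code:

        err = __netpoll_setup(np, ndev);
        if (err)
                goto flush;
        rtnl_unlock();
        return 0;

flush:
        skb_pool_flush(np);     /* drop the just-refilled pool... */
put:                            /* ...then fall through to the old path */
        /* undo any overwritten IP config, release the device (abridged) */
        netdev_put(ndev, &np->dev_tracker);
unlock:
        rtnl_unlock();
        return err;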
@@ -792,13 +804,6 @@ unlock:
 }
 EXPORT_SYMBOL(netpoll_setup);
 
-static int __init netpoll_init(void)
-{
-	skb_queue_head_init(&skb_pool);
-	return 0;
-}
-core_initcall(netpoll_init);
-
 static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
 {
 	struct netpoll_info *npinfo =
@@ -835,6 +840,8 @@ void __netpoll_cleanup(struct netpoll *np)
 		call_rcu(&npinfo->rcu, rcu_cleanup_netpoll_info);
 	} else
 		RCU_INIT_POINTER(np->dev->npinfo, NULL);
+
+	skb_pool_flush(np);
 }
 EXPORT_SYMBOL_GPL(__netpoll_cleanup);
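Taken together, the pool now tracks each netpoll instance's lifetime:
netpoll_setup() initializes and pre-fills np->skb_pool, and
__netpoll_cleanup() drains it. A minimal sketch of a netpoll user from the
API side (the example_* names and addressing values are hypothetical;
netpoll_setup() and netpoll_cleanup() are the real exported entry points):

/* Hypothetical minimal netpoll user, illustrating the new per-instance
 * pool lifetime; addressing setup is abridged.
 */
static struct netpoll example_np = {
        .name     = "example",
        .dev_name = "eth0",
        /* ... local/remote IP, ports, remote MAC ... */
};

static int example_start(void)
{
        /* netpoll_setup() now runs skb_queue_head_init(&np->skb_pool)
         * and pre-fills up to MAX_SKBS buffers via refill_skbs(np);
         * on failure it unwinds through the new flush: label. */
        return netpoll_setup(&example_np);
}

static void example_stop(void)
{
        /* netpoll_cleanup() reaches __netpoll_cleanup(), which now ends
         * with skb_pool_flush(np), so the emergency buffers are freed
         * with the instance instead of lingering for the lifetime of
         * the system. */
        netpoll_cleanup(&example_np);
}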