From 40bbae583ec38ea31e728bf42a4ea72bded22ab6 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 6 Mar 2023 20:43:13 +0000 Subject: net: remove enum skb_free_reason enum skb_drop_reason is more generic, we can adopt it instead. Provide dev_kfree_skb_irq_reason() and dev_kfree_skb_any_reason(). This means drivers can use more precise drop reasons if they want to. Signed-off-by: Eric Dumazet Reviewed-by: Simon Horman Reviewed-by: Yunsheng Lin Link: https://lore.kernel.org/r/20230306204313.10492-1-edumazet@google.com Signed-off-by: Jakub Kicinski --- include/linux/netdevice.h | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) (limited to 'include/linux/netdevice.h') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 6a14b7b11766..ee483071cf59 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -52,6 +52,7 @@ #include #include #include +#include struct netpoll_info; struct device; @@ -3804,13 +3805,8 @@ static inline unsigned int get_netdev_rx_queue_index( int netif_get_num_default_rss_queues(void); -enum skb_free_reason { - SKB_REASON_CONSUMED, - SKB_REASON_DROPPED, -}; - -void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason); -void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason); +void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason); +void dev_kfree_skb_any_reason(struct sk_buff *skb, enum skb_drop_reason reason); /* * It is not allowed to call kfree_skb() or consume_skb() from hardware @@ -3833,22 +3829,22 @@ void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason); */ static inline void dev_kfree_skb_irq(struct sk_buff *skb) { - __dev_kfree_skb_irq(skb, SKB_REASON_DROPPED); + dev_kfree_skb_irq_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED); } static inline void dev_consume_skb_irq(struct sk_buff *skb) { - __dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED); + dev_kfree_skb_irq_reason(skb, SKB_CONSUMED); } static inline void dev_kfree_skb_any(struct sk_buff *skb) { - __dev_kfree_skb_any(skb, SKB_REASON_DROPPED); + dev_kfree_skb_any_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED); } static inline void dev_consume_skb_any(struct sk_buff *skb) { - __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED); + dev_kfree_skb_any_reason(skb, SKB_CONSUMED); } u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp, -- cgit v1.2.3-70-g09d2 From 4b397c06cb987935b1b097336532aa6b4210e091 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 10 Mar 2023 19:11:09 +0000 Subject: net: tunnels: annotate lockless accesses to dev->needed_headroom IP tunnels can apparently update dev->needed_headroom in their xmit path. This patch takes care of three tunnels xmit, and also the core LL_RESERVED_SPACE() and LL_RESERVED_SPACE_EXTRA() helpers. More changes might be needed for completeness. 
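Concretely, the pattern being annotated looks like this (a condensed
sketch of the ip_tunnel_xmit() logic changed below; the helper name is
illustrative, not part of the patch):

	#include <linux/netdevice.h>

	/* Several CPUs can transmit through the same tunnel device at
	 * once, so the lockless, grow-only update of
	 * dev->needed_headroom must use READ_ONCE()/WRITE_ONCE() to
	 * avoid the data race KCSAN reports below.
	 */
	static void tunnel_grow_headroom(struct net_device *dev,
					 unsigned int max_headroom)
	{
		if (max_headroom > READ_ONCE(dev->needed_headroom))
			WRITE_ONCE(dev->needed_headroom, max_headroom);
	}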
BUG: KCSAN: data-race in ip_tunnel_xmit / ip_tunnel_xmit read to 0xffff88815b9da0ec of 2 bytes by task 888 on cpu 1: ip_tunnel_xmit+0x1270/0x1730 net/ipv4/ip_tunnel.c:803 __gre_xmit net/ipv4/ip_gre.c:469 [inline] ipgre_xmit+0x516/0x570 net/ipv4/ip_gre.c:661 __netdev_start_xmit include/linux/netdevice.h:4881 [inline] netdev_start_xmit include/linux/netdevice.h:4895 [inline] xmit_one net/core/dev.c:3580 [inline] dev_hard_start_xmit+0x127/0x400 net/core/dev.c:3596 __dev_queue_xmit+0x1007/0x1eb0 net/core/dev.c:4246 dev_queue_xmit include/linux/netdevice.h:3051 [inline] neigh_direct_output+0x17/0x20 net/core/neighbour.c:1623 neigh_output include/net/neighbour.h:546 [inline] ip_finish_output2+0x740/0x840 net/ipv4/ip_output.c:228 ip_finish_output+0xf4/0x240 net/ipv4/ip_output.c:316 NF_HOOK_COND include/linux/netfilter.h:291 [inline] ip_output+0xe5/0x1b0 net/ipv4/ip_output.c:430 dst_output include/net/dst.h:444 [inline] ip_local_out+0x64/0x80 net/ipv4/ip_output.c:126 iptunnel_xmit+0x34a/0x4b0 net/ipv4/ip_tunnel_core.c:82 ip_tunnel_xmit+0x1451/0x1730 net/ipv4/ip_tunnel.c:813 __gre_xmit net/ipv4/ip_gre.c:469 [inline] ipgre_xmit+0x516/0x570 net/ipv4/ip_gre.c:661 __netdev_start_xmit include/linux/netdevice.h:4881 [inline] netdev_start_xmit include/linux/netdevice.h:4895 [inline] xmit_one net/core/dev.c:3580 [inline] dev_hard_start_xmit+0x127/0x400 net/core/dev.c:3596 __dev_queue_xmit+0x1007/0x1eb0 net/core/dev.c:4246 dev_queue_xmit include/linux/netdevice.h:3051 [inline] neigh_direct_output+0x17/0x20 net/core/neighbour.c:1623 neigh_output include/net/neighbour.h:546 [inline] ip_finish_output2+0x740/0x840 net/ipv4/ip_output.c:228 ip_finish_output+0xf4/0x240 net/ipv4/ip_output.c:316 NF_HOOK_COND include/linux/netfilter.h:291 [inline] ip_output+0xe5/0x1b0 net/ipv4/ip_output.c:430 dst_output include/net/dst.h:444 [inline] ip_local_out+0x64/0x80 net/ipv4/ip_output.c:126 iptunnel_xmit+0x34a/0x4b0 net/ipv4/ip_tunnel_core.c:82 ip_tunnel_xmit+0x1451/0x1730 net/ipv4/ip_tunnel.c:813 __gre_xmit net/ipv4/ip_gre.c:469 [inline] ipgre_xmit+0x516/0x570 net/ipv4/ip_gre.c:661 __netdev_start_xmit include/linux/netdevice.h:4881 [inline] netdev_start_xmit include/linux/netdevice.h:4895 [inline] xmit_one net/core/dev.c:3580 [inline] dev_hard_start_xmit+0x127/0x400 net/core/dev.c:3596 __dev_queue_xmit+0x1007/0x1eb0 net/core/dev.c:4246 dev_queue_xmit include/linux/netdevice.h:3051 [inline] neigh_direct_output+0x17/0x20 net/core/neighbour.c:1623 neigh_output include/net/neighbour.h:546 [inline] ip_finish_output2+0x740/0x840 net/ipv4/ip_output.c:228 ip_finish_output+0xf4/0x240 net/ipv4/ip_output.c:316 NF_HOOK_COND include/linux/netfilter.h:291 [inline] ip_output+0xe5/0x1b0 net/ipv4/ip_output.c:430 dst_output include/net/dst.h:444 [inline] ip_local_out+0x64/0x80 net/ipv4/ip_output.c:126 iptunnel_xmit+0x34a/0x4b0 net/ipv4/ip_tunnel_core.c:82 ip_tunnel_xmit+0x1451/0x1730 net/ipv4/ip_tunnel.c:813 __gre_xmit net/ipv4/ip_gre.c:469 [inline] ipgre_xmit+0x516/0x570 net/ipv4/ip_gre.c:661 __netdev_start_xmit include/linux/netdevice.h:4881 [inline] netdev_start_xmit include/linux/netdevice.h:4895 [inline] xmit_one net/core/dev.c:3580 [inline] dev_hard_start_xmit+0x127/0x400 net/core/dev.c:3596 __dev_queue_xmit+0x1007/0x1eb0 net/core/dev.c:4246 dev_queue_xmit include/linux/netdevice.h:3051 [inline] neigh_direct_output+0x17/0x20 net/core/neighbour.c:1623 neigh_output include/net/neighbour.h:546 [inline] ip_finish_output2+0x740/0x840 net/ipv4/ip_output.c:228 ip_finish_output+0xf4/0x240 net/ipv4/ip_output.c:316 NF_HOOK_COND 
include/linux/netfilter.h:291 [inline] ip_output+0xe5/0x1b0 net/ipv4/ip_output.c:430 dst_output include/net/dst.h:444 [inline] ip_local_out+0x64/0x80 net/ipv4/ip_output.c:126 iptunnel_xmit+0x34a/0x4b0 net/ipv4/ip_tunnel_core.c:82 ip_tunnel_xmit+0x1451/0x1730 net/ipv4/ip_tunnel.c:813 __gre_xmit net/ipv4/ip_gre.c:469 [inline] ipgre_xmit+0x516/0x570 net/ipv4/ip_gre.c:661 __netdev_start_xmit include/linux/netdevice.h:4881 [inline] netdev_start_xmit include/linux/netdevice.h:4895 [inline] xmit_one net/core/dev.c:3580 [inline] dev_hard_start_xmit+0x127/0x400 net/core/dev.c:3596 __dev_queue_xmit+0x1007/0x1eb0 net/core/dev.c:4246 dev_queue_xmit include/linux/netdevice.h:3051 [inline] neigh_direct_output+0x17/0x20 net/core/neighbour.c:1623 neigh_output include/net/neighbour.h:546 [inline] ip_finish_output2+0x740/0x840 net/ipv4/ip_output.c:228 ip_finish_output+0xf4/0x240 net/ipv4/ip_output.c:316 NF_HOOK_COND include/linux/netfilter.h:291 [inline] ip_output+0xe5/0x1b0 net/ipv4/ip_output.c:430 dst_output include/net/dst.h:444 [inline] ip_local_out+0x64/0x80 net/ipv4/ip_output.c:126 iptunnel_xmit+0x34a/0x4b0 net/ipv4/ip_tunnel_core.c:82 ip_tunnel_xmit+0x1451/0x1730 net/ipv4/ip_tunnel.c:813 __gre_xmit net/ipv4/ip_gre.c:469 [inline] ipgre_xmit+0x516/0x570 net/ipv4/ip_gre.c:661 __netdev_start_xmit include/linux/netdevice.h:4881 [inline] netdev_start_xmit include/linux/netdevice.h:4895 [inline] xmit_one net/core/dev.c:3580 [inline] dev_hard_start_xmit+0x127/0x400 net/core/dev.c:3596 __dev_queue_xmit+0x1007/0x1eb0 net/core/dev.c:4246 dev_queue_xmit include/linux/netdevice.h:3051 [inline] neigh_direct_output+0x17/0x20 net/core/neighbour.c:1623 neigh_output include/net/neighbour.h:546 [inline] ip_finish_output2+0x740/0x840 net/ipv4/ip_output.c:228 ip_finish_output+0xf4/0x240 net/ipv4/ip_output.c:316 NF_HOOK_COND include/linux/netfilter.h:291 [inline] ip_output+0xe5/0x1b0 net/ipv4/ip_output.c:430 dst_output include/net/dst.h:444 [inline] ip_local_out+0x64/0x80 net/ipv4/ip_output.c:126 iptunnel_xmit+0x34a/0x4b0 net/ipv4/ip_tunnel_core.c:82 ip_tunnel_xmit+0x1451/0x1730 net/ipv4/ip_tunnel.c:813 __gre_xmit net/ipv4/ip_gre.c:469 [inline] ipgre_xmit+0x516/0x570 net/ipv4/ip_gre.c:661 __netdev_start_xmit include/linux/netdevice.h:4881 [inline] netdev_start_xmit include/linux/netdevice.h:4895 [inline] xmit_one net/core/dev.c:3580 [inline] dev_hard_start_xmit+0x127/0x400 net/core/dev.c:3596 __dev_queue_xmit+0x1007/0x1eb0 net/core/dev.c:4246 write to 0xffff88815b9da0ec of 2 bytes by task 2379 on cpu 0: ip_tunnel_xmit+0x1294/0x1730 net/ipv4/ip_tunnel.c:804 __gre_xmit net/ipv4/ip_gre.c:469 [inline] ipgre_xmit+0x516/0x570 net/ipv4/ip_gre.c:661 __netdev_start_xmit include/linux/netdevice.h:4881 [inline] netdev_start_xmit include/linux/netdevice.h:4895 [inline] xmit_one net/core/dev.c:3580 [inline] dev_hard_start_xmit+0x127/0x400 net/core/dev.c:3596 __dev_queue_xmit+0x1007/0x1eb0 net/core/dev.c:4246 dev_queue_xmit include/linux/netdevice.h:3051 [inline] neigh_direct_output+0x17/0x20 net/core/neighbour.c:1623 neigh_output include/net/neighbour.h:546 [inline] ip6_finish_output2+0x9bc/0xc50 net/ipv6/ip6_output.c:134 __ip6_finish_output net/ipv6/ip6_output.c:195 [inline] ip6_finish_output+0x39a/0x4e0 net/ipv6/ip6_output.c:206 NF_HOOK_COND include/linux/netfilter.h:291 [inline] ip6_output+0xeb/0x220 net/ipv6/ip6_output.c:227 dst_output include/net/dst.h:444 [inline] NF_HOOK include/linux/netfilter.h:302 [inline] mld_sendpack+0x438/0x6a0 net/ipv6/mcast.c:1820 mld_send_cr net/ipv6/mcast.c:2121 [inline] mld_ifc_work+0x519/0x7b0 
net/ipv6/mcast.c:2653 process_one_work+0x3e6/0x750 kernel/workqueue.c:2390 worker_thread+0x5f2/0xa10 kernel/workqueue.c:2537 kthread+0x1ac/0x1e0 kernel/kthread.c:376 ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:308 value changed: 0x0dd4 -> 0x0e14 Reported by Kernel Concurrency Sanitizer on: CPU: 0 PID: 2379 Comm: kworker/0:0 Not tainted 6.3.0-rc1-syzkaller-00002-g8ca09d5fa354-dirty #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 03/02/2023 Workqueue: mld mld_ifc_work Fixes: 8eb30be0352d ("ipv6: Create ip6_tnl_xmit") Reported-by: syzbot Signed-off-by: Eric Dumazet Link: https://lore.kernel.org/r/20230310191109.2384387-1-edumazet@google.com Signed-off-by: Jakub Kicinski --- include/linux/netdevice.h | 6 ++++-- net/ipv4/ip_tunnel.c | 12 ++++++------ net/ipv6/ip6_tunnel.c | 4 ++-- 3 files changed, 12 insertions(+), 10 deletions(-) (limited to 'include/linux/netdevice.h') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 6a14b7b11766..470085b121d3 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -297,9 +297,11 @@ struct hh_cache { * relationship HH alignment <= LL alignment. */ #define LL_RESERVED_SPACE(dev) \ - ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) + ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom)) \ + & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD) #define LL_RESERVED_SPACE_EXTRA(dev,extra) \ - ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) + ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom) + (extra)) \ + & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD) struct header_ops { int (*create) (struct sk_buff *skb, struct net_device *dev, diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index de90b09dfe78..2541083d49ad 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -614,10 +614,10 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, } headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len; - if (headroom > dev->needed_headroom) - dev->needed_headroom = headroom; + if (headroom > READ_ONCE(dev->needed_headroom)) + WRITE_ONCE(dev->needed_headroom, headroom); - if (skb_cow_head(skb, dev->needed_headroom)) { + if (skb_cow_head(skb, READ_ONCE(dev->needed_headroom))) { ip_rt_put(rt); goto tx_dropped; } @@ -800,10 +800,10 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr) + rt->dst.header_len + ip_encap_hlen(&tunnel->encap); - if (max_headroom > dev->needed_headroom) - dev->needed_headroom = max_headroom; + if (max_headroom > READ_ONCE(dev->needed_headroom)) + WRITE_ONCE(dev->needed_headroom, max_headroom); - if (skb_cow_head(skb, dev->needed_headroom)) { + if (skb_cow_head(skb, READ_ONCE(dev->needed_headroom))) { ip_rt_put(rt); DEV_STATS_INC(dev, tx_dropped); kfree_skb(skb); diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 47b6607a1370..5e80e517f071 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1240,8 +1240,8 @@ route_lookup: */ max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr) + dst->header_len + t->hlen; - if (max_headroom > dev->needed_headroom) - dev->needed_headroom = max_headroom; + if (max_headroom > READ_ONCE(dev->needed_headroom)) + WRITE_ONCE(dev->needed_headroom, max_headroom); err = ip6_tnl_encap(skb, t, &proto, fl6); if (err) -- cgit v1.2.3-70-g09d2 From 8c44fa12c8fa09c6c12f0dc25129a6d13ee0a1ea Mon Sep 17 00:00:00 2001 
From: Ido Schimmel
Date: Wed, 15 Mar 2023 15:11:45 +0200
Subject: net: Add MDB net device operations

Add MDB net device operations that will be invoked by rtnetlink code
in response to received RTM_{NEW,DEL,GET}MDB messages. Subsequent
patches will implement these operations in the bridge and VXLAN
drivers.

Signed-off-by: Ido Schimmel
Reviewed-by: Nikolay Aleksandrov
Signed-off-by: David S. Miller
---
 include/linux/netdevice.h | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

(limited to 'include/linux/netdevice.h')

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ee483071cf59..23b0d7eaaadd 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1307,6 +1307,17 @@ struct netdev_net_notifier {
  *	Used to add FDB entries to dump requests. Implementers should add
  *	entries to skb and update idx with the number of entries.
  *
+ * int (*ndo_mdb_add)(struct net_device *dev, struct nlattr *tb[],
+ *		      u16 nlmsg_flags, struct netlink_ext_ack *extack);
+ *	Adds an MDB entry to dev.
+ * int (*ndo_mdb_del)(struct net_device *dev, struct nlattr *tb[],
+ *		      struct netlink_ext_ack *extack);
+ *	Deletes the MDB entry from dev.
+ * int (*ndo_mdb_dump)(struct net_device *dev, struct sk_buff *skb,
+ *		       struct netlink_callback *cb);
+ *	Dumps MDB entries from dev. The first argument (marker) in the netlink
+ *	callback is used by core rtnetlink code.
+ *
  * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
  *			     u16 flags, struct netlink_ext_ack *extack)
  * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
@@ -1569,6 +1580,16 @@ struct net_device_ops {
 						 const unsigned char *addr,
 						 u16 vid, u32 portid, u32 seq,
 						 struct netlink_ext_ack *extack);
+	int			(*ndo_mdb_add)(struct net_device *dev,
+					       struct nlattr *tb[],
+					       u16 nlmsg_flags,
+					       struct netlink_ext_ack *extack);
+	int			(*ndo_mdb_del)(struct net_device *dev,
+					       struct nlattr *tb[],
+					       struct netlink_ext_ack *extack);
+	int			(*ndo_mdb_dump)(struct net_device *dev,
+						struct sk_buff *skb,
+						struct netlink_callback *cb);
 	int			(*ndo_bridge_setlink)(struct net_device *dev,
 						      struct nlmsghdr *nlh,
 						      u16 flags,
--
cgit v1.2.3-70-g09d2


From 1cc6571f562774f1d928dc8b3cff50829b86e970 Mon Sep 17 00:00:00 2001
From: Nick Child
Date: Tue, 21 Mar 2023 10:07:25 -0500
Subject: netdev: Enforce index cap in netdev_get_tx_queue

When requesting a TX queue at a given index, warn on out-of-bounds
referencing if the index is greater than or equal to the number of
allocated queues. Specifically, since this function is used heavily in
the networking stack, use DEBUG_NET_WARN_ON_ONCE to avoid executing a
new branch on every packet.
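For reference, DEBUG_NET_WARN_ON_ONCE() compiles to nothing on
non-debug builds, which is what makes the check acceptable in such a
hot helper. Its definition has roughly this shape (per
include/net/net_debug.h; a sketch, not a verbatim quote):

	/* With CONFIG_DEBUG_NET the condition is evaluated and warns
	 * once; without it, BUILD_BUG_ON_INVALID() only type-checks
	 * the expression and emits no code.
	 */
	#if defined(CONFIG_DEBUG_NET)
	#define DEBUG_NET_WARN_ON_ONCE(cond) ((void)WARN_ON_ONCE(cond))
	#else
	#define DEBUG_NET_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond)
	#endif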
Signed-off-by: Nick Child Link: https://lore.kernel.org/r/20230321150725.127229-2-nnac123@linux.ibm.com Signed-off-by: Jakub Kicinski --- include/linux/netdevice.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux/netdevice.h') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 7621c512765f..674ee5daa7b1 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2484,6 +2484,7 @@ static inline struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev, unsigned int index) { + DEBUG_NET_WARN_ON_ONCE(index >= dev->num_tx_queues); return &dev->_tx[index]; } -- cgit v1.2.3-70-g09d2 From 3eb8eea2a453463f5606ce3e46cf225f88671440 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 21 Mar 2023 22:38:48 -0700 Subject: docs: networking: document NAPI MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add basic documentation about NAPI. We can stop linking to the ancient doc on the LF wiki. Link: https://lore.kernel.org/all/20230315223044.471002-1-kuba@kernel.org/ Reviewed-by: Bagas Sanjaya Reviewed-by: Toke Høiland-Jørgensen Acked-by: Pavel Pisa # for ctucanfd-driver.rst Reviewed-by: Tony Nguyen Reviewed-by: Florian Fainelli Reviewed-by: Stephen Hemminger Reviewed-by: Randy Dunlap Link: https://lore.kernel.org/r/20230322053848.198452-1-kuba@kernel.org Signed-off-by: Jakub Kicinski --- .../device_drivers/can/ctu/ctucanfd-driver.rst | 3 +- .../device_drivers/ethernet/intel/e100.rst | 3 +- .../device_drivers/ethernet/intel/i40e.rst | 4 +- .../device_drivers/ethernet/intel/ice.rst | 4 +- Documentation/networking/index.rst | 1 + Documentation/networking/napi.rst | 254 +++++++++++++++++++++ include/linux/netdevice.h | 13 +- 7 files changed, 269 insertions(+), 13 deletions(-) create mode 100644 Documentation/networking/napi.rst (limited to 'include/linux/netdevice.h') diff --git a/Documentation/networking/device_drivers/can/ctu/ctucanfd-driver.rst b/Documentation/networking/device_drivers/can/ctu/ctucanfd-driver.rst index 1a4fc6607582..1661d13174d5 100644 --- a/Documentation/networking/device_drivers/can/ctu/ctucanfd-driver.rst +++ b/Documentation/networking/device_drivers/can/ctu/ctucanfd-driver.rst @@ -229,8 +229,7 @@ frames for a while. This has a potential to avoid the costly round of enabling interrupts, handling an incoming IRQ in ISR, re-enabling the softirq and switching context back to softirq. -More detailed documentation of NAPI may be found on the pages of Linux -Foundation ``_. +See :ref:`Documentation/networking/napi.rst ` for more information. Integrating the core to Xilinx Zynq ----------------------------------- diff --git a/Documentation/networking/device_drivers/ethernet/intel/e100.rst b/Documentation/networking/device_drivers/ethernet/intel/e100.rst index 3d4a9ba21946..371b7e5c3293 100644 --- a/Documentation/networking/device_drivers/ethernet/intel/e100.rst +++ b/Documentation/networking/device_drivers/ethernet/intel/e100.rst @@ -151,8 +151,7 @@ NAPI NAPI (Rx polling mode) is supported in the e100 driver. -See https://wiki.linuxfoundation.org/networking/napi for more -information on NAPI. +See :ref:`Documentation/networking/napi.rst ` for more information. 
Multiple Interfaces on Same Ethernet Broadcast Network ------------------------------------------------------ diff --git a/Documentation/networking/device_drivers/ethernet/intel/i40e.rst b/Documentation/networking/device_drivers/ethernet/intel/i40e.rst index ac35bd472bdc..c495c4e16b3b 100644 --- a/Documentation/networking/device_drivers/ethernet/intel/i40e.rst +++ b/Documentation/networking/device_drivers/ethernet/intel/i40e.rst @@ -399,8 +399,8 @@ operate only in full duplex and only at their native speed. NAPI ---- NAPI (Rx polling mode) is supported in the i40e driver. -For more information on NAPI, see -https://wiki.linuxfoundation.org/networking/napi + +See :ref:`Documentation/networking/napi.rst ` for more information. Flow Control ------------ diff --git a/Documentation/networking/device_drivers/ethernet/intel/ice.rst b/Documentation/networking/device_drivers/ethernet/intel/ice.rst index 5efea4dd1251..2b6dc7880d7b 100644 --- a/Documentation/networking/device_drivers/ethernet/intel/ice.rst +++ b/Documentation/networking/device_drivers/ethernet/intel/ice.rst @@ -817,10 +817,10 @@ NOTE: NAPI ---- + This driver supports NAPI (Rx polling mode). -For more information on NAPI, see -https://wiki.linuxfoundation.org/networking/napi +See :ref:`Documentation/networking/napi.rst ` for more information. MACVLAN ------- diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst index 4ddcae33c336..24bb256d6d53 100644 --- a/Documentation/networking/index.rst +++ b/Documentation/networking/index.rst @@ -73,6 +73,7 @@ Contents: mpls-sysctl mptcp-sysctl multiqueue + napi netconsole netdev-features netdevices diff --git a/Documentation/networking/napi.rst b/Documentation/networking/napi.rst new file mode 100644 index 000000000000..a7a047742e93 --- /dev/null +++ b/Documentation/networking/napi.rst @@ -0,0 +1,254 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + +.. _napi: + +==== +NAPI +==== + +NAPI is the event handling mechanism used by the Linux networking stack. +The name NAPI no longer stands for anything in particular [#]_. + +In basic operation the device notifies the host about new events +via an interrupt. +The host then schedules a NAPI instance to process the events. +The device may also be polled for events via NAPI without receiving +interrupts first (:ref:`busy polling`). + +NAPI processing usually happens in the software interrupt context, +but there is an option to use :ref:`separate kernel threads` +for NAPI processing. + +All in all NAPI abstracts away from the drivers the context and configuration +of event (packet Rx and Tx) processing. + +Driver API +========== + +The two most important elements of NAPI are the struct napi_struct +and the associated poll method. struct napi_struct holds the state +of the NAPI instance while the method is the driver-specific event +handler. The method will typically free Tx packets that have been +transmitted and process newly received packets. + +.. _drv_ctrl: + +Control API +----------- + +netif_napi_add() and netif_napi_del() add/remove a NAPI instance +from the system. The instances are attached to the netdevice passed +as argument (and will be deleted automatically when netdevice is +unregistered). Instances are added in a disabled state. + +napi_enable() and napi_disable() manage the disabled state. +A disabled NAPI can't be scheduled and its poll method is guaranteed +to not be invoked. napi_disable() waits for ownership of the NAPI +instance to be released. + +The control APIs are not idempotent. 
Control API calls are safe against +concurrent use of datapath APIs but an incorrect sequence of control API +calls may result in crashes, deadlocks, or race conditions. For example, +calling napi_disable() multiple times in a row will deadlock. + +Datapath API +------------ + +napi_schedule() is the basic method of scheduling a NAPI poll. +Drivers should call this function in their interrupt handler +(see :ref:`drv_sched` for more info). A successful call to napi_schedule() +will take ownership of the NAPI instance. + +Later, after NAPI is scheduled, the driver's poll method will be +called to process the events/packets. The method takes a ``budget`` +argument - drivers can process completions for any number of Tx +packets but should only process up to ``budget`` number of +Rx packets. Rx processing is usually much more expensive. + +In other words, it is recommended to ignore the budget argument when +performing TX buffer reclamation to ensure that the reclamation is not +arbitrarily bounded; however, it is required to honor the budget argument +for RX processing. + +.. warning:: + + The ``budget`` argument may be 0 if core tries to only process Tx completions + and no Rx packets. + +The poll method returns the amount of work done. If the driver still +has outstanding work to do (e.g. ``budget`` was exhausted) +the poll method should return exactly ``budget``. In that case, +the NAPI instance will be serviced/polled again (without the +need to be scheduled). + +If event processing has been completed (all outstanding packets +processed) the poll method should call napi_complete_done() +before returning. napi_complete_done() releases the ownership +of the instance. + +.. warning:: + + The case of finishing all events and using exactly ``budget`` + must be handled carefully. There is no way to report this + (rare) condition to the stack, so the driver must either + not call napi_complete_done() and wait to be called again, + or return ``budget - 1``. + + If the ``budget`` is 0 napi_complete_done() should never be called. + +Call sequence +------------- + +Drivers should not make assumptions about the exact sequencing +of calls. The poll method may be called without the driver scheduling +the instance (unless the instance is disabled). Similarly, +it's not guaranteed that the poll method will be called, even +if napi_schedule() succeeded (e.g. if the instance gets disabled). + +As mentioned in the :ref:`drv_ctrl` section - napi_disable() and subsequent +calls to the poll method only wait for the ownership of the instance +to be released, not for the poll method to exit. This means that +drivers should avoid accessing any data structures after calling +napi_complete_done(). + +.. _drv_sched: + +Scheduling and IRQ masking +-------------------------- + +Drivers should keep the interrupts masked after scheduling +the NAPI instance - until NAPI polling finishes any further +interrupts are unnecessary. + +Drivers which have to mask the interrupts explicitly (as opposed +to IRQ being auto-masked by the device) should use the napi_schedule_prep() +and __napi_schedule() calls: + +.. code-block:: c + + if (napi_schedule_prep(&v->napi)) { + mydrv_mask_rxtx_irq(v->idx); + /* schedule after masking to avoid races */ + __napi_schedule(&v->napi); + } + +IRQ should only be unmasked after a successful call to napi_complete_done(): + +.. 
code-block:: c + + if (budget && napi_complete_done(&v->napi, work_done)) { + mydrv_unmask_rxtx_irq(v->idx); + return min(work_done, budget - 1); + } + +napi_schedule_irqoff() is a variant of napi_schedule() which takes advantage +of guarantees given by being invoked in IRQ context (no need to +mask interrupts). Note that PREEMPT_RT forces all interrupts +to be threaded so the interrupt may need to be marked ``IRQF_NO_THREAD`` +to avoid issues on real-time kernel configurations. + +Instance to queue mapping +------------------------- + +Modern devices have multiple NAPI instances (struct napi_struct) per +interface. There is no strong requirement on how the instances are +mapped to queues and interrupts. NAPI is primarily a polling/processing +abstraction without specific user-facing semantics. That said, most networking +devices end up using NAPI in fairly similar ways. + +NAPI instances most often correspond 1:1:1 to interrupts and queue pairs +(queue pair is a set of a single Rx and single Tx queue). + +In less common cases a NAPI instance may be used for multiple queues +or Rx and Tx queues can be serviced by separate NAPI instances on a single +core. Regardless of the queue assignment, however, there is usually still +a 1:1 mapping between NAPI instances and interrupts. + +It's worth noting that the ethtool API uses a "channel" terminology where +each channel can be either ``rx``, ``tx`` or ``combined``. It's not clear +what constitutes a channel; the recommended interpretation is to understand +a channel as an IRQ/NAPI which services queues of a given type. For example, +a configuration of 1 ``rx``, 1 ``tx`` and 1 ``combined`` channel is expected +to utilize 3 interrupts, 2 Rx and 2 Tx queues. + +User API +======== + +User interactions with NAPI depend on NAPI instance ID. The instance IDs +are only visible to the user thru the ``SO_INCOMING_NAPI_ID`` socket option. +It's not currently possible to query IDs used by a given device. + +Software IRQ coalescing +----------------------- + +NAPI does not perform any explicit event coalescing by default. +In most scenarios batching happens due to IRQ coalescing which is done +by the device. There are cases where software coalescing is helpful. + +NAPI can be configured to arm a repoll timer instead of unmasking +the hardware interrupts as soon as all packets are processed. +The ``gro_flush_timeout`` sysfs configuration of the netdevice +is reused to control the delay of the timer, while +``napi_defer_hard_irqs`` controls the number of consecutive empty polls +before NAPI gives up and goes back to using hardware IRQs. + +.. _poll: + +Busy polling +------------ + +Busy polling allows a user process to check for incoming packets before +the device interrupt fires. As is the case with any busy polling it trades +off CPU cycles for lower latency (production uses of NAPI busy polling +are not well known). + +Busy polling is enabled by either setting ``SO_BUSY_POLL`` on +selected sockets or using the global ``net.core.busy_poll`` and +``net.core.busy_read`` sysctls. An io_uring API for NAPI busy polling +also exists. + +IRQ mitigation +--------------- + +While busy polling is supposed to be used by low latency applications, +a similar mechanism can be used for IRQ mitigation. + +Very high request-per-second applications (especially routing/forwarding +applications and especially applications using AF_XDP sockets) may not +want to be interrupted until they finish processing a request or a batch +of packets. 
+ +Such applications can pledge to the kernel that they will perform a busy +polling operation periodically, and the driver should keep the device IRQs +permanently masked. This mode is enabled by using the ``SO_PREFER_BUSY_POLL`` +socket option. To avoid system misbehavior the pledge is revoked +if ``gro_flush_timeout`` passes without any busy poll call. + +The NAPI budget for busy polling is lower than the default (which makes +sense given the low latency intention of normal busy polling). This is +not the case with IRQ mitigation, however, so the budget can be adjusted +with the ``SO_BUSY_POLL_BUDGET`` socket option. + +.. _threaded: + +Threaded NAPI +------------- + +Threaded NAPI is an operating mode that uses dedicated kernel +threads rather than software IRQ context for NAPI processing. +The configuration is per netdevice and will affect all +NAPI instances of that device. Each NAPI instance will spawn a separate +thread (called ``napi/${ifc-name}-${napi-id}``). + +It is recommended to pin each kernel thread to a single CPU, the same +CPU as the CPU which services the interrupt. Note that the mapping +between IRQs and NAPI instances may not be trivial (and is driver +dependent). The NAPI instance IDs will be assigned in the opposite +order than the process IDs of the kernel threads. + +Threaded NAPI is controlled by writing 0/1 to the ``threaded`` file in +netdev's sysfs directory. + +.. rubric:: Footnotes + +.. [#] NAPI was originally referred to as New API in 2.4 Linux. diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 674ee5daa7b1..18a5be6ddd0f 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -509,15 +509,18 @@ static inline bool napi_reschedule(struct napi_struct *napi) return false; } -bool napi_complete_done(struct napi_struct *n, int work_done); /** - * napi_complete - NAPI processing complete - * @n: NAPI context + * napi_complete_done - NAPI processing complete + * @n: NAPI context + * @work_done: number of packets processed * - * Mark NAPI processing as complete. - * Consider using napi_complete_done() instead. + * Mark NAPI processing as complete. Should only be called if poll budget + * has not been completely consumed. + * Prefer over napi_complete(). * Return false if device should avoid rearming interrupts. */ +bool napi_complete_done(struct napi_struct *n, int work_done); + static inline bool napi_complete(struct napi_struct *n) { return napi_complete_done(n, 0); -- cgit v1.2.3-70-g09d2 From c59647c0dc679008886756a888368da1c6d4ccd3 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 28 Mar 2023 23:50:19 +0000 Subject: net: add softnet_data.in_net_rx_action We want to make two optimizations in napi_schedule_rps() and ____napi_schedule() which require to know if these helpers are called from net_rx_action(), instead of being called from other contexts. sd.in_net_rx_action is only read/written by the owning cpu. 
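The flag enables checks of the following shape; the helper here is
hypothetical and only illustrates the intended use (the actual
optimizations land in the follow-up patches):

	/* Hypothetical sketch: when already inside net_rx_action(),
	 * raising NET_RX_SOFTIRQ is unnecessary because the poll loop
	 * will pick up the new work before returning.
	 */
	static inline void example_raise_rx(struct softnet_data *sd)
	{
		if (!sd->in_net_rx_action)
			__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	}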
Signed-off-by: Eric Dumazet Reviewed-by: Jason Xing Tested-by: Jason Xing Signed-off-by: Paolo Abeni --- include/linux/netdevice.h | 1 + net/core/dev.c | 4 ++++ 2 files changed, 5 insertions(+) (limited to 'include/linux/netdevice.h') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 18a5be6ddd0f..c8c634091a65 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3188,6 +3188,7 @@ struct softnet_data { #ifdef CONFIG_RPS struct softnet_data *rps_ipi_list; #endif + bool in_net_rx_action; #ifdef CONFIG_NET_FLOW_LIMIT struct sd_flow_limit __rcu *flow_limit; #endif diff --git a/net/core/dev.c b/net/core/dev.c index f7050b95d125..15331edbacf4 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -6646,6 +6646,7 @@ static __latent_entropy void net_rx_action(struct softirq_action *h) LIST_HEAD(list); LIST_HEAD(repoll); + sd->in_net_rx_action = true; local_irq_disable(); list_splice_init(&sd->poll_list, &list); local_irq_enable(); @@ -6656,6 +6657,7 @@ static __latent_entropy void net_rx_action(struct softirq_action *h) skb_defer_free_flush(sd); if (list_empty(&list)) { + sd->in_net_rx_action = false; if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll)) goto end; break; @@ -6682,6 +6684,8 @@ static __latent_entropy void net_rx_action(struct softirq_action *h) list_splice(&list, &sd->poll_list); if (!list_empty(&sd->poll_list)) __raise_softirq_irqoff(NET_RX_SOFTIRQ); + else + sd->in_net_rx_action = false; net_rps_action_and_irq_enable(sd); end:; -- cgit v1.2.3-70-g09d2 From dd2d6604407da5b1b260faee409cd601fe914ce9 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 30 Mar 2023 21:47:31 -0700 Subject: net: minor reshuffle of napi_struct napi_id is read by GRO and drivers to mark skbs, and it currently sits at the end of the structure, in a mostly unused cache line. Move it up into a hole, and separate the clearly control path fields from the important ones. 
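The layouts below are pahole-style dumps of struct napi_struct. As a
reminder of why napi_id is hot: receive processing stamps every skb
with it, along these lines (a simplified sketch modeled on
skb_mark_napi_id(), not the exact kernel code):

	static inline void example_mark_napi_id(struct sk_buff *skb,
						const struct napi_struct *napi)
	{
		/* read per packet; wants to share a cache line with
		 * the other Rx-path fields of the NAPI instance
		 */
		skb->napi_id = napi->napi_id;
	}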
Before:

struct napi_struct {
	struct list_head poll_list;              /*   0  16 */
	long unsigned int state;                 /*  16   8 */
	int weight;                              /*  24   4 */
	int defer_hard_irqs_count;               /*  28   4 */
	long unsigned int gro_bitmask;           /*  32   8 */
	int (*poll)(struct napi_struct *, int);  /*  40   8 */
	int poll_owner;                          /*  48   4 */

	/* XXX 4 bytes hole, try to pack */

	struct net_device * dev;                 /*  56   8 */
	/* --- cacheline 1 boundary (64 bytes) --- */
	struct gro_list gro_hash[8];             /*  64 192 */
	/* --- cacheline 4 boundary (256 bytes) --- */
	struct sk_buff * skb;                    /* 256   8 */
	struct list_head rx_list;                /* 264  16 */
	int rx_count;                            /* 280   4 */

	/* XXX 4 bytes hole, try to pack */

	struct hrtimer timer;                    /* 288  64 */

	/* XXX last struct has 4 bytes of padding */

	/* --- cacheline 5 boundary (320 bytes) was 32 bytes ago --- */
	struct list_head dev_list;               /* 352  16 */
	struct hlist_node napi_hash_node;        /* 368  16 */
	/* --- cacheline 6 boundary (384 bytes) --- */
	unsigned int napi_id;                    /* 384   4 */

	/* XXX 4 bytes hole, try to pack */

	struct task_struct * thread;             /* 392   8 */

	/* size: 400, cachelines: 7, members: 17 */
	/* sum members: 388, holes: 3, sum holes: 12 */
	/* paddings: 1, sum paddings: 4 */
	/* last cacheline: 16 bytes */
};

After:

struct napi_struct {
	struct list_head poll_list;              /*   0  16 */
	long unsigned int state;                 /*  16   8 */
	int weight;                              /*  24   4 */
	int defer_hard_irqs_count;               /*  28   4 */
	long unsigned int gro_bitmask;           /*  32   8 */
	int (*poll)(struct napi_struct *, int);  /*  40   8 */
	int poll_owner;                          /*  48   4 */

	/* XXX 4 bytes hole, try to pack */

	struct net_device * dev;                 /*  56   8 */
	/* --- cacheline 1 boundary (64 bytes) --- */
	struct gro_list gro_hash[8];             /*  64 192 */
	/* --- cacheline 4 boundary (256 bytes) --- */
	struct sk_buff * skb;                    /* 256   8 */
	struct list_head rx_list;                /* 264  16 */
	int rx_count;                            /* 280   4 */
	unsigned int napi_id;                    /* 284   4 */
	struct hrtimer timer;                    /* 288  64 */

	/* XXX last struct has 4 bytes of padding */

	/* --- cacheline 5 boundary (320 bytes) was 32 bytes ago --- */
	struct task_struct * thread;             /* 352   8 */
	struct list_head dev_list;               /* 360  16 */
	struct hlist_node napi_hash_node;        /* 376  16 */

	/* size: 392, cachelines: 7, members: 17 */
	/* sum members: 388, holes: 1, sum holes: 4 */
	/* paddings: 1, sum paddings: 4 */
	/* forced alignments: 1 */
	/* last cacheline: 8 bytes */
} __attribute__((__aligned__(8)));

Signed-off-by: Jakub Kicinski
Reviewed-by: Eric Dumazet
Signed-off-by: David S. Miller
---
 include/linux/netdevice.h | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'include/linux/netdevice.h')

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index c8c634091a65..62e093a6d6d1 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -367,11 +367,12 @@ struct napi_struct {
 	struct sk_buff		*skb;
 	struct list_head	rx_list; /* Pending GRO_NORMAL skbs */
 	int			rx_count; /* length of rx_list */
+	unsigned int		napi_id;
 	struct hrtimer		timer;
+	struct task_struct	*thread;
+	/* control-path-only fields follow */
 	struct list_head	dev_list;
 	struct hlist_node	napi_hash_node;
-	unsigned int		napi_id;
-	struct task_struct	*thread;
 };

 enum {
--
cgit v1.2.3-70-g09d2


From 88c0a6b503b7f4fffb68a8d49c3987870c5b1d6b Mon Sep 17 00:00:00 2001
From: Vladimir Oltean
Date: Sun, 2 Apr 2023 15:37:55 +0300
Subject: net: create a netdev notifier for DSA to reject PTP on DSA master

The fact that PTP 2-step TX timestamping is broken on DSA switches if
the master also timestamps the same packets is documented by commit
f685e609a301 ("net: dsa: Deny PTP on master if switch supports it").
We attempt to help the users avoid shooting themselves in the foot by making DSA reject the timestamping ioctls on an interface that is a DSA master, and the switch tree beneath it contains switches which are aware of PTP. The only problem is that there isn't an established way of intercepting ndo_eth_ioctl calls, so DSA creates avoidable burden upon the network stack by creating a struct dsa_netdevice_ops with overlaid function pointers that are manually checked from the relevant call sites. There used to be 2 such dsa_netdevice_ops, but now, ndo_eth_ioctl is the only one left. There is an ongoing effort to migrate driver-visible hardware timestamping control from the ndo_eth_ioctl() based API to a new ndo_hwtstamp_set() model, but DSA actively prevents that migration, since dsa_master_ioctl() is currently coded to manually call the master's legacy ndo_eth_ioctl(), and so, whenever a network device driver would be converted to the new API, DSA's restrictions would be circumvented, because any device could be used as a DSA master. The established way for unrelated modules to react on a net device event is via netdevice notifiers. So we create a new notifier which gets called whenever there is an attempt to change hardware timestamping settings on a device. Finally, there is another reason why a netdev notifier will be a good idea, besides strictly DSA, and this has to do with PHY timestamping. With ndo_eth_ioctl(), all MAC drivers must manually call phy_has_hwtstamp() before deciding whether to act upon SIOCSHWTSTAMP, otherwise they must pass this ioctl to the PHY driver via phy_mii_ioctl(). With the new ndo_hwtstamp_set() API, it will be desirable to simply not make any calls into the MAC device driver when timestamping should be performed at the PHY level. But there exist drivers, such as the lan966x switch, which need to install packet traps for PTP regardless of whether they are the layer that provides the hardware timestamps, or the PHY is. That would be impossible to support with the new API. The proposal there, too, is to introduce a netdev notifier which acts as a better cue for switching drivers to add or remove PTP packet traps, than ndo_hwtstamp_set(). The one introduced here "almost" works there as well, except for the fact that packet traps should only be installed if the PHY driver succeeded to enable hardware timestamping, whereas here, we need to deny hardware timestamping on the DSA master before it actually gets enabled. This is why this notifier is called "PRE_", and the notifier that would get used for PHY timestamping and packet traps would be called NETDEV_CHANGE_HWTSTAMP. This isn't a new concept, for example NETDEV_CHANGEUPPER and NETDEV_PRECHANGEUPPER do the same thing. In expectation of future netlink UAPI, we also pass a non-NULL extack pointer to the netdev notifier, and we make DSA populate it with an informative reason for the rejection. To avoid making it go to waste, we make the ioctl-based dev_set_hwtstamp() create a fake extack and print the message to the kernel log. Link: https://lore.kernel.org/netdev/20230401191215.tvveoi3lkawgg6g4@skbuf/ Link: https://lore.kernel.org/netdev/20230310164451.ls7bbs6pdzs4m6pw@skbuf/ Signed-off-by: Vladimir Oltean Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- include/linux/netdevice.h | 9 ++++++++- include/net/dsa.h | 51 ----------------------------------------------- net/core/dev.c | 8 +++----- net/core/dev_ioctl.c | 16 +++++++++++++-- net/dsa/master.c | 50 ++++++++++++++-------------------------------- net/dsa/master.h | 3 +++ net/dsa/slave.c | 11 ++++++++++ 7 files changed, 54 insertions(+), 94 deletions(-) (limited to 'include/linux/netdevice.h') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 62e093a6d6d1..a740be3bb911 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2878,6 +2878,7 @@ enum netdev_cmd { NETDEV_OFFLOAD_XSTATS_REPORT_USED, NETDEV_OFFLOAD_XSTATS_REPORT_DELTA, NETDEV_XDP_FEAT_CHANGE, + NETDEV_PRE_CHANGE_HWTSTAMP, }; const char *netdev_cmd_to_name(enum netdev_cmd cmd); @@ -2928,6 +2929,11 @@ struct netdev_notifier_pre_changeaddr_info { const unsigned char *dev_addr; }; +struct netdev_notifier_hwtstamp_info { + struct netdev_notifier_info info; /* must be first */ + struct kernel_hwtstamp_config *config; +}; + enum netdev_offload_xstats_type { NETDEV_OFFLOAD_XSTATS_TYPE_L3 = 1, }; @@ -2984,7 +2990,8 @@ netdev_notifier_info_to_extack(const struct netdev_notifier_info *info) } int call_netdevice_notifiers(unsigned long val, struct net_device *dev); - +int call_netdevice_notifiers_info(unsigned long val, + struct netdev_notifier_info *info); extern rwlock_t dev_base_lock; /* Device list lock */ diff --git a/include/net/dsa.h b/include/net/dsa.h index a15f17a38eca..8903053fa5aa 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -109,16 +109,6 @@ struct dsa_device_ops { bool promisc_on_master; }; -/* This structure defines the control interfaces that are overlayed by the - * DSA layer on top of the DSA CPU/management net_device instance. This is - * used by the core net_device layer while calling various net_device_ops - * function pointers. - */ -struct dsa_netdevice_ops { - int (*ndo_eth_ioctl)(struct net_device *dev, struct ifreq *ifr, - int cmd); -}; - struct dsa_lag { struct net_device *dev; unsigned int id; @@ -317,11 +307,6 @@ struct dsa_port { */ const struct ethtool_ops *orig_ethtool_ops; - /* - * Original copy of the master netdev net_device_ops - */ - const struct dsa_netdevice_ops *netdev_ops; - /* List of MAC addresses that must be forwarded on this port. * These are only valid on CPU ports and DSA links. 
*/ @@ -1339,42 +1324,6 @@ static inline void dsa_tag_generic_flow_dissect(const struct sk_buff *skb, #endif } -#if IS_ENABLED(CONFIG_NET_DSA) -static inline int __dsa_netdevice_ops_check(struct net_device *dev) -{ - int err = -EOPNOTSUPP; - - if (!dev->dsa_ptr) - return err; - - if (!dev->dsa_ptr->netdev_ops) - return err; - - return 0; -} - -static inline int dsa_ndo_eth_ioctl(struct net_device *dev, struct ifreq *ifr, - int cmd) -{ - const struct dsa_netdevice_ops *ops; - int err; - - err = __dsa_netdevice_ops_check(dev); - if (err) - return err; - - ops = dev->dsa_ptr->netdev_ops; - - return ops->ndo_eth_ioctl(dev, ifr, cmd); -} -#else -static inline int dsa_ndo_eth_ioctl(struct net_device *dev, struct ifreq *ifr, - int cmd) -{ - return -EOPNOTSUPP; -} -#endif - void dsa_unregister_switch(struct dsa_switch *ds); int dsa_register_switch(struct dsa_switch *ds); void dsa_switch_shutdown(struct dsa_switch *ds); diff --git a/net/core/dev.c b/net/core/dev.c index 0c4b21291348..7ce5985be84b 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -160,8 +160,6 @@ struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; struct list_head ptype_all __read_mostly; /* Taps */ static int netif_rx_internal(struct sk_buff *skb); -static int call_netdevice_notifiers_info(unsigned long val, - struct netdev_notifier_info *info); static int call_netdevice_notifiers_extack(unsigned long val, struct net_device *dev, struct netlink_ext_ack *extack); @@ -1614,7 +1612,7 @@ const char *netdev_cmd_to_name(enum netdev_cmd cmd) N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO) N(PRE_CHANGEADDR) N(OFFLOAD_XSTATS_ENABLE) N(OFFLOAD_XSTATS_DISABLE) N(OFFLOAD_XSTATS_REPORT_USED) N(OFFLOAD_XSTATS_REPORT_DELTA) - N(XDP_FEAT_CHANGE) + N(XDP_FEAT_CHANGE) N(PRE_CHANGE_HWTSTAMP) } #undef N return "UNKNOWN_NETDEV_EVENT"; @@ -1919,8 +1917,8 @@ static void move_netdevice_notifiers_dev_net(struct net_device *dev, * are as for raw_notifier_call_chain(). 
*/ -static int call_netdevice_notifiers_info(unsigned long val, - struct netdev_notifier_info *info) +int call_netdevice_notifiers_info(unsigned long val, + struct netdev_notifier_info *info) { struct net *net = dev_net(info->dev); int ret; diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c index c532ef4d5dff..6d772837eb3f 100644 --- a/net/core/dev_ioctl.c +++ b/net/core/dev_ioctl.c @@ -259,7 +259,11 @@ static int dev_get_hwtstamp(struct net_device *dev, struct ifreq *ifr) static int dev_set_hwtstamp(struct net_device *dev, struct ifreq *ifr) { + struct netdev_notifier_hwtstamp_info info = { + .info.dev = dev, + }; struct kernel_hwtstamp_config kernel_cfg; + struct netlink_ext_ack extack = {}; struct hwtstamp_config cfg; int err; @@ -272,9 +276,17 @@ static int dev_set_hwtstamp(struct net_device *dev, struct ifreq *ifr) if (err) return err; - err = dsa_ndo_eth_ioctl(dev, ifr, SIOCSHWTSTAMP); - if (err != -EOPNOTSUPP) + info.info.extack = &extack; + info.config = &kernel_cfg; + + err = call_netdevice_notifiers_info(NETDEV_PRE_CHANGE_HWTSTAMP, + &info.info); + err = notifier_to_errno(err); + if (err) { + if (extack._msg) + netdev_err(dev, "%s\n", extack._msg); return err; + } return dev_eth_ioctl(dev, ifr, SIOCSHWTSTAMP); } diff --git a/net/dsa/master.c b/net/dsa/master.c index e397641382ca..c2cabe6248b1 100644 --- a/net/dsa/master.c +++ b/net/dsa/master.c @@ -195,38 +195,31 @@ static void dsa_master_get_strings(struct net_device *dev, uint32_t stringset, } } -static int dsa_master_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +/* Deny PTP operations on master if there is at least one switch in the tree + * that is PTP capable. + */ +int dsa_master_pre_change_hwtstamp(struct net_device *dev, + const struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { struct dsa_port *cpu_dp = dev->dsa_ptr; struct dsa_switch *ds = cpu_dp->ds; struct dsa_switch_tree *dst; - int err = -EOPNOTSUPP; struct dsa_port *dp; dst = ds->dst; - switch (cmd) { - case SIOCGHWTSTAMP: - case SIOCSHWTSTAMP: - /* Deny PTP operations on master if there is at least one - * switch in the tree that is PTP capable. 
- */ - list_for_each_entry(dp, &dst->ports, list) - if (dsa_port_supports_hwtstamp(dp)) - return -EBUSY; - break; + list_for_each_entry(dp, &dst->ports, list) { + if (dsa_port_supports_hwtstamp(dp)) { + NL_SET_ERR_MSG(extack, + "HW timestamping not allowed on DSA master when switch supports the operation"); + return -EBUSY; + } } - if (dev->netdev_ops->ndo_eth_ioctl) - err = dev->netdev_ops->ndo_eth_ioctl(dev, ifr, cmd); - - return err; + return 0; } -static const struct dsa_netdevice_ops dsa_netdev_ops = { - .ndo_eth_ioctl = dsa_master_ioctl, -}; - static int dsa_master_ethtool_setup(struct net_device *dev) { struct dsa_port *cpu_dp = dev->dsa_ptr; @@ -267,15 +260,6 @@ static void dsa_master_ethtool_teardown(struct net_device *dev) cpu_dp->orig_ethtool_ops = NULL; } -static void dsa_netdev_ops_set(struct net_device *dev, - const struct dsa_netdevice_ops *ops) -{ - if (netif_is_lag_master(dev)) - return; - - dev->dsa_ptr->netdev_ops = ops; -} - /* Keep the master always promiscuous if the tagging protocol requires that * (garbles MAC DA) or if it doesn't support unicast filtering, case in which * it would revert to promiscuous mode as soon as we call dev_uc_add() on it @@ -414,16 +398,13 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp) if (ret) goto out_err_reset_promisc; - dsa_netdev_ops_set(dev, &dsa_netdev_ops); - ret = sysfs_create_group(&dev->dev.kobj, &dsa_group); if (ret) - goto out_err_ndo_teardown; + goto out_err_ethtool_teardown; return ret; -out_err_ndo_teardown: - dsa_netdev_ops_set(dev, NULL); +out_err_ethtool_teardown: dsa_master_ethtool_teardown(dev); out_err_reset_promisc: dsa_master_set_promiscuity(dev, -1); @@ -433,7 +414,6 @@ out_err_reset_promisc: void dsa_master_teardown(struct net_device *dev) { sysfs_remove_group(&dev->dev.kobj, &dsa_group); - dsa_netdev_ops_set(dev, NULL); dsa_master_ethtool_teardown(dev); dsa_master_reset_mtu(dev); dsa_master_set_promiscuity(dev, -1); diff --git a/net/dsa/master.h b/net/dsa/master.h index 3fc0e610b5b5..80842f4e27f7 100644 --- a/net/dsa/master.h +++ b/net/dsa/master.h @@ -15,5 +15,8 @@ int dsa_master_lag_setup(struct net_device *lag_dev, struct dsa_port *cpu_dp, struct netlink_ext_ack *extack); void dsa_master_lag_teardown(struct net_device *lag_dev, struct dsa_port *cpu_dp); +int dsa_master_pre_change_hwtstamp(struct net_device *dev, + const struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack); #endif diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 165bb2cb8431..8abc1658ac47 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -3289,6 +3289,7 @@ static int dsa_master_changeupper(struct net_device *dev, static int dsa_slave_netdevice_event(struct notifier_block *nb, unsigned long event, void *ptr) { + struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr); struct net_device *dev = netdev_notifier_info_to_dev(ptr); switch (event) { @@ -3418,6 +3419,16 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb, return NOTIFY_OK; } + case NETDEV_PRE_CHANGE_HWTSTAMP: { + struct netdev_notifier_hwtstamp_info *info = ptr; + int err; + + if (!netdev_uses_dsa(dev)) + return NOTIFY_DONE; + + err = dsa_master_pre_change_hwtstamp(dev, info->config, extack); + return notifier_from_errno(err); + } default: break; } -- cgit v1.2.3-70-g09d2 From 5a17818682cf43ad0fdd6035945f3b7a8c9dc5e9 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Thu, 6 Apr 2023 14:42:46 +0300 Subject: net: dsa: replace NETDEV_PRE_CHANGE_HWTSTAMP notifier with a stub There was a sort of rush surrounding 
commit 88c0a6b503b7 ("net: create a netdev notifier for DSA to reject PTP on DSA master"), due to a desire to convert DSA's attempt to deny TX timestamping on a DSA master to something that doesn't block the kernel-wide API conversion from ndo_eth_ioctl() to ndo_hwtstamp_set(). What was required was a mechanism that did not depend on ndo_eth_ioctl(), and what was provided was a mechanism that did not depend on ndo_eth_ioctl(), while at the same time introducing something that wasn't absolutely necessary - a new netdev notifier. There have been objections from Jakub Kicinski that using notifiers in general when they are not absolutely necessary creates complications to the control flow and difficulties to maintainers who look at the code. So there is a desire to not use notifiers. In addition to that, the notifier chain gets called even if there is no DSA in the system and no one is interested in applying any restriction. Take the model of udp_tunnel_nic_ops and introduce a stub mechanism, through which net/core/dev_ioctl.c can call into DSA even when CONFIG_NET_DSA=m. Compared to the code that existed prior to the notifier conversion, aka what was added in commits: - 4cfab3566710 ("net: dsa: Add wrappers for overloaded ndo_ops") - 3369afba1e46 ("net: Call into DSA netdevice_ops wrappers") this is different because we are not overloading any struct net_device_ops of the DSA master anymore, but rather, we are exposing a rather specific functionality which is orthogonal to which API is used to enable it - ndo_eth_ioctl() or ndo_hwtstamp_set(). Also, what is similar is that both approaches use function pointers to get from built-in code to DSA. There is no point in replicating the function pointers towards __dsa_master_hwtstamp_validate() once for every CPU port (dev->dsa_ptr). Instead, it is sufficient to introduce a singleton struct dsa_stubs, built into the kernel, which contains a single function pointer to __dsa_master_hwtstamp_validate(). I find this approach preferable to what we had originally, because dev->dsa_ptr->netdev_ops->ndo_do_ioctl() used to require going through struct dsa_port (dev->dsa_ptr), and so, this was incompatible with any attempts to add any data encapsulation and hide DSA data structures from the outside world. Link: https://lore.kernel.org/netdev/20230403083019.120b72fd@kernel.org/ Signed-off-by: Vladimir Oltean Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- include/linux/netdevice.h | 6 ------ include/net/dsa_stubs.h | 48 +++++++++++++++++++++++++++++++++++++++++++++++ net/Makefile | 2 +- net/core/dev.c | 2 +- net/core/dev_ioctl.c | 12 ++---------- net/dsa/Makefile | 6 ++++++ net/dsa/dsa.c | 19 +++++++++++++++++++ net/dsa/master.c | 2 +- net/dsa/master.h | 2 +- net/dsa/slave.c | 11 ----------- net/dsa/stubs.c | 10 ++++++++++ 11 files changed, 89 insertions(+), 31 deletions(-) create mode 100644 include/net/dsa_stubs.h create mode 100644 net/dsa/stubs.c (limited to 'include/linux/netdevice.h') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index a740be3bb911..1c25b39681b3 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2878,7 +2878,6 @@ enum netdev_cmd { NETDEV_OFFLOAD_XSTATS_REPORT_USED, NETDEV_OFFLOAD_XSTATS_REPORT_DELTA, NETDEV_XDP_FEAT_CHANGE, - NETDEV_PRE_CHANGE_HWTSTAMP, }; const char *netdev_cmd_to_name(enum netdev_cmd cmd); @@ -2929,11 +2928,6 @@ struct netdev_notifier_pre_changeaddr_info { const unsigned char *dev_addr; }; -struct netdev_notifier_hwtstamp_info { - struct netdev_notifier_info info; /* must be first */ - struct kernel_hwtstamp_config *config; -}; - enum netdev_offload_xstats_type { NETDEV_OFFLOAD_XSTATS_TYPE_L3 = 1, }; diff --git a/include/net/dsa_stubs.h b/include/net/dsa_stubs.h new file mode 100644 index 000000000000..361811750a54 --- /dev/null +++ b/include/net/dsa_stubs.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * include/net/dsa_stubs.h - Stubs for the Distributed Switch Architecture framework + */ + +#include +#include +#include +#include + +#if IS_ENABLED(CONFIG_NET_DSA) + +extern const struct dsa_stubs *dsa_stubs; + +struct dsa_stubs { + int (*master_hwtstamp_validate)(struct net_device *dev, + const struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack); +}; + +static inline int dsa_master_hwtstamp_validate(struct net_device *dev, + const struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) +{ + if (!netdev_uses_dsa(dev)) + return 0; + + /* rtnl_lock() is a sufficient guarantee, because as long as + * netdev_uses_dsa() returns true, the dsa_core module is still + * registered, and so, dsa_unregister_stubs() couldn't have run. + * For netdev_uses_dsa() to start returning false, it would imply that + * dsa_master_teardown() has executed, which requires rtnl_lock(). 
+ */ + ASSERT_RTNL(); + + return dsa_stubs->master_hwtstamp_validate(dev, config, extack); +} + +#else + +static inline int dsa_master_hwtstamp_validate(struct net_device *dev, + const struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) +{ + return 0; +} + +#endif diff --git a/net/Makefile b/net/Makefile index 0914bea9c335..87592009366f 100644 --- a/net/Makefile +++ b/net/Makefile @@ -24,7 +24,7 @@ obj-$(CONFIG_PACKET) += packet/ obj-$(CONFIG_NET_KEY) += key/ obj-$(CONFIG_BRIDGE) += bridge/ obj-$(CONFIG_NET_DEVLINK) += devlink/ -obj-$(CONFIG_NET_DSA) += dsa/ +obj-y += dsa/ obj-$(CONFIG_ATALK) += appletalk/ obj-$(CONFIG_X25) += x25/ obj-$(CONFIG_LAPB) += lapb/ diff --git a/net/core/dev.c b/net/core/dev.c index 7ce5985be84b..480600a075ce 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1612,7 +1612,7 @@ const char *netdev_cmd_to_name(enum netdev_cmd cmd) N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO) N(PRE_CHANGEADDR) N(OFFLOAD_XSTATS_ENABLE) N(OFFLOAD_XSTATS_DISABLE) N(OFFLOAD_XSTATS_REPORT_USED) N(OFFLOAD_XSTATS_REPORT_DELTA) - N(XDP_FEAT_CHANGE) N(PRE_CHANGE_HWTSTAMP) + N(XDP_FEAT_CHANGE) } #undef N return "UNKNOWN_NETDEV_EVENT"; diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c index 6d772837eb3f..3730945ee294 100644 --- a/net/core/dev_ioctl.c +++ b/net/core/dev_ioctl.c @@ -7,7 +7,7 @@ #include #include #include -#include +#include #include #include "dev.h" @@ -259,9 +259,6 @@ static int dev_get_hwtstamp(struct net_device *dev, struct ifreq *ifr) static int dev_set_hwtstamp(struct net_device *dev, struct ifreq *ifr) { - struct netdev_notifier_hwtstamp_info info = { - .info.dev = dev, - }; struct kernel_hwtstamp_config kernel_cfg; struct netlink_ext_ack extack = {}; struct hwtstamp_config cfg; @@ -276,12 +273,7 @@ static int dev_set_hwtstamp(struct net_device *dev, struct ifreq *ifr) if (err) return err; - info.info.extack = &extack; - info.config = &kernel_cfg; - - err = call_netdevice_notifiers_info(NETDEV_PRE_CHANGE_HWTSTAMP, - &info.info); - err = notifier_to_errno(err); + err = dsa_master_hwtstamp_validate(dev, &kernel_cfg, &extack); if (err) { if (extack._msg) netdev_err(dev, "%s\n", extack._msg); diff --git a/net/dsa/Makefile b/net/dsa/Makefile index cc7e93a562fe..3835de286116 100644 --- a/net/dsa/Makefile +++ b/net/dsa/Makefile @@ -1,4 +1,10 @@ # SPDX-License-Identifier: GPL-2.0 + +# the stubs are built-in whenever DSA is built-in or module +ifdef CONFIG_NET_DSA +obj-y := stubs.o +endif + # the core obj-$(CONFIG_NET_DSA) += dsa_core.o dsa_core-y += \ diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index e5f156940c67..ab1afe67fd18 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include "devlink.h" @@ -1702,6 +1703,20 @@ bool dsa_mdb_present_in_other_db(struct dsa_switch *ds, int port, } EXPORT_SYMBOL_GPL(dsa_mdb_present_in_other_db); +static const struct dsa_stubs __dsa_stubs = { + .master_hwtstamp_validate = __dsa_master_hwtstamp_validate, +}; + +static void dsa_register_stubs(void) +{ + dsa_stubs = &__dsa_stubs; +} + +static void dsa_unregister_stubs(void) +{ + dsa_stubs = NULL; +} + static int __init dsa_init_module(void) { int rc; @@ -1721,6 +1736,8 @@ static int __init dsa_init_module(void) if (rc) goto netlink_register_fail; + dsa_register_stubs(); + return 0; netlink_register_fail: @@ -1735,6 +1752,8 @@ module_init(dsa_init_module); static void __exit dsa_cleanup_module(void) { + dsa_unregister_stubs(); + rtnl_link_unregister(&dsa_link_ops); dsa_slave_unregister_notifier(); diff 
--git a/net/dsa/master.c b/net/dsa/master.c index c2cabe6248b1..6be89ab0cc01 100644 --- a/net/dsa/master.c +++ b/net/dsa/master.c @@ -198,7 +198,7 @@ static void dsa_master_get_strings(struct net_device *dev, uint32_t stringset, /* Deny PTP operations on master if there is at least one switch in the tree * that is PTP capable. */ -int dsa_master_pre_change_hwtstamp(struct net_device *dev, +int __dsa_master_hwtstamp_validate(struct net_device *dev, const struct kernel_hwtstamp_config *config, struct netlink_ext_ack *extack) { diff --git a/net/dsa/master.h b/net/dsa/master.h index 80842f4e27f7..76e39d3ec909 100644 --- a/net/dsa/master.h +++ b/net/dsa/master.h @@ -15,7 +15,7 @@ int dsa_master_lag_setup(struct net_device *lag_dev, struct dsa_port *cpu_dp, struct netlink_ext_ack *extack); void dsa_master_lag_teardown(struct net_device *lag_dev, struct dsa_port *cpu_dp); -int dsa_master_pre_change_hwtstamp(struct net_device *dev, +int __dsa_master_hwtstamp_validate(struct net_device *dev, const struct kernel_hwtstamp_config *config, struct netlink_ext_ack *extack); diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 8abc1658ac47..165bb2cb8431 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -3289,7 +3289,6 @@ static int dsa_master_changeupper(struct net_device *dev, static int dsa_slave_netdevice_event(struct notifier_block *nb, unsigned long event, void *ptr) { - struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr); struct net_device *dev = netdev_notifier_info_to_dev(ptr); switch (event) { @@ -3419,16 +3418,6 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb, return NOTIFY_OK; } - case NETDEV_PRE_CHANGE_HWTSTAMP: { - struct netdev_notifier_hwtstamp_info *info = ptr; - int err; - - if (!netdev_uses_dsa(dev)) - return NOTIFY_DONE; - - err = dsa_master_pre_change_hwtstamp(dev, info->config, extack); - return notifier_from_errno(err); - } default: break; } diff --git a/net/dsa/stubs.c b/net/dsa/stubs.c new file mode 100644 index 000000000000..2ed8a6c85fbf --- /dev/null +++ b/net/dsa/stubs.c @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Stubs for DSA functionality called by the core network stack. + * These are necessary because CONFIG_NET_DSA can be a module, and built-in + * code cannot directly call symbols exported by modules. + */ +#include + +const struct dsa_stubs *dsa_stubs; +EXPORT_SYMBOL_GPL(dsa_stubs); -- cgit v1.2.3-70-g09d2 From c91c46de6bbc1147ae5dfe046b87f5f3d6593215 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 6 Apr 2023 18:25:33 -0700 Subject: net: provide macros for commonly copied lockless queue stop/wake code A lot of drivers follow the same scheme to stop / start queues without introducing locks between xmit and NAPI tx completions. I'm guessing they all copy'n'paste each other's code. The original code dates back all the way to e1000 and Linux 2.6.19. Smaller drivers shy away from the scheme and introduce a lock which may cause deadlocks in netpoll. Provide macros which encapsulate the necessary logic. The macros do not prevent false wake ups, the extra barrier required to close that race is not worth it. 
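As a usage sketch (not part of this patch; the foo_* names, thresholds and helpers are hypothetical), a driver would pair the two macros like this, stopping from ndo_start_xmit and waking from NAPI Tx completion:

/* Needs <net/netdev_queues.h>. Illustration only. */
static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_ring *ring = foo_tx_ring(dev, skb_get_queue_mapping(skb));
	struct netdev_queue *txq = netdev_get_tx_queue(dev, ring->index);

	/* ... map the skb, post descriptors, bump the producer index ... */

	/* Stop if we can no longer fit a worst-case skb; the macro
	 * re-checks after setting the stop bit in case the completion
	 * path freed descriptors concurrently.
	 */
	netif_txq_maybe_stop(txq, foo_desc_avail(ring),
			     MAX_SKB_FRAGS + 1, FOO_TX_WAKE_THRESH);
	return NETDEV_TX_OK;
}

static void foo_clean_tx(struct foo_ring *ring)
{
	struct netdev_queue *txq = netdev_get_tx_queue(ring->dev, ring->index);

	/* ... reclaim descriptors and publish the new consumer index
	 * before the macro evaluates foo_desc_avail() ...
	 */
	netif_txq_maybe_wake(txq, foo_desc_avail(ring), FOO_TX_WAKE_THRESH);
}

Note that foo_desc_avail(ring) is passed as an expression rather than a cached value: the macros may evaluate it several times and rely on it returning up-to-date information each time.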
See discussion in: https://lore.kernel.org/all/c39312a2-4537-14b4-270c-9fe1fbb91e89@gmail.com/ Acked-by: Herbert Xu Signed-off-by: Jakub Kicinski --- Documentation/networking/driver.rst | 6 ++ include/linux/netdevice.h | 1 + include/net/netdev_queues.h | 144 ++++++++++++++++++++++++++++++++++++ 3 files changed, 151 insertions(+) create mode 100644 include/net/netdev_queues.h (limited to 'include/linux/netdevice.h') diff --git a/Documentation/networking/driver.rst b/Documentation/networking/driver.rst index 19c363291d04..4071f2c00f8b 100644 --- a/Documentation/networking/driver.rst +++ b/Documentation/networking/driver.rst @@ -104,6 +104,12 @@ and: TX_BUFFS_AVAIL(dp) > 0) netif_wake_queue(dp->dev); +Lockless queue stop / wake helper macros +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. kernel-doc:: include/net/netdev_queues.h + :doc: Lockless queue stopping / waking helpers. + No exclusive ownership ---------------------- diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 1c25b39681b3..7bec9a2be8ef 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3335,6 +3335,7 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev) static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) { + /* Must be an atomic op see netif_txq_try_stop() */ set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); } diff --git a/include/net/netdev_queues.h b/include/net/netdev_queues.h new file mode 100644 index 000000000000..5236d78bbdeb --- /dev/null +++ b/include/net/netdev_queues.h @@ -0,0 +1,144 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_NET_QUEUES_H +#define _LINUX_NET_QUEUES_H + +#include + +/** + * DOC: Lockless queue stopping / waking helpers. + * + * The netif_txq_maybe_stop() and __netif_txq_completed_wake() + * macros are designed to safely implement stopping + * and waking netdev queues without full lock protection. + * + * We assume that there can be no concurrent stop attempts and no concurrent + * wake attempts. The try-stop should happen from the xmit handler, + * while wake up should be triggered from NAPI poll context. + * The two may run concurrently (single producer, single consumer). + * + * The try-stop side is expected to run from the xmit handler and therefore + * it does not reschedule Tx (netif_tx_start_queue() instead of + * netif_tx_wake_queue()). Uses of the ``stop`` macros outside of the xmit + * handler may lead to xmit queue being enabled but not run. + * The waking side does not have similar context restrictions. + * + * The macros guarantee that rings will not remain stopped if there's + * space available, but they do *not* prevent false wake ups when + * the ring is full! Drivers should check for ring full at the start + * for the xmit handler. + * + * All descriptor ring indexes (and other relevant shared state) must + * be updated before invoking the macros. + */ + +#define netif_txq_try_stop(txq, get_desc, start_thrs) \ + ({ \ + int _res; \ + \ + netif_tx_stop_queue(txq); \ + /* Producer index and stop bit must be visible \ + * to consumer before we recheck. \ + * Pairs with a barrier in __netif_txq_maybe_wake(). \ + */ \ + smp_mb__after_atomic(); \ + \ + /* We need to check again in a case another \ + * CPU has just made room available. 
\ + */ \ + _res = 0; \ + if (unlikely(get_desc >= start_thrs)) { \ + netif_tx_start_queue(txq); \ + _res = -1; \ + } \ + _res; \ + }) \ + +/** + * netif_txq_maybe_stop() - locklessly stop a Tx queue, if needed + * @txq: struct netdev_queue to stop/start + * @get_desc: get current number of free descriptors (see requirements below!) + * @stop_thrs: minimal number of available descriptors for queue to be left + * enabled + * @start_thrs: minimal number of descriptors to re-enable the queue, can be + * equal to @stop_thrs or higher to avoid frequent waking + * + * All arguments may be evaluated multiple times, beware of side effects. + * @get_desc must be a formula or a function call, it must always + * return up-to-date information when evaluated! + * Expected to be used from ndo_start_xmit, see the comment on top of the file. + * + * Returns: + * 0 if the queue was stopped + * 1 if the queue was left enabled + * -1 if the queue was re-enabled (raced with waking) + */ +#define netif_txq_maybe_stop(txq, get_desc, stop_thrs, start_thrs) \ + ({ \ + int _res; \ + \ + _res = 1; \ + if (unlikely(get_desc < stop_thrs)) \ + _res = netif_txq_try_stop(txq, get_desc, start_thrs); \ + _res; \ + }) \ + + +/** + * __netif_txq_maybe_wake() - locklessly wake a Tx queue, if needed + * @txq: struct netdev_queue to stop/start + * @get_desc: get current number of free descriptors (see requirements below!) + * @start_thrs: minimal number of descriptors to re-enable the queue + * @down_cond: down condition, predicate indicating that the queue should + * not be woken up even if descriptors are available + * + * All arguments may be evaluated multiple times. + * @get_desc must be a formula or a function call, it must always + * return up-to-date information when evaluated! + * + * Returns: + * 0 if the queue was woken up + * 1 if the queue was already enabled (or disabled but @down_cond is true) + * -1 if the queue was left unchanged (@start_thrs not reached) + */ +#define __netif_txq_maybe_wake(txq, get_desc, start_thrs, down_cond) \ + ({ \ + int _res; \ + \ + _res = -1; \ + if (likely(get_desc > start_thrs)) { \ + /* Make sure that anybody stopping the queue after \ + * this sees the new next_to_clean. \ + */ \ + smp_mb(); \ + _res = 1; \ + if (unlikely(netif_tx_queue_stopped(txq)) && \ + !(down_cond)) { \ + netif_tx_wake_queue(txq); \ + _res = 0; \ + } \ + } \ + _res; \ + }) + +#define netif_txq_maybe_wake(txq, get_desc, start_thrs) \ + __netif_txq_maybe_wake(txq, get_desc, start_thrs, false) + +/* subqueue variants follow */ + +#define netif_subqueue_try_stop(dev, idx, get_desc, start_thrs) \ + ({ \ + struct netdev_queue *txq; \ + \ + txq = netdev_get_tx_queue(dev, idx); \ + netif_txq_try_stop(txq, get_desc, start_thrs); \ + }) + +#define netif_subqueue_maybe_stop(dev, idx, get_desc, stop_thrs, start_thrs) \ + ({ \ + struct netdev_queue *txq; \ + \ + txq = netdev_get_tx_queue(dev, idx); \ + netif_txq_maybe_stop(txq, get_desc, stop_thrs, start_thrs); \ + }) + +#endif -- cgit v1.2.3-70-g09d2 From 301f227fc860624d37ba5dae9da57dcf371268db Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 6 Apr 2023 18:25:36 -0700 Subject: net: piggy back on the memory barrier in bql when waking queues Drivers call netdev_tx_completed_queue() right before netif_txq_maybe_wake(). If BQL is enabled netdev_tx_completed_queue() should issue a memory barrier, so we can depend on that separating the stop check from the consumer index update, instead of adding another barrier in netif_txq_maybe_wake(). 
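A hedged sketch of the completion-path pattern this enables (the foo_* names and threshold are invented, not from the patch):

static void foo_clean_tx(struct foo_ring *ring, u16 cons,
			 unsigned int pkts, unsigned int bytes)
{
	struct netdev_queue *txq = netdev_get_tx_queue(ring->dev, ring->index);

	/* Publish the consumer index before the wake check ... */
	WRITE_ONCE(ring->next_to_clean, cons);

	/* ... and let the BQL completion report supply the smp_mb()
	 * that orders the index update against reading the queue's
	 * stopped bit.
	 */
	__netif_txq_completed_wake(txq, pkts, bytes,
				   foo_desc_avail(ring), FOO_TX_WAKE_THRESH,
				   !netif_carrier_ok(ring->dev));
}

In other words, the barrier moves out of the wake macro and into the BQL completion report that drivers were issuing anyway.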
This matters more than the barriers on the xmit path, because the wake condition is almost always true. So we issue the consumer side barrier often. Wrap netdev_tx_completed_queue() in a local helper to issue the barrier even if BQL is disabled. Keep the same semantics as netdev_tx_completed_queue() (barrier only if bytes != 0) to make it clear that the barrier is conditional. Plus since macro gets pkt/byte counts as arguments now - we can skip waking if there were no packets completed. Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 6 ++--- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 12 ++++----- include/linux/netdevice.h | 2 +- include/net/netdev_queues.h | 39 ++++++++++++++++++++------- 4 files changed, 38 insertions(+), 21 deletions(-) (limited to 'include/linux/netdevice.h') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index de97bee25249..f7602d8d79e3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -688,11 +688,11 @@ next_tx_int: dev_kfree_skb_any(skb); } - netdev_tx_completed_queue(txq, nr_pkts, tx_bytes); txr->tx_cons = cons; - __netif_txq_maybe_wake(txq, bnxt_tx_avail(bp, txr), bp->tx_wake_thresh, - READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING); + __netif_txq_completed_wake(txq, nr_pkts, tx_bytes, + bnxt_tx_avail(bp, txr), bp->tx_wake_thresh, + READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING); } static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index cbbddee55db1..f2604fc05991 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1251,15 +1251,13 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, if (ring_is_xdp(tx_ring)) return !!budget; - netdev_tx_completed_queue(txring_txq(tx_ring), - total_packets, total_bytes); - #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); - if (total_packets && netif_carrier_ok(tx_ring->netdev) && - !__netif_txq_maybe_wake(txq, ixgbe_desc_unused(tx_ring), - TX_WAKE_THRESHOLD, - test_bit(__IXGBE_DOWN, &adapter->state))) + if (!__netif_txq_completed_wake(txq, total_packets, total_bytes, + ixgbe_desc_unused(tx_ring), + TX_WAKE_THRESHOLD, + netif_carrier_ok(tx_ring->netdev) && + test_bit(__IXGBE_DOWN, &adapter->state))) ++tx_ring->tx_stats.restart_queue; return !!budget; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 7bec9a2be8ef..fe355592dfde 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3532,7 +3532,7 @@ static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue, * netdev_tx_sent_queue will miss the update and cause the queue to * be stopped forever */ - smp_mb(); + smp_mb(); /* NOTE: netdev_txq_completed_mb() assumes this exists */ if (unlikely(dql_avail(&dev_queue->dql) < 0)) return; diff --git a/include/net/netdev_queues.h b/include/net/netdev_queues.h index 5236d78bbdeb..b26fdb441e39 100644 --- a/include/net/netdev_queues.h +++ b/include/net/netdev_queues.h @@ -38,7 +38,7 @@ netif_tx_stop_queue(txq); \ /* Producer index and stop bit must be visible \ * to consumer before we recheck. \ - * Pairs with a barrier in __netif_txq_maybe_wake(). \ + * Pairs with a barrier in __netif_txq_completed_wake(). 
\ */ \ smp_mb__after_atomic(); \ \ @@ -82,10 +82,24 @@ _res; \ }) \ +/* Variant of netdev_tx_completed_queue() which guarantees smp_mb() if + * @bytes != 0, regardless of kernel config. + */ +static inline void +netdev_txq_completed_mb(struct netdev_queue *dev_queue, + unsigned int pkts, unsigned int bytes) +{ + if (IS_ENABLED(CONFIG_BQL)) + netdev_tx_completed_queue(dev_queue, pkts, bytes); + else if (bytes) + smp_mb(); +} /** - * __netif_txq_maybe_wake() - locklessly wake a Tx queue, if needed + * __netif_txq_completed_wake() - locklessly wake a Tx queue, if needed * @txq: struct netdev_queue to stop/start + * @pkts: number of packets completed + * @bytes: number of bytes completed * @get_desc: get current number of free descriptors (see requirements below!) * @start_thrs: minimal number of descriptors to re-enable the queue * @down_cond: down condition, predicate indicating that the queue should @@ -94,22 +108,27 @@ * All arguments may be evaluated multiple times. * @get_desc must be a formula or a function call, it must always * return up-to-date information when evaluated! + * Reports completed pkts/bytes to BQL. * * Returns: * 0 if the queue was woken up * 1 if the queue was already enabled (or disabled but @down_cond is true) * -1 if the queue was left unchanged (@start_thrs not reached) */ -#define __netif_txq_maybe_wake(txq, get_desc, start_thrs, down_cond) \ +#define __netif_txq_completed_wake(txq, pkts, bytes, \ + get_desc, start_thrs, down_cond) \ ({ \ int _res; \ \ + /* Report to BQL and piggy back on its barrier. \ + * Barrier makes sure that anybody stopping the queue \ + * after this point sees the new consumer index. \ + * Pairs with barrier in netif_txq_try_stop(). \ + */ \ + netdev_txq_completed_mb(txq, pkts, bytes); \ + \ _res = -1; \ - if (likely(get_desc > start_thrs)) { \ - /* Make sure that anybody stopping the queue after \ - * this sees the new next_to_clean. \ - */ \ - smp_mb(); \ + if (pkts && likely(get_desc > start_thrs)) { \ _res = 1; \ if (unlikely(netif_tx_queue_stopped(txq)) && \ !(down_cond)) { \ @@ -120,8 +139,8 @@ _res; \ }) -#define netif_txq_maybe_wake(txq, get_desc, start_thrs) \ - __netif_txq_maybe_wake(txq, get_desc, start_thrs, false) +#define netif_txq_completed_wake(txq, pkts, bytes, get_desc, start_thrs) \ + __netif_txq_completed_wake(txq, pkts, bytes, get_desc, start_thrs, false) /* subqueue variants follow */ -- cgit v1.2.3-70-g09d2 From 0cd917a4a8ace70ff9082d797c899f6bf10de910 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Wed, 12 Apr 2023 21:48:40 +0200 Subject: xdp: rss hash types representation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The RSS hash type specifies what portion of packet data NIC hardware used when calculating RSS hash value. The RSS types are focused on Internet traffic protocols at OSI layers L3 and L4. L2 (e.g. ARP) often get hash value zero and no RSS type. For L3 focused on IPv4 vs. IPv6, and L4 primarily TCP vs UDP, but some hardware supports SCTP. Hardware RSS types are differently encoded for each hardware NIC. Most hardware represent RSS hash type as a number. Determining L3 vs L4 often requires a mapping table as there often isn't a pattern or sorting according to ISO layer. The patch introduce a XDP RSS hash type (enum xdp_rss_hash_type) that contains both BITs for the L3/L4 types, and combinations to be used by drivers for their mapping tables. 
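As a sketch of what such a mapping table might look like in a driver (the foo_* hardware encoding is hypothetical, for illustration only):

/* Hypothetical NIC: the Rx completion carries a small RSS type index. */
static const enum xdp_rss_hash_type foo_rss_hash_types[] = {
	[FOO_CQE_RSS_NONE]	= XDP_RSS_TYPE_L2,
	[FOO_CQE_RSS_IPV4]	= XDP_RSS_TYPE_L3_IPV4,
	[FOO_CQE_RSS_IPV6]	= XDP_RSS_TYPE_L3_IPV6,
	[FOO_CQE_RSS_TCP_IPV4]	= XDP_RSS_TYPE_L4_IPV4_TCP,
	[FOO_CQE_RSS_TCP_IPV6]	= XDP_RSS_TYPE_L4_IPV6_TCP,
	[FOO_CQE_RSS_UDP_IPV4]	= XDP_RSS_TYPE_L4_IPV4_UDP,
	[FOO_CQE_RSS_UDP_IPV6]	= XDP_RSS_TYPE_L4_IPV6_UDP,
};

static int foo_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
			   enum xdp_rss_hash_type *rss_type)
{
	const struct foo_xdp_buff *_ctx = (void *)ctx;

	if (!(_ctx->cqe->flags & FOO_CQE_RSS_VALID))
		return -ENODATA;

	*hash = le32_to_cpu(_ctx->cqe->rss_hash);
	*rss_type = foo_rss_hash_types[_ctx->cqe->rss_type & FOO_CQE_RSS_MASK];
	return 0;
}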
The enum xdp_rss_type_bits get exposed to BPF via BTF, and it is up to the BPF-programmer to match using these defines. This proposal change the kfunc API bpf_xdp_metadata_rx_hash() adding a pointer value argument for provide the RSS hash type. Change signature for all xmo_rx_hash calls in drivers to make it compile. The RSS type implementations for each driver comes as separate patches. Fixes: 3d76a4d3d4e5 ("bpf: XDP metadata RX kfuncs") Signed-off-by: Jesper Dangaard Brouer Acked-by: Toke Høiland-Jørgensen Acked-by: Stanislav Fomichev Link: https://lore.kernel.org/r/168132892042.340624.582563003880565460.stgit@firesoul Signed-off-by: Alexei Starovoitov --- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 3 +- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 3 +- drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 3 +- drivers/net/veth.c | 3 +- include/linux/netdevice.h | 3 +- include/net/xdp.h | 45 ++++++++++++++++++++++++ net/core/xdp.c | 10 +++++- 7 files changed, 64 insertions(+), 6 deletions(-) (limited to 'include/linux/netdevice.h') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 4b5e459b6d49..73d10aa4c503 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -681,7 +681,8 @@ int mlx4_en_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp) return 0; } -int mlx4_en_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash) +int mlx4_en_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash, + enum xdp_rss_hash_type *rss_type) { struct mlx4_en_xdp_buff *_ctx = (void *)ctx; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 544e09b97483..4ac4d883047b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -798,7 +798,8 @@ int mlx4_en_netdev_event(struct notifier_block *this, struct xdp_md; int mlx4_en_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp); -int mlx4_en_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash); +int mlx4_en_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash, + enum xdp_rss_hash_type *rss_type); /* * Functions for time stamping diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index c5dae48b7932..efe609f8e3aa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -169,7 +169,8 @@ static int mlx5e_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp) return 0; } -static int mlx5e_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash) +static int mlx5e_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash, + enum xdp_rss_hash_type *rss_type) { const struct mlx5e_xdp_buff *_ctx = (void *)ctx; diff --git a/drivers/net/veth.c b/drivers/net/veth.c index c1178915496d..424e8876a16b 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -1648,7 +1648,8 @@ static int veth_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp) return 0; } -static int veth_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash) +static int veth_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash, + enum xdp_rss_hash_type *rss_type) { struct veth_xdp_buff *_ctx = (void *)ctx; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 470085b121d3..c35f04f636f1 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1624,7 +1624,8 @@ struct net_device_ops { struct xdp_metadata_ops { int (*xmo_rx_timestamp)(const struct xdp_md *ctx, u64 
*timestamp); - int (*xmo_rx_hash)(const struct xdp_md *ctx, u32 *hash); + int (*xmo_rx_hash)(const struct xdp_md *ctx, u32 *hash, + enum xdp_rss_hash_type *rss_type); }; /** diff --git a/include/net/xdp.h b/include/net/xdp.h index 41c57b8b1671..a76c4ea203ea 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -8,6 +8,7 @@ #include /* skb_shared_info */ #include +#include /** * DOC: XDP RX-queue information @@ -425,6 +426,50 @@ XDP_METADATA_KFUNC_xxx MAX_XDP_METADATA_KFUNC, }; +enum xdp_rss_hash_type { + /* First part: Individual bits for L3/L4 types */ + XDP_RSS_L3_IPV4 = BIT(0), + XDP_RSS_L3_IPV6 = BIT(1), + + /* The fixed (L3) IPv4 and IPv6 headers can both be followed by + * variable/dynamic headers, IPv4 called Options and IPv6 called + * Extension Headers. HW RSS type can contain this info. + */ + XDP_RSS_L3_DYNHDR = BIT(2), + + /* When RSS hash covers L4 then drivers MUST set XDP_RSS_L4 bit in + * addition to the protocol specific bit. This ease interaction with + * SKBs and avoids reserving a fixed mask for future L4 protocol bits. + */ + XDP_RSS_L4 = BIT(3), /* L4 based hash, proto can be unknown */ + XDP_RSS_L4_TCP = BIT(4), + XDP_RSS_L4_UDP = BIT(5), + XDP_RSS_L4_SCTP = BIT(6), + XDP_RSS_L4_IPSEC = BIT(7), /* L4 based hash include IPSEC SPI */ + + /* Second part: RSS hash type combinations used for driver HW mapping */ + XDP_RSS_TYPE_NONE = 0, + XDP_RSS_TYPE_L2 = XDP_RSS_TYPE_NONE, + + XDP_RSS_TYPE_L3_IPV4 = XDP_RSS_L3_IPV4, + XDP_RSS_TYPE_L3_IPV6 = XDP_RSS_L3_IPV6, + XDP_RSS_TYPE_L3_IPV4_OPT = XDP_RSS_L3_IPV4 | XDP_RSS_L3_DYNHDR, + XDP_RSS_TYPE_L3_IPV6_EX = XDP_RSS_L3_IPV6 | XDP_RSS_L3_DYNHDR, + + XDP_RSS_TYPE_L4_ANY = XDP_RSS_L4, + XDP_RSS_TYPE_L4_IPV4_TCP = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_TCP, + XDP_RSS_TYPE_L4_IPV4_UDP = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_UDP, + XDP_RSS_TYPE_L4_IPV4_SCTP = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_SCTP, + + XDP_RSS_TYPE_L4_IPV6_TCP = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_TCP, + XDP_RSS_TYPE_L4_IPV6_UDP = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_UDP, + XDP_RSS_TYPE_L4_IPV6_SCTP = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_SCTP, + + XDP_RSS_TYPE_L4_IPV6_TCP_EX = XDP_RSS_TYPE_L4_IPV6_TCP | XDP_RSS_L3_DYNHDR, + XDP_RSS_TYPE_L4_IPV6_UDP_EX = XDP_RSS_TYPE_L4_IPV6_UDP | XDP_RSS_L3_DYNHDR, + XDP_RSS_TYPE_L4_IPV6_SCTP_EX = XDP_RSS_TYPE_L4_IPV6_SCTP | XDP_RSS_L3_DYNHDR, +}; + #ifdef CONFIG_NET u32 bpf_xdp_metadata_kfunc_id(int id); bool bpf_dev_bound_kfunc_id(u32 btf_id); diff --git a/net/core/xdp.c b/net/core/xdp.c index 528d4b37983d..fb85aca81961 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -734,13 +734,21 @@ __bpf_kfunc int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx, u64 *tim * bpf_xdp_metadata_rx_hash - Read XDP frame RX hash. * @ctx: XDP context pointer. * @hash: Return value pointer. + * @rss_type: Return value pointer for RSS type. + * + * The RSS hash type (@rss_type) specifies what portion of packet headers NIC + * hardware used when calculating RSS hash value. The RSS type can be decoded + * via &enum xdp_rss_hash_type either matching on individual L3/L4 bits + * ``XDP_RSS_L*`` or by combined traditional *RSS Hashing Types* + * ``XDP_RSS_TYPE_L*``. * * Return: * * Returns 0 on success or ``-errno`` on error. 
* * ``-EOPNOTSUPP`` : means device driver doesn't implement kfunc * * ``-ENODATA`` : means no RX-hash available for this frame */ -__bpf_kfunc int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash) +__bpf_kfunc int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash, + enum xdp_rss_hash_type *rss_type) { return -EOPNOTSUPP; } -- cgit v1.2.3-70-g09d2 From 8c48eea3adf3119e0a3fc57bd31f6966f26ee784 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 12 Apr 2023 21:26:04 -0700 Subject: page_pool: allow caching from safely localized NAPI Recent patches to mlx5 mentioned a regression when moving from driver local page pool to only using the generic page pool code. Page pool has two recycling paths (1) direct one, which runs in safe NAPI context (basically consumer context, so producing can be lockless); and (2) via a ptr_ring, which takes a spin lock because the freeing can happen from any CPU; producer and consumer may run concurrently. Since the page pool code was added, Eric introduced a revised version of deferred skb freeing. TCP skbs are now usually returned to the CPU which allocated them, and freed in softirq context. This places the freeing (producing of pages back to the pool) enticingly close to the allocation (consumer). If we can prove that we're freeing in the same softirq context in which the consumer NAPI will run - lockless use of the cache is perfectly fine, no need for the lock. Let drivers link the page pool to a NAPI instance. If the NAPI instance is scheduled on the same CPU on which we're freeing - place the pages in the direct cache. With that and patched bnxt (XDP enabled to engage the page pool, sigh, bnxt really needs page pool work :() I see a 2.6% perf boost with a TCP stream test (app on a different physical core than softirq). The CPU use of relevant functions decreases as expected: page_pool_refill_alloc_cache 1.17% -> 0% _raw_spin_lock 2.41% -> 0.98% Only consider lockless path to be safe when NAPI is scheduled - in practice this should cover majority if not all of steady state workloads. It's usually the NAPI kicking in that causes the skb flush. The main case we'll miss out on is when application runs on the same CPU as NAPI. In that case we don't use the deferred skb free path. Reviewed-by: Tariq Toukan Acked-by: Jesper Dangaard Brouer Tested-by: Dragos Tatulea Signed-off-by: Jakub Kicinski --- Documentation/networking/page_pool.rst | 1 + include/linux/netdevice.h | 3 +++ include/linux/skbuff.h | 20 +++++++++++++------- include/net/page_pool.h | 3 ++- net/core/dev.c | 3 +++ net/core/page_pool.c | 15 +++++++++++++-- net/core/skbuff.c | 4 ++-- 7 files changed, 37 insertions(+), 12 deletions(-) (limited to 'include/linux/netdevice.h') diff --git a/Documentation/networking/page_pool.rst b/Documentation/networking/page_pool.rst index 30f1344e7cca..873efd97f822 100644 --- a/Documentation/networking/page_pool.rst +++ b/Documentation/networking/page_pool.rst @@ -165,6 +165,7 @@ Registration pp_params.pool_size = DESC_NUM; pp_params.nid = NUMA_NO_NODE; pp_params.dev = priv->dev; + pp_params.napi = napi; /* only if locking is tied to NAPI */ pp_params.dma_dir = xdp_prog ? 
DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; page_pool = page_pool_create(&pp_params); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 96d27d558b0c..203c0df2046c 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -360,8 +360,11 @@ struct napi_struct { unsigned long gro_bitmask; int (*poll)(struct napi_struct *, int); #ifdef CONFIG_NETPOLL + /* CPU actively polling if netpoll is configured */ int poll_owner; #endif + /* CPU on which NAPI has been scheduled for processing */ + int list_owner; struct net_device *dev; struct gro_list gro_hash[GRO_HASH_BUCKETS]; struct sk_buff *skb; diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 494a23a976b0..a823ec3aa326 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3386,6 +3386,18 @@ static inline void skb_frag_ref(struct sk_buff *skb, int f) __skb_frag_ref(&skb_shinfo(skb)->frags[f]); } +static inline void +napi_frag_unref(skb_frag_t *frag, bool recycle, bool napi_safe) +{ + struct page *page = skb_frag_page(frag); + +#ifdef CONFIG_PAGE_POOL + if (recycle && page_pool_return_skb_page(page, napi_safe)) + return; +#endif + put_page(page); +} + /** * __skb_frag_unref - release a reference on a paged fragment. * @frag: the paged fragment @@ -3396,13 +3408,7 @@ static inline void skb_frag_ref(struct sk_buff *skb, int f) */ static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle) { - struct page *page = skb_frag_page(frag); - -#ifdef CONFIG_PAGE_POOL - if (recycle && page_pool_return_skb_page(page)) - return; -#endif - put_page(page); + napi_frag_unref(frag, recycle, false); } /** diff --git a/include/net/page_pool.h b/include/net/page_pool.h index ddfa0b328677..91b808dade82 100644 --- a/include/net/page_pool.h +++ b/include/net/page_pool.h @@ -77,6 +77,7 @@ struct page_pool_params { unsigned int pool_size; int nid; /* Numa node id to allocate from pages from */ struct device *dev; /* device, for DMA pre-mapping purposes */ + struct napi_struct *napi; /* Sole consumer of pages, otherwise NULL */ enum dma_data_direction dma_dir; /* DMA mapping direction */ unsigned int max_len; /* max DMA sync memory size */ unsigned int offset; /* DMA addr offset */ @@ -239,7 +240,7 @@ inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool) return pool->p.dma_dir; } -bool page_pool_return_skb_page(struct page *page); +bool page_pool_return_skb_page(struct page *page, bool napi_safe); struct page_pool *page_pool_create(const struct page_pool_params *params); diff --git a/net/core/dev.c b/net/core/dev.c index c7f13742b56c..8aea68275172 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4359,6 +4359,7 @@ static inline void ____napi_schedule(struct softnet_data *sd, } list_add_tail(&napi->poll_list, &sd->poll_list); + WRITE_ONCE(napi->list_owner, smp_processor_id()); /* If not called from net_rx_action() * we have to raise NET_RX_SOFTIRQ. 
*/ @@ -6069,6 +6070,7 @@ bool napi_complete_done(struct napi_struct *n, int work_done) list_del_init(&n->poll_list); local_irq_restore(flags); } + WRITE_ONCE(n->list_owner, -1); val = READ_ONCE(n->state); do { @@ -6384,6 +6386,7 @@ void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi, #ifdef CONFIG_NETPOLL napi->poll_owner = -1; #endif + napi->list_owner = -1; set_bit(NAPI_STATE_SCHED, &napi->state); set_bit(NAPI_STATE_NPSVC, &napi->state); list_add_rcu(&napi->dev_list, &dev->napi_list); diff --git a/net/core/page_pool.c b/net/core/page_pool.c index 193c18799865..2f6bf422ed30 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -19,6 +19,7 @@ #include /* for put_page() */ #include #include +#include #include @@ -874,9 +875,11 @@ void page_pool_update_nid(struct page_pool *pool, int new_nid) } EXPORT_SYMBOL(page_pool_update_nid); -bool page_pool_return_skb_page(struct page *page) +bool page_pool_return_skb_page(struct page *page, bool napi_safe) { + struct napi_struct *napi; struct page_pool *pp; + bool allow_direct; page = compound_head(page); @@ -892,12 +895,20 @@ bool page_pool_return_skb_page(struct page *page) pp = page->pp; + /* Allow direct recycle if we have reasons to believe that we are + * in the same context as the consumer would run, so there's + * no possible race. + */ + napi = pp->p.napi; + allow_direct = napi_safe && napi && + READ_ONCE(napi->list_owner) == smp_processor_id(); + /* Driver set this to memory recycling info. Reset it on recycle. * This will *not* work for NIC using a split-page memory model. * The page will be returned to the pool here regardless of the * 'flipped' fragment being in use or not. */ - page_pool_put_full_page(pp, page, false); + page_pool_put_full_page(pp, page, allow_direct); return true; } diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 2b5a98c5cb49..ef81452759be 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -843,7 +843,7 @@ static bool skb_pp_recycle(struct sk_buff *skb, void *data, bool napi_safe) { if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle) return false; - return page_pool_return_skb_page(virt_to_page(data)); + return page_pool_return_skb_page(virt_to_page(data), napi_safe); } static void skb_kfree_head(void *head, unsigned int end_offset) @@ -889,7 +889,7 @@ static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason, } for (i = 0; i < shinfo->nr_frags; i++) - __skb_frag_unref(&shinfo->frags[i], skb->pp_recycle); + napi_frag_unref(&shinfo->frags[i], skb->pp_recycle, napi_safe); free_head: if (shinfo->frag_list) -- cgit v1.2.3-70-g09d2 From 5b8285cca6fed9bc5baabe2e5699a5a5c0d96371 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 19 Apr 2023 14:52:52 +0200 Subject: net: move dropreason.h to dropreason-core.h This will, after the next patch, hold only the core drop reasons and minimal infrastructure. Fix a small kernel-doc issue while at it, to avoid the move triggering a checker. 
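The SKB_DR_* convenience macros travel with the move; for reference, a hypothetical call site (struct foohdr and foo_deliver() are made up) looks like:

static void foo_rcv(struct sk_buff *skb)
{
	SKB_DR(reason);	/* starts as SKB_DROP_REASON_NOT_SPECIFIED */

	if (!pskb_may_pull(skb, sizeof(struct foohdr))) {
		SKB_DR_SET(reason, PKT_TOO_SMALL);
		goto drop;
	}
	if (foo_deliver(skb, &reason)) {
		/* foo_deliver() may have recorded a precise reason; if
		 * not, fall back to a generic one instead of overwriting.
		 */
		SKB_DR_OR(reason, SOCKET_RCVBUFF);
		goto drop;
	}
	return;
drop:
	kfree_skb_reason(skb, reason);
}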
Signed-off-by: Johannes Berg Signed-off-by: Jakub Kicinski --- include/linux/netdevice.h | 2 +- include/linux/skbuff.h | 2 +- include/net/dropreason-core.h | 364 ++++++++++++++++++++++++++++++++++++++++++ include/net/dropreason.h | 363 ----------------------------------------- include/net/inet_frag.h | 2 +- 5 files changed, 367 insertions(+), 366 deletions(-) create mode 100644 include/net/dropreason-core.h delete mode 100644 include/net/dropreason.h (limited to 'include/linux/netdevice.h') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 203c0df2046c..a6a3e9457d6c 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -52,7 +52,7 @@ #include #include #include -#include +#include struct netpoll_info; struct device; diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 03aa7ed076f0..eb9e7bb76fa6 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -37,7 +37,7 @@ #include #endif #include -#include +#include /** * DOC: skb checksums diff --git a/include/net/dropreason-core.h b/include/net/dropreason-core.h new file mode 100644 index 000000000000..ade6d5b9186c --- /dev/null +++ b/include/net/dropreason-core.h @@ -0,0 +1,364 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef _LINUX_DROPREASON_CORE_H +#define _LINUX_DROPREASON_CORE_H + +#define DEFINE_DROP_REASON(FN, FNe) \ + FN(NOT_SPECIFIED) \ + FN(NO_SOCKET) \ + FN(PKT_TOO_SMALL) \ + FN(TCP_CSUM) \ + FN(SOCKET_FILTER) \ + FN(UDP_CSUM) \ + FN(NETFILTER_DROP) \ + FN(OTHERHOST) \ + FN(IP_CSUM) \ + FN(IP_INHDR) \ + FN(IP_RPFILTER) \ + FN(UNICAST_IN_L2_MULTICAST) \ + FN(XFRM_POLICY) \ + FN(IP_NOPROTO) \ + FN(SOCKET_RCVBUFF) \ + FN(PROTO_MEM) \ + FN(TCP_MD5NOTFOUND) \ + FN(TCP_MD5UNEXPECTED) \ + FN(TCP_MD5FAILURE) \ + FN(SOCKET_BACKLOG) \ + FN(TCP_FLAGS) \ + FN(TCP_ZEROWINDOW) \ + FN(TCP_OLD_DATA) \ + FN(TCP_OVERWINDOW) \ + FN(TCP_OFOMERGE) \ + FN(TCP_RFC7323_PAWS) \ + FN(TCP_INVALID_SEQUENCE) \ + FN(TCP_RESET) \ + FN(TCP_INVALID_SYN) \ + FN(TCP_CLOSE) \ + FN(TCP_FASTOPEN) \ + FN(TCP_OLD_ACK) \ + FN(TCP_TOO_OLD_ACK) \ + FN(TCP_ACK_UNSENT_DATA) \ + FN(TCP_OFO_QUEUE_PRUNE) \ + FN(TCP_OFO_DROP) \ + FN(IP_OUTNOROUTES) \ + FN(BPF_CGROUP_EGRESS) \ + FN(IPV6DISABLED) \ + FN(NEIGH_CREATEFAIL) \ + FN(NEIGH_FAILED) \ + FN(NEIGH_QUEUEFULL) \ + FN(NEIGH_DEAD) \ + FN(TC_EGRESS) \ + FN(QDISC_DROP) \ + FN(CPU_BACKLOG) \ + FN(XDP) \ + FN(TC_INGRESS) \ + FN(UNHANDLED_PROTO) \ + FN(SKB_CSUM) \ + FN(SKB_GSO_SEG) \ + FN(SKB_UCOPY_FAULT) \ + FN(DEV_HDR) \ + FN(DEV_READY) \ + FN(FULL_RING) \ + FN(NOMEM) \ + FN(HDR_TRUNC) \ + FN(TAP_FILTER) \ + FN(TAP_TXFILTER) \ + FN(ICMP_CSUM) \ + FN(INVALID_PROTO) \ + FN(IP_INADDRERRORS) \ + FN(IP_INNOROUTES) \ + FN(PKT_TOO_BIG) \ + FN(DUP_FRAG) \ + FN(FRAG_REASM_TIMEOUT) \ + FN(FRAG_TOO_FAR) \ + FN(TCP_MINTTL) \ + FN(IPV6_BAD_EXTHDR) \ + FN(IPV6_NDISC_FRAG) \ + FN(IPV6_NDISC_HOP_LIMIT) \ + FN(IPV6_NDISC_BAD_CODE) \ + FN(IPV6_NDISC_BAD_OPTIONS) \ + FN(IPV6_NDISC_NS_OTHERHOST) \ + FNe(MAX) + +/** + * enum skb_drop_reason - the reasons of skb drops + * + * The reason of skb drop, which is used in kfree_skb_reason(). 
+ */ +enum skb_drop_reason { + /** + * @SKB_NOT_DROPPED_YET: skb is not dropped yet (used for no-drop case) + */ + SKB_NOT_DROPPED_YET = 0, + /** @SKB_CONSUMED: packet has been consumed */ + SKB_CONSUMED, + /** @SKB_DROP_REASON_NOT_SPECIFIED: drop reason is not specified */ + SKB_DROP_REASON_NOT_SPECIFIED, + /** @SKB_DROP_REASON_NO_SOCKET: socket not found */ + SKB_DROP_REASON_NO_SOCKET, + /** @SKB_DROP_REASON_PKT_TOO_SMALL: packet size is too small */ + SKB_DROP_REASON_PKT_TOO_SMALL, + /** @SKB_DROP_REASON_TCP_CSUM: TCP checksum error */ + SKB_DROP_REASON_TCP_CSUM, + /** @SKB_DROP_REASON_SOCKET_FILTER: dropped by socket filter */ + SKB_DROP_REASON_SOCKET_FILTER, + /** @SKB_DROP_REASON_UDP_CSUM: UDP checksum error */ + SKB_DROP_REASON_UDP_CSUM, + /** @SKB_DROP_REASON_NETFILTER_DROP: dropped by netfilter */ + SKB_DROP_REASON_NETFILTER_DROP, + /** + * @SKB_DROP_REASON_OTHERHOST: packet don't belong to current host + * (interface is in promisc mode) + */ + SKB_DROP_REASON_OTHERHOST, + /** @SKB_DROP_REASON_IP_CSUM: IP checksum error */ + SKB_DROP_REASON_IP_CSUM, + /** + * @SKB_DROP_REASON_IP_INHDR: there is something wrong with IP header (see + * IPSTATS_MIB_INHDRERRORS) + */ + SKB_DROP_REASON_IP_INHDR, + /** + * @SKB_DROP_REASON_IP_RPFILTER: IP rpfilter validate failed. see the + * document for rp_filter in ip-sysctl.rst for more information + */ + SKB_DROP_REASON_IP_RPFILTER, + /** + * @SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST: destination address of L2 is + * multicast, but L3 is unicast. + */ + SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST, + /** @SKB_DROP_REASON_XFRM_POLICY: xfrm policy check failed */ + SKB_DROP_REASON_XFRM_POLICY, + /** @SKB_DROP_REASON_IP_NOPROTO: no support for IP protocol */ + SKB_DROP_REASON_IP_NOPROTO, + /** @SKB_DROP_REASON_SOCKET_RCVBUFF: socket receive buff is full */ + SKB_DROP_REASON_SOCKET_RCVBUFF, + /** + * @SKB_DROP_REASON_PROTO_MEM: proto memory limition, such as udp packet + * drop out of udp_memory_allocated. 
+ */ + SKB_DROP_REASON_PROTO_MEM, + /** + * @SKB_DROP_REASON_TCP_MD5NOTFOUND: no MD5 hash and one expected, + * corresponding to LINUX_MIB_TCPMD5NOTFOUND + */ + SKB_DROP_REASON_TCP_MD5NOTFOUND, + /** + * @SKB_DROP_REASON_TCP_MD5UNEXPECTED: MD5 hash and we're not expecting + * one, corresponding to LINUX_MIB_TCPMD5UNEXPECTED + */ + SKB_DROP_REASON_TCP_MD5UNEXPECTED, + /** + * @SKB_DROP_REASON_TCP_MD5FAILURE: MD5 hash and its wrong, corresponding + * to LINUX_MIB_TCPMD5FAILURE + */ + SKB_DROP_REASON_TCP_MD5FAILURE, + /** + * @SKB_DROP_REASON_SOCKET_BACKLOG: failed to add skb to socket backlog ( + * see LINUX_MIB_TCPBACKLOGDROP) + */ + SKB_DROP_REASON_SOCKET_BACKLOG, + /** @SKB_DROP_REASON_TCP_FLAGS: TCP flags invalid */ + SKB_DROP_REASON_TCP_FLAGS, + /** + * @SKB_DROP_REASON_TCP_ZEROWINDOW: TCP receive window size is zero, + * see LINUX_MIB_TCPZEROWINDOWDROP + */ + SKB_DROP_REASON_TCP_ZEROWINDOW, + /** + * @SKB_DROP_REASON_TCP_OLD_DATA: the TCP data reveived is already + * received before (spurious retrans may happened), see + * LINUX_MIB_DELAYEDACKLOST + */ + SKB_DROP_REASON_TCP_OLD_DATA, + /** + * @SKB_DROP_REASON_TCP_OVERWINDOW: the TCP data is out of window, + * the seq of the first byte exceed the right edges of receive + * window + */ + SKB_DROP_REASON_TCP_OVERWINDOW, + /** + * @SKB_DROP_REASON_TCP_OFOMERGE: the data of skb is already in the ofo + * queue, corresponding to LINUX_MIB_TCPOFOMERGE + */ + SKB_DROP_REASON_TCP_OFOMERGE, + /** + * @SKB_DROP_REASON_TCP_RFC7323_PAWS: PAWS check, corresponding to + * LINUX_MIB_PAWSESTABREJECTED + */ + SKB_DROP_REASON_TCP_RFC7323_PAWS, + /** @SKB_DROP_REASON_TCP_INVALID_SEQUENCE: Not acceptable SEQ field */ + SKB_DROP_REASON_TCP_INVALID_SEQUENCE, + /** @SKB_DROP_REASON_TCP_RESET: Invalid RST packet */ + SKB_DROP_REASON_TCP_RESET, + /** + * @SKB_DROP_REASON_TCP_INVALID_SYN: Incoming packet has unexpected + * SYN flag + */ + SKB_DROP_REASON_TCP_INVALID_SYN, + /** @SKB_DROP_REASON_TCP_CLOSE: TCP socket in CLOSE state */ + SKB_DROP_REASON_TCP_CLOSE, + /** @SKB_DROP_REASON_TCP_FASTOPEN: dropped by FASTOPEN request socket */ + SKB_DROP_REASON_TCP_FASTOPEN, + /** @SKB_DROP_REASON_TCP_OLD_ACK: TCP ACK is old, but in window */ + SKB_DROP_REASON_TCP_OLD_ACK, + /** @SKB_DROP_REASON_TCP_TOO_OLD_ACK: TCP ACK is too old */ + SKB_DROP_REASON_TCP_TOO_OLD_ACK, + /** + * @SKB_DROP_REASON_TCP_ACK_UNSENT_DATA: TCP ACK for data we haven't + * sent yet + */ + SKB_DROP_REASON_TCP_ACK_UNSENT_DATA, + /** @SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE: pruned from TCP OFO queue */ + SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE, + /** @SKB_DROP_REASON_TCP_OFO_DROP: data already in receive queue */ + SKB_DROP_REASON_TCP_OFO_DROP, + /** @SKB_DROP_REASON_IP_OUTNOROUTES: route lookup failed */ + SKB_DROP_REASON_IP_OUTNOROUTES, + /** + * @SKB_DROP_REASON_BPF_CGROUP_EGRESS: dropped by BPF_PROG_TYPE_CGROUP_SKB + * eBPF program + */ + SKB_DROP_REASON_BPF_CGROUP_EGRESS, + /** @SKB_DROP_REASON_IPV6DISABLED: IPv6 is disabled on the device */ + SKB_DROP_REASON_IPV6DISABLED, + /** @SKB_DROP_REASON_NEIGH_CREATEFAIL: failed to create neigh entry */ + SKB_DROP_REASON_NEIGH_CREATEFAIL, + /** @SKB_DROP_REASON_NEIGH_FAILED: neigh entry in failed state */ + SKB_DROP_REASON_NEIGH_FAILED, + /** @SKB_DROP_REASON_NEIGH_QUEUEFULL: arp_queue for neigh entry is full */ + SKB_DROP_REASON_NEIGH_QUEUEFULL, + /** @SKB_DROP_REASON_NEIGH_DEAD: neigh entry is dead */ + SKB_DROP_REASON_NEIGH_DEAD, + /** @SKB_DROP_REASON_TC_EGRESS: dropped in TC egress HOOK */ + SKB_DROP_REASON_TC_EGRESS, + /** + * @SKB_DROP_REASON_QDISC_DROP: 
dropped by qdisc when packet outputting ( + * failed to enqueue to current qdisc) + */ + SKB_DROP_REASON_QDISC_DROP, + /** + * @SKB_DROP_REASON_CPU_BACKLOG: failed to enqueue the skb to the per CPU + * backlog queue. This can be caused by backlog queue full (see + * netdev_max_backlog in net.rst) or RPS flow limit + */ + SKB_DROP_REASON_CPU_BACKLOG, + /** @SKB_DROP_REASON_XDP: dropped by XDP in input path */ + SKB_DROP_REASON_XDP, + /** @SKB_DROP_REASON_TC_INGRESS: dropped in TC ingress HOOK */ + SKB_DROP_REASON_TC_INGRESS, + /** @SKB_DROP_REASON_UNHANDLED_PROTO: protocol not implemented or not supported */ + SKB_DROP_REASON_UNHANDLED_PROTO, + /** @SKB_DROP_REASON_SKB_CSUM: sk_buff checksum computation error */ + SKB_DROP_REASON_SKB_CSUM, + /** @SKB_DROP_REASON_SKB_GSO_SEG: gso segmentation error */ + SKB_DROP_REASON_SKB_GSO_SEG, + /** + * @SKB_DROP_REASON_SKB_UCOPY_FAULT: failed to copy data from user space, + * e.g., via zerocopy_sg_from_iter() or skb_orphan_frags_rx() + */ + SKB_DROP_REASON_SKB_UCOPY_FAULT, + /** @SKB_DROP_REASON_DEV_HDR: device driver specific header/metadata is invalid */ + SKB_DROP_REASON_DEV_HDR, + /** + * @SKB_DROP_REASON_DEV_READY: the device is not ready to xmit/recv due to + * any of its data structure that is not up/ready/initialized, + * e.g., the IFF_UP is not set, or driver specific tun->tfiles[txq] + * is not initialized + */ + SKB_DROP_REASON_DEV_READY, + /** @SKB_DROP_REASON_FULL_RING: ring buffer is full */ + SKB_DROP_REASON_FULL_RING, + /** @SKB_DROP_REASON_NOMEM: error due to OOM */ + SKB_DROP_REASON_NOMEM, + /** + * @SKB_DROP_REASON_HDR_TRUNC: failed to trunc/extract the header from + * networking data, e.g., failed to pull the protocol header from + * frags via pskb_may_pull() + */ + SKB_DROP_REASON_HDR_TRUNC, + /** + * @SKB_DROP_REASON_TAP_FILTER: dropped by (ebpf) filter directly attached + * to tun/tap, e.g., via TUNSETFILTEREBPF + */ + SKB_DROP_REASON_TAP_FILTER, + /** + * @SKB_DROP_REASON_TAP_TXFILTER: dropped by tx filter implemented at + * tun/tap, e.g., check_filter() + */ + SKB_DROP_REASON_TAP_TXFILTER, + /** @SKB_DROP_REASON_ICMP_CSUM: ICMP checksum error */ + SKB_DROP_REASON_ICMP_CSUM, + /** + * @SKB_DROP_REASON_INVALID_PROTO: the packet doesn't follow RFC 2211, + * such as a broadcasts ICMP_TIMESTAMP + */ + SKB_DROP_REASON_INVALID_PROTO, + /** + * @SKB_DROP_REASON_IP_INADDRERRORS: host unreachable, corresponding to + * IPSTATS_MIB_INADDRERRORS + */ + SKB_DROP_REASON_IP_INADDRERRORS, + /** + * @SKB_DROP_REASON_IP_INNOROUTES: network unreachable, corresponding to + * IPSTATS_MIB_INADDRERRORS + */ + SKB_DROP_REASON_IP_INNOROUTES, + /** + * @SKB_DROP_REASON_PKT_TOO_BIG: packet size is too big (maybe exceed the + * MTU) + */ + SKB_DROP_REASON_PKT_TOO_BIG, + /** @SKB_DROP_REASON_DUP_FRAG: duplicate fragment */ + SKB_DROP_REASON_DUP_FRAG, + /** @SKB_DROP_REASON_FRAG_REASM_TIMEOUT: fragment reassembly timeout */ + SKB_DROP_REASON_FRAG_REASM_TIMEOUT, + /** + * @SKB_DROP_REASON_FRAG_TOO_FAR: ipv4 fragment too far. + * (/proc/sys/net/ipv4/ipfrag_max_dist) + */ + SKB_DROP_REASON_FRAG_TOO_FAR, + /** + * @SKB_DROP_REASON_TCP_MINTTL: ipv4 ttl or ipv6 hoplimit below + * the threshold (IP_MINTTL or IPV6_MINHOPCOUNT). + */ + SKB_DROP_REASON_TCP_MINTTL, + /** @SKB_DROP_REASON_IPV6_BAD_EXTHDR: Bad IPv6 extension header. */ + SKB_DROP_REASON_IPV6_BAD_EXTHDR, + /** @SKB_DROP_REASON_IPV6_NDISC_FRAG: invalid frag (suppress_frag_ndisc). */ + SKB_DROP_REASON_IPV6_NDISC_FRAG, + /** @SKB_DROP_REASON_IPV6_NDISC_HOP_LIMIT: invalid hop limit. 
*/ + SKB_DROP_REASON_IPV6_NDISC_HOP_LIMIT, + /** @SKB_DROP_REASON_IPV6_NDISC_BAD_CODE: invalid NDISC icmp6 code. */ + SKB_DROP_REASON_IPV6_NDISC_BAD_CODE, + /** @SKB_DROP_REASON_IPV6_NDISC_BAD_OPTIONS: invalid NDISC options. */ + SKB_DROP_REASON_IPV6_NDISC_BAD_OPTIONS, + /** + * @SKB_DROP_REASON_IPV6_NDISC_NS_OTHERHOST: NEIGHBOUR SOLICITATION + * for another host. + */ + SKB_DROP_REASON_IPV6_NDISC_NS_OTHERHOST, + /** + * @SKB_DROP_REASON_MAX: the maximum of drop reason, which shouldn't be + * used as a real 'reason' + */ + SKB_DROP_REASON_MAX, +}; + +#define SKB_DR_INIT(name, reason) \ + enum skb_drop_reason name = SKB_DROP_REASON_##reason +#define SKB_DR(name) \ + SKB_DR_INIT(name, NOT_SPECIFIED) +#define SKB_DR_SET(name, reason) \ + (name = SKB_DROP_REASON_##reason) +#define SKB_DR_OR(name, reason) \ + do { \ + if (name == SKB_DROP_REASON_NOT_SPECIFIED || \ + name == SKB_NOT_DROPPED_YET) \ + SKB_DR_SET(name, reason); \ + } while (0) + +extern const char * const drop_reasons[]; + +#endif diff --git a/include/net/dropreason.h b/include/net/dropreason.h deleted file mode 100644 index c0a3ea806cd5..000000000000 --- a/include/net/dropreason.h +++ /dev/null @@ -1,363 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ - -#ifndef _LINUX_DROPREASON_H -#define _LINUX_DROPREASON_H - -#define DEFINE_DROP_REASON(FN, FNe) \ - FN(NOT_SPECIFIED) \ - FN(NO_SOCKET) \ - FN(PKT_TOO_SMALL) \ - FN(TCP_CSUM) \ - FN(SOCKET_FILTER) \ - FN(UDP_CSUM) \ - FN(NETFILTER_DROP) \ - FN(OTHERHOST) \ - FN(IP_CSUM) \ - FN(IP_INHDR) \ - FN(IP_RPFILTER) \ - FN(UNICAST_IN_L2_MULTICAST) \ - FN(XFRM_POLICY) \ - FN(IP_NOPROTO) \ - FN(SOCKET_RCVBUFF) \ - FN(PROTO_MEM) \ - FN(TCP_MD5NOTFOUND) \ - FN(TCP_MD5UNEXPECTED) \ - FN(TCP_MD5FAILURE) \ - FN(SOCKET_BACKLOG) \ - FN(TCP_FLAGS) \ - FN(TCP_ZEROWINDOW) \ - FN(TCP_OLD_DATA) \ - FN(TCP_OVERWINDOW) \ - FN(TCP_OFOMERGE) \ - FN(TCP_RFC7323_PAWS) \ - FN(TCP_INVALID_SEQUENCE) \ - FN(TCP_RESET) \ - FN(TCP_INVALID_SYN) \ - FN(TCP_CLOSE) \ - FN(TCP_FASTOPEN) \ - FN(TCP_OLD_ACK) \ - FN(TCP_TOO_OLD_ACK) \ - FN(TCP_ACK_UNSENT_DATA) \ - FN(TCP_OFO_QUEUE_PRUNE) \ - FN(TCP_OFO_DROP) \ - FN(IP_OUTNOROUTES) \ - FN(BPF_CGROUP_EGRESS) \ - FN(IPV6DISABLED) \ - FN(NEIGH_CREATEFAIL) \ - FN(NEIGH_FAILED) \ - FN(NEIGH_QUEUEFULL) \ - FN(NEIGH_DEAD) \ - FN(TC_EGRESS) \ - FN(QDISC_DROP) \ - FN(CPU_BACKLOG) \ - FN(XDP) \ - FN(TC_INGRESS) \ - FN(UNHANDLED_PROTO) \ - FN(SKB_CSUM) \ - FN(SKB_GSO_SEG) \ - FN(SKB_UCOPY_FAULT) \ - FN(DEV_HDR) \ - FN(DEV_READY) \ - FN(FULL_RING) \ - FN(NOMEM) \ - FN(HDR_TRUNC) \ - FN(TAP_FILTER) \ - FN(TAP_TXFILTER) \ - FN(ICMP_CSUM) \ - FN(INVALID_PROTO) \ - FN(IP_INADDRERRORS) \ - FN(IP_INNOROUTES) \ - FN(PKT_TOO_BIG) \ - FN(DUP_FRAG) \ - FN(FRAG_REASM_TIMEOUT) \ - FN(FRAG_TOO_FAR) \ - FN(TCP_MINTTL) \ - FN(IPV6_BAD_EXTHDR) \ - FN(IPV6_NDISC_FRAG) \ - FN(IPV6_NDISC_HOP_LIMIT) \ - FN(IPV6_NDISC_BAD_CODE) \ - FN(IPV6_NDISC_BAD_OPTIONS) \ - FN(IPV6_NDISC_NS_OTHERHOST) \ - FNe(MAX) - -/** - * enum skb_drop_reason - the reasons of skb drops - * - * The reason of skb drop, which is used in kfree_skb_reason(). 
- */ -enum skb_drop_reason { - /** - * @SKB_NOT_DROPPED_YET: skb is not dropped yet (used for no-drop case) - */ - SKB_NOT_DROPPED_YET = 0, - /** @SKB_CONSUMED: packet has been consumed */ - SKB_CONSUMED, - /** @SKB_DROP_REASON_NOT_SPECIFIED: drop reason is not specified */ - SKB_DROP_REASON_NOT_SPECIFIED, - /** @SKB_DROP_REASON_NO_SOCKET: socket not found */ - SKB_DROP_REASON_NO_SOCKET, - /** @SKB_DROP_REASON_PKT_TOO_SMALL: packet size is too small */ - SKB_DROP_REASON_PKT_TOO_SMALL, - /** @SKB_DROP_REASON_TCP_CSUM: TCP checksum error */ - SKB_DROP_REASON_TCP_CSUM, - /** @SKB_DROP_REASON_SOCKET_FILTER: dropped by socket filter */ - SKB_DROP_REASON_SOCKET_FILTER, - /** @SKB_DROP_REASON_UDP_CSUM: UDP checksum error */ - SKB_DROP_REASON_UDP_CSUM, - /** @SKB_DROP_REASON_NETFILTER_DROP: dropped by netfilter */ - SKB_DROP_REASON_NETFILTER_DROP, - /** - * @SKB_DROP_REASON_OTHERHOST: packet don't belong to current host - * (interface is in promisc mode) - */ - SKB_DROP_REASON_OTHERHOST, - /** @SKB_DROP_REASON_IP_CSUM: IP checksum error */ - SKB_DROP_REASON_IP_CSUM, - /** - * @SKB_DROP_REASON_IP_INHDR: there is something wrong with IP header (see - * IPSTATS_MIB_INHDRERRORS) - */ - SKB_DROP_REASON_IP_INHDR, - /** - * @SKB_DROP_REASON_IP_RPFILTER: IP rpfilter validate failed. see the - * document for rp_filter in ip-sysctl.rst for more information - */ - SKB_DROP_REASON_IP_RPFILTER, - /** - * @SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST: destination address of L2 is - * multicast, but L3 is unicast. - */ - SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST, - /** @SKB_DROP_REASON_XFRM_POLICY: xfrm policy check failed */ - SKB_DROP_REASON_XFRM_POLICY, - /** @SKB_DROP_REASON_IP_NOPROTO: no support for IP protocol */ - SKB_DROP_REASON_IP_NOPROTO, - /** @SKB_DROP_REASON_SOCKET_RCVBUFF: socket receive buff is full */ - SKB_DROP_REASON_SOCKET_RCVBUFF, - /** - * @SKB_DROP_REASON_PROTO_MEM: proto memory limition, such as udp packet - * drop out of udp_memory_allocated. 
- */
-	SKB_DROP_REASON_PROTO_MEM,
-	/**
-	 * @SKB_DROP_REASON_TCP_MD5NOTFOUND: no MD5 hash and one expected,
-	 * corresponding to LINUX_MIB_TCPMD5NOTFOUND
-	 */
-	SKB_DROP_REASON_TCP_MD5NOTFOUND,
-	/**
-	 * @SKB_DROP_REASON_TCP_MD5UNEXPECTED: MD5 hash and we're not expecting
-	 * one, corresponding to LINUX_MIB_TCPMD5UNEXPECTED
-	 */
-	SKB_DROP_REASON_TCP_MD5UNEXPECTED,
-	/**
-	 * @SKB_DROP_REASON_TCP_MD5FAILURE: MD5 hash and its wrong, corresponding
-	 * to LINUX_MIB_TCPMD5FAILURE
-	 */
-	SKB_DROP_REASON_TCP_MD5FAILURE,
-	/**
-	 * @SKB_DROP_REASON_SOCKET_BACKLOG: failed to add skb to socket backlog (
-	 * see LINUX_MIB_TCPBACKLOGDROP)
-	 */
-	SKB_DROP_REASON_SOCKET_BACKLOG,
-	/** @SKB_DROP_REASON_TCP_FLAGS: TCP flags invalid */
-	SKB_DROP_REASON_TCP_FLAGS,
-	/**
-	 * @SKB_DROP_REASON_TCP_ZEROWINDOW: TCP receive window size is zero,
-	 * see LINUX_MIB_TCPZEROWINDOWDROP
-	 */
-	SKB_DROP_REASON_TCP_ZEROWINDOW,
-	/**
-	 * @SKB_DROP_REASON_TCP_OLD_DATA: the TCP data reveived is already
-	 * received before (spurious retrans may happened), see
-	 * LINUX_MIB_DELAYEDACKLOST
-	 */
-	SKB_DROP_REASON_TCP_OLD_DATA,
-	/**
-	 * @SKB_DROP_REASON_TCP_OVERWINDOW: the TCP data is out of window,
-	 * the seq of the first byte exceed the right edges of receive
-	 * window
-	 */
-	SKB_DROP_REASON_TCP_OVERWINDOW,
-	/**
-	 * @SKB_DROP_REASON_TCP_OFOMERGE: the data of skb is already in the ofo
-	 * queue, corresponding to LINUX_MIB_TCPOFOMERGE
-	 */
-	SKB_DROP_REASON_TCP_OFOMERGE,
-	/**
-	 * @SKB_DROP_REASON_TCP_RFC7323_PAWS: PAWS check, corresponding to
-	 * LINUX_MIB_PAWSESTABREJECTED
-	 */
-	SKB_DROP_REASON_TCP_RFC7323_PAWS,
-	/** @SKB_DROP_REASON_TCP_INVALID_SEQUENCE: Not acceptable SEQ field */
-	SKB_DROP_REASON_TCP_INVALID_SEQUENCE,
-	/** @SKB_DROP_REASON_TCP_RESET: Invalid RST packet */
-	SKB_DROP_REASON_TCP_RESET,
-	/**
-	 * @SKB_DROP_REASON_TCP_INVALID_SYN: Incoming packet has unexpected
-	 * SYN flag
-	 */
-	SKB_DROP_REASON_TCP_INVALID_SYN,
-	/** @SKB_DROP_REASON_TCP_CLOSE: TCP socket in CLOSE state */
-	SKB_DROP_REASON_TCP_CLOSE,
-	/** @SKB_DROP_REASON_TCP_FASTOPEN: dropped by FASTOPEN request socket */
-	SKB_DROP_REASON_TCP_FASTOPEN,
-	/** @SKB_DROP_REASON_TCP_OLD_ACK: TCP ACK is old, but in window */
-	SKB_DROP_REASON_TCP_OLD_ACK,
-	/** @SKB_DROP_REASON_TCP_TOO_OLD_ACK: TCP ACK is too old */
-	SKB_DROP_REASON_TCP_TOO_OLD_ACK,
-	/**
-	 * @SKB_DROP_REASON_TCP_ACK_UNSENT_DATA: TCP ACK for data we haven't
-	 * sent yet
-	 */
-	SKB_DROP_REASON_TCP_ACK_UNSENT_DATA,
-	/** @SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE: pruned from TCP OFO queue */
-	SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE,
-	/** @SKB_DROP_REASON_TCP_OFO_DROP: data already in receive queue */
-	SKB_DROP_REASON_TCP_OFO_DROP,
-	/** @SKB_DROP_REASON_IP_OUTNOROUTES: route lookup failed */
-	SKB_DROP_REASON_IP_OUTNOROUTES,
-	/**
-	 * @SKB_DROP_REASON_BPF_CGROUP_EGRESS: dropped by BPF_PROG_TYPE_CGROUP_SKB
-	 * eBPF program
-	 */
-	SKB_DROP_REASON_BPF_CGROUP_EGRESS,
-	/** @SKB_DROP_REASON_IPV6DISABLED: IPv6 is disabled on the device */
-	SKB_DROP_REASON_IPV6DISABLED,
-	/** @SKB_DROP_REASON_NEIGH_CREATEFAIL: failed to create neigh entry */
-	SKB_DROP_REASON_NEIGH_CREATEFAIL,
-	/** @SKB_DROP_REASON_NEIGH_FAILED: neigh entry in failed state */
-	SKB_DROP_REASON_NEIGH_FAILED,
-	/** @SKB_DROP_REASON_NEIGH_QUEUEFULL: arp_queue for neigh entry is full */
-	SKB_DROP_REASON_NEIGH_QUEUEFULL,
-	/** @SKB_DROP_REASON_NEIGH_DEAD: neigh entry is dead */
-	SKB_DROP_REASON_NEIGH_DEAD,
-	/** @SKB_DROP_REASON_TC_EGRESS: dropped in TC egress HOOK */
-	SKB_DROP_REASON_TC_EGRESS,
-	/**
-	 * @SKB_DROP_REASON_QDISC_DROP: dropped by qdisc when packet outputting (
-	 * failed to enqueue to current qdisc)
-	 */
-	SKB_DROP_REASON_QDISC_DROP,
-	/**
-	 * @SKB_DROP_REASON_CPU_BACKLOG: failed to enqueue the skb to the per CPU
-	 * backlog queue. This can be caused by backlog queue full (see
-	 * netdev_max_backlog in net.rst) or RPS flow limit
-	 */
-	SKB_DROP_REASON_CPU_BACKLOG,
-	/** @SKB_DROP_REASON_XDP: dropped by XDP in input path */
-	SKB_DROP_REASON_XDP,
-	/** @SKB_DROP_REASON_TC_INGRESS: dropped in TC ingress HOOK */
-	SKB_DROP_REASON_TC_INGRESS,
-	/** @SKB_DROP_REASON_UNHANDLED_PROTO: protocol not implemented or not supported */
-	SKB_DROP_REASON_UNHANDLED_PROTO,
-	/** @SKB_DROP_REASON_SKB_CSUM: sk_buff checksum computation error */
-	SKB_DROP_REASON_SKB_CSUM,
-	/** @SKB_DROP_REASON_SKB_GSO_SEG: gso segmentation error */
-	SKB_DROP_REASON_SKB_GSO_SEG,
-	/**
-	 * @SKB_DROP_REASON_SKB_UCOPY_FAULT: failed to copy data from user space,
-	 * e.g., via zerocopy_sg_from_iter() or skb_orphan_frags_rx()
-	 */
-	SKB_DROP_REASON_SKB_UCOPY_FAULT,
-	/** @SKB_DROP_REASON_DEV_HDR: device driver specific header/metadata is invalid */
-	SKB_DROP_REASON_DEV_HDR,
-	/**
-	 * @SKB_DROP_REASON_DEV_READY: the device is not ready to xmit/recv due to
-	 * any of its data structure that is not up/ready/initialized,
-	 * e.g., the IFF_UP is not set, or driver specific tun->tfiles[txq]
-	 * is not initialized
-	 */
-	SKB_DROP_REASON_DEV_READY,
-	/** @SKB_DROP_REASON_FULL_RING: ring buffer is full */
-	SKB_DROP_REASON_FULL_RING,
-	/** @SKB_DROP_REASON_NOMEM: error due to OOM */
-	SKB_DROP_REASON_NOMEM,
-	/**
-	 * @SKB_DROP_REASON_HDR_TRUNC: failed to trunc/extract the header from
-	 * networking data, e.g., failed to pull the protocol header from
-	 * frags via pskb_may_pull()
-	 */
-	SKB_DROP_REASON_HDR_TRUNC,
-	/**
-	 * @SKB_DROP_REASON_TAP_FILTER: dropped by (ebpf) filter directly attached
-	 * to tun/tap, e.g., via TUNSETFILTEREBPF
-	 */
-	SKB_DROP_REASON_TAP_FILTER,
-	/**
-	 * @SKB_DROP_REASON_TAP_TXFILTER: dropped by tx filter implemented at
-	 * tun/tap, e.g., check_filter()
-	 */
-	SKB_DROP_REASON_TAP_TXFILTER,
-	/** @SKB_DROP_REASON_ICMP_CSUM: ICMP checksum error */
-	SKB_DROP_REASON_ICMP_CSUM,
-	/**
-	 * @SKB_DROP_REASON_INVALID_PROTO: the packet doesn't follow RFC 2211,
-	 * such as a broadcasts ICMP_TIMESTAMP
-	 */
-	SKB_DROP_REASON_INVALID_PROTO,
-	/**
-	 * @SKB_DROP_REASON_IP_INADDRERRORS: host unreachable, corresponding to
-	 * IPSTATS_MIB_INADDRERRORS
-	 */
-	SKB_DROP_REASON_IP_INADDRERRORS,
-	/**
-	 * @SKB_DROP_REASON_IP_INNOROUTES: network unreachable, corresponding to
-	 * IPSTATS_MIB_INADDRERRORS
-	 */
-	SKB_DROP_REASON_IP_INNOROUTES,
-	/**
-	 * @SKB_DROP_REASON_PKT_TOO_BIG: packet size is too big (maybe exceed the
-	 * MTU)
-	 */
-	SKB_DROP_REASON_PKT_TOO_BIG,
-	/** @SKB_DROP_REASON_DUP_FRAG: duplicate fragment */
-	SKB_DROP_REASON_DUP_FRAG,
-	/** @SKB_DROP_REASON_FRAG_REASM_TIMEOUT: fragment reassembly timeout */
-	SKB_DROP_REASON_FRAG_REASM_TIMEOUT,
-	/**
-	 * @SKB_DROP_REASON_FRAG_TOO_FAR: ipv4 fragment too far.
-	 * (/proc/sys/net/ipv4/ipfrag_max_dist)
-	 */
-	SKB_DROP_REASON_FRAG_TOO_FAR,
-	/**
-	 * @SKB_DROP_REASON_TCP_MINTTL: ipv4 ttl or ipv6 hoplimit below
-	 * the threshold (IP_MINTTL or IPV6_MINHOPCOUNT).
-	 */
-	SKB_DROP_REASON_TCP_MINTTL,
-	/** @SKB_DROP_REASON_IPV6_BAD_EXTHDR: Bad IPv6 extension header. */
-	SKB_DROP_REASON_IPV6_BAD_EXTHDR,
-	/** @SKB_DROP_REASON_IPV6_NDISC_FRAG: invalid frag (suppress_frag_ndisc). */
-	SKB_DROP_REASON_IPV6_NDISC_FRAG,
-	/** @SKB_DROP_REASON_IPV6_NDISC_HOP_LIMIT: invalid hop limit. */
-	SKB_DROP_REASON_IPV6_NDISC_HOP_LIMIT,
-	/** @SKB_DROP_REASON_IPV6_NDISC_BAD_CODE: invalid NDISC icmp6 code. */
-	SKB_DROP_REASON_IPV6_NDISC_BAD_CODE,
-	/** @SKB_DROP_REASON_IPV6_NDISC_BAD_OPTIONS: invalid NDISC options. */
-	SKB_DROP_REASON_IPV6_NDISC_BAD_OPTIONS,
-	/** @SKB_DROP_REASON_IPV6_NDISC_NS_OTHERHOST: NEIGHBOUR SOLICITATION
-	 * for another host.
-	 */
-	SKB_DROP_REASON_IPV6_NDISC_NS_OTHERHOST,
-	/**
-	 * @SKB_DROP_REASON_MAX: the maximum of drop reason, which shouldn't be
-	 * used as a real 'reason'
-	 */
-	SKB_DROP_REASON_MAX,
-};
-
-#define SKB_DR_INIT(name, reason)				\
-	enum skb_drop_reason name = SKB_DROP_REASON_##reason
-#define SKB_DR(name)						\
-	SKB_DR_INIT(name, NOT_SPECIFIED)
-#define SKB_DR_SET(name, reason)				\
-	(name = SKB_DROP_REASON_##reason)
-#define SKB_DR_OR(name, reason)					\
-	do {							\
-		if (name == SKB_DROP_REASON_NOT_SPECIFIED ||	\
-		    name == SKB_NOT_DROPPED_YET)		\
-			SKB_DR_SET(name, reason);		\
-	} while (0)
-
-extern const char * const drop_reasons[];
-
-#endif
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index b23ddec3cd5c..325ad893f624 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -7,7 +7,7 @@
 #include
 #include
 #include
-#include
+#include

 /* Per netns frag queues directory */
 struct fqdir {
-- cgit v1.2.3-70-g09d2
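
A note on the SKB_DR helpers deleted above: the include swap in inet_frag.h suggests this hunk is the tail of a header reshuffle, so the helpers appear to move rather than disappear. As orientation, a minimal usage sketch follows; example_rx() and its single check are invented, while SKB_DR(), SKB_DR_SET() and kfree_skb_reason() are the real helpers, assuming they remain available after the move:

	static void example_rx(struct sk_buff *skb)
	{
		SKB_DR(reason);		/* SKB_DROP_REASON_NOT_SPECIFIED until refined */

		if (!pskb_may_pull(skb, sizeof(struct ethhdr))) {
			SKB_DR_SET(reason, HDR_TRUNC);	/* header shorter than claimed */
			goto drop;
		}

		consume_skb(skb);	/* delivered: consumed, not dropped */
		return;
	drop:
		kfree_skb_reason(skb, reason);	/* reason surfaces in the kfree_skb tracepoint */
	}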
From 87eff2ec57b6d68d294013d8dd21e839a1175e3a Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Fri, 21 Apr 2023 09:43:57 +0000
Subject: net: optimize napi_threaded_poll() vs RPS/RFS

We use napi_threaded_poll() in order to reduce our softirq dependency.

We can add a followup of 821eba962d95 ("net: optimize napi_schedule_rps()")
to further remove the need to fire NET_RX_SOFTIRQ whenever RPS/RFS
are used.

Signed-off-by: Eric Dumazet
Acked-by: Paolo Abeni
Signed-off-by: David S. Miller
---
 include/linux/netdevice.h |  3 +++
 net/core/dev.c            | 12 ++++++++++--
 2 files changed, 13 insertions(+), 2 deletions(-)

(limited to 'include/linux/netdevice.h')

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index a6a3e9457d6c..08fbd4622ccf 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3194,7 +3194,10 @@ struct softnet_data {
 #ifdef CONFIG_RPS
 	struct softnet_data	*rps_ipi_list;
 #endif
+
 	bool			in_net_rx_action;
+	bool			in_napi_threaded_poll;
+
 #ifdef CONFIG_NET_FLOW_LIMIT
 	struct sd_flow_limit __rcu *flow_limit;
 #endif
diff --git a/net/core/dev.c b/net/core/dev.c
index 7d9ec23f97c6..735096d42c1d 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4603,10 +4603,10 @@ static void napi_schedule_rps(struct softnet_data *sd)
 		sd->rps_ipi_next = mysd->rps_ipi_list;
 		mysd->rps_ipi_list = sd;

-		/* If not called from net_rx_action()
+		/* If not called from net_rx_action() or napi_threaded_poll()
 		 * we have to raise NET_RX_SOFTIRQ.
 		 */
-		if (!mysd->in_net_rx_action)
+		if (!mysd->in_net_rx_action && !mysd->in_napi_threaded_poll)
 			__raise_softirq_irqoff(NET_RX_SOFTIRQ);
 		return;
 	}
@@ -6631,11 +6631,19 @@ static int napi_threaded_poll(void *data)

 		local_bh_disable();
 		sd = this_cpu_ptr(&softnet_data);
+		sd->in_napi_threaded_poll = true;

 		have = netpoll_poll_lock(napi);
 		__napi_poll(napi, &repoll);
 		netpoll_poll_unlock(have);

+		sd->in_napi_threaded_poll = false;
+		barrier();
+
+		if (sd_has_rps_ipi_waiting(sd)) {
+			local_irq_disable();
+			net_rps_action_and_irq_enable(sd);
+		}
 		skb_defer_free_flush(sd);
 		local_bh_enable();
-- cgit v1.2.3-70-g09d2
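
The flag protocol above is easy to miss in diff form, so here is the consumer side condensed into one hypothetical helper; ex_poll_once() is invented, the flag and the two dev.c helpers come from the patch. Since napi_schedule_rps() only manipulates the current CPU's softnet_data on this path, a compiler barrier() suffices to order the flag clear before the final list check; no smp_mb() is needed:

	static void ex_poll_once(struct napi_struct *napi, struct softnet_data *sd)
	{
		sd->in_napi_threaded_poll = true;	/* producers may skip the softirq kick */

		/* ... __napi_poll() runs here and may queue entries on rps_ipi_list ... */

		sd->in_napi_threaded_poll = false;
		barrier();			/* clear the flag before checking the list */

		if (sd_has_rps_ipi_waiting(sd)) {	/* anything queued while flagged? */
			local_irq_disable();
			net_rps_action_and_irq_enable(sd);	/* send the IPIs ourselves */
		}
	}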
From 5c3b74a92aa285a3df722bf6329ba7ccf70346d6 Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Tue, 6 Jun 2023 07:41:15 +0000
Subject: rfs: annotate lockless accesses to RFS sock flow table

Add READ_ONCE()/WRITE_ONCE() on accesses to the sock flow table.

This also prevents a (smart?) compiler from removing the condition in:

	if (table->ents[index] != newval)
		table->ents[index] = newval;

We need the condition to avoid dirtying a shared cache line.

Fixes: fec5e652e58f ("rfs: Receive Flow Steering")
Signed-off-by: Eric Dumazet
Reviewed-by: Simon Horman
Reviewed-by: Kuniyuki Iwashima
Signed-off-by: David S. Miller
---
 include/linux/netdevice.h | 7 +++++--
 net/core/dev.c            | 6 ++++--
 2 files changed, 9 insertions(+), 4 deletions(-)

(limited to 'include/linux/netdevice.h')

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 08fbd4622ccf..e6f22b7403d0 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -768,8 +768,11 @@ static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
 		/* We only give a hint, preemption can change CPU under us */
 		val |= raw_smp_processor_id();

-		if (table->ents[index] != val)
-			table->ents[index] = val;
+		/* The following WRITE_ONCE() is paired with the READ_ONCE()
+		 * here, and another one in get_rps_cpu().
+		 */
+		if (READ_ONCE(table->ents[index]) != val)
+			WRITE_ONCE(table->ents[index], val);
 	}
 }

diff --git a/net/core/dev.c b/net/core/dev.c
index b3c13e041935..1495f8aff288 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4471,8 +4471,10 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 		u32 next_cpu;
 		u32 ident;

-		/* First check into global flow table if there is a match */
-		ident = sock_flow_table->ents[hash & sock_flow_table->mask];
+		/* First check into global flow table if there is a match.
+		 * This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow().
+		 */
+		ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]);
 		if ((ident ^ hash) & ~rps_cpu_mask)
 			goto try_rps;
-- cgit v1.2.3-70-g09d2
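
For readers outside the RPS code, the pattern being annotated is worth spelling out: the flow table is a lossy, lockless hint cache, written and read concurrently with no lock. A small sketch with an invented hint_table type; only READ_ONCE()/WRITE_ONCE() and the reasoning in the comment come from the patch:

	struct hint_table {
		u32 ents[256];
	};

	static void record_hint(struct hint_table *t, u32 index, u32 val)
	{
		/* Both sides must go through READ_ONCE()/WRITE_ONCE():
		 * otherwise the compiler may tear the accesses, or fold the
		 * test and store into one unconditional write, dirtying a
		 * cache line shared with readers even when nothing changed.
		 */
		if (READ_ONCE(t->ents[index]) != val)
			WRITE_ONCE(t->ents[index], val);
	}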
From d636fc5dd692c8f4e00ae6e0359c0eceeb5d9bdb Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Tue, 6 Jun 2023 11:19:29 +0000
Subject: net: sched: add rcu annotations around qdisc->qdisc_sleeping

syzbot reported a race around qdisc->qdisc_sleeping [1]

It is time we add proper annotations to reads and writes to/from
qdisc->qdisc_sleeping.

[1]
BUG: KCSAN: data-race in dev_graft_qdisc / qdisc_lookup_rcu

read to 0xffff8881286fc618 of 8 bytes by task 6928 on cpu 1:
 qdisc_lookup_rcu+0x192/0x2c0 net/sched/sch_api.c:331
 __tcf_qdisc_find+0x74/0x3c0 net/sched/cls_api.c:1174
 tc_get_tfilter+0x18f/0x990 net/sched/cls_api.c:2547
 rtnetlink_rcv_msg+0x7af/0x8c0 net/core/rtnetlink.c:6386
 netlink_rcv_skb+0x126/0x220 net/netlink/af_netlink.c:2546
 rtnetlink_rcv+0x1c/0x20 net/core/rtnetlink.c:6413
 netlink_unicast_kernel net/netlink/af_netlink.c:1339 [inline]
 netlink_unicast+0x56f/0x640 net/netlink/af_netlink.c:1365
 netlink_sendmsg+0x665/0x770 net/netlink/af_netlink.c:1913
 sock_sendmsg_nosec net/socket.c:724 [inline]
 sock_sendmsg net/socket.c:747 [inline]
 ____sys_sendmsg+0x375/0x4c0 net/socket.c:2503
 ___sys_sendmsg net/socket.c:2557 [inline]
 __sys_sendmsg+0x1e3/0x270 net/socket.c:2586
 __do_sys_sendmsg net/socket.c:2595 [inline]
 __se_sys_sendmsg net/socket.c:2593 [inline]
 __x64_sys_sendmsg+0x46/0x50 net/socket.c:2593
 do_syscall_x64 arch/x86/entry/common.c:50 [inline]
 do_syscall_64+0x41/0xc0 arch/x86/entry/common.c:80
 entry_SYSCALL_64_after_hwframe+0x63/0xcd

write to 0xffff8881286fc618 of 8 bytes by task 6912 on cpu 0:
 dev_graft_qdisc+0x4f/0x80 net/sched/sch_generic.c:1115
 qdisc_graft+0x7d0/0xb60 net/sched/sch_api.c:1103
 tc_modify_qdisc+0x712/0xf10 net/sched/sch_api.c:1693
 rtnetlink_rcv_msg+0x807/0x8c0 net/core/rtnetlink.c:6395
 netlink_rcv_skb+0x126/0x220 net/netlink/af_netlink.c:2546
 rtnetlink_rcv+0x1c/0x20 net/core/rtnetlink.c:6413
 netlink_unicast_kernel net/netlink/af_netlink.c:1339 [inline]
 netlink_unicast+0x56f/0x640 net/netlink/af_netlink.c:1365
 netlink_sendmsg+0x665/0x770 net/netlink/af_netlink.c:1913
 sock_sendmsg_nosec net/socket.c:724 [inline]
 sock_sendmsg net/socket.c:747 [inline]
 ____sys_sendmsg+0x375/0x4c0 net/socket.c:2503
 ___sys_sendmsg net/socket.c:2557 [inline]
 __sys_sendmsg+0x1e3/0x270 net/socket.c:2586
 __do_sys_sendmsg net/socket.c:2595 [inline]
 __se_sys_sendmsg net/socket.c:2593 [inline]
 __x64_sys_sendmsg+0x46/0x50 net/socket.c:2593
 do_syscall_x64 arch/x86/entry/common.c:50 [inline]
 do_syscall_64+0x41/0xc0 arch/x86/entry/common.c:80
 entry_SYSCALL_64_after_hwframe+0x63/0xcd

Reported by Kernel Concurrency Sanitizer on:
CPU: 0 PID: 6912 Comm: syz-executor.5 Not tainted 6.4.0-rc3-syzkaller-00190-g0d85b27b0cc6 #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 05/16/2023

Fixes: 3a7d0d07a386 ("net: sched: extend Qdisc with rcu")
Reported-by: syzbot
Signed-off-by: Eric Dumazet
Cc: Vlad Buslov
Acked-by: Jamal Hadi Salim
Signed-off-by: David S. Miller
---
 include/linux/netdevice.h |  2 +-
 include/net/sch_generic.h |  6 ++++--
 net/core/dev.c            |  2 +-
 net/sched/sch_api.c       | 26 ++++++++++++++++----------
 net/sched/sch_fq_pie.c    |  2 ++
 net/sched/sch_generic.c   | 30 +++++++++++++++---------------
 net/sched/sch_mq.c        |  8 ++++----
 net/sched/sch_mqprio.c    |  8 ++++----
 net/sched/sch_pie.c       |  5 ++++-
 net/sched/sch_red.c       |  5 ++++-
 net/sched/sch_sfq.c       |  5 ++++-
 net/sched/sch_taprio.c    |  6 +++---
 net/sched/sch_teql.c      |  2 +-
 13 files changed, 63 insertions(+), 44 deletions(-)

(limited to 'include/linux/netdevice.h')

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e6f22b7403d0..c2f0c6002a84 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -620,7 +620,7 @@ struct netdev_queue {
 	netdevice_tracker	dev_tracker;

 	struct Qdisc __rcu	*qdisc;
-	struct Qdisc		*qdisc_sleeping;
+	struct Qdisc __rcu	*qdisc_sleeping;
 #ifdef CONFIG_SYSFS
 	struct kobject		kobj;
 #endif
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index fab5ba3e61b7..27271f2b37cb 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -545,7 +545,7 @@ static inline struct Qdisc *qdisc_root_bh(const struct Qdisc *qdisc)

 static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
 {
-	return qdisc->dev_queue->qdisc_sleeping;
+	return rcu_dereference_rtnl(qdisc->dev_queue->qdisc_sleeping);
 }

 static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
@@ -754,7 +754,9 @@ static inline bool qdisc_tx_changing(const struct net_device *dev)

 	for (i = 0; i < dev->num_tx_queues; i++) {
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-		if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
+
+		if (rcu_access_pointer(txq->qdisc) !=
+		    rcu_access_pointer(txq->qdisc_sleeping))
 			return true;
 	}
 	return false;
diff --git a/net/core/dev.c b/net/core/dev.c
index 1495f8aff288..c29f3e1db3ca 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -10543,7 +10543,7 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
 		return NULL;
 	netdev_init_one_queue(dev, queue, NULL);
 	RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
-	queue->qdisc_sleeping = &noop_qdisc;
+	RCU_INIT_POINTER(queue->qdisc_sleeping, &noop_qdisc);
 	rcu_assign_pointer(dev->ingress_queue, queue);
 #endif
 	return queue;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 9ea51812b9cf..e4b6452318c0 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -309,7 +309,7 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)

 	if (dev_ingress_queue(dev))
 		q = qdisc_match_from_root(
-			dev_ingress_queue(dev)->qdisc_sleeping,
+			rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping),
 			handle);
 out:
 	return q;
@@ -328,7 +328,8 @@ struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)

 	nq = dev_ingress_queue_rcu(dev);
 	if (nq)
-		q = qdisc_match_from_root(nq->qdisc_sleeping, handle);
+		q = qdisc_match_from_root(rcu_dereference(nq->qdisc_sleeping),
+					  handle);
 out:
 	return q;
 }
@@ -634,8 +635,13 @@ EXPORT_SYMBOL(qdisc_watchdog_init);
 void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires,
 				      u64 delta_ns)
 {
-	if (test_bit(__QDISC_STATE_DEACTIVATED,
-		     &qdisc_root_sleeping(wd->qdisc)->state))
+	bool deactivated;
+
+	rcu_read_lock();
+	deactivated = test_bit(__QDISC_STATE_DEACTIVATED,
+			       &qdisc_root_sleeping(wd->qdisc)->state);
+	rcu_read_unlock();
+	if (deactivated)
 		return;

 	if (hrtimer_is_queued(&wd->timer)) {
@@ -1478,7 +1484,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
 			}
 			q = qdisc_leaf(p, clid);
 		} else if (dev_ingress_queue(dev)) {
-			q = dev_ingress_queue(dev)->qdisc_sleeping;
+			q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping);
 		}
 	} else {
 		q = rtnl_dereference(dev->qdisc);
@@ -1564,7 +1570,7 @@ replay:
 			}
 			q = qdisc_leaf(p, clid);
 		} else if (dev_ingress_queue_create(dev)) {
-			q = dev_ingress_queue(dev)->qdisc_sleeping;
+			q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping);
 		}
 	} else {
 		q = rtnl_dereference(dev->qdisc);
@@ -1805,8 +1811,8 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)

 		dev_queue = dev_ingress_queue(dev);
 		if (dev_queue &&
-		    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
-				       &q_idx, s_q_idx, false,
+		    tc_dump_qdisc_root(rtnl_dereference(dev_queue->qdisc_sleeping),
+				       skb, cb, &q_idx, s_q_idx, false,
 				       tca[TCA_DUMP_INVISIBLE]) < 0)
 			goto done;

@@ -2249,8 +2255,8 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)

 	dev_queue = dev_ingress_queue(dev);
 	if (dev_queue &&
-	    tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
-				&t, s_t, false) < 0)
+	    tc_dump_tclass_root(rtnl_dereference(dev_queue->qdisc_sleeping),
+				skb, tcm, cb, &t, s_t, false) < 0)
 		goto done;

done:
diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c
index c699e5095607..591d87d5e5c0 100644
--- a/net/sched/sch_fq_pie.c
+++ b/net/sched/sch_fq_pie.c
@@ -379,6 +379,7 @@ static void fq_pie_timer(struct timer_list *t)
 	spinlock_t *root_lock; /* to lock qdisc for probability calculations */
 	u32 idx;

+	rcu_read_lock();
 	root_lock = qdisc_lock(qdisc_root_sleeping(sch));
 	spin_lock(root_lock);

@@ -391,6 +392,7 @@ static void fq_pie_timer(struct timer_list *t)
 		mod_timer(&q->adapt_timer, jiffies + q->p_params.tupdate);
 	spin_unlock(root_lock);
+	rcu_read_unlock();
 }

 static int fq_pie_init(struct Qdisc *sch, struct nlattr *opt,
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 37e41f972f69..3248259eba32 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -648,7 +648,7 @@ struct Qdisc_ops noop_qdisc_ops __read_mostly = {

 static struct netdev_queue noop_netdev_queue = {
 	RCU_POINTER_INITIALIZER(qdisc, &noop_qdisc),
-	.qdisc_sleeping	=	&noop_qdisc,
+	RCU_POINTER_INITIALIZER(qdisc_sleeping, &noop_qdisc),
 };

 struct Qdisc noop_qdisc = {
@@ -1103,7 +1103,7 @@ EXPORT_SYMBOL(qdisc_put_unlocked);
 struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
 			      struct Qdisc *qdisc)
 {
-	struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
+	struct Qdisc *oqdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
 	spinlock_t *root_lock;

 	root_lock = qdisc_lock(oqdisc);
@@ -1112,7 +1112,7 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
 	/* ... and graft new one */
 	if (qdisc == NULL)
 		qdisc = &noop_qdisc;
-	dev_queue->qdisc_sleeping = qdisc;
+	rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
 	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);

 	spin_unlock_bh(root_lock);
@@ -1125,12 +1125,12 @@ static void shutdown_scheduler_queue(struct net_device *dev,
 				     struct netdev_queue *dev_queue,
 				     void *_qdisc_default)
 {
-	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
+	struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
 	struct Qdisc *qdisc_default = _qdisc_default;

 	if (qdisc) {
 		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
-		dev_queue->qdisc_sleeping = qdisc_default;
+		rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc_default);

 		qdisc_put(qdisc);
 	}
@@ -1154,7 +1154,7 @@ static void attach_one_default_qdisc(struct net_device *dev,

 	if (!netif_is_multiqueue(dev))
 		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
-	dev_queue->qdisc_sleeping = qdisc;
+	rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
 }

 static void attach_default_qdiscs(struct net_device *dev)
@@ -1167,7 +1167,7 @@ static void attach_default_qdiscs(struct net_device *dev)
 	if (!netif_is_multiqueue(dev) ||
 	    dev->priv_flags & IFF_NO_QUEUE) {
 		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
-		qdisc = txq->qdisc_sleeping;
+		qdisc = rtnl_dereference(txq->qdisc_sleeping);
 		rcu_assign_pointer(dev->qdisc, qdisc);
 		qdisc_refcount_inc(qdisc);
 	} else {
@@ -1186,7 +1186,7 @@ static void attach_default_qdiscs(struct net_device *dev)
 		netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
 		dev->priv_flags |= IFF_NO_QUEUE;
 		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
-		qdisc = txq->qdisc_sleeping;
+		qdisc = rtnl_dereference(txq->qdisc_sleeping);
 		rcu_assign_pointer(dev->qdisc, qdisc);
 		qdisc_refcount_inc(qdisc);
 		dev->priv_flags ^= IFF_NO_QUEUE;
@@ -1202,7 +1202,7 @@ static void transition_one_qdisc(struct net_device *dev,
 				 struct netdev_queue *dev_queue,
 				 void *_need_watchdog)
 {
-	struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
+	struct Qdisc *new_qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
 	int *need_watchdog_p = _need_watchdog;

 	if (!(new_qdisc->flags & TCQ_F_BUILTIN))
@@ -1272,7 +1272,7 @@ static void dev_reset_queue(struct net_device *dev,
 	struct Qdisc *qdisc;
 	bool nolock;

-	qdisc = dev_queue->qdisc_sleeping;
+	qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
 	if (!qdisc)
 		return;

@@ -1303,7 +1303,7 @@ static bool some_qdisc_is_busy(struct net_device *dev)
 		int val;

 		dev_queue = netdev_get_tx_queue(dev, i);
-		q = dev_queue->qdisc_sleeping;
+		q = rtnl_dereference(dev_queue->qdisc_sleeping);

 		root_lock = qdisc_lock(q);
 		spin_lock_bh(root_lock);
@@ -1379,7 +1379,7 @@ EXPORT_SYMBOL(dev_deactivate);
 static int qdisc_change_tx_queue_len(struct net_device *dev,
 				     struct netdev_queue *dev_queue)
 {
-	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
+	struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
 	const struct Qdisc_ops *ops = qdisc->ops;

 	if (ops->change_tx_queue_len)
@@ -1404,7 +1404,7 @@ void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx)
 	unsigned int i;

 	for (i = new_real_tx; i < dev->real_num_tx_queues; i++) {
-		qdisc = netdev_get_tx_queue(dev, i)->qdisc_sleeping;
+		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping);
 		/* Only update the default qdiscs we created,
 		 * qdiscs with handles are always hashed.
 		 */
@@ -1412,7 +1412,7 @@ void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx)
 			qdisc_hash_del(qdisc);
 	}
 	for (i = dev->real_num_tx_queues; i < new_real_tx; i++) {
-		qdisc = netdev_get_tx_queue(dev, i)->qdisc_sleeping;
+		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping);
 		if (qdisc != &noop_qdisc && !qdisc->handle)
 			qdisc_hash_add(qdisc, false);
 	}
@@ -1449,7 +1449,7 @@ static void dev_init_scheduler_queue(struct net_device *dev,
 	struct Qdisc *qdisc = _qdisc;

 	rcu_assign_pointer(dev_queue->qdisc, qdisc);
-	dev_queue->qdisc_sleeping = qdisc;
+	rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
 }

 void dev_init_scheduler(struct net_device *dev)
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index d0bc660d7401..c860119a8f09 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -141,7 +141,7 @@ static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
 	 * qdisc totals are added at end.
 	 */
 	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
-		qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
+		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, ntx)->qdisc_sleeping);
 		spin_lock_bh(qdisc_lock(qdisc));

 		gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats,
@@ -202,7 +202,7 @@ static struct Qdisc *mq_leaf(struct Qdisc *sch, unsigned long cl)
 {
 	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

-	return dev_queue->qdisc_sleeping;
+	return rtnl_dereference(dev_queue->qdisc_sleeping);
 }

 static unsigned long mq_find(struct Qdisc *sch, u32 classid)
@@ -221,7 +221,7 @@ static int mq_dump_class(struct Qdisc *sch, unsigned long cl,

 	tcm->tcm_parent = TC_H_ROOT;
 	tcm->tcm_handle |= TC_H_MIN(cl);
-	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
+	tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle;
 	return 0;
 }

@@ -230,7 +230,7 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 {
 	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

-	sch = dev_queue->qdisc_sleeping;
+	sch = rtnl_dereference(dev_queue->qdisc_sleeping);
 	if (gnet_stats_copy_basic(d, sch->cpu_bstats, &sch->bstats, true) < 0 ||
 	    qdisc_qstats_copy(d, sch) < 0)
 		return -1;
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index dc5a0ff50b14..ab69ff7577fc 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -557,7 +557,7 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
 	 * qdisc totals are added at end.
 	 */
 	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
-		qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
+		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, ntx)->qdisc_sleeping);
 		spin_lock_bh(qdisc_lock(qdisc));

 		gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats,
@@ -604,7 +604,7 @@ static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
 	if (!dev_queue)
 		return NULL;

-	return dev_queue->qdisc_sleeping;
+	return rtnl_dereference(dev_queue->qdisc_sleeping);
 }

 static unsigned long mqprio_find(struct Qdisc *sch, u32 classid)
@@ -637,7 +637,7 @@ static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
 		tcm->tcm_parent = (tc < 0) ? 0 :
 			TC_H_MAKE(TC_H_MAJ(sch->handle),
 				  TC_H_MIN(tc + TC_H_MIN_PRIORITY));
-		tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
+		tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle;
 	} else {
 		tcm->tcm_parent = TC_H_ROOT;
 		tcm->tcm_info = 0;
@@ -693,7 +693,7 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 	} else {
 		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

-		sch = dev_queue->qdisc_sleeping;
+		sch = rtnl_dereference(dev_queue->qdisc_sleeping);
 		if (gnet_stats_copy_basic(d, sch->cpu_bstats,
 					  &sch->bstats, true) < 0 ||
 		    qdisc_qstats_copy(d, sch) < 0)
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index 2152a56d73f8..2da6250ec346 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -421,8 +421,10 @@ static void pie_timer(struct timer_list *t)
 {
 	struct pie_sched_data *q = from_timer(q, t, adapt_timer);
 	struct Qdisc *sch = q->sch;
-	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+	spinlock_t *root_lock;

+	rcu_read_lock();
+	root_lock = qdisc_lock(qdisc_root_sleeping(sch));
 	spin_lock(root_lock);
 	pie_calculate_probability(&q->params, &q->vars, sch->qstats.backlog);

@@ -430,6 +432,7 @@ static void pie_timer(struct timer_list *t)
 	if (q->params.tupdate)
 		mod_timer(&q->adapt_timer, jiffies + q->params.tupdate);
 	spin_unlock(root_lock);
+	rcu_read_unlock();
 }

 static int pie_init(struct Qdisc *sch, struct nlattr *opt,
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 98129324e157..16277b6a0238 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -321,12 +321,15 @@ static inline void red_adaptative_timer(struct timer_list *t)
 {
 	struct red_sched_data *q = from_timer(q, t, adapt_timer);
 	struct Qdisc *sch = q->sch;
-	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+	spinlock_t *root_lock;

+	rcu_read_lock();
+	root_lock = qdisc_lock(qdisc_root_sleeping(sch));
 	spin_lock(root_lock);
 	red_adaptative_algo(&q->parms, &q->vars);
 	mod_timer(&q->adapt_timer, jiffies + HZ/2);
 	spin_unlock(root_lock);
+	rcu_read_unlock();
 }

 static int red_init(struct Qdisc *sch, struct nlattr *opt,
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index abd436307d6a..66dcb18638fe 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -606,10 +606,12 @@ static void sfq_perturbation(struct timer_list *t)
 {
 	struct sfq_sched_data *q = from_timer(q, t, perturb_timer);
 	struct Qdisc *sch = q->sch;
-	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+	spinlock_t *root_lock;
 	siphash_key_t nkey;

 	get_random_bytes(&nkey, sizeof(nkey));
+	rcu_read_lock();
+	root_lock = qdisc_lock(qdisc_root_sleeping(sch));
 	spin_lock(root_lock);
 	q->perturbation = nkey;
 	if (!q->filter_list && q->tail)
@@ -618,6 +620,7 @@ static void sfq_perturbation(struct timer_list *t)

 	if (q->perturb_period)
 		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
+	rcu_read_unlock();
 }

 static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 76db9a10ef50..dd7dea2f6e83 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -2358,7 +2358,7 @@ static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
 	if (!dev_queue)
 		return NULL;

-	return dev_queue->qdisc_sleeping;
+	return rtnl_dereference(dev_queue->qdisc_sleeping);
 }

 static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
@@ -2377,7 +2377,7 @@ static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,

 	tcm->tcm_parent = TC_H_ROOT;
 	tcm->tcm_handle |= TC_H_MIN(cl);
-	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
+	tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle;

 	return 0;
 }
@@ -2389,7 +2389,7 @@ static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 {
 	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

-	sch = dev_queue->qdisc_sleeping;
+	sch = rtnl_dereference(dev_queue->qdisc_sleeping);
 	if (gnet_stats_copy_basic(d, NULL, &sch->bstats, true) < 0 ||
 	    qdisc_qstats_copy(d, sch) < 0)
 		return -1;
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 16f9238aa51d..7721239c185f 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -297,7 +297,7 @@ restart:
 		struct net_device *slave = qdisc_dev(q);
 		struct netdev_queue *slave_txq = netdev_get_tx_queue(slave, 0);

-		if (slave_txq->qdisc_sleeping != q)
+		if (rcu_access_pointer(slave_txq->qdisc_sleeping) != q)
 			continue;
 		if (netif_xmit_stopped(netdev_get_tx_queue(slave, subq)) ||
 		    !netif_running(slave)) {
-- cgit v1.2.3-70-g09d2
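
Taken together, the patch funnels every access to the now __rcu-annotated pointer through one of three accessors. A condensed sketch of the resulting discipline, with an invented struct ex_queue; the accessors themselves (rcu_assign_pointer(), rtnl_dereference(), rcu_dereference()) are the standard kernel primitives used in the hunks above:

	struct ex_queue {
		struct Qdisc __rcu *qdisc_sleeping;	/* written only under RTNL */
	};

	/* Writer, RTNL held: publish the new qdisc with release semantics. */
	static void ex_graft(struct ex_queue *q, struct Qdisc *new_qdisc)
	{
		rcu_assign_pointer(q->qdisc_sleeping, new_qdisc);
	}

	/* Reader on the slow control path, RTNL held. */
	static struct Qdisc *ex_leaf(struct ex_queue *q)
	{
		return rtnl_dereference(q->qdisc_sleeping);
	}

	/* Lockless reader, e.g. qdisc_lookup_rcu(), inside rcu_read_lock(). */
	static struct Qdisc *ex_lookup(struct ex_queue *q)
	{
		return rcu_dereference(q->qdisc_sleeping);
	}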