path: root/net/ipv6/mcast.c
author     Arnaldo Carvalho de Melo <acme@redhat.com>  2022-03-14 19:15:16 -0300
committer  Arnaldo Carvalho de Melo <acme@redhat.com>  2022-03-14 19:15:16 -0300
commit     65eab2bc7dab326ee892ec5a4c749470b368b51a (patch)
tree       341189a55a3d021db7f1c8a8e5b4772b6c782c25 /net/ipv6/mcast.c
parent     f693dac4794fae99c04f75a3a1a5c4018bb33144 (diff)
parent     09688c0166e76ce2fb85e86b9d99be8b0084cdf9 (diff)
Merge remote-tracking branch 'torvalds/master' into perf/core
To pick up fixes that went thru perf/urgent.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'net/ipv6/mcast.c')
-rw-r--r--  net/ipv6/mcast.c  32
1 file changed, 12 insertions, 20 deletions
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index a8861db52c18..909f937befd7 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1371,27 +1371,23 @@ static void mld_process_v2(struct inet6_dev *idev, struct mld2_query *mld,
 }
 
 /* called with rcu_read_lock() */
-int igmp6_event_query(struct sk_buff *skb)
+void igmp6_event_query(struct sk_buff *skb)
 {
 	struct inet6_dev *idev = __in6_dev_get(skb->dev);
 
-	if (!idev)
-		return -EINVAL;
-
-	if (idev->dead) {
-		kfree_skb(skb);
-		return -ENODEV;
-	}
+	if (!idev || idev->dead)
+		goto out;
 
 	spin_lock_bh(&idev->mc_query_lock);
 	if (skb_queue_len(&idev->mc_query_queue) < MLD_MAX_SKBS) {
 		__skb_queue_tail(&idev->mc_query_queue, skb);
 		if (!mod_delayed_work(mld_wq, &idev->mc_query_work, 0))
 			in6_dev_hold(idev);
+		skb = NULL;
 	}
 	spin_unlock_bh(&idev->mc_query_lock);
-
-	return 0;
+out:
+	kfree_skb(skb);
 }
 
 static void __mld_query_work(struct sk_buff *skb)
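The hunk above replaces the old error-return paths with a single-exit ownership rule: the skb is either handed to idev->mc_query_queue (after which the local pointer is cleared) or freed exactly once at the out label, and kfree_skb(NULL) is a no-op for the queued case. Below is a minimal user-space sketch of that pattern, not kernel code; struct pkt, queue_full, enqueue() and event_query() are hypothetical stand-ins for the kernel objects.

/*
 * Minimal user-space sketch (not kernel code) of the single-exit ownership
 * pattern used above.  struct pkt, queue_full, enqueue() and event_query()
 * are hypothetical stand-ins; free(NULL), like kfree_skb(NULL), is a no-op.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct pkt { int len; };

static bool queue_full;			/* stands in for the MLD_MAX_SKBS limit check */

static void enqueue(struct pkt *p)
{
	/* the queue takes ownership; the real code frees it later from a worker */
	printf("queued pkt of len %d\n", p->len);
	free(p);
}

static void event_query(struct pkt *p, bool dev_dead)
{
	if (dev_dead)
		goto out;

	if (!queue_full) {
		enqueue(p);
		p = NULL;		/* ownership moved to the queue */
	}
out:
	free(p);			/* frees only when we still own the packet */
}

int main(void)
{
	struct pkt *p = malloc(sizeof(*p));

	p->len = 64;
	event_query(p, false);		/* consumed by the queue */

	queue_full = true;
	p = malloc(sizeof(*p));
	p->len = 128;
	event_query(p, false);		/* queue full: freed at the single exit */
	return 0;
}

Queueing transfers ownership, so the single free at the exit fires only for the drop cases (dead or missing device, full queue), which is exactly what the kfree_skb(skb) after out: does in the patched function.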
@@ -1542,27 +1538,23 @@ static void mld_query_work(struct work_struct *work)
 }
 
 /* called with rcu_read_lock() */
-int igmp6_event_report(struct sk_buff *skb)
+void igmp6_event_report(struct sk_buff *skb)
 {
 	struct inet6_dev *idev = __in6_dev_get(skb->dev);
 
-	if (!idev)
-		return -EINVAL;
-
-	if (idev->dead) {
-		kfree_skb(skb);
-		return -ENODEV;
-	}
+	if (!idev || idev->dead)
+		goto out;
 
 	spin_lock_bh(&idev->mc_report_lock);
 	if (skb_queue_len(&idev->mc_report_queue) < MLD_MAX_SKBS) {
 		__skb_queue_tail(&idev->mc_report_queue, skb);
 		if (!mod_delayed_work(mld_wq, &idev->mc_report_work, 0))
 			in6_dev_hold(idev);
+		skb = NULL;
 	}
 	spin_unlock_bh(&idev->mc_report_lock);
-
-	return 0;
+out:
+	kfree_skb(skb);
 }
 
 static void __mld_report_work(struct sk_buff *skb)
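Both hunks leave the existing scheduling idiom untouched: in6_dev_hold() is taken only when mod_delayed_work() reports that the work item was not already pending, so the matching release on the work side balances exactly one hold no matter how many skbs accumulate in the queue meanwhile. A rough user-space model of that invariant follows; refcount, work_pending, schedule_event() and work_handler() are illustrative names, not the real workqueue API.

/*
 * Rough user-space model (not kernel code) of the hold/schedule pairing:
 * take one reference only when the work was not already pending, and have
 * the handler drop exactly one reference when it runs.  refcount,
 * work_pending, schedule_event() and work_handler() are illustrative names.
 */
#include <stdbool.h>
#include <stdio.h>

static int refcount = 1;		/* models the device reference count */
static bool work_pending;		/* models a queued-but-not-yet-run work item */

static bool schedule_work_item(void)	/* returns true if already pending */
{
	bool was_pending = work_pending;

	work_pending = true;
	return was_pending;
}

static void schedule_event(void)
{
	if (!schedule_work_item())
		refcount++;		/* hold once per scheduled run, as with in6_dev_hold() */
	printf("event queued, refcount=%d\n", refcount);
}

static void work_handler(void)
{
	work_pending = false;
	refcount--;			/* the single release that balances the hold */
	printf("work ran, refcount=%d\n", refcount);
}

int main(void)
{
	schedule_event();		/* first event: takes the hold */
	schedule_event();		/* second event: work already pending, no extra hold */
	work_handler();			/* one release balances the one hold */
	return 0;
}

The point of the model is only the one-to-one pairing of holds and releases; everything else about the real workqueue machinery is omitted.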