author     Linus Torvalds <torvalds@linux-foundation.org>   2011-11-03 21:05:43 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2011-11-03 21:05:43 -0700
commit     6dbbd92522a13bcd5003829cbed30bc38a3d0362 (patch)
tree       b486642d7392b81d89f159d65fd556a432e78d16 /net
parent     d6748066ad0e8b2514545998f8367ebb3906f299 (diff)
parent     e1cfb67acd5e890bbad695000d2c997bfb7f1756 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (45 commits)
be2net: Add detect UE feature for Lancer
be2net: Prevent CQ full condition for Lancer
be2net: Fix disabling multicast promiscuous mode
be2net: Fix endian issue in RX filter command
af_packet: de-inline some helper functions
MAINTAINERS: Add can-gw include to maintained files
net: Add back alignment for size for __alloc_skb
net: add missing bh_unlock_sock() calls
l2tp: fix race in l2tp_recv_dequeue()
ixgbevf: Update release version
ixgbe: DCB, return max for IEEE traffic classes
ixgbe: fix reading of the buffer returned by the firmware
ixgbe: Fix compiler warnings
ixgbe: fix smatch splat due to missing NULL check
ixgbe: fix disabling of Tx laser at probe
ixgbe: Fix link issues caused by a reset while interface is down
igb: Fix for I347AT4 PHY cable length unit detection
e100: make sure vlan support isn't advertised on old adapters
e1000e: demote a debugging WARN to a debug log message
net: fix typo in drivers/net/ethernet/xilinx/ll_temac_main.c
...
Diffstat (limited to 'net')
39 files changed, 185 insertions, 208 deletions
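One change worth calling out before the raw diff is the skbuff.c fix ("net: Add back alignment for size for __alloc_skb"): the requested payload size is rounded up to a cache-line boundary again before the skb_shared_info footprint is added, so the shared info starts on an aligned address. Below is a minimal user-space sketch of that rounding only; the 64-byte constant and the shared_info_stub struct are illustrative stand-ins for SMP_CACHE_BYTES and struct skb_shared_info, not the kernel definitions.

```c
#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-in for SMP_CACHE_BYTES; the real value is per-arch. */
#define CACHE_BYTES 64

/* Round x up to the next cache-line multiple, mirroring SKB_DATA_ALIGN(). */
#define DATA_ALIGN(x) (((x) + (CACHE_BYTES - 1)) & ~(size_t)(CACHE_BYTES - 1))

struct shared_info_stub { long dummy[8]; };  /* stand-in for struct skb_shared_info */

int main(void)
{
	size_t size = 1000;                      /* requested payload size */

	size = DATA_ALIGN(size);                 /* the re-added step: align the payload first */
	size += DATA_ALIGN(sizeof(struct shared_info_stub));

	printf("total allocation = %zu bytes\n", size);
	return 0;
}
```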
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index c8cf9391417e..bc2528624583 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -470,10 +470,12 @@ static void vlan_dev_change_rx_flags(struct net_device *dev, int change) { struct net_device *real_dev = vlan_dev_info(dev)->real_dev; - if (change & IFF_ALLMULTI) - dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1); - if (change & IFF_PROMISC) - dev_set_promiscuity(real_dev, dev->flags & IFF_PROMISC ? 1 : -1); + if (dev->flags & IFF_UP) { + if (change & IFF_ALLMULTI) + dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1); + if (change & IFF_PROMISC) + dev_set_promiscuity(real_dev, dev->flags & IFF_PROMISC ? 1 : -1); + } } static void vlan_dev_set_rx_mode(struct net_device *vlan_dev) diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c index bf2a333ca7c7..5449294bdd5e 100644 --- a/net/bridge/netfilter/ebt_ulog.c +++ b/net/bridge/netfilter/ebt_ulog.c @@ -102,16 +102,15 @@ static struct sk_buff *ulog_alloc_skb(unsigned int size) unsigned int n; n = max(size, nlbufsiz); - skb = alloc_skb(n, GFP_ATOMIC); + skb = alloc_skb(n, GFP_ATOMIC | __GFP_NOWARN); if (!skb) { - pr_debug("cannot alloc whole buffer of size %ub!\n", n); if (n > size) { /* try to allocate only as much as we need for * current packet */ skb = alloc_skb(size, GFP_ATOMIC); if (!skb) - pr_debug("cannot even allocate " - "buffer of size %ub\n", size); + pr_debug("cannot even allocate buffer of size %ub\n", + size); } } diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 909ecb3c2a33..039d51e6c284 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -872,12 +872,8 @@ static void neigh_timer_handler(unsigned long arg) now = jiffies; next = now + HZ; - if (!(state & NUD_IN_TIMER)) { -#ifndef CONFIG_SMP - printk(KERN_WARNING "neigh: timer & !nud_in_timer\n"); -#endif + if (!(state & NUD_IN_TIMER)) goto out; - } if (state & NUD_REACHABLE) { if (time_before_eq(now, diff --git a/net/core/skbuff.c b/net/core/skbuff.c index ca4db40e75b8..18a3cebb753d 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -189,6 +189,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, * aligned memory blocks, unless SLUB/SLAB debug is enabled. * Both skb->head and skb_shared_info are cache line aligned. 
*/ + size = SKB_DATA_ALIGN(size); size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); data = kmalloc_node_track_caller(size, gfp_mask, node); if (!data) diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 332639b56f4d..90a919afbed7 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -433,6 +433,7 @@ exit: NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); return NULL; put_and_exit: + bh_unlock_sock(newsk); sock_put(newsk); goto exit; } diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index db8d22db425f..a639967eb727 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c @@ -395,7 +395,6 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par) config = clusterip_config_init(cipinfo, e->ip.dst.s_addr, dev); if (!config) { - pr_info("cannot allocate config\n"); dev_put(dev); return -ENOMEM; } diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c index 446e0f467a17..b5508151e547 100644 --- a/net/ipv4/netfilter/ipt_ULOG.c +++ b/net/ipv4/netfilter/ipt_ULOG.c @@ -135,10 +135,8 @@ static struct sk_buff *ulog_alloc_skb(unsigned int size) * due to slab allocator restrictions */ n = max(size, nlbufsiz); - skb = alloc_skb(n, GFP_ATOMIC); + skb = alloc_skb(n, GFP_ATOMIC | __GFP_NOWARN); if (!skb) { - pr_debug("cannot alloc whole buffer %ub!\n", n); - if (n > size) { /* try to allocate only as much as we need for * current packet */ diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c index d1cb412c18e0..2133c30a4a5f 100644 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c @@ -400,11 +400,8 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx, *len = 0; *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC); - if (*octets == NULL) { - if (net_ratelimit()) - pr_notice("OOM in bsalg (%d)\n", __LINE__); + if (*octets == NULL) return 0; - } ptr = *octets; while (ctx->pointer < eoc) { @@ -451,11 +448,8 @@ static unsigned char asn1_oid_decode(struct asn1_ctx *ctx, return 0; *oid = kmalloc(size * sizeof(unsigned long), GFP_ATOMIC); - if (*oid == NULL) { - if (net_ratelimit()) - pr_notice("OOM in bsalg (%d)\n", __LINE__); + if (*oid == NULL) return 0; - } optr = *oid; @@ -728,8 +722,6 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx, *obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC); if (*obj == NULL) { kfree(id); - if (net_ratelimit()) - pr_notice("OOM in bsalg (%d)\n", __LINE__); return 0; } (*obj)->syntax.l[0] = l; @@ -744,8 +736,6 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx, if (*obj == NULL) { kfree(p); kfree(id); - if (net_ratelimit()) - pr_notice("OOM in bsalg (%d)\n", __LINE__); return 0; } memcpy((*obj)->syntax.c, p, len); @@ -759,8 +749,6 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx, *obj = kmalloc(sizeof(struct snmp_object), GFP_ATOMIC); if (*obj == NULL) { kfree(id); - if (net_ratelimit()) - pr_notice("OOM in bsalg (%d)\n", __LINE__); return 0; } if (!asn1_null_decode(ctx, end)) { @@ -780,8 +768,6 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx, if (*obj == NULL) { kfree(lp); kfree(id); - if (net_ratelimit()) - pr_notice("OOM in bsalg (%d)\n", __LINE__); return 0; } memcpy((*obj)->syntax.ul, lp, len); @@ -801,8 +787,6 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx, if (*obj == NULL) { kfree(p); kfree(id); - if (net_ratelimit()) - pr_notice("OOM in bsalg (%d)\n", __LINE__); return 0; } 
memcpy((*obj)->syntax.uc, p, len); @@ -819,8 +803,6 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx, *obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC); if (*obj == NULL) { kfree(id); - if (net_ratelimit()) - pr_notice("OOM in bsalg (%d)\n", __LINE__); return 0; } (*obj)->syntax.ul[0] = ul; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 0ea10eefa60f..a7443159c400 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1510,6 +1510,7 @@ exit: NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); return NULL; put_and_exit: + bh_unlock_sock(newsk); sock_put(newsk); goto exit; } @@ -2339,7 +2340,7 @@ static void tcp_seq_stop(struct seq_file *seq, void *v) } } -static int tcp_seq_open(struct inode *inode, struct file *file) +int tcp_seq_open(struct inode *inode, struct file *file) { struct tcp_seq_afinfo *afinfo = PDE(inode)->data; struct tcp_iter_state *s; @@ -2355,23 +2356,19 @@ static int tcp_seq_open(struct inode *inode, struct file *file) s->last_pos = 0; return 0; } +EXPORT_SYMBOL(tcp_seq_open); int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo) { int rc = 0; struct proc_dir_entry *p; - afinfo->seq_fops.open = tcp_seq_open; - afinfo->seq_fops.read = seq_read; - afinfo->seq_fops.llseek = seq_lseek; - afinfo->seq_fops.release = seq_release_net; - afinfo->seq_ops.start = tcp_seq_start; afinfo->seq_ops.next = tcp_seq_next; afinfo->seq_ops.stop = tcp_seq_stop; p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net, - &afinfo->seq_fops, afinfo); + afinfo->seq_fops, afinfo); if (!p) rc = -ENOMEM; return rc; @@ -2520,12 +2517,18 @@ out: return 0; } +static const struct file_operations tcp_afinfo_seq_fops = { + .owner = THIS_MODULE, + .open = tcp_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_net +}; + static struct tcp_seq_afinfo tcp4_seq_afinfo = { .name = "tcp", .family = AF_INET, - .seq_fops = { - .owner = THIS_MODULE, - }, + .seq_fops = &tcp_afinfo_seq_fops, .seq_ops = { .show = tcp4_seq_show, }, diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index ebaa96bd3464..ab0966df1e2a 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1397,6 +1397,8 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) nf_reset(skb); if (up->encap_type) { + int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); + /* * This is an encapsulation socket so pass the skb to * the socket's udp_encap_rcv() hook. 
Otherwise, just @@ -1409,11 +1411,11 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) */ /* if we're overly short, let UDP handle it */ - if (skb->len > sizeof(struct udphdr) && - up->encap_rcv != NULL) { + encap_rcv = ACCESS_ONCE(up->encap_rcv); + if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) { int ret; - ret = (*up->encap_rcv)(sk, skb); + ret = encap_rcv(sk, skb); if (ret <= 0) { UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INDATAGRAMS, @@ -2037,7 +2039,7 @@ static void udp_seq_stop(struct seq_file *seq, void *v) spin_unlock_bh(&state->udp_table->hash[state->bucket].lock); } -static int udp_seq_open(struct inode *inode, struct file *file) +int udp_seq_open(struct inode *inode, struct file *file) { struct udp_seq_afinfo *afinfo = PDE(inode)->data; struct udp_iter_state *s; @@ -2053,6 +2055,7 @@ static int udp_seq_open(struct inode *inode, struct file *file) s->udp_table = afinfo->udp_table; return err; } +EXPORT_SYMBOL(udp_seq_open); /* ------------------------------------------------------------------------ */ int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo) @@ -2060,17 +2063,12 @@ int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo) struct proc_dir_entry *p; int rc = 0; - afinfo->seq_fops.open = udp_seq_open; - afinfo->seq_fops.read = seq_read; - afinfo->seq_fops.llseek = seq_lseek; - afinfo->seq_fops.release = seq_release_net; - afinfo->seq_ops.start = udp_seq_start; afinfo->seq_ops.next = udp_seq_next; afinfo->seq_ops.stop = udp_seq_stop; p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net, - &afinfo->seq_fops, afinfo); + afinfo->seq_fops, afinfo); if (!p) rc = -ENOMEM; return rc; @@ -2120,14 +2118,20 @@ int udp4_seq_show(struct seq_file *seq, void *v) return 0; } +static const struct file_operations udp_afinfo_seq_fops = { + .owner = THIS_MODULE, + .open = udp_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_net +}; + /* ------------------------------------------------------------------------ */ static struct udp_seq_afinfo udp4_seq_afinfo = { .name = "udp", .family = AF_INET, .udp_table = &udp_table, - .seq_fops = { - .owner = THIS_MODULE, - }, + .seq_fops = &udp_afinfo_seq_fops, .seq_ops = { .show = udp4_seq_show, }, diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c index aee9963f7f5a..08383eb54208 100644 --- a/net/ipv4/udplite.c +++ b/net/ipv4/udplite.c @@ -71,13 +71,20 @@ static struct inet_protosw udplite4_protosw = { }; #ifdef CONFIG_PROC_FS + +static const struct file_operations udplite_afinfo_seq_fops = { + .owner = THIS_MODULE, + .open = udp_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_net +}; + static struct udp_seq_afinfo udplite4_seq_afinfo = { .name = "udplite", .family = AF_INET, .udp_table = &udplite_table, - .seq_fops = { - .owner = THIS_MODULE, - }, + .seq_fops = &udplite_afinfo_seq_fops, .seq_ops = { .show = udp4_seq_show, }, diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c index 30fcee465448..8992cf6651d4 100644 --- a/net/ipv6/netfilter.c +++ b/net/ipv6/netfilter.c @@ -100,9 +100,16 @@ static int nf_ip6_route(struct net *net, struct dst_entry **dst, .pinet6 = (struct ipv6_pinfo *) &fake_pinfo, }; const void *sk = strict ? 
&fake_sk : NULL; - - *dst = ip6_route_output(net, sk, &fl->u.ip6); - return (*dst)->error; + struct dst_entry *result; + int err; + + result = ip6_route_output(net, sk, &fl->u.ip6); + err = result->error; + if (err) + dst_release(result); + else + *dst = result; + return err; } __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index e8762c73b170..38f00b0298d3 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -182,7 +182,6 @@ fq_find(__be32 id, u32 user, struct in6_addr *src, struct in6_addr *dst) return container_of(q, struct nf_ct_frag6_queue, q); oom: - pr_debug("Can't alloc new queue\n"); return NULL; } @@ -370,10 +369,10 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev) struct sk_buff *clone; int i, plen = 0; - if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL) { - pr_debug("Can't alloc skb\n"); + clone = alloc_skb(0, GFP_ATOMIC); + if (clone == NULL) goto out_oom; - } + clone->next = head->next; head->next = clone; skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 10b2b3165a1a..36131d122a6f 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -2161,12 +2161,18 @@ out: return 0; } +static const struct file_operations tcp6_afinfo_seq_fops = { + .owner = THIS_MODULE, + .open = tcp_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_net +}; + static struct tcp_seq_afinfo tcp6_seq_afinfo = { .name = "tcp6", .family = AF_INET6, - .seq_fops = { - .owner = THIS_MODULE, - }, + .seq_fops = &tcp6_afinfo_seq_fops, .seq_ops = { .show = tcp6_seq_show, }, diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index f4ca0a5b3457..846f4757eb8d 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1424,13 +1424,19 @@ int udp6_seq_show(struct seq_file *seq, void *v) return 0; } +static const struct file_operations udp6_afinfo_seq_fops = { + .owner = THIS_MODULE, + .open = udp_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_net +}; + static struct udp_seq_afinfo udp6_seq_afinfo = { .name = "udp6", .family = AF_INET6, .udp_table = &udp_table, - .seq_fops = { - .owner = THIS_MODULE, - }, + .seq_fops = &udp6_afinfo_seq_fops, .seq_ops = { .show = udp6_seq_show, }, diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c index 986c4de5292e..8889aa22ed47 100644 --- a/net/ipv6/udplite.c +++ b/net/ipv6/udplite.c @@ -93,13 +93,20 @@ void udplitev6_exit(void) } #ifdef CONFIG_PROC_FS + +static const struct file_operations udplite6_afinfo_seq_fops = { + .owner = THIS_MODULE, + .open = udp_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_net +}; + static struct udp_seq_afinfo udplite6_seq_afinfo = { .name = "udplite6", .family = AF_INET6, .udp_table = &udplite_table, - .seq_fops = { - .owner = THIS_MODULE, - }, + .seq_fops = &udplite6_afinfo_seq_fops, .seq_ops = { .show = udp6_seq_show, }, diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 34b2ddeacb67..bf8d50c67931 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -397,6 +397,7 @@ static void l2tp_recv_dequeue(struct l2tp_session *session) * expect to send up next, dequeue it and any other * in-sequence packets behind it. 
*/ +start: spin_lock_bh(&session->reorder_q.lock); skb_queue_walk_safe(&session->reorder_q, skb, tmp) { if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) { @@ -433,7 +434,7 @@ static void l2tp_recv_dequeue(struct l2tp_session *session) */ spin_unlock_bh(&session->reorder_q.lock); l2tp_recv_dequeue_skb(session, skb); - spin_lock_bh(&session->reorder_q.lock); + goto start; } out: diff --git a/net/netfilter/core.c b/net/netfilter/core.c index 3346829ea07f..afca6c78948c 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c @@ -180,17 +180,16 @@ next_hook: if (ret == 0) ret = -EPERM; } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) { - ret = nf_queue(skb, elem, pf, hook, indev, outdev, okfn, - verdict >> NF_VERDICT_QBITS); - if (ret < 0) { - if (ret == -ECANCELED) + int err = nf_queue(skb, elem, pf, hook, indev, outdev, okfn, + verdict >> NF_VERDICT_QBITS); + if (err < 0) { + if (err == -ECANCELED) goto next_hook; - if (ret == -ESRCH && + if (err == -ESRCH && (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS)) goto next_hook; kfree_skb(skb); } - ret = 0; } rcu_read_unlock(); return ret; diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index d7e86ef9d23a..86137b558f45 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c @@ -1699,10 +1699,8 @@ ip_set_init(void) ip_set_list = kzalloc(sizeof(struct ip_set *) * ip_set_max, GFP_KERNEL); - if (!ip_set_list) { - pr_err("ip_set: Unable to create ip_set_list\n"); + if (!ip_set_list) return -ENOMEM; - } ret = nfnetlink_subsys_register(&ip_set_netlink_subsys); if (ret != 0) { diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 4f77bb16d22a..093cc327020f 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -188,14 +188,13 @@ ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc) } -static inline int +static inline void ip_vs_set_state(struct ip_vs_conn *cp, int direction, const struct sk_buff *skb, struct ip_vs_proto_data *pd) { - if (unlikely(!pd->pp->state_transition)) - return 0; - return pd->pp->state_transition(cp, direction, skb, pd); + if (likely(pd->pp->state_transition)) + pd->pp->state_transition(cp, direction, skb, pd); } static inline int @@ -530,7 +529,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, a cache_bypass connection entry */ ipvs = net_ipvs(net); if (ipvs->sysctl_cache_bypass && svc->fwmark && unicast) { - int ret, cs; + int ret; struct ip_vs_conn *cp; unsigned int flags = (svc->flags & IP_VS_SVC_F_ONEPACKET && iph.protocol == IPPROTO_UDP)? @@ -557,7 +556,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, ip_vs_in_stats(cp, skb); /* set state */ - cs = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd); + ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd); /* transmit the first SYN packet */ ret = cp->packet_xmit(skb, cp, pd->pp); @@ -1490,7 +1489,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) struct ip_vs_protocol *pp; struct ip_vs_proto_data *pd; struct ip_vs_conn *cp; - int ret, restart, pkts; + int ret, pkts; struct netns_ipvs *ipvs; /* Already marked as IPVS request or reply? 
*/ @@ -1591,7 +1590,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) } ip_vs_in_stats(cp, skb); - restart = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd); + ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd); if (cp->packet_xmit) ret = cp->packet_xmit(skb, cp, pp); /* do not touch skb anymore */ @@ -1878,10 +1877,9 @@ static int __net_init __ip_vs_init(struct net *net) struct netns_ipvs *ipvs; ipvs = net_generic(net, ip_vs_net_id); - if (ipvs == NULL) { - pr_err("%s(): no memory.\n", __func__); + if (ipvs == NULL) return -ENOMEM; - } + /* Hold the beast until a service is registerd */ ipvs->enable = 0; ipvs->net = net; diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index e3be48bf4dcd..008bf97cc91a 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -856,15 +856,12 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest, } dest = kzalloc(sizeof(struct ip_vs_dest), GFP_KERNEL); - if (dest == NULL) { - pr_err("%s(): no memory.\n", __func__); + if (dest == NULL) return -ENOMEM; - } + dest->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats); - if (!dest->stats.cpustats) { - pr_err("%s() alloc_percpu failed\n", __func__); + if (!dest->stats.cpustats) goto err_alloc; - } dest->af = svc->af; dest->protocol = svc->protocol; @@ -1168,10 +1165,8 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u, goto out_err; } svc->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats); - if (!svc->stats.cpustats) { - pr_err("%s() alloc_percpu failed\n", __func__); + if (!svc->stats.cpustats) goto out_err; - } /* I'm the first user of the service */ atomic_set(&svc->usecnt, 0); @@ -3326,10 +3321,8 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info) int ret = 0, cmd; int need_full_svc = 0, need_full_dest = 0; struct net *net; - struct netns_ipvs *ipvs; net = skb_sknet(skb); - ipvs = net_ipvs(net); cmd = info->genlhdr->cmd; mutex_lock(&__ip_vs_mutex); @@ -3421,10 +3414,8 @@ static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info) void *reply; int ret, cmd, reply_cmd; struct net *net; - struct netns_ipvs *ipvs; net = skb_sknet(skb); - ipvs = net_ipvs(net); cmd = info->genlhdr->cmd; if (cmd == IPVS_CMD_GET_SERVICE) @@ -3720,10 +3711,9 @@ int __net_init ip_vs_control_net_init(struct net *net) /* procfs stats */ ipvs->tot_stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats); - if (!ipvs->tot_stats.cpustats) { - pr_err("%s(): alloc_percpu.\n", __func__); + if (!ipvs->tot_stats.cpustats) return -ENOMEM; - } + spin_lock_init(&ipvs->tot_stats.lock); proc_net_fops_create(net, "ip_vs", 0, &ip_vs_info_fops); diff --git a/net/netfilter/ipvs/ip_vs_dh.c b/net/netfilter/ipvs/ip_vs_dh.c index 95fd0d14200b..1c269e56200a 100644 --- a/net/netfilter/ipvs/ip_vs_dh.c +++ b/net/netfilter/ipvs/ip_vs_dh.c @@ -150,10 +150,9 @@ static int ip_vs_dh_init_svc(struct ip_vs_service *svc) /* allocate the DH table for this service */ tbl = kmalloc(sizeof(struct ip_vs_dh_bucket)*IP_VS_DH_TAB_SIZE, GFP_ATOMIC); - if (tbl == NULL) { - pr_err("%s(): no memory\n", __func__); + if (tbl == NULL) return -ENOMEM; - } + svc->sched_data = tbl; IP_VS_DBG(6, "DH hash table (memory=%Zdbytes) allocated for " "current service\n", diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c index 4490a32ad5b2..538d74ee4f68 100644 --- a/net/netfilter/ipvs/ip_vs_ftp.c +++ b/net/netfilter/ipvs/ip_vs_ftp.c @@ -52,8 +52,9 @@ * List of ports (up to IP_VS_APP_MAX_PORTS) to be handled by helper * 
First port is set to the default port. */ +static unsigned int ports_count = 1; static unsigned short ports[IP_VS_APP_MAX_PORTS] = {21, 0}; -module_param_array(ports, ushort, NULL, 0); +module_param_array(ports, ushort, &ports_count, 0444); MODULE_PARM_DESC(ports, "Ports to monitor for FTP control commands"); @@ -449,7 +450,7 @@ static int __net_init __ip_vs_ftp_init(struct net *net) if (ret) goto err_exit; - for (i=0; i<IP_VS_APP_MAX_PORTS; i++) { + for (i = 0; i < ports_count; i++) { if (!ports[i]) continue; ret = register_ip_vs_app_inc(net, app, app->protocol, ports[i]); diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c index 87e40ea77a95..0f16283fd058 100644 --- a/net/netfilter/ipvs/ip_vs_lblc.c +++ b/net/netfilter/ipvs/ip_vs_lblc.c @@ -202,10 +202,8 @@ ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, const union nf_inet_addr *daddr, en = ip_vs_lblc_get(dest->af, tbl, daddr); if (!en) { en = kmalloc(sizeof(*en), GFP_ATOMIC); - if (!en) { - pr_err("%s(): no memory\n", __func__); + if (!en) return NULL; - } en->af = dest->af; ip_vs_addr_copy(dest->af, &en->addr, daddr); @@ -345,10 +343,9 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc) * Allocate the ip_vs_lblc_table for this service */ tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC); - if (tbl == NULL) { - pr_err("%s(): no memory\n", __func__); + if (tbl == NULL) return -ENOMEM; - } + svc->sched_data = tbl; IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) allocated for " "current service\n", sizeof(*tbl)); diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c index 90f618ab6dda..eec797f8cce7 100644 --- a/net/netfilter/ipvs/ip_vs_lblcr.c +++ b/net/netfilter/ipvs/ip_vs_lblcr.c @@ -112,10 +112,8 @@ ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest) } e = kmalloc(sizeof(*e), GFP_ATOMIC); - if (e == NULL) { - pr_err("%s(): no memory\n", __func__); + if (e == NULL) return NULL; - } atomic_inc(&dest->refcnt); e->dest = dest; @@ -373,10 +371,8 @@ ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *daddr, en = ip_vs_lblcr_get(dest->af, tbl, daddr); if (!en) { en = kmalloc(sizeof(*en), GFP_ATOMIC); - if (!en) { - pr_err("%s(): no memory\n", __func__); + if (!en) return NULL; - } en->af = dest->af; ip_vs_addr_copy(dest->af, &en->addr, daddr); @@ -516,10 +512,9 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc) * Allocate the ip_vs_lblcr_table for this service */ tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC); - if (tbl == NULL) { - pr_err("%s(): no memory\n", __func__); + if (tbl == NULL) return -ENOMEM; - } + svc->sched_data = tbl; IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) allocated for " "current service\n", sizeof(*tbl)); diff --git a/net/netfilter/ipvs/ip_vs_nfct.c b/net/netfilter/ipvs/ip_vs_nfct.c index f454c80df0a7..022e77e1e766 100644 --- a/net/netfilter/ipvs/ip_vs_nfct.c +++ b/net/netfilter/ipvs/ip_vs_nfct.c @@ -127,7 +127,7 @@ ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin) nf_conntrack_alter_reply(ct, &new_tuple); } -int ip_vs_confirm_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp) +int ip_vs_confirm_conntrack(struct sk_buff *skb) { return nf_conntrack_confirm(skb); } diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c index 52d073c105e9..85312939695f 100644 --- a/net/netfilter/ipvs/ip_vs_proto.c +++ b/net/netfilter/ipvs/ip_vs_proto.c @@ -74,10 +74,9 @@ register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp) struct ip_vs_proto_data *pd = 
kzalloc(sizeof(struct ip_vs_proto_data), GFP_ATOMIC); - if (!pd) { - pr_err("%s(): no memory.\n", __func__); + if (!pd) return -ENOMEM; - } + pd->pp = pp; /* For speed issues */ pd->next = ipvs->proto_data_table[hash]; ipvs->proto_data_table[hash] = pd; diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c index d12ed53ec95f..1fbf7a2816f5 100644 --- a/net/netfilter/ipvs/ip_vs_proto_sctp.c +++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c @@ -906,7 +906,7 @@ static const char *sctp_state_name(int state) return "?"; } -static inline int +static inline void set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, int direction, const struct sk_buff *skb) { @@ -924,7 +924,7 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, sch = skb_header_pointer(skb, ihl + sizeof(sctp_sctphdr_t), sizeof(_sctpch), &_sctpch); if (sch == NULL) - return 0; + return; chunk_type = sch->type; /* @@ -993,21 +993,15 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, cp->timeout = pd->timeout_table[cp->state = next_state]; else /* What to do ? */ cp->timeout = sctp_timeouts[cp->state = next_state]; - - return 1; } -static int +static void sctp_state_transition(struct ip_vs_conn *cp, int direction, const struct sk_buff *skb, struct ip_vs_proto_data *pd) { - int ret = 0; - spin_lock(&cp->lock); - ret = set_sctp_state(pd, cp, direction, skb); + set_sctp_state(pd, cp, direction, skb); spin_unlock(&cp->lock); - - return ret; } static inline __u16 sctp_app_hashkey(__be16 port) diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c index c0cc341b840d..ef8641f7af83 100644 --- a/net/netfilter/ipvs/ip_vs_proto_tcp.c +++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c @@ -546,7 +546,7 @@ set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, /* * Handle state transitions */ -static int +static void tcp_state_transition(struct ip_vs_conn *cp, int direction, const struct sk_buff *skb, struct ip_vs_proto_data *pd) @@ -561,13 +561,11 @@ tcp_state_transition(struct ip_vs_conn *cp, int direction, th = skb_header_pointer(skb, ihl, sizeof(_tcph), &_tcph); if (th == NULL) - return 0; + return; spin_lock(&cp->lock); set_tcp_state(pd, cp, direction, th); spin_unlock(&cp->lock); - - return 1; } static inline __u16 tcp_app_hashkey(__be16 port) diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c index f1282cbe6fe3..f4b7262896bb 100644 --- a/net/netfilter/ipvs/ip_vs_proto_udp.c +++ b/net/netfilter/ipvs/ip_vs_proto_udp.c @@ -454,18 +454,17 @@ static const char * udp_state_name(int state) return udp_state_name_table[state] ? 
udp_state_name_table[state] : "?"; } -static int +static void udp_state_transition(struct ip_vs_conn *cp, int direction, const struct sk_buff *skb, struct ip_vs_proto_data *pd) { if (unlikely(!pd)) { pr_err("UDP no ns data\n"); - return 0; + return; } cp->timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL]; - return 1; } static void __udp_init(struct net *net, struct ip_vs_proto_data *pd) diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c index b5e2556c581a..33815f4fb451 100644 --- a/net/netfilter/ipvs/ip_vs_sh.c +++ b/net/netfilter/ipvs/ip_vs_sh.c @@ -147,10 +147,9 @@ static int ip_vs_sh_init_svc(struct ip_vs_service *svc) /* allocate the SH table for this service */ tbl = kmalloc(sizeof(struct ip_vs_sh_bucket)*IP_VS_SH_TAB_SIZE, GFP_ATOMIC); - if (tbl == NULL) { - pr_err("%s(): no memory\n", __func__); + if (tbl == NULL) return -ENOMEM; - } + svc->sched_data = tbl; IP_VS_DBG(6, "SH hash table (memory=%Zdbytes) allocated for " "current service\n", diff --git a/net/netfilter/ipvs/ip_vs_wrr.c b/net/netfilter/ipvs/ip_vs_wrr.c index 1ef41f50723c..fd0d4e09876a 100644 --- a/net/netfilter/ipvs/ip_vs_wrr.c +++ b/net/netfilter/ipvs/ip_vs_wrr.c @@ -85,10 +85,9 @@ static int ip_vs_wrr_init_svc(struct ip_vs_service *svc) * Allocate the mark variable for WRR scheduling */ mark = kmalloc(sizeof(struct ip_vs_wrr_mark), GFP_ATOMIC); - if (mark == NULL) { - pr_err("%s(): no memory\n", __func__); + if (mark == NULL) return -ENOMEM; - } + mark->cl = &svc->destinations; mark->cw = 0; mark->mw = ip_vs_wrr_max_weight(svc); diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index ee319a4338b0..aa2d7206ee8a 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c @@ -339,7 +339,7 @@ ip_vs_dst_reset(struct ip_vs_dest *dest) \ (skb)->ipvs_property = 1; \ if (unlikely((cp)->flags & IP_VS_CONN_F_NFCT)) \ - __ret = ip_vs_confirm_conntrack(skb, cp); \ + __ret = ip_vs_confirm_conntrack(skb); \ if (__ret == NF_ACCEPT) { \ nf_reset(skb); \ skb_forward_csum(skb); \ diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 5acfaf59a9c3..7202b0631cd6 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -661,7 +661,6 @@ __nf_conntrack_alloc(struct net *net, u16 zone, */ ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp); if (ct == NULL) { - pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n"); atomic_dec(&net->ct.count); return ERR_PTR(-ENOMEM); } @@ -749,10 +748,8 @@ init_conntrack(struct net *net, struct nf_conn *tmpl, ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC, hash); - if (IS_ERR(ct)) { - pr_debug("Can't allocate conntrack.\n"); + if (IS_ERR(ct)) return (struct nf_conntrack_tuple_hash *)ct; - } if (!l4proto->new(ct, skb, dataoff)) { nf_conntrack_free(ct); diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 2d8158acf6fa..66b2c54c544f 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -307,17 +307,14 @@ nfulnl_alloc_skb(unsigned int inst_size, unsigned int pkt_size) n = max(inst_size, pkt_size); skb = alloc_skb(n, GFP_ATOMIC); if (!skb) { - pr_notice("nfnetlink_log: can't alloc whole buffer (%u bytes)\n", - inst_size); - if (n > pkt_size) { /* try to allocate only as much as we need for current * packet */ skb = alloc_skb(pkt_size, GFP_ATOMIC); if (!skb) - pr_err("nfnetlink_log: can't even alloc %u " - "bytes\n", pkt_size); + pr_err("nfnetlink_log: can't even alloc %u bytes\n", + pkt_size); 
} } diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c index 3bdd443aaf15..f407ebc13481 100644 --- a/net/netfilter/xt_IDLETIMER.c +++ b/net/netfilter/xt_IDLETIMER.c @@ -122,14 +122,12 @@ static int idletimer_tg_create(struct idletimer_tg_info *info) info->timer = kmalloc(sizeof(*info->timer), GFP_KERNEL); if (!info->timer) { - pr_debug("couldn't alloc timer\n"); ret = -ENOMEM; goto out; } info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL); if (!info->timer->attr.attr.name) { - pr_debug("couldn't alloc attribute name\n"); ret = -ENOMEM; goto out_free_timer; } diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index 9228ee0dc11a..dfd52bad1523 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c @@ -176,10 +176,7 @@ dsthash_alloc_init(struct xt_hashlimit_htable *ht, ent = NULL; } else ent = kmem_cache_alloc(hashlimit_cachep, GFP_ATOMIC); - if (!ent) { - if (net_ratelimit()) - pr_err("cannot allocate dsthash_ent\n"); - } else { + if (ent) { memcpy(&ent->dst, dst, sizeof(ent->dst)); spin_lock_init(&ent->lock); diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 03bb45adf2fc..82a6f34d39d0 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -335,7 +335,7 @@ struct packet_skb_cb { (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \ ((x)->kactive_blk_num+1) : 0) -static inline struct packet_sock *pkt_sk(struct sock *sk) +static struct packet_sock *pkt_sk(struct sock *sk) { return (struct packet_sock *)sk; } @@ -477,7 +477,7 @@ static void *packet_lookup_frame(struct packet_sock *po, return h.raw; } -static inline void *packet_current_frame(struct packet_sock *po, +static void *packet_current_frame(struct packet_sock *po, struct packet_ring_buffer *rb, int status) { @@ -715,7 +715,7 @@ out: spin_unlock(&po->sk.sk_receive_queue.lock); } -static inline void prb_flush_block(struct tpacket_kbdq_core *pkc1, +static void prb_flush_block(struct tpacket_kbdq_core *pkc1, struct tpacket_block_desc *pbd1, __u32 status) { /* Flush everything minus the block header */ @@ -793,7 +793,7 @@ static void prb_close_block(struct tpacket_kbdq_core *pkc1, pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1); } -static inline void prb_thaw_queue(struct tpacket_kbdq_core *pkc) +static void prb_thaw_queue(struct tpacket_kbdq_core *pkc) { pkc->reset_pending_on_curr_blk = 0; } @@ -869,7 +869,7 @@ static void prb_open_block(struct tpacket_kbdq_core *pkc1, * case and __packet_lookup_frame_in_block will check if block-0 * is free and can now be re-used. 
*/ -static inline void prb_freeze_queue(struct tpacket_kbdq_core *pkc, +static void prb_freeze_queue(struct tpacket_kbdq_core *pkc, struct packet_sock *po) { pkc->reset_pending_on_curr_blk = 1; @@ -940,36 +940,36 @@ static void prb_retire_current_block(struct tpacket_kbdq_core *pkc, BUG(); } -static inline int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc, +static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc, struct tpacket_block_desc *pbd) { return TP_STATUS_USER & BLOCK_STATUS(pbd); } -static inline int prb_queue_frozen(struct tpacket_kbdq_core *pkc) +static int prb_queue_frozen(struct tpacket_kbdq_core *pkc) { return pkc->reset_pending_on_curr_blk; } -static inline void prb_clear_blk_fill_status(struct packet_ring_buffer *rb) +static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb) { struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb); atomic_dec(&pkc->blk_fill_in_prog); } -static inline void prb_fill_rxhash(struct tpacket_kbdq_core *pkc, +static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc, struct tpacket3_hdr *ppd) { ppd->hv1.tp_rxhash = skb_get_rxhash(pkc->skb); } -static inline void prb_clear_rxhash(struct tpacket_kbdq_core *pkc, +static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc, struct tpacket3_hdr *ppd) { ppd->hv1.tp_rxhash = 0; } -static inline void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc, +static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc, struct tpacket3_hdr *ppd) { if (vlan_tx_tag_present(pkc->skb)) { @@ -991,7 +991,7 @@ static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc, prb_clear_rxhash(pkc, ppd); } -static inline void prb_fill_curr_block(char *curr, +static void prb_fill_curr_block(char *curr, struct tpacket_kbdq_core *pkc, struct tpacket_block_desc *pbd, unsigned int len) @@ -1071,7 +1071,7 @@ static void *__packet_lookup_frame_in_block(struct packet_sock *po, return NULL; } -static inline void *packet_current_rx_frame(struct packet_sock *po, +static void *packet_current_rx_frame(struct packet_sock *po, struct sk_buff *skb, int status, unsigned int len) { @@ -1091,7 +1091,7 @@ static inline void *packet_current_rx_frame(struct packet_sock *po, } } -static inline void *prb_lookup_block(struct packet_sock *po, +static void *prb_lookup_block(struct packet_sock *po, struct packet_ring_buffer *rb, unsigned int previous, int status) @@ -1104,7 +1104,7 @@ static inline void *prb_lookup_block(struct packet_sock *po, return pbd; } -static inline int prb_previous_blk_num(struct packet_ring_buffer *rb) +static int prb_previous_blk_num(struct packet_ring_buffer *rb) { unsigned int prev; if (rb->prb_bdqc.kactive_blk_num) @@ -1115,7 +1115,7 @@ static inline int prb_previous_blk_num(struct packet_ring_buffer *rb) } /* Assumes caller has held the rx_queue.lock */ -static inline void *__prb_previous_block(struct packet_sock *po, +static void *__prb_previous_block(struct packet_sock *po, struct packet_ring_buffer *rb, int status) { @@ -1123,7 +1123,7 @@ static inline void *__prb_previous_block(struct packet_sock *po, return prb_lookup_block(po, rb, previous, status); } -static inline void *packet_previous_rx_frame(struct packet_sock *po, +static void *packet_previous_rx_frame(struct packet_sock *po, struct packet_ring_buffer *rb, int status) { @@ -1133,7 +1133,7 @@ static inline void *packet_previous_rx_frame(struct packet_sock *po, return __prb_previous_block(po, rb, status); } -static inline void packet_increment_rx_head(struct packet_sock *po, +static void packet_increment_rx_head(struct packet_sock *po, struct 
packet_ring_buffer *rb) { switch (po->tp_version) { @@ -1148,7 +1148,7 @@ static inline void packet_increment_rx_head(struct packet_sock *po, } } -static inline void *packet_previous_frame(struct packet_sock *po, +static void *packet_previous_frame(struct packet_sock *po, struct packet_ring_buffer *rb, int status) { @@ -1156,7 +1156,7 @@ static inline void *packet_previous_frame(struct packet_sock *po, return packet_lookup_frame(po, rb, previous, status); } -static inline void packet_increment_head(struct packet_ring_buffer *buff) +static void packet_increment_head(struct packet_ring_buffer *buff) { buff->head = buff->head != buff->frame_max ? buff->head+1 : 0; } @@ -1558,7 +1558,7 @@ out_free: return err; } -static inline unsigned int run_filter(const struct sk_buff *skb, +static unsigned int run_filter(const struct sk_buff *skb, const struct sock *sk, unsigned int res) { @@ -2167,10 +2167,10 @@ out: return err; } -static inline struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad, - size_t reserve, size_t len, - size_t linear, int noblock, - int *err) +static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad, + size_t reserve, size_t len, + size_t linear, int noblock, + int *err) { struct sk_buff *skb; @@ -3494,7 +3494,7 @@ static void free_pg_vec(struct pgv *pg_vec, unsigned int order, kfree(pg_vec); } -static inline char *alloc_one_pg_vec_page(unsigned long order) +static char *alloc_one_pg_vec_page(unsigned long order) { char *buffer = NULL; gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index 5f03e4ea65bf..3e16c6abde4f 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -1261,14 +1261,19 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock, struct x25_sock *x25 = x25_sk(sk); struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)msg->msg_name; size_t copied; - int qbit, header_len = x25->neighbour->extended ? - X25_EXT_MIN_LEN : X25_STD_MIN_LEN; - + int qbit, header_len; struct sk_buff *skb; unsigned char *asmptr; int rc = -ENOTCONN; lock_sock(sk); + + if (x25->neighbour == NULL) + goto out; + + header_len = x25->neighbour->extended ? + X25_EXT_MIN_LEN : X25_STD_MIN_LEN; + /* * This works for seqpacket too. The receiver has ordered the queue for * us! We do one quick check first though |
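The udp.c hunk above also deserves a note: the old code tested up->encap_rcv and then dereferenced it as two separate reads, so a concurrent writer clearing the hook (for example during tunnel teardown) could make the call trip over a NULL pointer. The fix snapshots the pointer once with ACCESS_ONCE() and uses that snapshot for both the check and the call. Here is a rough user-space sketch of the same read-once idiom, assuming a made-up encap_handler_t type and a plain volatile cast in place of the kernel's ACCESS_ONCE().

```c
#include <stdio.h>
#include <stddef.h>

typedef int (*encap_handler_t)(void *sk, void *skb);  /* hypothetical handler type */

static encap_handler_t encap_rcv;   /* may be set or cleared by another thread */

static int handle_packet(void *sk, void *skb)
{
	/* Read the pointer exactly once; the volatile cast stands in for
	 * ACCESS_ONCE() and keeps the compiler from re-loading encap_rcv. */
	encap_handler_t handler = *(encap_handler_t volatile *)&encap_rcv;

	if (handler != NULL)
		return handler(sk, skb);  /* call the same snapshot we tested */

	return -1;                        /* no hook: fall back to normal delivery */
}

static int demo_handler(void *sk, void *skb)
{
	(void)sk; (void)skb;
	return 0;
}

int main(void)
{
	encap_rcv = demo_handler;
	printf("encap result = %d\n", handle_packet(NULL, NULL));
	return 0;
}
```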