Diffstat (limited to 'net')
-rw-r--r-- | net/bluetooth/hci_debugfs.c | 13
-rw-r--r-- | net/ceph/ceph_common.c | 4
-rw-r--r-- | net/dns_resolver/dns_query.c | 22
-rw-r--r-- | net/sched/act_api.c | 72
-rw-r--r-- | net/sched/cls_api.c | 8
-rw-r--r-- | net/sched/cls_basic.c | 33
-rw-r--r-- | net/sched/cls_bpf.c | 30
-rw-r--r-- | net/sched/cls_flower.c | 34
-rw-r--r-- | net/sched/cls_u32.c | 47
-rw-r--r-- | net/sunrpc/sched.c | 16
-rw-r--r-- | net/sunrpc/svcsock.c | 14
-rw-r--r-- | net/sunrpc/xprt.c | 3
-rw-r--r-- | net/sunrpc/xprtrdma/rpc_rdma.c | 2
-rw-r--r-- | net/sunrpc/xprtrdma/svc_rdma_backchannel.c | 5
-rw-r--r-- | net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 9
-rw-r--r-- | net/sunrpc/xprtrdma/svc_rdma_rw.c | 12
-rw-r--r-- | net/sunrpc/xprtrdma/svc_rdma_sendto.c | 6
-rw-r--r-- | net/sunrpc/xprtrdma/svc_rdma_transport.c | 25
-rw-r--r-- | net/sunrpc/xprtrdma/verbs.c | 8
-rw-r--r-- | net/sunrpc/xprtsock.c | 27
20 files changed, 160 insertions, 230 deletions
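The bulk of the net/sched changes below convert the classifier and action code from the interim idr_alloc_ext()/idr_remove_ext() interface to idr_alloc_u32(), idr_remove(), idr_find() and idr_replace(), which work with a u32 ID directly instead of a separate unsigned long index. The following sketch is not part of the patch; the function and variable names are illustrative. It mirrors the handle-allocation pattern the converted cls_basic/cls_bpf/cls_flower code follows when creating a new filter: with no user-supplied handle the IDR picks one in [1, INT_MAX], and a user-supplied handle is reserved exactly by passing it as both the starting ID and the maximum.

#include <linux/idr.h>
#include <linux/gfp.h>

/*
 * Illustrative only -- this helper does not exist in the patch; it just
 * shows the idr_alloc_u32() usage pattern visible in the cls_* hunks.
 */
static int example_alloc_handle(struct idr *handle_idr, void *filter,
				u32 requested, u32 *out_handle)
{
	u32 handle = requested;
	int err;

	if (!requested) {
		/* No handle from the user: let the IDR pick one in [1, INT_MAX]. */
		handle = 1;
		err = idr_alloc_u32(handle_idr, filter, &handle,
				    INT_MAX, GFP_KERNEL);
	} else {
		/* Exact handle requested: starting ID and maximum are equal. */
		err = idr_alloc_u32(handle_idr, filter, &handle,
				    requested, GFP_KERNEL);
	}
	if (err)
		return err;	/* -ENOSPC if the ID is taken, -ENOMEM on allocation failure */

	*out_handle = handle;	/* idr_alloc_u32() wrote the allocated ID back here */
	return 0;
}

Because idr_alloc_u32() returns the allocated ID through the u32 pointer it is given, the converted code can drop the separate "unsigned long idr_index" temporaries that the _ext variants required.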
diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c index 57403bd567d0..418b76e557b0 100644 --- a/net/bluetooth/hci_debugfs.c +++ b/net/bluetooth/hci_debugfs.c @@ -90,19 +90,6 @@ static int __name ## _show(struct seq_file *f, void *ptr) \ \ DEFINE_SHOW_ATTRIBUTE(__name) -#define DEFINE_SHOW_ATTRIBUTE(__name) \ -static int __name ## _open(struct inode *inode, struct file *file) \ -{ \ - return single_open(file, __name ## _show, inode->i_private); \ -} \ - \ -static const struct file_operations __name ## _fops = { \ - .open = __name ## _open, \ - .read = seq_read, \ - .llseek = seq_lseek, \ - .release = single_release, \ -} \ - static int features_show(struct seq_file *f, void *ptr) { struct hci_dev *hdev = f->private; diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c index 5c036d2f401e..1e492ef2a33d 100644 --- a/net/ceph/ceph_common.c +++ b/net/ceph/ceph_common.c @@ -421,6 +421,10 @@ ceph_parse_options(char *options, const char *dev_name, opt->name = kstrndup(argstr[0].from, argstr[0].to-argstr[0].from, GFP_KERNEL); + if (!opt->name) { + err = -ENOMEM; + goto out; + } break; case Opt_secret: opt->key = kzalloc(sizeof(*opt->key), GFP_KERNEL); diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c index af781010753b..49da67034f29 100644 --- a/net/dns_resolver/dns_query.c +++ b/net/dns_resolver/dns_query.c @@ -52,11 +52,11 @@ * @name: Name to look up * @namelen: Length of name * @options: Request options (or NULL if no options) - * @_result: Where to place the returned data. + * @_result: Where to place the returned data (or NULL) * @_expiry: Where to store the result expiry time (or NULL) * - * The data will be returned in the pointer at *result, and the caller is - * responsible for freeing it. + * The data will be returned in the pointer at *result, if provided, and the + * caller is responsible for freeing it. * * The description should be of the form "[<query_type>:]<domain_name>", and * the options need to be appropriate for the query type requested. 
If no @@ -81,7 +81,7 @@ int dns_query(const char *type, const char *name, size_t namelen, kenter("%s,%*.*s,%zu,%s", type, (int)namelen, (int)namelen, name, namelen, options); - if (!name || namelen == 0 || !_result) + if (!name || namelen == 0) return -EINVAL; /* construct the query key description as "[<type>:]<name>" */ @@ -146,13 +146,15 @@ int dns_query(const char *type, const char *name, size_t namelen, upayload = user_key_payload_locked(rkey); len = upayload->datalen; - ret = -ENOMEM; - *_result = kmalloc(len + 1, GFP_KERNEL); - if (!*_result) - goto put; + if (_result) { + ret = -ENOMEM; + *_result = kmalloc(len + 1, GFP_KERNEL); + if (!*_result) + goto put; - memcpy(*_result, upayload->data, len); - (*_result)[len] = '\0'; + memcpy(*_result, upayload->data, len); + (*_result)[len] = '\0'; + } if (_expiry) *_expiry = rkey->expiry; diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 52622a3d2517..eba6682727dd 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -78,7 +78,7 @@ static void free_tcf(struct tc_action *p) static void tcf_idr_remove(struct tcf_idrinfo *idrinfo, struct tc_action *p) { spin_lock_bh(&idrinfo->lock); - idr_remove_ext(&idrinfo->action_idr, p->tcfa_index); + idr_remove(&idrinfo->action_idr, p->tcfa_index); spin_unlock_bh(&idrinfo->lock); gen_kill_estimator(&p->tcfa_rate_est); free_tcf(p); @@ -124,7 +124,7 @@ static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb, s_i = cb->args[0]; - idr_for_each_entry_ext(idr, p, id) { + idr_for_each_entry_ul(idr, p, id) { index++; if (index < s_i) continue; @@ -181,7 +181,7 @@ static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb, if (nla_put_string(skb, TCA_KIND, ops->kind)) goto nla_put_failure; - idr_for_each_entry_ext(idr, p, id) { + idr_for_each_entry_ul(idr, p, id) { ret = __tcf_idr_release(p, false, true); if (ret == ACT_P_DELETED) { module_put(ops->owner); @@ -222,7 +222,7 @@ static struct tc_action *tcf_idr_lookup(u32 index, struct tcf_idrinfo *idrinfo) struct tc_action *p = NULL; spin_lock_bh(&idrinfo->lock); - p = idr_find_ext(&idrinfo->action_idr, index); + p = idr_find(&idrinfo->action_idr, index); spin_unlock_bh(&idrinfo->lock); return p; @@ -274,7 +274,6 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, struct tcf_idrinfo *idrinfo = tn->idrinfo; struct idr *idr = &idrinfo->action_idr; int err = -ENOMEM; - unsigned long idr_index; if (unlikely(!p)) return -ENOMEM; @@ -284,45 +283,28 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, if (cpustats) { p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu); - if (!p->cpu_bstats) { -err1: - kfree(p); - return err; - } - p->cpu_qstats = alloc_percpu(struct gnet_stats_queue); - if (!p->cpu_qstats) { -err2: - free_percpu(p->cpu_bstats); + if (!p->cpu_bstats) goto err1; - } + p->cpu_qstats = alloc_percpu(struct gnet_stats_queue); + if (!p->cpu_qstats) + goto err2; } spin_lock_init(&p->tcfa_lock); + idr_preload(GFP_KERNEL); + spin_lock_bh(&idrinfo->lock); /* user doesn't specify an index */ if (!index) { - idr_preload(GFP_KERNEL); - spin_lock_bh(&idrinfo->lock); - err = idr_alloc_ext(idr, NULL, &idr_index, 1, 0, - GFP_ATOMIC); - spin_unlock_bh(&idrinfo->lock); - idr_preload_end(); - if (err) { -err3: - free_percpu(p->cpu_qstats); - goto err2; - } - p->tcfa_index = idr_index; + index = 1; + err = idr_alloc_u32(idr, NULL, &index, UINT_MAX, GFP_ATOMIC); } else { - idr_preload(GFP_KERNEL); - spin_lock_bh(&idrinfo->lock); - err = idr_alloc_ext(idr, 
NULL, NULL, index, index + 1, - GFP_ATOMIC); - spin_unlock_bh(&idrinfo->lock); - idr_preload_end(); - if (err) - goto err3; - p->tcfa_index = index; + err = idr_alloc_u32(idr, NULL, &index, index, GFP_ATOMIC); } + spin_unlock_bh(&idrinfo->lock); + idr_preload_end(); + if (err) + goto err3; + p->tcfa_index = index; p->tcfa_tm.install = jiffies; p->tcfa_tm.lastuse = jiffies; p->tcfa_tm.firstuse = 0; @@ -330,9 +312,8 @@ err3: err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats, &p->tcfa_rate_est, &p->tcfa_lock, NULL, est); - if (err) { - goto err3; - } + if (err) + goto err4; } p->idrinfo = idrinfo; @@ -340,6 +321,15 @@ err3: INIT_LIST_HEAD(&p->list); *a = p; return 0; +err4: + idr_remove(idr, index); +err3: + free_percpu(p->cpu_qstats); +err2: + free_percpu(p->cpu_bstats); +err1: + kfree(p); + return err; } EXPORT_SYMBOL(tcf_idr_create); @@ -348,7 +338,7 @@ void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a) struct tcf_idrinfo *idrinfo = tn->idrinfo; spin_lock_bh(&idrinfo->lock); - idr_replace_ext(&idrinfo->action_idr, a, a->tcfa_index); + idr_replace(&idrinfo->action_idr, a, a->tcfa_index); spin_unlock_bh(&idrinfo->lock); } EXPORT_SYMBOL(tcf_idr_insert); @@ -361,7 +351,7 @@ void tcf_idrinfo_destroy(const struct tc_action_ops *ops, int ret; unsigned long id = 1; - idr_for_each_entry_ext(idr, p, id) { + idr_for_each_entry_ul(idr, p, id) { ret = __tcf_idr_release(p, false, true); if (ret == ACT_P_DELETED) module_put(ops->owner); diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index bcb4ccb5f894..2bc1bc23d42e 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -381,8 +381,8 @@ static int tcf_block_insert(struct tcf_block *block, struct net *net, struct tcf_net *tn = net_generic(net, tcf_net_id); int err; - err = idr_alloc_ext(&tn->idr, block, NULL, block_index, - block_index + 1, GFP_KERNEL); + err = idr_alloc_u32(&tn->idr, block, &block_index, block_index, + GFP_KERNEL); if (err) return err; block->index = block_index; @@ -393,7 +393,7 @@ static void tcf_block_remove(struct tcf_block *block, struct net *net) { struct tcf_net *tn = net_generic(net, tcf_net_id); - idr_remove_ext(&tn->idr, block->index); + idr_remove(&tn->idr, block->index); } static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q, @@ -434,7 +434,7 @@ static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index) { struct tcf_net *tn = net_generic(net, tcf_net_id); - return idr_find_ext(&tn->idr, block_index); + return idr_find(&tn->idr, block_index); } static struct tcf_chain *tcf_block_chain_zero(struct tcf_block *block) diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c index d333f5c5101d..6b7ab3512f5b 100644 --- a/net/sched/cls_basic.c +++ b/net/sched/cls_basic.c @@ -120,7 +120,7 @@ static void basic_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack) list_for_each_entry_safe(f, n, &head->flist, link) { list_del_rcu(&f->link); tcf_unbind_filter(tp, &f->res); - idr_remove_ext(&head->handle_idr, f->handle); + idr_remove(&head->handle_idr, f->handle); if (tcf_exts_get_net(&f->exts)) call_rcu(&f->rcu, basic_delete_filter); else @@ -138,7 +138,7 @@ static int basic_delete(struct tcf_proto *tp, void *arg, bool *last, list_del_rcu(&f->link); tcf_unbind_filter(tp, &f->res); - idr_remove_ext(&head->handle_idr, f->handle); + idr_remove(&head->handle_idr, f->handle); tcf_exts_get_net(&f->exts); call_rcu(&f->rcu, basic_delete_filter); *last = list_empty(&head->flist); @@ -185,7 +185,6 @@ static int basic_change(struct net *net, struct sk_buff *in_skb, 
struct nlattr *tb[TCA_BASIC_MAX + 1]; struct basic_filter *fold = (struct basic_filter *) *arg; struct basic_filter *fnew; - unsigned long idr_index; if (tca[TCA_OPTIONS] == NULL) return -EINVAL; @@ -208,34 +207,30 @@ static int basic_change(struct net *net, struct sk_buff *in_skb, if (err < 0) goto errout; - if (handle) { - fnew->handle = handle; - if (!fold) { - err = idr_alloc_ext(&head->handle_idr, fnew, &idr_index, - handle, handle + 1, GFP_KERNEL); - if (err) - goto errout; - } - } else { - err = idr_alloc_ext(&head->handle_idr, fnew, &idr_index, - 1, 0x7FFFFFFF, GFP_KERNEL); - if (err) - goto errout; - fnew->handle = idr_index; + if (!handle) { + handle = 1; + err = idr_alloc_u32(&head->handle_idr, fnew, &handle, + INT_MAX, GFP_KERNEL); + } else if (!fold) { + err = idr_alloc_u32(&head->handle_idr, fnew, &handle, + handle, GFP_KERNEL); } + if (err) + goto errout; + fnew->handle = handle; err = basic_set_parms(net, tp, fnew, base, tb, tca[TCA_RATE], ovr, extack); if (err < 0) { if (!fold) - idr_remove_ext(&head->handle_idr, fnew->handle); + idr_remove(&head->handle_idr, fnew->handle); goto errout; } *arg = fnew; if (fold) { - idr_replace_ext(&head->handle_idr, fnew, fnew->handle); + idr_replace(&head->handle_idr, fnew, fnew->handle); list_replace_rcu(&fold->link, &fnew->link); tcf_unbind_filter(tp, &fold->res); tcf_exts_get_net(&fold->exts); diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index 8e5326bc6440..b07c1fa8bc0d 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ -295,7 +295,7 @@ static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog, { struct cls_bpf_head *head = rtnl_dereference(tp->root); - idr_remove_ext(&head->handle_idr, prog->handle); + idr_remove(&head->handle_idr, prog->handle); cls_bpf_stop_offload(tp, prog, extack); list_del_rcu(&prog->link); tcf_unbind_filter(tp, &prog->res); @@ -471,7 +471,6 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb, struct cls_bpf_prog *oldprog = *arg; struct nlattr *tb[TCA_BPF_MAX + 1]; struct cls_bpf_prog *prog; - unsigned long idr_index; int ret; if (tca[TCA_OPTIONS] == NULL) @@ -498,21 +497,18 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb, } if (handle == 0) { - ret = idr_alloc_ext(&head->handle_idr, prog, &idr_index, - 1, 0x7FFFFFFF, GFP_KERNEL); - if (ret) - goto errout; - prog->handle = idr_index; - } else { - if (!oldprog) { - ret = idr_alloc_ext(&head->handle_idr, prog, &idr_index, - handle, handle + 1, GFP_KERNEL); - if (ret) - goto errout; - } - prog->handle = handle; + handle = 1; + ret = idr_alloc_u32(&head->handle_idr, prog, &handle, + INT_MAX, GFP_KERNEL); + } else if (!oldprog) { + ret = idr_alloc_u32(&head->handle_idr, prog, &handle, + handle, GFP_KERNEL); } + if (ret) + goto errout; + prog->handle = handle; + ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], ovr, extack); if (ret < 0) @@ -526,7 +522,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb, prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW; if (oldprog) { - idr_replace_ext(&head->handle_idr, prog, handle); + idr_replace(&head->handle_idr, prog, handle); list_replace_rcu(&oldprog->link, &prog->link); tcf_unbind_filter(tp, &oldprog->res); tcf_exts_get_net(&oldprog->exts); @@ -542,7 +538,7 @@ errout_parms: cls_bpf_free_parms(prog); errout_idr: if (!oldprog) - idr_remove_ext(&head->handle_idr, prog->handle); + idr_remove(&head->handle_idr, prog->handle); errout: tcf_exts_destroy(&prog->exts); kfree(prog); diff --git a/net/sched/cls_flower.c 
b/net/sched/cls_flower.c index dc9acaafc0a8..7d0ce2c40f93 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -288,7 +288,7 @@ static void __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f, { struct cls_fl_head *head = rtnl_dereference(tp->root); - idr_remove_ext(&head->handle_idr, f->handle); + idr_remove(&head->handle_idr, f->handle); list_del_rcu(&f->list); if (!tc_skip_hw(f->flags)) fl_hw_destroy_filter(tp, f, extack); @@ -334,7 +334,7 @@ static void *fl_get(struct tcf_proto *tp, u32 handle) { struct cls_fl_head *head = rtnl_dereference(tp->root); - return idr_find_ext(&head->handle_idr, handle); + return idr_find(&head->handle_idr, handle); } static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = { @@ -865,7 +865,6 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, struct cls_fl_filter *fnew; struct nlattr **tb; struct fl_flow_mask mask = {}; - unsigned long idr_index; int err; if (!tca[TCA_OPTIONS]) @@ -896,21 +895,17 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, goto errout; if (!handle) { - err = idr_alloc_ext(&head->handle_idr, fnew, &idr_index, - 1, 0x80000000, GFP_KERNEL); - if (err) - goto errout; - fnew->handle = idr_index; - } - - /* user specifies a handle and it doesn't exist */ - if (handle && !fold) { - err = idr_alloc_ext(&head->handle_idr, fnew, &idr_index, - handle, handle + 1, GFP_KERNEL); - if (err) - goto errout; - fnew->handle = idr_index; + handle = 1; + err = idr_alloc_u32(&head->handle_idr, fnew, &handle, + INT_MAX, GFP_KERNEL); + } else if (!fold) { + /* user specifies a handle and it doesn't exist */ + err = idr_alloc_u32(&head->handle_idr, fnew, &handle, + handle, GFP_KERNEL); } + if (err) + goto errout; + fnew->handle = handle; if (tb[TCA_FLOWER_FLAGS]) { fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]); @@ -966,8 +961,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, *arg = fnew; if (fold) { - fnew->handle = handle; - idr_replace_ext(&head->handle_idr, fnew, fnew->handle); + idr_replace(&head->handle_idr, fnew, fnew->handle); list_replace_rcu(&fold->list, &fnew->list); tcf_unbind_filter(tp, &fold->res); tcf_exts_get_net(&fold->exts); @@ -981,7 +975,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, errout_idr: if (fnew->handle) - idr_remove_ext(&head->handle_idr, fnew->handle); + idr_remove(&head->handle_idr, fnew->handle); errout: tcf_exts_destroy(&fnew->exts); kfree(fnew); diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index c75e68e839c7..6c7601a530e3 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -316,19 +316,13 @@ static void *u32_get(struct tcf_proto *tp, u32 handle) return u32_lookup_key(ht, handle); } +/* Protected by rtnl lock */ static u32 gen_new_htid(struct tc_u_common *tp_c, struct tc_u_hnode *ptr) { - unsigned long idr_index; - int err; - - /* This is only used inside rtnl lock it is safe to increment - * without read _copy_ update semantics - */ - err = idr_alloc_ext(&tp_c->handle_idr, ptr, &idr_index, - 1, 0x7FF, GFP_KERNEL); - if (err) + int id = idr_alloc_cyclic(&tp_c->handle_idr, ptr, 1, 0x7FF, GFP_KERNEL); + if (id < 0) return 0; - return (u32)(idr_index | 0x800) << 20; + return (id | 0x800U) << 20; } static struct hlist_head *tc_u_common_hash; @@ -598,7 +592,7 @@ static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht, rtnl_dereference(n->next)); tcf_unbind_filter(tp, &n->res); u32_remove_hw_knode(tp, n, extack); - idr_remove_ext(&ht->handle_idr, n->handle); + idr_remove(&ht->handle_idr, n->handle); if 
(tcf_exts_get_net(&n->exts)) call_rcu(&n->rcu, u32_delete_key_freepf_rcu); else @@ -625,7 +619,7 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht, if (phn == ht) { u32_clear_hw_hnode(tp, ht, extack); idr_destroy(&ht->handle_idr); - idr_remove_ext(&tp_c->handle_idr, ht->handle); + idr_remove(&tp_c->handle_idr, ht->handle); RCU_INIT_POINTER(*hn, ht->next); kfree_rcu(ht, rcu); return 0; @@ -747,19 +741,17 @@ ret: static u32 gen_new_kid(struct tc_u_hnode *ht, u32 htid) { - unsigned long idr_index; - u32 start = htid | 0x800; + u32 index = htid | 0x800; u32 max = htid | 0xFFF; - u32 min = htid; - if (idr_alloc_ext(&ht->handle_idr, NULL, &idr_index, - start, max + 1, GFP_KERNEL)) { - if (idr_alloc_ext(&ht->handle_idr, NULL, &idr_index, - min + 1, max + 1, GFP_KERNEL)) - return max; + if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max, GFP_KERNEL)) { + index = htid + 1; + if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max, + GFP_KERNEL)) + index = max; } - return (u32)idr_index; + return index; } static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = { @@ -849,7 +841,7 @@ static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c, if (pins->handle == n->handle) break; - idr_replace_ext(&ht->handle_idr, n, n->handle); + idr_replace(&ht->handle_idr, n, n->handle); RCU_INIT_POINTER(n->next, pins->next); rcu_assign_pointer(*ins, n); } @@ -1011,8 +1003,8 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, return -ENOMEM; } } else { - err = idr_alloc_ext(&tp_c->handle_idr, ht, NULL, - handle, handle + 1, GFP_KERNEL); + err = idr_alloc_u32(&tp_c->handle_idr, ht, &handle, + handle, GFP_KERNEL); if (err) { kfree(ht); return err; @@ -1028,7 +1020,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, err = u32_replace_hw_hnode(tp, ht, flags, extack); if (err) { - idr_remove_ext(&tp_c->handle_idr, handle); + idr_remove(&tp_c->handle_idr, handle); kfree(ht); return err; } @@ -1068,8 +1060,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, return -EINVAL; } handle = htid | TC_U32_NODE(handle); - err = idr_alloc_ext(&ht->handle_idr, NULL, NULL, - handle, handle + 1, + err = idr_alloc_u32(&ht->handle_idr, NULL, &handle, handle, GFP_KERNEL); if (err) return err; @@ -1164,7 +1155,7 @@ errfree: #endif kfree(n); erridr: - idr_remove_ext(&ht->handle_idr, handle); + idr_remove(&ht->handle_idr, handle); return err; } diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 896691afbb1a..d9db2eab3a8d 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -461,6 +461,18 @@ static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct r /* * Wake up a task on a specific queue */ +void rpc_wake_up_queued_task_on_wq(struct workqueue_struct *wq, + struct rpc_wait_queue *queue, + struct rpc_task *task) +{ + spin_lock_bh(&queue->lock); + rpc_wake_up_task_on_wq_queue_locked(wq, queue, task); + spin_unlock_bh(&queue->lock); +} + +/* + * Wake up a task on a specific queue + */ void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task) { spin_lock_bh(&queue->lock); @@ -1092,12 +1104,12 @@ static int rpciod_start(void) * Create the rpciod thread and wait for it to start. 
*/ dprintk("RPC: creating workqueue rpciod\n"); - wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM, 0); + wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0); if (!wq) goto out_failed; rpciod_workqueue = wq; /* Note: highpri because network receive is latency sensitive */ - wq = alloc_workqueue("xprtiod", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); + wq = alloc_workqueue("xprtiod", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_HIGHPRI, 0); if (!wq) goto free_rpciod; xprtiod_workqueue = wq; diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 5570719e4787..943f2a745cd5 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -384,25 +384,11 @@ static int svc_partial_recvfrom(struct svc_rqst *rqstp, static void svc_sock_setbufsize(struct socket *sock, unsigned int snd, unsigned int rcv) { -#if 0 - mm_segment_t oldfs; - oldfs = get_fs(); set_fs(KERNEL_DS); - sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF, - (char*)&snd, sizeof(snd)); - sock_setsockopt(sock, SOL_SOCKET, SO_RCVBUF, - (char*)&rcv, sizeof(rcv)); -#else - /* sock_setsockopt limits use to sysctl_?mem_max, - * which isn't acceptable. Until that is made conditional - * on not having CAP_SYS_RESOURCE or similar, we go direct... - * DaveM said I could! - */ lock_sock(sock->sk); sock->sk->sk_sndbuf = snd * 2; sock->sk->sk_rcvbuf = rcv * 2; sock->sk->sk_write_space(sock->sk); release_sock(sock->sk); -#endif } static int svc_sock_secure_port(struct svc_rqst *rqstp) diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 2436fd1125fc..8f0ad4f268da 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -517,7 +517,8 @@ void xprt_write_space(struct rpc_xprt *xprt) if (xprt->snd_task) { dprintk("RPC: write space: waking waiting task on " "xprt %p\n", xprt); - rpc_wake_up_queued_task(&xprt->pending, xprt->snd_task); + rpc_wake_up_queued_task_on_wq(xprtiod_workqueue, + &xprt->pending, xprt->snd_task); } spin_unlock_bh(&xprt->transport_lock); } diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index 162e5dd82466..f0855a959a27 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c @@ -143,7 +143,7 @@ static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt, if (xdr->page_len) { remaining = xdr->page_len; offset = offset_in_page(xdr->page_base); - count = 0; + count = RPCRDMA_MIN_SEND_SGES; while (remaining) { remaining -= min_t(unsigned int, PAGE_SIZE - offset, remaining); diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c index af7893501e40..a73632ca9048 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c +++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c @@ -95,7 +95,6 @@ out_shortreply: out_notfound: dprintk("svcrdma: unrecognized bc reply: xprt=%p, xid=%08x\n", xprt, be32_to_cpu(xid)); - goto out_unlock; } @@ -129,10 +128,6 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma, if (ret < 0) goto out_err; - ret = svc_rdma_repost_recv(rdma, GFP_NOIO); - if (ret) - goto out_err; - /* Bump page refcnt so Send completion doesn't release * the rq_buffer before all retransmits are complete. 
*/ diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index ad4bd62eebf1..19e9c6b33042 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -400,10 +400,6 @@ static void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct page *page; int ret; - ret = svc_rdma_repost_recv(xprt, GFP_KERNEL); - if (ret) - return; - page = alloc_page(GFP_KERNEL); if (!page) return; @@ -554,8 +550,6 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp) ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt, p, &rqstp->rq_arg); svc_rdma_put_context(ctxt, 0); - if (ret) - goto repost; return ret; } @@ -590,6 +584,5 @@ out_postfail: out_drop: svc_rdma_put_context(ctxt, 1); -repost: - return svc_rdma_repost_recv(rdma_xprt, GFP_KERNEL); + return 0; } diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c index 9bd04549a1ad..12b9a7e0b6d2 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_rw.c +++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c @@ -727,12 +727,16 @@ static int svc_rdma_build_normal_read_chunk(struct svc_rqst *rqstp, head->arg.head[0].iov_len - info->ri_position; head->arg.head[0].iov_len = info->ri_position; - /* Read chunk may need XDR roundup (see RFC 5666, s. 3.7). + /* Read chunk may need XDR roundup (see RFC 8166, s. 3.4.5.2). * - * NFSv2/3 write decoders need the length of the tail to - * contain the size of the roundup padding. + * If the client already rounded up the chunk length, the + * length does not change. Otherwise, the length of the page + * list is increased to include XDR round-up. + * + * Currently these chunks always start at page offset 0, + * thus the rounded-up length never crosses a page boundary. */ - head->arg.tail[0].iov_len += 4 - (info->ri_chunklen & 3); + info->ri_chunklen = XDR_QUADLEN(info->ri_chunklen) << 2; head->arg.page_len = info->ri_chunklen; head->arg.len += info->ri_chunklen; diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index 7c3a211e0e9a..649441d5087d 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -674,9 +674,6 @@ int svc_rdma_sendto(struct svc_rqst *rqstp) svc_rdma_xdr_encode_reply_chunk(rdma_resp, rp_ch, ret); } - ret = svc_rdma_post_recv(rdma, GFP_KERNEL); - if (ret) - goto err1; ret = svc_rdma_send_reply_msg(rdma, rdma_argp, rdma_resp, rqstp, wr_lst, rp_ch); if (ret < 0) @@ -687,9 +684,6 @@ int svc_rdma_sendto(struct svc_rqst *rqstp) if (ret != -E2BIG && ret != -EINVAL) goto err1; - ret = svc_rdma_post_recv(rdma, GFP_KERNEL); - if (ret) - goto err1; ret = svc_rdma_send_error_msg(rdma, rdma_resp, rqstp); if (ret < 0) goto err0; diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index 46ec069150d5..9ad12a215b51 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c @@ -58,6 +58,7 @@ #define RPCDBG_FACILITY RPCDBG_SVCXPRT +static int svc_rdma_post_recv(struct svcxprt_rdma *xprt); static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *, int); static struct svc_xprt *svc_rdma_create(struct svc_serv *serv, struct net *net, @@ -320,6 +321,8 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc) list_add_tail(&ctxt->list, &xprt->sc_rq_dto_q); spin_unlock(&xprt->sc_rq_dto_lock); + svc_rdma_post_recv(xprt); + set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags); if (test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags)) goto out; @@ -404,7 +407,8 @@ static struct svcxprt_rdma 
*rdma_create_xprt(struct svc_serv *serv, return cma_xprt; } -int svc_rdma_post_recv(struct svcxprt_rdma *xprt, gfp_t flags) +static int +svc_rdma_post_recv(struct svcxprt_rdma *xprt) { struct ib_recv_wr recv_wr, *bad_recv_wr; struct svc_rdma_op_ctxt *ctxt; @@ -423,7 +427,7 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt, gfp_t flags) pr_err("svcrdma: Too many sges (%d)\n", sge_no); goto err_put_ctxt; } - page = alloc_page(flags); + page = alloc_page(GFP_KERNEL); if (!page) goto err_put_ctxt; ctxt->pages[sge_no] = page; @@ -459,21 +463,6 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt, gfp_t flags) return -ENOMEM; } -int svc_rdma_repost_recv(struct svcxprt_rdma *xprt, gfp_t flags) -{ - int ret = 0; - - ret = svc_rdma_post_recv(xprt, flags); - if (ret) { - pr_err("svcrdma: could not post a receive buffer, err=%d.\n", - ret); - pr_err("svcrdma: closing transport %p.\n", xprt); - set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags); - ret = -ENOTCONN; - } - return ret; -} - static void svc_rdma_parse_connect_private(struct svcxprt_rdma *newxprt, struct rdma_conn_param *param) @@ -833,7 +822,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) /* Post receive buffers */ for (i = 0; i < newxprt->sc_max_requests; i++) { - ret = svc_rdma_post_recv(newxprt, GFP_KERNEL); + ret = svc_rdma_post_recv(newxprt); if (ret) { dprintk("svcrdma: failure posting receive buffers\n"); goto errout; diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index f4eb63e8e689..e6f84a6434a0 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -505,7 +505,7 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia, pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge); return -ENOMEM; } - ia->ri_max_send_sges = max_sge - RPCRDMA_MIN_SEND_SGES; + ia->ri_max_send_sges = max_sge; if (ia->ri_device->attrs.max_qp_wr <= RPCRDMA_BACKWARD_WRS) { dprintk("RPC: %s: insufficient wqe's available\n", @@ -1502,6 +1502,9 @@ __rpcrdma_dma_map_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb) static void rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb) { + if (!rb) + return; + if (!rpcrdma_regbuf_is_mapped(rb)) return; @@ -1517,9 +1520,6 @@ rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb) void rpcrdma_free_regbuf(struct rpcrdma_regbuf *rb) { - if (!rb) - return; - rpcrdma_dma_unmap_regbuf(rb); kfree(rb); } diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 18803021f242..a6b8c1f8f92a 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -807,13 +807,6 @@ static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt) smp_mb__after_atomic(); } -static void xs_sock_mark_closed(struct rpc_xprt *xprt) -{ - xs_sock_reset_connection_flags(xprt); - /* Mark transport as closed and wake up all pending tasks */ - xprt_disconnect_done(xprt); -} - /** * xs_error_report - callback to handle TCP socket state errors * @sk: socket @@ -833,9 +826,6 @@ static void xs_error_report(struct sock *sk) err = -sk->sk_err; if (err == 0) goto out; - /* Is this a reset event? */ - if (sk->sk_state == TCP_CLOSE) - xs_sock_mark_closed(xprt); dprintk("RPC: xs_error_report client %p, error=%d...\n", xprt, -err); trace_rpc_socket_error(xprt, sk->sk_socket, err); @@ -1078,18 +1068,18 @@ static void xs_udp_data_read_skb(struct rpc_xprt *xprt, /* Suck it into the iovec, verify checksum if not done by hw. 
*/ if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) { - __UDPX_INC_STATS(sk, UDP_MIB_INERRORS); spin_lock(&xprt->recv_lock); + __UDPX_INC_STATS(sk, UDP_MIB_INERRORS); goto out_unpin; } - __UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS); spin_lock_bh(&xprt->transport_lock); xprt_adjust_cwnd(xprt, task, copied); spin_unlock_bh(&xprt->transport_lock); spin_lock(&xprt->recv_lock); xprt_complete_rqst(task, copied); + __UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS); out_unpin: xprt_unpin_rqst(rovr); out_unlock: @@ -1655,9 +1645,11 @@ static void xs_tcp_state_change(struct sock *sk) if (test_and_clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state)) xprt_clear_connecting(xprt); + clear_bit(XPRT_CLOSING, &xprt->state); if (sk->sk_err) xprt_wake_pending_tasks(xprt, -sk->sk_err); - xs_sock_mark_closed(xprt); + /* Trigger the socket release */ + xs_tcp_force_close(xprt); } out: read_unlock_bh(&sk->sk_callback_lock); @@ -2265,14 +2257,19 @@ static void xs_tcp_shutdown(struct rpc_xprt *xprt) { struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); struct socket *sock = transport->sock; + int skst = transport->inet ? transport->inet->sk_state : TCP_CLOSE; if (sock == NULL) return; - if (xprt_connected(xprt)) { + switch (skst) { + default: kernel_sock_shutdown(sock, SHUT_RDWR); trace_rpc_socket_shutdown(xprt, sock); - } else + break; + case TCP_CLOSE: + case TCP_TIME_WAIT: xs_reset_transport(transport); + } } static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt, |