commit     8cbd92339db08b19b93d1637e5799ff2a8dddfd2
tree       7e62d961f32e8a2a96271029b376f1e8bbd70a7c
parent     143c7bc6496c886ce5db2a2f9cec580494690170
parent     66fb1d5df6ace316a4a6e2c31e13fc123ea2b644
author     Linus Torvalds <torvalds@linux-foundation.org>  2023-02-24 15:11:03 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-02-24 15:11:03 -0800
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe:
"Quite a small cycle this time, even with the rc8. I suppose everyone
went to sleep over xmas.
- Minor driver updates for hfi1, cxgb4, erdma, hns, irdma, mlx5, siw,
mana
- inline CQE support for hns
- Have mlx5 display device error codes
- Pinned DMABUF support for irdma
- Continued rxe cleanups, particularly converting the MRs to use
xarray
- Improvements to what can be cached in the mlx5 mkey cache"
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (61 commits)
IB/mlx5: Extend debug control for CC parameters
IB/hfi1: Fix sdma.h tx->num_descs off-by-one errors
IB/hfi1: Fix math bugs in hfi1_can_pin_pages()
RDMA/irdma: Add support for dmabuf pin memory regions
RDMA/mlx5: Use query_special_contexts for mkeys
net/mlx5e: Use query_special_contexts for mkeys
net/mlx5: Change define name for 0x100 lkey value
net/mlx5: Expose bits for querying special mkeys
RDMA/rxe: Fix missing memory barriers in rxe_queue.h
RDMA/mana_ib: Fix a bug when the PF indicates more entries for registering memory on first packet
RDMA/rxe: Remove rxe_alloc()
RDMA/cma: Distinguish between sockaddr_in and sockaddr_in6 by size
  RDMA/rxe: Handle zero length rdma
iw_cxgb4: Fix potential NULL dereference in c4iw_fill_res_cm_id_entry()
RDMA/mlx5: Use rdma_umem_for_each_dma_block()
RDMA/umem: Remove unused 'work' member from struct ib_umem
RDMA/irdma: Cap MSIX used to online CPUs + 1
  RDMA/mlx5: Check reg_create() for errors
RDMA/restrack: Correct spelling
RDMA/cxgb4: Fix potential null-ptr-deref in pass_establish()
...
Diffstat (limited to 'drivers/infiniband/core')
-rw-r--r--  drivers/infiniband/core/cma.c       | 300
-rw-r--r--  drivers/infiniband/core/sa_query.c  | 171
2 files changed, 214 insertions(+), 257 deletions(-)
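A note before the raw diff: the first cma.c hunk below guards each sockaddr cast with __builtin_object_size() so the comparison never reads past an object the compiler can prove is smaller than sockaddr_in or sockaddr_in6 (for pointers of unknown size the builtin returns (size_t)-1, so the check still passes). What follows is a minimal standalone sketch of that pattern, not kernel code: addr_cmp() and the main() harness are hypothetical, and plain memcmp() stands in for the kernel's ipv6_addr_cmp().

	/*
	 * Userspace sketch of the __builtin_object_size() guard used in the
	 * cma.c hunk below. The builtin is a GCC/Clang extension: it yields
	 * the byte size of the pointed-to object when known at compile time,
	 * or (size_t)-1 when unknown, so the >= check only rejects casts the
	 * compiler can prove are out of bounds.
	 */
	#include <stdio.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <arpa/inet.h>

	static int addr_cmp(struct sockaddr *sa, struct sockaddr *sb)
	{
		if (sa->sa_family != sb->sa_family)
			return sa->sa_family - sb->sa_family;

		if (sa->sa_family == AF_INET &&
		    __builtin_object_size(sa, 0) >= sizeof(struct sockaddr_in))
			return memcmp(&((struct sockaddr_in *)sa)->sin_addr,
				      &((struct sockaddr_in *)sb)->sin_addr,
				      sizeof(struct in_addr));

		if (sa->sa_family == AF_INET6 &&
		    __builtin_object_size(sa, 0) >= sizeof(struct sockaddr_in6))
			return memcmp(&((struct sockaddr_in6 *)sa)->sin6_addr,
				      &((struct sockaddr_in6 *)sb)->sin6_addr,
				      sizeof(struct in6_addr));

		return -1;	/* family known, object provably too small */
	}

	int main(void)
	{
		struct sockaddr_in a = { .sin_family = AF_INET },
				   b = { .sin_family = AF_INET };

		a.sin_addr.s_addr = htonl(0x7f000001);	/* 127.0.0.1 */
		b.sin_addr.s_addr = htonl(0x7f000002);	/* 127.0.0.2 */
		printf("cmp = %d\n", addr_cmp((struct sockaddr *)&a,
					      (struct sockaddr *)&b));
		return 0;
	}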
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 68721ff10255..308155937713 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -479,13 +479,20 @@ static int compare_netdev_and_ip(int ifindex_a, struct sockaddr *sa,
 	if (sa->sa_family != sb->sa_family)
 		return sa->sa_family - sb->sa_family;
 
-	if (sa->sa_family == AF_INET)
-		return memcmp((char *)&((struct sockaddr_in *)sa)->sin_addr,
-			      (char *)&((struct sockaddr_in *)sb)->sin_addr,
+	if (sa->sa_family == AF_INET &&
+	    __builtin_object_size(sa, 0) >= sizeof(struct sockaddr_in)) {
+		return memcmp(&((struct sockaddr_in *)sa)->sin_addr,
+			      &((struct sockaddr_in *)sb)->sin_addr,
 			      sizeof(((struct sockaddr_in *)sa)->sin_addr));
+	}
+
+	if (sa->sa_family == AF_INET6 &&
+	    __builtin_object_size(sa, 0) >= sizeof(struct sockaddr_in6)) {
+		return ipv6_addr_cmp(&((struct sockaddr_in6 *)sa)->sin6_addr,
+				     &((struct sockaddr_in6 *)sb)->sin6_addr);
+	}
 
-	return ipv6_addr_cmp(&((struct sockaddr_in6 *)sa)->sin6_addr,
-			     &((struct sockaddr_in6 *)sb)->sin6_addr);
+	return -1;
 }
 
 static int cma_add_id_to_tree(struct rdma_id_private *node_id_priv)
@@ -2819,8 +2826,8 @@ int rdma_set_min_rnr_timer(struct rdma_cm_id *id, u8 min_rnr_timer)
 }
 EXPORT_SYMBOL(rdma_set_min_rnr_timer);
 
-static void route_set_path_rec_inbound(struct cma_work *work,
-				       struct sa_path_rec *path_rec)
+static int route_set_path_rec_inbound(struct cma_work *work,
+				      struct sa_path_rec *path_rec)
 {
 	struct rdma_route *route = &work->id->id.route;
 
@@ -2828,14 +2835,15 @@ static void route_set_path_rec_inbound(struct cma_work *work,
 		route->path_rec_inbound =
 			kzalloc(sizeof(*route->path_rec_inbound), GFP_KERNEL);
 		if (!route->path_rec_inbound)
-			return;
+			return -ENOMEM;
 	}
 
 	*route->path_rec_inbound = *path_rec;
+	return 0;
 }
 
-static void route_set_path_rec_outbound(struct cma_work *work,
-					struct sa_path_rec *path_rec)
+static int route_set_path_rec_outbound(struct cma_work *work,
+				       struct sa_path_rec *path_rec)
 {
 	struct rdma_route *route = &work->id->id.route;
 
@@ -2843,14 +2851,15 @@ static void route_set_path_rec_outbound(struct cma_work *work,
 		route->path_rec_outbound =
 			kzalloc(sizeof(*route->path_rec_outbound), GFP_KERNEL);
 		if (!route->path_rec_outbound)
-			return;
+			return -ENOMEM;
 	}
 
 	*route->path_rec_outbound = *path_rec;
+	return 0;
 }
 
 static void cma_query_handler(int status, struct sa_path_rec *path_rec,
-			      int num_prs, void *context)
+			      unsigned int num_prs, void *context)
 {
 	struct cma_work *work = context;
 	struct rdma_route *route;
@@ -2865,13 +2874,15 @@ static void cma_query_handler(int status, struct sa_path_rec *path_rec,
 		if (!path_rec[i].flags || (path_rec[i].flags & IB_PATH_GMP))
 			*route->path_rec = path_rec[i];
 		else if (path_rec[i].flags & IB_PATH_INBOUND)
-			route_set_path_rec_inbound(work, &path_rec[i]);
+			status = route_set_path_rec_inbound(work, &path_rec[i]);
 		else if (path_rec[i].flags & IB_PATH_OUTBOUND)
-			route_set_path_rec_outbound(work, &path_rec[i]);
-	}
-	if (!route->path_rec) {
-		status = -EINVAL;
-		goto fail;
+			status = route_set_path_rec_outbound(work,
+							     &path_rec[i]);
+		else
+			status = -EINVAL;
+
+		if (status)
+			goto fail;
 	}
 
 	route->num_pri_alt_paths = 1;
@@ -3541,121 +3552,6 @@ err:
 	return ret;
 }
 
-static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
-			 const struct sockaddr *dst_addr)
-{
-	struct sockaddr_storage zero_sock = {};
-
-	if (src_addr && src_addr->sa_family)
-		return rdma_bind_addr(id, src_addr);
-
-	/*
-	 * When the src_addr is not specified, automatically supply an any addr
-	 */
-	zero_sock.ss_family = dst_addr->sa_family;
-	if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) {
-		struct sockaddr_in6 *src_addr6 =
-			(struct sockaddr_in6 *)&zero_sock;
-		struct sockaddr_in6 *dst_addr6 =
-			(struct sockaddr_in6 *)dst_addr;
-
-		src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
-		if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
-			id->route.addr.dev_addr.bound_dev_if =
-				dst_addr6->sin6_scope_id;
-	} else if (dst_addr->sa_family == AF_IB) {
-		((struct sockaddr_ib *)&zero_sock)->sib_pkey =
-			((struct sockaddr_ib *)dst_addr)->sib_pkey;
-	}
-	return rdma_bind_addr(id, (struct sockaddr *)&zero_sock);
-}
-
-/*
- * If required, resolve the source address for bind and leave the id_priv in
- * state RDMA_CM_ADDR_BOUND. This oddly uses the state to determine the prior
- * calls made by ULP, a previously bound ID will not be re-bound and src_addr is
- * ignored.
- */
-static int resolve_prepare_src(struct rdma_id_private *id_priv,
-			       struct sockaddr *src_addr,
-			       const struct sockaddr *dst_addr)
-{
-	int ret;
-
-	memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
-	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
-		/* For a well behaved ULP state will be RDMA_CM_IDLE */
-		ret = cma_bind_addr(&id_priv->id, src_addr, dst_addr);
-		if (ret)
-			goto err_dst;
-		if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
-					   RDMA_CM_ADDR_QUERY))) {
-			ret = -EINVAL;
-			goto err_dst;
-		}
-	}
-
-	if (cma_family(id_priv) != dst_addr->sa_family) {
-		ret = -EINVAL;
-		goto err_state;
-	}
-	return 0;
-
-err_state:
-	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
-err_dst:
-	memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
-	return ret;
-}
-
-int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
-		      const struct sockaddr *dst_addr, unsigned long timeout_ms)
-{
-	struct rdma_id_private *id_priv =
-		container_of(id, struct rdma_id_private, id);
-	int ret;
-
-	ret = resolve_prepare_src(id_priv, src_addr, dst_addr);
-	if (ret)
-		return ret;
-
-	if (cma_any_addr(dst_addr)) {
-		ret = cma_resolve_loopback(id_priv);
-	} else {
-		if (dst_addr->sa_family == AF_IB) {
-			ret = cma_resolve_ib_addr(id_priv);
-		} else {
-			/*
-			 * The FSM can return back to RDMA_CM_ADDR_BOUND after
-			 * rdma_resolve_ip() is called, eg through the error
-			 * path in addr_handler(). If this happens the existing
-			 * request must be canceled before issuing a new one.
-			 * Since canceling a request is a bit slow and this
-			 * oddball path is rare, keep track once a request has
-			 * been issued. The track turns out to be a permanent
-			 * state since this is the only cancel as it is
-			 * immediately before rdma_resolve_ip().
-			 */
-			if (id_priv->used_resolve_ip)
-				rdma_addr_cancel(&id->route.addr.dev_addr);
-			else
-				id_priv->used_resolve_ip = 1;
-			ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr,
-					      &id->route.addr.dev_addr,
-					      timeout_ms, addr_handler,
-					      false, id_priv);
-		}
-	}
-	if (ret)
-		goto err;
-
-	return 0;
-err:
-	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
-	return ret;
-}
-EXPORT_SYMBOL(rdma_resolve_addr);
-
 int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
 {
 	struct rdma_id_private *id_priv;
@@ -4058,27 +3954,26 @@ err:
 }
 EXPORT_SYMBOL(rdma_listen);
 
-int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
+static int rdma_bind_addr_dst(struct rdma_id_private *id_priv,
+			      struct sockaddr *addr, const struct sockaddr *daddr)
 {
-	struct rdma_id_private *id_priv;
+	struct sockaddr *id_daddr;
 	int ret;
-	struct sockaddr *daddr;
 
 	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 &&
 	    addr->sa_family != AF_IB)
 		return -EAFNOSUPPORT;
 
-	id_priv = container_of(id, struct rdma_id_private, id);
 	if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
 		return -EINVAL;
 
-	ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
+	ret = cma_check_linklocal(&id_priv->id.route.addr.dev_addr, addr);
 	if (ret)
 		goto err1;
 
 	memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
 	if (!cma_any_addr(addr)) {
-		ret = cma_translate_addr(addr, &id->route.addr.dev_addr);
+		ret = cma_translate_addr(addr, &id_priv->id.route.addr.dev_addr);
 		if (ret)
 			goto err1;
 
@@ -4098,8 +3993,10 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
 		}
 #endif
 	}
-	daddr = cma_dst_addr(id_priv);
-	daddr->sa_family = addr->sa_family;
+	id_daddr = cma_dst_addr(id_priv);
+	if (daddr != id_daddr)
+		memcpy(id_daddr, daddr, rdma_addr_size(addr));
+	id_daddr->sa_family = addr->sa_family;
 
 	ret = cma_get_port(id_priv);
 	if (ret)
@@ -4115,6 +4012,127 @@ err1:
 	cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
 	return ret;
 }
+
+static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
+			 const struct sockaddr *dst_addr)
+{
+	struct rdma_id_private *id_priv =
+		container_of(id, struct rdma_id_private, id);
+	struct sockaddr_storage zero_sock = {};
+
+	if (src_addr && src_addr->sa_family)
+		return rdma_bind_addr_dst(id_priv, src_addr, dst_addr);
+
+	/*
+	 * When the src_addr is not specified, automatically supply an any addr
+	 */
+	zero_sock.ss_family = dst_addr->sa_family;
+	if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) {
+		struct sockaddr_in6 *src_addr6 =
+			(struct sockaddr_in6 *)&zero_sock;
+		struct sockaddr_in6 *dst_addr6 =
+			(struct sockaddr_in6 *)dst_addr;
+
+		src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
+		if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
+			id->route.addr.dev_addr.bound_dev_if =
+				dst_addr6->sin6_scope_id;
+	} else if (dst_addr->sa_family == AF_IB) {
+		((struct sockaddr_ib *)&zero_sock)->sib_pkey =
+			((struct sockaddr_ib *)dst_addr)->sib_pkey;
+	}
+	return rdma_bind_addr_dst(id_priv, (struct sockaddr *)&zero_sock, dst_addr);
+}
+
+/*
+ * If required, resolve the source address for bind and leave the id_priv in
+ * state RDMA_CM_ADDR_BOUND. This oddly uses the state to determine the prior
+ * calls made by ULP, a previously bound ID will not be re-bound and src_addr is
+ * ignored.
+ */
+static int resolve_prepare_src(struct rdma_id_private *id_priv,
+			       struct sockaddr *src_addr,
+			       const struct sockaddr *dst_addr)
+{
+	int ret;
+
+	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
+		/* For a well behaved ULP state will be RDMA_CM_IDLE */
+		ret = cma_bind_addr(&id_priv->id, src_addr, dst_addr);
+		if (ret)
+			return ret;
+		if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
+					   RDMA_CM_ADDR_QUERY)))
+			return -EINVAL;
+
+	}
+
+	if (cma_family(id_priv) != dst_addr->sa_family) {
+		ret = -EINVAL;
+		goto err_state;
+	}
+	return 0;
+
+err_state:
+	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
+	return ret;
+}
+
+int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
+		      const struct sockaddr *dst_addr, unsigned long timeout_ms)
+{
+	struct rdma_id_private *id_priv =
+		container_of(id, struct rdma_id_private, id);
+	int ret;
+
+	ret = resolve_prepare_src(id_priv, src_addr, dst_addr);
+	if (ret)
+		return ret;
+
+	if (cma_any_addr(dst_addr)) {
+		ret = cma_resolve_loopback(id_priv);
+	} else {
+		if (dst_addr->sa_family == AF_IB) {
+			ret = cma_resolve_ib_addr(id_priv);
+		} else {
+			/*
+			 * The FSM can return back to RDMA_CM_ADDR_BOUND after
+			 * rdma_resolve_ip() is called, eg through the error
+			 * path in addr_handler(). If this happens the existing
+			 * request must be canceled before issuing a new one.
+			 * Since canceling a request is a bit slow and this
+			 * oddball path is rare, keep track once a request has
+			 * been issued. The track turns out to be a permanent
+			 * state since this is the only cancel as it is
+			 * immediately before rdma_resolve_ip().
+			 */
+			if (id_priv->used_resolve_ip)
+				rdma_addr_cancel(&id->route.addr.dev_addr);
+			else
+				id_priv->used_resolve_ip = 1;
+			ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr,
+					      &id->route.addr.dev_addr,
+					      timeout_ms, addr_handler,
+					      false, id_priv);
+		}
+	}
+	if (ret)
+		goto err;
+
+	return 0;
+err:
+	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
+	return ret;
+}
+EXPORT_SYMBOL(rdma_resolve_addr);
+
+int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
+{
+	struct rdma_id_private *id_priv =
+		container_of(id, struct rdma_id_private, id);
+
+	return rdma_bind_addr_dst(id_priv, addr, cma_dst_addr(id_priv));
+}
 EXPORT_SYMBOL(rdma_bind_addr);
 
 static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv)
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 0de83d9a4985..59179cfc20ef 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -106,7 +106,7 @@ struct ib_sa_device {
 
 struct ib_sa_query {
 	void (*callback)(struct ib_sa_query *sa_query, int status,
-			 int num_prs, struct ib_sa_mad *mad);
+			 struct ib_sa_mad *mad);
 	void (*release)(struct ib_sa_query *);
 	struct ib_sa_client    *client;
 	struct ib_sa_port      *port;
@@ -118,12 +118,6 @@ struct ib_sa_query {
 	u32			seq; /* Local svc request sequence number */
 	unsigned long		timeout; /* Local svc timeout */
 	u8			path_use; /* How will the pathrecord be used */
-
-	/* A separate buffer to save pathrecords of a response, as in cases
-	 * like IB/netlink, mulptiple pathrecords are supported, so that
-	 * mad->data is not large enough to hold them
-	 */
-	void			*resp_pr_data;
 };
 
 #define IB_SA_ENABLE_LOCAL_SERVICE	0x00000001
@@ -132,7 +126,7 @@ struct ib_sa_query {
 
 struct ib_sa_path_query {
 	void (*callback)(int status, struct sa_path_rec *rec,
-			 int num_paths, void *context);
+			 unsigned int num_paths, void *context);
 	void *context;
 	struct ib_sa_query sa_query;
 	struct sa_path_rec *conv_pr;
@@ -690,6 +684,8 @@ static const struct ib_field guidinfo_rec_table[] = {
 	  .size_bits    = 512 },
 };
 
+#define RDMA_PRIMARY_PATH_MAX_REC_NUM 3
+
 static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
 {
 	query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
@@ -874,30 +870,21 @@ static void send_handler(struct ib_mad_agent *agent,
 static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
 					   const struct nlmsghdr *nlh)
 {
-	struct ib_path_rec_data *srec, *drec;
+	struct sa_path_rec recs[RDMA_PRIMARY_PATH_MAX_REC_NUM];
 	struct ib_sa_path_query *path_query;
+	struct ib_path_rec_data *rec_data;
 	struct ib_mad_send_wc mad_send_wc;
 	const struct nlattr *head, *curr;
 	struct ib_sa_mad *mad = NULL;
-	int len, rem, num_prs = 0;
+	int len, rem, status = -EIO;
+	unsigned int num_prs = 0;
 	u32 mask = 0;
-	int status = -EIO;
 
 	if (!query->callback)
 		goto out;
 
 	path_query = container_of(query, struct ib_sa_path_query, sa_query);
 	mad = query->mad_buf->mad;
-	if (!path_query->conv_pr &&
-	    (be16_to_cpu(mad->mad_hdr.attr_id) == IB_SA_ATTR_PATH_REC)) {
-		/* Need a larger buffer for possible multiple PRs */
-		query->resp_pr_data = kvcalloc(RDMA_PRIMARY_PATH_MAX_REC_NUM,
-					       sizeof(*drec), GFP_KERNEL);
-		if (!query->resp_pr_data) {
-			query->callback(query, -ENOMEM, 0, NULL);
-			return;
-		}
-	}
 
 	head = (const struct nlattr *) nlmsg_data(nlh);
 	len = nlmsg_len(nlh);
@@ -917,36 +904,41 @@ static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
 		break;
 	}
 
-	drec = (struct ib_path_rec_data *)query->resp_pr_data;
 	nla_for_each_attr(curr, head, len, rem) {
 		if (curr->nla_type != LS_NLA_TYPE_PATH_RECORD)
 			continue;
 
-		srec = nla_data(curr);
-		if ((srec->flags & mask) != mask)
+		rec_data = nla_data(curr);
+		if ((rec_data->flags & mask) != mask)
 			continue;
 
-		status = 0;
-		if (!drec) {
-			memcpy(mad->data, srec->path_rec,
-			       sizeof(srec->path_rec));
-			num_prs = 1;
-			break;
+		if ((query->flags & IB_SA_QUERY_OPA) ||
+		    path_query->conv_pr) {
+			mad->mad_hdr.method |= IB_MGMT_METHOD_RESP;
+			memcpy(mad->data, rec_data->path_rec,
+			       sizeof(rec_data->path_rec));
+			query->callback(query, 0, mad);
+			goto out;
 		}
 
-		memcpy(drec, srec, sizeof(*drec));
-		drec++;
+		status = 0;
+		ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
+			  rec_data->path_rec, &recs[num_prs]);
+		recs[num_prs].flags = rec_data->flags;
+		recs[num_prs].rec_type = SA_PATH_REC_TYPE_IB;
+		sa_path_set_dmac_zero(&recs[num_prs]);
+		num_prs++;
 		if (num_prs >= RDMA_PRIMARY_PATH_MAX_REC_NUM)
 			break;
 	}
 
-	if (!status)
+	if (!status) {
 		mad->mad_hdr.method |= IB_MGMT_METHOD_RESP;
-
-	query->callback(query, status, num_prs, mad);
-	kvfree(query->resp_pr_data);
-	query->resp_pr_data = NULL;
+		path_query->callback(status, recs, num_prs,
+				     path_query->context);
+	} else
+		query->callback(query, status, mad);
 
 out:
 	mad_send_wc.send_buf = query->mad_buf;
@@ -1451,11 +1443,26 @@ static int opa_pr_query_possible(struct ib_sa_client *client,
 		return PR_IB_SUPPORTED;
 }
 
-static void ib_sa_pr_callback_single(struct ib_sa_path_query *query,
-				     int status, struct ib_sa_mad *mad)
+static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
+				    int status, struct ib_sa_mad *mad)
 {
+	struct ib_sa_path_query *query =
+		container_of(sa_query, struct ib_sa_path_query, sa_query);
 	struct sa_path_rec rec = {};
 
+	if (!mad) {
+		query->callback(status, NULL, 0, query->context);
+		return;
+	}
+
+	if (sa_query->flags & IB_SA_QUERY_OPA) {
+		ib_unpack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table),
+			  mad->data, &rec);
+		rec.rec_type = SA_PATH_REC_TYPE_OPA;
+		query->callback(status, &rec, 1, query->context);
+		return;
+	}
+
 	ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
 		  mad->data, &rec);
 	rec.rec_type = SA_PATH_REC_TYPE_IB;
@@ -1472,71 +1479,6 @@ static void ib_sa_pr_callback_single(struct ib_sa_path_query *query,
 	}
 }
 
-/**
- * ib_sa_pr_callback_multiple() - Parse path records then do callback.
- *
- * In a multiple-PR case the PRs are saved in "query->resp_pr_data"
- * (instead of"mad->data") and with "ib_path_rec_data" structure format,
- * so that rec->flags can be set to indicate the type of PR.
- * This is valid only in IB fabric.
- */
-static void ib_sa_pr_callback_multiple(struct ib_sa_path_query *query,
-				       int status, int num_prs,
-				       struct ib_path_rec_data *rec_data)
-{
-	struct sa_path_rec *rec;
-	int i;
-
-	rec = kvcalloc(num_prs, sizeof(*rec), GFP_KERNEL);
-	if (!rec) {
-		query->callback(-ENOMEM, NULL, 0, query->context);
-		return;
-	}
-
-	for (i = 0; i < num_prs; i++) {
-		ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
-			  rec_data[i].path_rec, rec + i);
-		rec[i].rec_type = SA_PATH_REC_TYPE_IB;
-		sa_path_set_dmac_zero(rec + i);
-		rec[i].flags = rec_data[i].flags;
-	}
-
-	query->callback(status, rec, num_prs, query->context);
-	kvfree(rec);
-}
-
-static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
-				    int status, int num_prs,
-				    struct ib_sa_mad *mad)
-{
-	struct ib_sa_path_query *query =
-		container_of(sa_query, struct ib_sa_path_query, sa_query);
-	struct sa_path_rec rec;
-
-	if (!mad || !num_prs) {
-		query->callback(status, NULL, 0, query->context);
-		return;
-	}
-
-	if (sa_query->flags & IB_SA_QUERY_OPA) {
-		if (num_prs != 1) {
-			query->callback(-EINVAL, NULL, 0, query->context);
-			return;
-		}
-
-		ib_unpack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table),
-			  mad->data, &rec);
-		rec.rec_type = SA_PATH_REC_TYPE_OPA;
-		query->callback(status, &rec, num_prs, query->context);
-	} else {
-		if (!sa_query->resp_pr_data)
-			ib_sa_pr_callback_single(query, status, mad);
-		else
-			ib_sa_pr_callback_multiple(query, status, num_prs,
-						   sa_query->resp_pr_data);
-	}
-}
-
 static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
 {
 	struct ib_sa_path_query *query =
@@ -1578,7 +1520,7 @@ int ib_sa_path_rec_get(struct ib_sa_client *client,
 		       unsigned long timeout_ms, gfp_t gfp_mask,
 		       void (*callback)(int status,
 					struct sa_path_rec *resp,
-					int num_paths, void *context),
+					unsigned int num_paths, void *context),
 		       void *context,
 		       struct ib_sa_query **sa_query)
 {
@@ -1677,8 +1619,7 @@ err1:
 EXPORT_SYMBOL(ib_sa_path_rec_get);
 
 static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
-					int status, int num_prs,
-					struct ib_sa_mad *mad)
+					int status, struct ib_sa_mad *mad)
 {
 	struct ib_sa_mcmember_query *query =
 		container_of(sa_query, struct ib_sa_mcmember_query, sa_query);
@@ -1769,8 +1710,7 @@ err1:
 
 /* Support GuidInfoRecord */
 static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
-					int status, int num_paths,
-					struct ib_sa_mad *mad)
+					int status, struct ib_sa_mad *mad)
 {
 	struct ib_sa_guidinfo_query *query =
 		container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);
@@ -1879,8 +1819,7 @@ static void ib_classportinfo_cb(void *context)
 }
 
 static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
-					      int status, int num_prs,
-					      struct ib_sa_mad *mad)
+					      int status, struct ib_sa_mad *mad)
 {
 	unsigned long flags;
 	struct ib_sa_classport_info_query *query =
@@ -2055,13 +1994,13 @@ static void send_handler(struct ib_mad_agent *agent,
 		/* No callback -- already got recv */
 		break;
 	case IB_WC_RESP_TIMEOUT_ERR:
-		query->callback(query, -ETIMEDOUT, 0, NULL);
+		query->callback(query, -ETIMEDOUT, NULL);
 		break;
 	case IB_WC_WR_FLUSH_ERR:
-		query->callback(query, -EINTR, 0, NULL);
+		query->callback(query, -EINTR, NULL);
 		break;
	default:
-		query->callback(query, -EIO, 0, NULL);
+		query->callback(query, -EIO, NULL);
 		break;
 	}
 
@@ -2089,10 +2028,10 @@ static void recv_handler(struct ib_mad_agent *mad_agent,
 	if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
 		query->callback(query,
 				mad_recv_wc->recv_buf.mad->mad_hdr.status ?
-				-EINVAL : 0, 1,
+				-EINVAL : 0,
 				(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
 	else
-		query->callback(query, -EIO, 0, NULL);
+		query->callback(query, -EIO, NULL);
 
 	ib_free_recv_mad(mad_recv_wc);
 }
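For context on the cma.c rework: ULPs drive these entry points through the rdma_cm API, and userspace librdmacm mirrors the kernel's rdma_resolve_addr()/rdma_bind_addr() semantics. The program below is an assumed illustration, not part of this patch; passing a NULL source address exercises the "supply an any addr" path that cma_bind_addr() now funnels through rdma_bind_addr_dst(). Build with: cc demo.c -lrdmacm

	/*
	 * Minimal librdmacm sketch: resolve a destination without binding a
	 * source first, then wait for the address-resolved event. Error
	 * handling is trimmed to keep the sketch short.
	 */
	#include <stdio.h>
	#include <stdlib.h>
	#include <netdb.h>
	#include <rdma/rdma_cma.h>

	int main(int argc, char **argv)
	{
		struct rdma_event_channel *ch;
		struct rdma_cm_event *ev;
		struct rdma_cm_id *id;
		struct addrinfo *res;

		if (argc != 3 || getaddrinfo(argv[1], argv[2], NULL, &res)) {
			fprintf(stderr, "usage: %s <host> <port>\n", argv[0]);
			return 1;
		}

		ch = rdma_create_event_channel();
		if (!ch || rdma_create_id(ch, &id, NULL, RDMA_PS_TCP))
			return 1;

		/* src_addr == NULL: the kernel binds an any-address for us */
		if (rdma_resolve_addr(id, NULL, res->ai_addr, 2000 /* ms */))
			return 1;

		/* Expect RDMA_CM_EVENT_ADDR_RESOLVED on success */
		if (rdma_get_cm_event(ch, &ev) == 0) {
			printf("event: %s\n", rdma_event_str(ev->event));
			rdma_ack_cm_event(ev);
		}

		rdma_destroy_id(id);
		rdma_destroy_event_channel(ch);
		freeaddrinfo(res);
		return 0;
	}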