1 /*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
4 * Copyright (c) 2006 Intel Corporation. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
35 #include <linux/module.h>
36 #include <linux/init.h>
37 #include <linux/err.h>
38 #include <linux/random.h>
39 #include <linux/spinlock.h>
40 #include <linux/slab.h>
41 #include <linux/dma-mapping.h>
42 #include <linux/kref.h>
43 #include <linux/idr.h>
44 #include <linux/workqueue.h>
45 #include <uapi/linux/if_ether.h>
46 #include <rdma/ib_pack.h>
47 #include <rdma/ib_cache.h>
48 #include <rdma/rdma_netlink.h>
49 #include <net/netlink.h>
50 #include <uapi/rdma/ib_user_sa.h>
51 #include <rdma/ib_marshall.h>
52 #include <rdma/ib_addr.h>
54 #include "core_priv.h"
56 MODULE_AUTHOR("Roland Dreier");
57 MODULE_DESCRIPTION("InfiniBand subnet administration query support");
58 MODULE_LICENSE("Dual BSD/GPL");
60 #define IB_SA_LOCAL_SVC_TIMEOUT_MIN 100
61 #define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT 2000
62 #define IB_SA_LOCAL_SVC_TIMEOUT_MAX 200000
63 static int sa_local_svc_timeout_ms = IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT;
72 struct ib_sa_port {
73 struct ib_mad_agent *agent;
74 struct ib_sa_sm_ah *sm_ah;
75 struct work_struct update_task;
76 spinlock_t ah_lock;
77 u8 port_num;
78 };
80 struct ib_sa_device {
81 int start_port, end_port;
82 struct ib_event_handler event_handler;
83 struct ib_sa_port port[0];
84 };
86 struct ib_sa_query {
87 void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
88 void (*release)(struct ib_sa_query *);
89 struct ib_sa_client *client;
90 struct ib_sa_port *port;
91 struct ib_mad_send_buf *mad_buf;
92 struct ib_sa_sm_ah *sm_ah;
93 int id;
94 u32 flags;
95 struct list_head list; /* Local svc request list */
96 u32 seq; /* Local svc request sequence number */
97 unsigned long timeout; /* Local svc timeout */
98 u8 path_use; /* How the path record will be used */
99 };
101 #define IB_SA_ENABLE_LOCAL_SERVICE 0x00000001
102 #define IB_SA_CANCEL 0x00000002
104 struct ib_sa_service_query {
105 void (*callback)(int, struct ib_sa_service_rec *, void *);
106 void *context;
107 struct ib_sa_query sa_query;
108 };
110 struct ib_sa_path_query {
111 void (*callback)(int, struct ib_sa_path_rec *, void *);
112 void *context;
113 struct ib_sa_query sa_query;
114 };
116 struct ib_sa_guidinfo_query {
117 void (*callback)(int, struct ib_sa_guidinfo_rec *, void *);
118 void *context;
119 struct ib_sa_query sa_query;
120 };
122 struct ib_sa_mcmember_query {
123 void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
124 void *context;
125 struct ib_sa_query sa_query;
126 };
128 static LIST_HEAD(ib_nl_request_list);
129 static DEFINE_SPINLOCK(ib_nl_request_lock);
130 static atomic_t ib_nl_sa_request_seq;
131 static struct workqueue_struct *ib_nl_wq;
132 static struct delayed_work ib_nl_timed_work;
133 static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = {
134 [LS_NLA_TYPE_PATH_RECORD] = {.type = NLA_BINARY,
135 .len = sizeof(struct ib_path_rec_data)},
136 [LS_NLA_TYPE_TIMEOUT] = {.type = NLA_U32},
137 [LS_NLA_TYPE_SERVICE_ID] = {.type = NLA_U64},
138 [LS_NLA_TYPE_DGID] = {.type = NLA_BINARY,
139 .len = sizeof(struct rdma_nla_ls_gid)},
140 [LS_NLA_TYPE_SGID] = {.type = NLA_BINARY,
141 .len = sizeof(struct rdma_nla_ls_gid)},
142 [LS_NLA_TYPE_TCLASS] = {.type = NLA_U8},
143 [LS_NLA_TYPE_PKEY] = {.type = NLA_U16},
144 [LS_NLA_TYPE_QOS_CLASS] = {.type = NLA_U16},
148 static void ib_sa_add_one(struct ib_device *device);
149 static void ib_sa_remove_one(struct ib_device *device, void *client_data);
151 static struct ib_client sa_client = {
152 .name = "sa",
153 .add = ib_sa_add_one,
154 .remove = ib_sa_remove_one
155 };
157 static DEFINE_SPINLOCK(idr_lock);
158 static DEFINE_IDR(query_idr);
160 static DEFINE_SPINLOCK(tid_lock);
163 #define PATH_REC_FIELD(field) \
164 .struct_offset_bytes = offsetof(struct ib_sa_path_rec, field), \
165 .struct_size_bytes = sizeof ((struct ib_sa_path_rec *) 0)->field, \
166 .field_name = "sa_path_rec:" #field
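/*
 * Illustration (not part of the original source): with the macro above,
 * an entry such as PATH_REC_FIELD(sl) expands to
 *
 *	.struct_offset_bytes = offsetof(struct ib_sa_path_rec, sl),
 *	.struct_size_bytes   = sizeof ((struct ib_sa_path_rec *) 0)->sl,
 *	.field_name          = "sa_path_rec:sl"
 *
 * ib_pack() and ib_unpack() later walk these ib_field tables to convert
 * between struct ib_sa_path_rec and the packed SA MAD wire format.
 */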
168 static const struct ib_field path_rec_table[] = {
169 { PATH_REC_FIELD(service_id),
173 { PATH_REC_FIELD(dgid),
177 { PATH_REC_FIELD(sgid),
181 { PATH_REC_FIELD(dlid),
185 { PATH_REC_FIELD(slid),
189 { PATH_REC_FIELD(raw_traffic),
197 { PATH_REC_FIELD(flow_label),
201 { PATH_REC_FIELD(hop_limit),
205 { PATH_REC_FIELD(traffic_class),
209 { PATH_REC_FIELD(reversible),
213 { PATH_REC_FIELD(numb_path),
217 { PATH_REC_FIELD(pkey),
221 { PATH_REC_FIELD(qos_class),
225 { PATH_REC_FIELD(sl),
229 { PATH_REC_FIELD(mtu_selector),
233 { PATH_REC_FIELD(mtu),
237 { PATH_REC_FIELD(rate_selector),
241 { PATH_REC_FIELD(rate),
245 { PATH_REC_FIELD(packet_life_time_selector),
249 { PATH_REC_FIELD(packet_life_time),
253 { PATH_REC_FIELD(preference),
263 #define MCMEMBER_REC_FIELD(field) \
264 .struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field), \
265 .struct_size_bytes = sizeof ((struct ib_sa_mcmember_rec *) 0)->field, \
266 .field_name = "sa_mcmember_rec:" #field
268 static const struct ib_field mcmember_rec_table[] = {
269 { MCMEMBER_REC_FIELD(mgid),
273 { MCMEMBER_REC_FIELD(port_gid),
277 { MCMEMBER_REC_FIELD(qkey),
281 { MCMEMBER_REC_FIELD(mlid),
285 { MCMEMBER_REC_FIELD(mtu_selector),
289 { MCMEMBER_REC_FIELD(mtu),
293 { MCMEMBER_REC_FIELD(traffic_class),
297 { MCMEMBER_REC_FIELD(pkey),
301 { MCMEMBER_REC_FIELD(rate_selector),
305 { MCMEMBER_REC_FIELD(rate),
309 { MCMEMBER_REC_FIELD(packet_life_time_selector),
313 { MCMEMBER_REC_FIELD(packet_life_time),
317 { MCMEMBER_REC_FIELD(sl),
321 { MCMEMBER_REC_FIELD(flow_label),
325 { MCMEMBER_REC_FIELD(hop_limit),
329 { MCMEMBER_REC_FIELD(scope),
333 { MCMEMBER_REC_FIELD(join_state),
337 { MCMEMBER_REC_FIELD(proxy_join),
347 #define SERVICE_REC_FIELD(field) \
348 .struct_offset_bytes = offsetof(struct ib_sa_service_rec, field), \
349 .struct_size_bytes = sizeof ((struct ib_sa_service_rec *) 0)->field, \
350 .field_name = "sa_service_rec:" #field
352 static const struct ib_field service_rec_table[] = {
353 { SERVICE_REC_FIELD(id),
357 { SERVICE_REC_FIELD(gid),
361 { SERVICE_REC_FIELD(pkey),
365 { SERVICE_REC_FIELD(lease),
369 { SERVICE_REC_FIELD(key),
373 { SERVICE_REC_FIELD(name),
377 { SERVICE_REC_FIELD(data8),
381 { SERVICE_REC_FIELD(data16),
385 { SERVICE_REC_FIELD(data32),
389 { SERVICE_REC_FIELD(data64),
395 #define GUIDINFO_REC_FIELD(field) \
396 .struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field), \
397 .struct_size_bytes = sizeof((struct ib_sa_guidinfo_rec *) 0)->field, \
398 .field_name = "sa_guidinfo_rec:" #field
400 static const struct ib_field guidinfo_rec_table[] = {
401 { GUIDINFO_REC_FIELD(lid),
405 { GUIDINFO_REC_FIELD(block_num),
409 { GUIDINFO_REC_FIELD(res1),
413 { GUIDINFO_REC_FIELD(res2),
417 { GUIDINFO_REC_FIELD(guid_info_list),
423 static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
425 query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
428 static inline int ib_sa_query_cancelled(struct ib_sa_query *query)
430 return (query->flags & IB_SA_CANCEL);
433 static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
434 struct ib_sa_query *query)
436 struct ib_sa_path_rec *sa_rec = query->mad_buf->context[1];
437 struct ib_sa_mad *mad = query->mad_buf->mad;
438 ib_sa_comp_mask comp_mask = mad->sa_hdr.comp_mask;
441 struct rdma_ls_resolve_header *header;
443 query->mad_buf->context[1] = NULL;
445 /* Construct the family header first */
446 header = (struct rdma_ls_resolve_header *)
447 skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
448 memcpy(header->device_name, query->port->agent->device->name,
450 header->port_num = query->port->port_num;
452 if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
453 sa_rec->reversible != 0)
454 query->path_use = LS_RESOLVE_PATH_USE_GMP;
455 else
456 query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL;
457 header->path_use = query->path_use;
459 /* Now build the attributes */
460 if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
461 val64 = be64_to_cpu(sa_rec->service_id);
462 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
463 sizeof(val64), &val64);
465 if (comp_mask & IB_SA_PATH_REC_DGID)
466 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_DGID,
467 sizeof(sa_rec->dgid), &sa_rec->dgid);
468 if (comp_mask & IB_SA_PATH_REC_SGID)
469 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SGID,
470 sizeof(sa_rec->sgid), &sa_rec->sgid);
471 if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
472 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_TCLASS,
473 sizeof(sa_rec->traffic_class), &sa_rec->traffic_class);
475 if (comp_mask & IB_SA_PATH_REC_PKEY) {
476 val16 = be16_to_cpu(sa_rec->pkey);
477 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_PKEY,
478 sizeof(val16), &val16);
480 if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) {
481 val16 = be16_to_cpu(sa_rec->qos_class);
482 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_QOS_CLASS,
483 sizeof(val16), &val16);
487 static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
491 if (comp_mask & IB_SA_PATH_REC_SERVICE_ID)
492 len += nla_total_size(sizeof(u64));
493 if (comp_mask & IB_SA_PATH_REC_DGID)
494 len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
495 if (comp_mask & IB_SA_PATH_REC_SGID)
496 len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
497 if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
498 len += nla_total_size(sizeof(u8));
499 if (comp_mask & IB_SA_PATH_REC_PKEY)
500 len += nla_total_size(sizeof(u16));
501 if (comp_mask & IB_SA_PATH_REC_QOS_CLASS)
502 len += nla_total_size(sizeof(u16));
505 * Make sure that at least some of the required comp_mask bits are
506 * set.
507 */
508 if (WARN_ON(len == 0))
511 /* Add the family header */
512 len += NLMSG_ALIGN(sizeof(struct rdma_ls_resolve_header));
517 static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
519 struct sk_buff *skb = NULL;
520 struct nlmsghdr *nlh;
523 struct ib_sa_mad *mad;
526 mad = query->mad_buf->mad;
527 len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
531 skb = nlmsg_new(len, gfp_mask);
535 /* Put nlmsg header only for now */
536 data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS,
537 RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST);
544 ib_nl_set_path_rec_attrs(skb, query);
546 /* Repair the nlmsg header length */
549 ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, gfp_mask);
558 static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
564 INIT_LIST_HEAD(&query->list);
565 query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
567 /* Put the request on the list first. */
568 spin_lock_irqsave(&ib_nl_request_lock, flags);
569 delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
570 query->timeout = delay + jiffies;
571 list_add_tail(&query->list, &ib_nl_request_list);
572 /* Start the timeout if this is the only request */
573 if (ib_nl_request_list.next == &query->list)
574 queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
575 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
577 ret = ib_nl_send_msg(query, gfp_mask);
580 /* Remove the request */
581 spin_lock_irqsave(&ib_nl_request_lock, flags);
582 list_del(&query->list);
583 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
591 static int ib_nl_cancel_request(struct ib_sa_query *query)
594 struct ib_sa_query *wait_query;
597 spin_lock_irqsave(&ib_nl_request_lock, flags);
598 list_for_each_entry(wait_query, &ib_nl_request_list, list) {
599 /* Let the timeout routine take care of the callback */
600 if (query == wait_query) {
601 query->flags |= IB_SA_CANCEL;
602 query->timeout = jiffies;
603 list_move(&query->list, &ib_nl_request_list);
605 mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, 1);
609 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
614 static void send_handler(struct ib_mad_agent *agent,
615 struct ib_mad_send_wc *mad_send_wc);
617 static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
618 const struct nlmsghdr *nlh)
620 struct ib_mad_send_wc mad_send_wc;
621 struct ib_sa_mad *mad = NULL;
622 const struct nlattr *head, *curr;
623 struct ib_path_rec_data *rec;
628 if (query->callback) {
629 head = (const struct nlattr *) nlmsg_data(nlh);
630 len = nlmsg_len(nlh);
631 switch (query->path_use) {
632 case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
633 mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
636 case LS_RESOLVE_PATH_USE_ALL:
637 case LS_RESOLVE_PATH_USE_GMP:
639 mask = IB_PATH_PRIMARY | IB_PATH_GMP |
640 IB_PATH_BIDIRECTIONAL;
643 nla_for_each_attr(curr, head, len, rem) {
644 if (curr->nla_type == LS_NLA_TYPE_PATH_RECORD) {
645 rec = nla_data(curr);
646 /*
647 * Get the first one. In the future, we may
648 * need to get up to 6 path records.
649 */
650 if ((rec->flags & mask) == mask) {
651 mad = query->mad_buf->mad;
652 mad->mad_hdr.method |=
654 memcpy(mad->data, rec->path_rec,
655 sizeof(rec->path_rec));
661 query->callback(query, status, mad);
664 mad_send_wc.send_buf = query->mad_buf;
665 mad_send_wc.status = IB_WC_SUCCESS;
666 send_handler(query->mad_buf->mad_agent, &mad_send_wc);
669 static void ib_nl_request_timeout(struct work_struct *work)
672 struct ib_sa_query *query;
674 struct ib_mad_send_wc mad_send_wc;
677 spin_lock_irqsave(&ib_nl_request_lock, flags);
678 while (!list_empty(&ib_nl_request_list)) {
679 query = list_entry(ib_nl_request_list.next,
680 struct ib_sa_query, list);
682 if (time_after(query->timeout, jiffies)) {
683 delay = query->timeout - jiffies;
684 if ((long)delay <= 0)
685 delay = 1;
686 queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
690 list_del(&query->list);
691 ib_sa_disable_local_svc(query);
692 /* Hold the lock to protect against query cancellation */
693 if (ib_sa_query_cancelled(query))
696 ret = ib_post_send_mad(query->mad_buf, NULL);
698 mad_send_wc.send_buf = query->mad_buf;
699 mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
700 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
701 send_handler(query->port->agent, &mad_send_wc);
702 spin_lock_irqsave(&ib_nl_request_lock, flags);
705 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
708 static int ib_nl_handle_set_timeout(struct sk_buff *skb,
709 struct netlink_callback *cb)
711 const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
712 int timeout, delta, abs_delta;
713 const struct nlattr *attr;
715 struct ib_sa_query *query;
717 struct nlattr *tb[LS_NLA_TYPE_MAX];
720 if (!(nlh->nlmsg_flags & NLM_F_REQUEST) ||
721 !(NETLINK_CB(skb).sk) ||
722 !netlink_capable(skb, CAP_NET_ADMIN))
725 ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
726 nlmsg_len(nlh), ib_nl_policy);
727 attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
731 timeout = *(int *) nla_data(attr);
732 if (timeout < IB_SA_LOCAL_SVC_TIMEOUT_MIN)
733 timeout = IB_SA_LOCAL_SVC_TIMEOUT_MIN;
734 if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
735 timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;
737 delta = timeout - sa_local_svc_timeout_ms;
744 spin_lock_irqsave(&ib_nl_request_lock, flags);
745 sa_local_svc_timeout_ms = timeout;
746 list_for_each_entry(query, &ib_nl_request_list, list) {
747 if (delta < 0 && abs_delta > query->timeout)
750 query->timeout += delta;
752 /* Get the new delay from the first entry */
754 delay = query->timeout - jiffies;
760 mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
761 (unsigned long)delay);
762 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
769 static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
771 struct nlattr *tb[LS_NLA_TYPE_MAX];
774 if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
777 ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
778 nlmsg_len(nlh), ib_nl_policy);
785 static int ib_nl_handle_resolve_resp(struct sk_buff *skb,
786 struct netlink_callback *cb)
788 const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
790 struct ib_sa_query *query;
791 struct ib_mad_send_buf *send_buf;
792 struct ib_mad_send_wc mad_send_wc;
796 if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
797 !(NETLINK_CB(skb).sk) ||
798 !netlink_capable(skb, CAP_NET_ADMIN))
801 spin_lock_irqsave(&ib_nl_request_lock, flags);
802 list_for_each_entry(query, &ib_nl_request_list, list) {
803 /*
804 * If the query is cancelled, let the timeout routine
805 * take care of it.
806 */
807 if (nlh->nlmsg_seq == query->seq) {
808 found = !ib_sa_query_cancelled(query);
810 list_del(&query->list);
816 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
820 send_buf = query->mad_buf;
822 if (!ib_nl_is_good_resolve_resp(nlh)) {
823 /* if the result is a failure, send out the packet via IB */
824 ib_sa_disable_local_svc(query);
825 ret = ib_post_send_mad(query->mad_buf, NULL);
826 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
828 mad_send_wc.send_buf = send_buf;
829 mad_send_wc.status = IB_WC_GENERAL_ERR;
830 send_handler(query->port->agent, &mad_send_wc);
833 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
834 ib_nl_process_good_resolve_rsp(query, nlh);
841 static struct ibnl_client_cbs ib_sa_cb_table[] = {
842 [RDMA_NL_LS_OP_RESOLVE] = {
843 .dump = ib_nl_handle_resolve_resp,
844 .module = THIS_MODULE },
845 [RDMA_NL_LS_OP_SET_TIMEOUT] = {
846 .dump = ib_nl_handle_set_timeout,
847 .module = THIS_MODULE },
850 static void free_sm_ah(struct kref *kref)
852 struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);
854 ib_destroy_ah(sm_ah->ah);
858 static void update_sm_ah(struct work_struct *work)
860 struct ib_sa_port *port =
861 container_of(work, struct ib_sa_port, update_task);
862 struct ib_sa_sm_ah *new_ah;
863 struct ib_port_attr port_attr;
864 struct ib_ah_attr ah_attr;
866 if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
867 pr_warn("Couldn't query port\n");
871 new_ah = kmalloc(sizeof *new_ah, GFP_KERNEL);
876 kref_init(&new_ah->ref);
877 new_ah->src_path_mask = (1 << port_attr.lmc) - 1;
879 new_ah->pkey_index = 0;
880 if (ib_find_pkey(port->agent->device, port->port_num,
881 IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
882 pr_err("Couldn't find index for default PKey\n");
884 memset(&ah_attr, 0, sizeof ah_attr);
885 ah_attr.dlid = port_attr.sm_lid;
886 ah_attr.sl = port_attr.sm_sl;
887 ah_attr.port_num = port->port_num;
889 new_ah->ah = ib_create_ah(port->agent->qp->pd, &ah_attr);
890 if (IS_ERR(new_ah->ah)) {
891 pr_warn("Couldn't create new SM AH\n");
896 spin_lock_irq(&port->ah_lock);
898 kref_put(&port->sm_ah->ref, free_sm_ah);
899 port->sm_ah = new_ah;
900 spin_unlock_irq(&port->ah_lock);
904 static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event)
906 if (event->event == IB_EVENT_PORT_ERR ||
907 event->event == IB_EVENT_PORT_ACTIVE ||
908 event->event == IB_EVENT_LID_CHANGE ||
909 event->event == IB_EVENT_PKEY_CHANGE ||
910 event->event == IB_EVENT_SM_CHANGE ||
911 event->event == IB_EVENT_CLIENT_REREGISTER) {
913 struct ib_sa_device *sa_dev =
914 container_of(handler, typeof(*sa_dev), event_handler);
915 struct ib_sa_port *port =
916 &sa_dev->port[event->element.port_num - sa_dev->start_port];
918 if (!rdma_cap_ib_sa(handler->device, port->port_num))
919 return;
921 spin_lock_irqsave(&port->ah_lock, flags);
923 kref_put(&port->sm_ah->ref, free_sm_ah);
925 spin_unlock_irqrestore(&port->ah_lock, flags);
927 queue_work(ib_wq, &sa_dev->port[event->element.port_num -
928 sa_dev->start_port].update_task);
932 void ib_sa_register_client(struct ib_sa_client *client)
934 atomic_set(&client->users, 1);
935 init_completion(&client->comp);
937 EXPORT_SYMBOL(ib_sa_register_client);
939 void ib_sa_unregister_client(struct ib_sa_client *client)
941 ib_sa_client_put(client);
942 wait_for_completion(&client->comp);
944 EXPORT_SYMBOL(ib_sa_unregister_client);
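/*
 * Caller-side sketch (illustrative only; "my_sa_client" is a hypothetical
 * name, not something defined in this file). A consumer brackets its SA
 * queries with register/unregister:
 *
 *	static struct ib_sa_client my_sa_client;
 *
 *	ib_sa_register_client(&my_sa_client);
 *	... issue ib_sa_path_rec_get() and friends with &my_sa_client ...
 *	ib_sa_unregister_client(&my_sa_client);
 *
 * ib_sa_unregister_client() drops the initial reference and then waits on
 * the completion until all outstanding queries holding the client have
 * finished.
 */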
946 /**
947 * ib_sa_cancel_query - try to cancel an SA query
948 * @id:ID of query to cancel
949 * @query:query pointer to cancel
951 * Try to cancel an SA query. If the id and query don't match up or
952 * the query has already completed, nothing is done. Otherwise the
953 * query is canceled and will complete with a status of -EINTR.
954 */
955 void ib_sa_cancel_query(int id, struct ib_sa_query *query)
958 struct ib_mad_agent *agent;
959 struct ib_mad_send_buf *mad_buf;
961 spin_lock_irqsave(&idr_lock, flags);
962 if (idr_find(&query_idr, id) != query) {
963 spin_unlock_irqrestore(&idr_lock, flags);
966 agent = query->port->agent;
967 mad_buf = query->mad_buf;
968 spin_unlock_irqrestore(&idr_lock, flags);
971 * If the query is still on the netlink request list, schedule
972 * it to be cancelled by the timeout routine. Otherwise, it has been
973 * sent to the MAD layer and has to be cancelled from there.
975 if (!ib_nl_cancel_request(query))
976 ib_cancel_mad(agent, mad_buf);
978 EXPORT_SYMBOL(ib_sa_cancel_query);
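/*
 * Illustrative pairing with the query calls below (caller-side names are
 * hypothetical): a non-negative return value from ib_sa_path_rec_get() is
 * the query ID, and the struct ib_sa_query pointer comes back through the
 * last argument; both are needed to cancel.
 *
 *	struct ib_sa_query *query;
 *	int id;
 *
 *	id = ib_sa_path_rec_get(..., &query);
 *	if (id >= 0)
 *		ib_sa_cancel_query(id, query);
 */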
980 static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
982 struct ib_sa_device *sa_dev;
983 struct ib_sa_port *port;
987 sa_dev = ib_get_client_data(device, &sa_client);
991 port = &sa_dev->port[port_num - sa_dev->start_port];
992 spin_lock_irqsave(&port->ah_lock, flags);
993 src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
994 spin_unlock_irqrestore(&port->ah_lock, flags);
996 return src_path_mask;
999 int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
1000 struct ib_sa_path_rec *rec, struct ib_ah_attr *ah_attr)
1005 struct net_device *ndev = NULL;
1007 memset(ah_attr, 0, sizeof *ah_attr);
1008 ah_attr->dlid = be16_to_cpu(rec->dlid);
1009 ah_attr->sl = rec->sl;
1010 ah_attr->src_path_bits = be16_to_cpu(rec->slid) &
1011 get_src_path_mask(device, port_num);
1012 ah_attr->port_num = port_num;
1013 ah_attr->static_rate = rec->rate;
1015 use_roce = rdma_cap_eth_ah(device, port_num);
1018 struct net_device *idev;
1019 struct net_device *resolved_dev;
1020 struct rdma_dev_addr dev_addr = {.bound_dev_if = rec->ifindex,
1021 .net = rec->net ? rec->net :
1024 struct sockaddr _sockaddr;
1025 struct sockaddr_in _sockaddr_in;
1026 struct sockaddr_in6 _sockaddr_in6;
1027 } sgid_addr, dgid_addr;
1029 if (!device->get_netdev)
1032 rdma_gid2ip(&sgid_addr._sockaddr, &rec->sgid);
1033 rdma_gid2ip(&dgid_addr._sockaddr, &rec->dgid);
1035 /* validate the route */
1036 ret = rdma_resolve_ip_route(&sgid_addr._sockaddr,
1037 &dgid_addr._sockaddr, &dev_addr);
1041 if ((dev_addr.network == RDMA_NETWORK_IPV4 ||
1042 dev_addr.network == RDMA_NETWORK_IPV6) &&
1043 rec->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
1046 idev = device->get_netdev(device, port_num);
1050 resolved_dev = dev_get_by_index(dev_addr.net,
1051 dev_addr.bound_dev_if);
1052 if (resolved_dev->flags & IFF_LOOPBACK) {
1053 dev_put(resolved_dev);
1054 resolved_dev = idev;
1055 dev_hold(resolved_dev);
1057 ndev = ib_get_ndev_from_path(rec);
1059 if ((ndev && ndev != resolved_dev) ||
1060 (resolved_dev != idev &&
1061 !rdma_is_upper_dev_rcu(idev, resolved_dev)))
1062 ret = -EHOSTUNREACH;
1065 dev_put(resolved_dev);
1073 if (rec->hop_limit > 0 || use_roce) {
1074 ah_attr->ah_flags = IB_AH_GRH;
1075 ah_attr->grh.dgid = rec->dgid;
1077 ret = ib_find_cached_gid_by_port(device, &rec->sgid,
1078 rec->gid_type, port_num, ndev,
1086 ah_attr->grh.sgid_index = gid_index;
1087 ah_attr->grh.flow_label = be32_to_cpu(rec->flow_label);
1088 ah_attr->grh.hop_limit = rec->hop_limit;
1089 ah_attr->grh.traffic_class = rec->traffic_class;
1095 memcpy(ah_attr->dmac, rec->dmac, ETH_ALEN);
1099 EXPORT_SYMBOL(ib_init_ah_from_path);
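/*
 * Typical use, sketched (not taken from this file; "pd" and "resolved_rec"
 * are hypothetical caller-side names): once a path record has been
 * resolved, build address-handle attributes from it and create the AH on
 * the caller's PD.
 *
 *	struct ib_ah_attr ah_attr;
 *	struct ib_ah *ah;
 *	int ret;
 *
 *	ret = ib_init_ah_from_path(device, port_num, &resolved_rec, &ah_attr);
 *	if (!ret)
 *		ah = ib_create_ah(pd, &ah_attr);
 */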
1101 static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
1103 unsigned long flags;
1105 spin_lock_irqsave(&query->port->ah_lock, flags);
1106 if (!query->port->sm_ah) {
1107 spin_unlock_irqrestore(&query->port->ah_lock, flags);
1110 kref_get(&query->port->sm_ah->ref);
1111 query->sm_ah = query->port->sm_ah;
1112 spin_unlock_irqrestore(&query->port->ah_lock, flags);
1114 query->mad_buf = ib_create_send_mad(query->port->agent, 1,
1115 query->sm_ah->pkey_index,
1116 0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
1118 IB_MGMT_BASE_VERSION);
1119 if (IS_ERR(query->mad_buf)) {
1120 kref_put(&query->sm_ah->ref, free_sm_ah);
1124 query->mad_buf->ah = query->sm_ah->ah;
1129 static void free_mad(struct ib_sa_query *query)
1131 ib_free_send_mad(query->mad_buf);
1132 kref_put(&query->sm_ah->ref, free_sm_ah);
1135 static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
1137 unsigned long flags;
1139 memset(mad, 0, sizeof *mad);
1141 mad->mad_hdr.base_version = IB_MGMT_BASE_VERSION;
1142 mad->mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
1143 mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;
1145 spin_lock_irqsave(&tid_lock, flags);
1147 cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
1148 spin_unlock_irqrestore(&tid_lock, flags);
1151 static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
1153 bool preload = gfpflags_allow_blocking(gfp_mask);
1154 unsigned long flags;
1158 idr_preload(gfp_mask);
1159 spin_lock_irqsave(&idr_lock, flags);
1161 id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);
1163 spin_unlock_irqrestore(&idr_lock, flags);
1169 query->mad_buf->timeout_ms = timeout_ms;
1170 query->mad_buf->context[0] = query;
1173 if (query->flags & IB_SA_ENABLE_LOCAL_SERVICE) {
1174 if (!ibnl_chk_listeners(RDMA_NL_GROUP_LS)) {
1175 if (!ib_nl_make_request(query, gfp_mask))
1178 ib_sa_disable_local_svc(query);
1181 ret = ib_post_send_mad(query->mad_buf, NULL);
1183 spin_lock_irqsave(&idr_lock, flags);
1184 idr_remove(&query_idr, id);
1185 spin_unlock_irqrestore(&idr_lock, flags);
1189 * It's not safe to dereference query any more, because the
1190 * send may already have completed and freed the query in
1191 * another context.
1192 */
1193 return ret ? ret : id;
1196 void ib_sa_unpack_path(void *attribute, struct ib_sa_path_rec *rec)
1198 ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
1200 EXPORT_SYMBOL(ib_sa_unpack_path);
1202 void ib_sa_pack_path(struct ib_sa_path_rec *rec, void *attribute)
1204 ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute);
1206 EXPORT_SYMBOL(ib_sa_pack_path);
1208 static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
1210 struct ib_sa_mad *mad)
1212 struct ib_sa_path_query *query =
1213 container_of(sa_query, struct ib_sa_path_query, sa_query);
1216 struct ib_sa_path_rec rec;
1218 ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
1222 rec.gid_type = IB_GID_TYPE_IB;
1223 eth_zero_addr(rec.dmac);
1224 query->callback(status, &rec, query->context);
1226 query->callback(status, NULL, query->context);
1229 static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
1231 kfree(container_of(sa_query, struct ib_sa_path_query, sa_query));
1234 /**
1235 * ib_sa_path_rec_get - Start a Path get query
1237 * @device:device to send query on
1238 * @port_num: port number to send query on
1239 * @rec:Path Record to send in query
1240 * @comp_mask:component mask to send in query
1241 * @timeout_ms:time to wait for response
1242 * @gfp_mask:GFP mask to use for internal allocations
1243 * @callback:function called when query completes, times out or is
1244 * canceled
1245 * @context:opaque user context passed to callback
1246 * @sa_query:query context, used to cancel query
1248 * Send a Path Record Get query to the SA to look up a path. The
1249 * callback function will be called when the query completes (or
1250 * fails); status is 0 for a successful response, -EINTR if the query
1251 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
1252 * occurred sending the query. The resp parameter of the callback is
1253 * only valid if status is 0.
1255 * If the return value of ib_sa_path_rec_get() is negative, it is an
1256 * error code. Otherwise it is a query ID that can be used to cancel
1257 * the query.
1258 */
1259 int ib_sa_path_rec_get(struct ib_sa_client *client,
1260 struct ib_device *device, u8 port_num,
1261 struct ib_sa_path_rec *rec,
1262 ib_sa_comp_mask comp_mask,
1263 int timeout_ms, gfp_t gfp_mask,
1264 void (*callback)(int status,
1265 struct ib_sa_path_rec *resp,
1268 struct ib_sa_query **sa_query)
1270 struct ib_sa_path_query *query;
1271 struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1272 struct ib_sa_port *port;
1273 struct ib_mad_agent *agent;
1274 struct ib_sa_mad *mad;
1280 port = &sa_dev->port[port_num - sa_dev->start_port];
1281 agent = port->agent;
1283 query = kzalloc(sizeof(*query), gfp_mask);
1287 query->sa_query.port = port;
1288 ret = alloc_mad(&query->sa_query, gfp_mask);
1292 ib_sa_client_get(client);
1293 query->sa_query.client = client;
1294 query->callback = callback;
1295 query->context = context;
1297 mad = query->sa_query.mad_buf->mad;
1298 init_mad(mad, agent);
1300 query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
1301 query->sa_query.release = ib_sa_path_rec_release;
1302 mad->mad_hdr.method = IB_MGMT_METHOD_GET;
1303 mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_PATH_REC);
1304 mad->sa_hdr.comp_mask = comp_mask;
1306 ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, mad->data);
1308 *sa_query = &query->sa_query;
1310 query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE;
1311 query->sa_query.mad_buf->context[1] = rec;
1313 ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
1321 ib_sa_client_put(query->sa_query.client);
1322 free_mad(&query->sa_query);
1328 EXPORT_SYMBOL(ib_sa_path_rec_get);
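/*
 * Usage sketch for ib_sa_path_rec_get() (illustrative; every caller-side
 * name here is hypothetical). The caller fills in the fields it knows,
 * sets the matching comp_mask bits, and receives the result in its
 * callback:
 *
 *	static void my_path_handler(int status, struct ib_sa_path_rec *resp,
 *				    void *context)
 *	{
 *		if (!status)
 *			... use *resp; it is only valid when status == 0 ...
 *	}
 *
 *	struct ib_sa_path_rec rec = {
 *		.sgid      = local_gid,
 *		.dgid      = remote_gid,
 *		.numb_path = 1,
 *	};
 *	struct ib_sa_query *query;
 *	int id;
 *
 *	id = ib_sa_path_rec_get(&my_sa_client, device, port_num, &rec,
 *				IB_SA_PATH_REC_SGID | IB_SA_PATH_REC_DGID |
 *				IB_SA_PATH_REC_NUMB_PATH,
 *				1000, GFP_KERNEL,
 *				my_path_handler, my_context, &query);
 *	if (id < 0)
 *		... id is a negative error code ...
 */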
1330 static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
1332 struct ib_sa_mad *mad)
1334 struct ib_sa_service_query *query =
1335 container_of(sa_query, struct ib_sa_service_query, sa_query);
1338 struct ib_sa_service_rec rec;
1340 ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
1342 query->callback(status, &rec, query->context);
1344 query->callback(status, NULL, query->context);
1347 static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
1349 kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
1352 /**
1353 * ib_sa_service_rec_query - Start Service Record operation
1355 * @device:device to send request on
1356 * @port_num: port number to send request on
1357 * @method:SA method - should be get, set, or delete
1358 * @rec:Service Record to send in request
1359 * @comp_mask:component mask to send in request
1360 * @timeout_ms:time to wait for response
1361 * @gfp_mask:GFP mask to use for internal allocations
1362 * @callback:function called when request completes, times out or is
1363 * canceled
1364 * @context:opaque user context passed to callback
1365 * @sa_query:request context, used to cancel request
1367 * Send a Service Record set/get/delete to the SA to register,
1368 * unregister or query a service record.
1369 * The callback function will be called when the request completes (or
1370 * fails); status is 0 for a successful response, -EINTR if the query
1371 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
1372 * occurred sending the query. The resp parameter of the callback is
1373 * only valid if status is 0.
1375 * If the return value of ib_sa_service_rec_query() is negative, it is an
1376 * error code. Otherwise it is a request ID that can be used to cancel
1377 * the request.
1378 */
1379 int ib_sa_service_rec_query(struct ib_sa_client *client,
1380 struct ib_device *device, u8 port_num, u8 method,
1381 struct ib_sa_service_rec *rec,
1382 ib_sa_comp_mask comp_mask,
1383 int timeout_ms, gfp_t gfp_mask,
1384 void (*callback)(int status,
1385 struct ib_sa_service_rec *resp,
1388 struct ib_sa_query **sa_query)
1390 struct ib_sa_service_query *query;
1391 struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1392 struct ib_sa_port *port;
1393 struct ib_mad_agent *agent;
1394 struct ib_sa_mad *mad;
1400 port = &sa_dev->port[port_num - sa_dev->start_port];
1401 agent = port->agent;
1403 if (method != IB_MGMT_METHOD_GET &&
1404 method != IB_MGMT_METHOD_SET &&
1405 method != IB_SA_METHOD_DELETE)
1408 query = kzalloc(sizeof(*query), gfp_mask);
1412 query->sa_query.port = port;
1413 ret = alloc_mad(&query->sa_query, gfp_mask);
1417 ib_sa_client_get(client);
1418 query->sa_query.client = client;
1419 query->callback = callback;
1420 query->context = context;
1422 mad = query->sa_query.mad_buf->mad;
1423 init_mad(mad, agent);
1425 query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
1426 query->sa_query.release = ib_sa_service_rec_release;
1427 mad->mad_hdr.method = method;
1428 mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
1429 mad->sa_hdr.comp_mask = comp_mask;
1431 ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
1434 *sa_query = &query->sa_query;
1436 ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
1444 ib_sa_client_put(query->sa_query.client);
1445 free_mad(&query->sa_query);
1451 EXPORT_SYMBOL(ib_sa_service_rec_query);
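/*
 * Brief sketch (illustrative, hypothetical caller-side names): the service
 * record call additionally takes the SA method, e.g. IB_MGMT_METHOD_SET to
 * register a service and IB_SA_METHOD_DELETE to unregister it; any other
 * method is rejected by the check above.
 *
 *	id = ib_sa_service_rec_query(&my_sa_client, device, port_num,
 *				     IB_MGMT_METHOD_SET, &svc_rec, comp_mask,
 *				     1000, GFP_KERNEL,
 *				     my_service_handler, my_context, &query);
 */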
1453 static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
1455 struct ib_sa_mad *mad)
1457 struct ib_sa_mcmember_query *query =
1458 container_of(sa_query, struct ib_sa_mcmember_query, sa_query);
1461 struct ib_sa_mcmember_rec rec;
1463 ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
1465 query->callback(status, &rec, query->context);
1467 query->callback(status, NULL, query->context);
1470 static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
1472 kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
1475 int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
1476 struct ib_device *device, u8 port_num,
1478 struct ib_sa_mcmember_rec *rec,
1479 ib_sa_comp_mask comp_mask,
1480 int timeout_ms, gfp_t gfp_mask,
1481 void (*callback)(int status,
1482 struct ib_sa_mcmember_rec *resp,
1485 struct ib_sa_query **sa_query)
1487 struct ib_sa_mcmember_query *query;
1488 struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1489 struct ib_sa_port *port;
1490 struct ib_mad_agent *agent;
1491 struct ib_sa_mad *mad;
1497 port = &sa_dev->port[port_num - sa_dev->start_port];
1498 agent = port->agent;
1500 query = kzalloc(sizeof(*query), gfp_mask);
1504 query->sa_query.port = port;
1505 ret = alloc_mad(&query->sa_query, gfp_mask);
1509 ib_sa_client_get(client);
1510 query->sa_query.client = client;
1511 query->callback = callback;
1512 query->context = context;
1514 mad = query->sa_query.mad_buf->mad;
1515 init_mad(mad, agent);
1517 query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
1518 query->sa_query.release = ib_sa_mcmember_rec_release;
1519 mad->mad_hdr.method = method;
1520 mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
1521 mad->sa_hdr.comp_mask = comp_mask;
1523 ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
1526 *sa_query = &query->sa_query;
1528 ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
1536 ib_sa_client_put(query->sa_query.client);
1537 free_mad(&query->sa_query);
1544 /* Support GuidInfoRecord */
1545 static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
1547 struct ib_sa_mad *mad)
1549 struct ib_sa_guidinfo_query *query =
1550 container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);
1553 struct ib_sa_guidinfo_rec rec;
1555 ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table),
1557 query->callback(status, &rec, query->context);
1559 query->callback(status, NULL, query->context);
1562 static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
1564 kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query));
1567 int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
1568 struct ib_device *device, u8 port_num,
1569 struct ib_sa_guidinfo_rec *rec,
1570 ib_sa_comp_mask comp_mask, u8 method,
1571 int timeout_ms, gfp_t gfp_mask,
1572 void (*callback)(int status,
1573 struct ib_sa_guidinfo_rec *resp,
1576 struct ib_sa_query **sa_query)
1578 struct ib_sa_guidinfo_query *query;
1579 struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1580 struct ib_sa_port *port;
1581 struct ib_mad_agent *agent;
1582 struct ib_sa_mad *mad;
1588 if (method != IB_MGMT_METHOD_GET &&
1589 method != IB_MGMT_METHOD_SET &&
1590 method != IB_SA_METHOD_DELETE) {
1594 port = &sa_dev->port[port_num - sa_dev->start_port];
1595 agent = port->agent;
1597 query = kzalloc(sizeof(*query), gfp_mask);
1601 query->sa_query.port = port;
1602 ret = alloc_mad(&query->sa_query, gfp_mask);
1606 ib_sa_client_get(client);
1607 query->sa_query.client = client;
1608 query->callback = callback;
1609 query->context = context;
1611 mad = query->sa_query.mad_buf->mad;
1612 init_mad(mad, agent);
1614 query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
1615 query->sa_query.release = ib_sa_guidinfo_rec_release;
1617 mad->mad_hdr.method = method;
1618 mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC);
1619 mad->sa_hdr.comp_mask = comp_mask;
1621 ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec,
1624 *sa_query = &query->sa_query;
1626 ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
1634 ib_sa_client_put(query->sa_query.client);
1635 free_mad(&query->sa_query);
1641 EXPORT_SYMBOL(ib_sa_guid_info_rec_query);
1643 static void send_handler(struct ib_mad_agent *agent,
1644 struct ib_mad_send_wc *mad_send_wc)
1646 struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
1647 unsigned long flags;
1649 if (query->callback)
1650 switch (mad_send_wc->status) {
1651 case IB_WC_SUCCESS:
1652 /* No callback -- already got recv */
1653 break;
1654 case IB_WC_RESP_TIMEOUT_ERR:
1655 query->callback(query, -ETIMEDOUT, NULL);
1656 break;
1657 case IB_WC_WR_FLUSH_ERR:
1658 query->callback(query, -EINTR, NULL);
1659 break;
1660 default:
1661 query->callback(query, -EIO, NULL);
1662 break;
1663 }
1665 spin_lock_irqsave(&idr_lock, flags);
1666 idr_remove(&query_idr, query->id);
1667 spin_unlock_irqrestore(&idr_lock, flags);
1670 ib_sa_client_put(query->client);
1671 query->release(query);
1674 static void recv_handler(struct ib_mad_agent *mad_agent,
1675 struct ib_mad_send_buf *send_buf,
1676 struct ib_mad_recv_wc *mad_recv_wc)
1678 struct ib_sa_query *query;
1683 query = send_buf->context[0];
1684 if (query->callback) {
1685 if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
1686 query->callback(query,
1687 mad_recv_wc->recv_buf.mad->mad_hdr.status ?
1689 (struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
1691 query->callback(query, -EIO, NULL);
1694 ib_free_recv_mad(mad_recv_wc);
1697 static void ib_sa_add_one(struct ib_device *device)
1699 struct ib_sa_device *sa_dev;
1703 s = rdma_start_port(device);
1704 e = rdma_end_port(device);
1706 sa_dev = kzalloc(sizeof *sa_dev +
1707 (e - s + 1) * sizeof (struct ib_sa_port),
1712 sa_dev->start_port = s;
1713 sa_dev->end_port = e;
1715 for (i = 0; i <= e - s; ++i) {
1716 spin_lock_init(&sa_dev->port[i].ah_lock);
1717 if (!rdma_cap_ib_sa(device, i + 1))
1718 continue;
1720 sa_dev->port[i].sm_ah = NULL;
1721 sa_dev->port[i].port_num = i + s;
1723 sa_dev->port[i].agent =
1724 ib_register_mad_agent(device, i + s, IB_QPT_GSI,
1725 NULL, 0, send_handler,
1726 recv_handler, sa_dev, 0);
1727 if (IS_ERR(sa_dev->port[i].agent))
1730 INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
1738 ib_set_client_data(device, &sa_client, sa_dev);
1741 * We register our event handler after everything is set up,
1742 * and then update our cached info after the event handler is
1743 * registered to avoid any problems if a port changes state
1744 * during our initialization.
1747 INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
1748 if (ib_register_event_handler(&sa_dev->event_handler))
1751 for (i = 0; i <= e - s; ++i) {
1752 if (rdma_cap_ib_sa(device, i + 1))
1753 update_sm_ah(&sa_dev->port[i].update_task);
1760 if (rdma_cap_ib_sa(device, i + 1))
1761 ib_unregister_mad_agent(sa_dev->port[i].agent);
1768 static void ib_sa_remove_one(struct ib_device *device, void *client_data)
1770 struct ib_sa_device *sa_dev = client_data;
1776 ib_unregister_event_handler(&sa_dev->event_handler);
1778 flush_workqueue(ib_wq);
1780 for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
1781 if (rdma_cap_ib_sa(device, i + 1)) {
1782 ib_unregister_mad_agent(sa_dev->port[i].agent);
1783 if (sa_dev->port[i].sm_ah)
1784 kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
1792 static int __init ib_sa_init(void)
1796 get_random_bytes(&tid, sizeof tid);
1798 atomic_set(&ib_nl_sa_request_seq, 0);
1800 ret = ib_register_client(&sa_client);
1802 pr_err("Couldn't register ib_sa client\n");
1808 pr_err("Couldn't initialize multicast handling\n");
1812 ib_nl_wq = create_singlethread_workqueue("ib_nl_sa_wq");
1818 if (ibnl_add_client(RDMA_NL_LS, RDMA_NL_LS_NUM_OPS,
1820 pr_err("Failed to add netlink callback\n");
1824 INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout);
1828 destroy_workqueue(ib_nl_wq);
1832 ib_unregister_client(&sa_client);
1837 static void __exit ib_sa_cleanup(void)
1839 ibnl_remove_client(RDMA_NL_LS);
1840 cancel_delayed_work(&ib_nl_timed_work);
1841 flush_workqueue(ib_nl_wq);
1842 destroy_workqueue(ib_nl_wq);
1844 ib_unregister_client(&sa_client);
1845 idr_destroy(&query_idr);
1848 module_init(ib_sa_init);
1849 module_exit(ib_sa_cleanup);