/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 * Copyright (c) 2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/idr.h>
#include <linux/workqueue.h>
#include <uapi/linux/if_ether.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_cache.h>
#include <rdma/rdma_netlink.h>
#include <net/netlink.h>
#include <uapi/rdma/ib_user_sa.h>
#include <rdma/ib_marshall.h>
#include <rdma/ib_addr.h>
#include "sa.h"
#include "core_priv.h"
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand subnet administration query support");
MODULE_LICENSE("Dual BSD/GPL");
#define IB_SA_LOCAL_SVC_TIMEOUT_MIN     100
#define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT 2000
#define IB_SA_LOCAL_SVC_TIMEOUT_MAX     200000

static int sa_local_svc_timeout_ms = IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT;
struct ib_sa_sm_ah {
        struct ib_ah *ah;
        struct kref   ref;
        u16           pkey_index;
        u8            src_path_mask;
};

struct ib_sa_port {
        struct ib_mad_agent *agent;
        struct ib_sa_sm_ah  *sm_ah;
        struct work_struct   update_task;
        spinlock_t           ah_lock;
        u8                   port_num;
};

struct ib_sa_device {
        int                     start_port, end_port;
        struct ib_event_handler event_handler;
        struct ib_sa_port port[0];
};

struct ib_sa_query {
        void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
        void (*release)(struct ib_sa_query *);
        struct ib_sa_client    *client;
        struct ib_sa_port      *port;
        struct ib_mad_send_buf *mad_buf;
        struct ib_sa_sm_ah     *sm_ah;
        int                     id;
        u32                     flags;
        struct list_head        list;    /* Local svc request list */
        u32                     seq;     /* Local svc request sequence number */
        unsigned long           timeout; /* Local svc timeout */
        u8                      path_use;/* How the pathrecord will be used */
};
#define IB_SA_ENABLE_LOCAL_SERVICE      0x00000001
#define IB_SA_CANCEL                    0x00000002
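
/*
 * A note on the flag bits above (summarizing the flow implemented below):
 * IB_SA_ENABLE_LOCAL_SERVICE marks a query as eligible for local path
 * resolution over the RDMA_NL_LS netlink group before it is sent to the
 * SA as a MAD, and IB_SA_CANCEL marks a query sitting on the netlink
 * request list so that ib_nl_request_timeout() completes it instead of
 * resending it.
 */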
struct ib_sa_service_query {
        void (*callback)(int, struct ib_sa_service_rec *, void *);
        void *context;
        struct ib_sa_query sa_query;
};

struct ib_sa_path_query {
        void (*callback)(int, struct ib_sa_path_rec *, void *);
        void *context;
        struct ib_sa_query sa_query;
};

struct ib_sa_guidinfo_query {
        void (*callback)(int, struct ib_sa_guidinfo_rec *, void *);
        void *context;
        struct ib_sa_query sa_query;
};

struct ib_sa_classport_info_query {
        void (*callback)(int, struct ib_class_port_info *, void *);
        void *context;
        struct ib_sa_query sa_query;
};

struct ib_sa_mcmember_query {
        void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
        void *context;
        struct ib_sa_query sa_query;
};
static LIST_HEAD(ib_nl_request_list);
static DEFINE_SPINLOCK(ib_nl_request_lock);
static atomic_t ib_nl_sa_request_seq;
static struct workqueue_struct *ib_nl_wq;
static struct delayed_work ib_nl_timed_work;
static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = {
        [LS_NLA_TYPE_PATH_RECORD] = {.type = NLA_BINARY,
                                     .len = sizeof(struct ib_path_rec_data)},
        [LS_NLA_TYPE_TIMEOUT] = {.type = NLA_U32},
        [LS_NLA_TYPE_SERVICE_ID] = {.type = NLA_U64},
        [LS_NLA_TYPE_DGID] = {.type = NLA_BINARY,
                              .len = sizeof(struct rdma_nla_ls_gid)},
        [LS_NLA_TYPE_SGID] = {.type = NLA_BINARY,
                              .len = sizeof(struct rdma_nla_ls_gid)},
        [LS_NLA_TYPE_TCLASS] = {.type = NLA_U8},
        [LS_NLA_TYPE_PKEY] = {.type = NLA_U16},
        [LS_NLA_TYPE_QOS_CLASS] = {.type = NLA_U16},
};
static void ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device, void *client_data);

static struct ib_client sa_client = {
        .name   = "sa",
        .add    = ib_sa_add_one,
        .remove = ib_sa_remove_one
};

static DEFINE_SPINLOCK(idr_lock);
static DEFINE_IDR(query_idr);

static DEFINE_SPINLOCK(tid_lock);
static u32 tid;
#define PATH_REC_FIELD(field) \
        .struct_offset_bytes = offsetof(struct ib_sa_path_rec, field),     \
        .struct_size_bytes   = sizeof ((struct ib_sa_path_rec *) 0)->field, \
        .field_name          = "sa_path_rec:" #field

static const struct ib_field path_rec_table[] = {
        { PATH_REC_FIELD(service_id),
          .offset_words = 0,
          .offset_bits  = 0,
          .size_bits    = 64 },
        { PATH_REC_FIELD(dgid),
          .offset_words = 2,
          .offset_bits  = 0,
          .size_bits    = 128 },
        { PATH_REC_FIELD(sgid),
          .offset_words = 6,
          .offset_bits  = 0,
          .size_bits    = 128 },
        { PATH_REC_FIELD(dlid),
          .offset_words = 10,
          .offset_bits  = 0,
          .size_bits    = 16 },
        { PATH_REC_FIELD(slid),
          .offset_words = 10,
          .offset_bits  = 16,
          .size_bits    = 16 },
        { PATH_REC_FIELD(raw_traffic),
          .offset_words = 11,
          .offset_bits  = 0,
          .size_bits    = 1 },
        { RESERVED,
          .offset_words = 11,
          .offset_bits  = 1,
          .size_bits    = 3 },
        { PATH_REC_FIELD(flow_label),
          .offset_words = 11,
          .offset_bits  = 4,
          .size_bits    = 20 },
        { PATH_REC_FIELD(hop_limit),
          .offset_words = 11,
          .offset_bits  = 24,
          .size_bits    = 8 },
        { PATH_REC_FIELD(traffic_class),
          .offset_words = 12,
          .offset_bits  = 0,
          .size_bits    = 8 },
        { PATH_REC_FIELD(reversible),
          .offset_words = 12,
          .offset_bits  = 8,
          .size_bits    = 1 },
        { PATH_REC_FIELD(numb_path),
          .offset_words = 12,
          .offset_bits  = 9,
          .size_bits    = 7 },
        { PATH_REC_FIELD(pkey),
          .offset_words = 12,
          .offset_bits  = 16,
          .size_bits    = 16 },
        { PATH_REC_FIELD(qos_class),
          .offset_words = 13,
          .offset_bits  = 0,
          .size_bits    = 12 },
        { PATH_REC_FIELD(sl),
          .offset_words = 13,
          .offset_bits  = 12,
          .size_bits    = 4 },
        { PATH_REC_FIELD(mtu_selector),
          .offset_words = 13,
          .offset_bits  = 16,
          .size_bits    = 2 },
        { PATH_REC_FIELD(mtu),
          .offset_words = 13,
          .offset_bits  = 18,
          .size_bits    = 6 },
        { PATH_REC_FIELD(rate_selector),
          .offset_words = 13,
          .offset_bits  = 24,
          .size_bits    = 2 },
        { PATH_REC_FIELD(rate),
          .offset_words = 13,
          .offset_bits  = 26,
          .size_bits    = 6 },
        { PATH_REC_FIELD(packet_life_time_selector),
          .offset_words = 14,
          .offset_bits  = 0,
          .size_bits    = 2 },
        { PATH_REC_FIELD(packet_life_time),
          .offset_words = 14,
          .offset_bits  = 2,
          .size_bits    = 6 },
        { PATH_REC_FIELD(preference),
          .offset_words = 14,
          .offset_bits  = 8,
          .size_bits    = 8 },
        { RESERVED,
          .offset_words = 14,
          .offset_bits  = 16,
          .size_bits    = 48 },
};
#define MCMEMBER_REC_FIELD(field) \
        .struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field),     \
        .struct_size_bytes   = sizeof ((struct ib_sa_mcmember_rec *) 0)->field, \
        .field_name          = "sa_mcmember_rec:" #field

static const struct ib_field mcmember_rec_table[] = {
        { MCMEMBER_REC_FIELD(mgid),
          .offset_words = 0,
          .offset_bits  = 0,
          .size_bits    = 128 },
        { MCMEMBER_REC_FIELD(port_gid),
          .offset_words = 4,
          .offset_bits  = 0,
          .size_bits    = 128 },
        { MCMEMBER_REC_FIELD(qkey),
          .offset_words = 8,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { MCMEMBER_REC_FIELD(mlid),
          .offset_words = 9,
          .offset_bits  = 0,
          .size_bits    = 16 },
        { MCMEMBER_REC_FIELD(mtu_selector),
          .offset_words = 9,
          .offset_bits  = 16,
          .size_bits    = 2 },
        { MCMEMBER_REC_FIELD(mtu),
          .offset_words = 9,
          .offset_bits  = 18,
          .size_bits    = 6 },
        { MCMEMBER_REC_FIELD(traffic_class),
          .offset_words = 9,
          .offset_bits  = 24,
          .size_bits    = 8 },
        { MCMEMBER_REC_FIELD(pkey),
          .offset_words = 10,
          .offset_bits  = 0,
          .size_bits    = 16 },
        { MCMEMBER_REC_FIELD(rate_selector),
          .offset_words = 10,
          .offset_bits  = 16,
          .size_bits    = 2 },
        { MCMEMBER_REC_FIELD(rate),
          .offset_words = 10,
          .offset_bits  = 18,
          .size_bits    = 6 },
        { MCMEMBER_REC_FIELD(packet_life_time_selector),
          .offset_words = 10,
          .offset_bits  = 24,
          .size_bits    = 2 },
        { MCMEMBER_REC_FIELD(packet_life_time),
          .offset_words = 10,
          .offset_bits  = 26,
          .size_bits    = 6 },
        { MCMEMBER_REC_FIELD(sl),
          .offset_words = 11,
          .offset_bits  = 0,
          .size_bits    = 4 },
        { MCMEMBER_REC_FIELD(flow_label),
          .offset_words = 11,
          .offset_bits  = 4,
          .size_bits    = 20 },
        { MCMEMBER_REC_FIELD(hop_limit),
          .offset_words = 11,
          .offset_bits  = 24,
          .size_bits    = 8 },
        { MCMEMBER_REC_FIELD(scope),
          .offset_words = 12,
          .offset_bits  = 0,
          .size_bits    = 4 },
        { MCMEMBER_REC_FIELD(join_state),
          .offset_words = 12,
          .offset_bits  = 4,
          .size_bits    = 4 },
        { MCMEMBER_REC_FIELD(proxy_join),
          .offset_words = 12,
          .offset_bits  = 8,
          .size_bits    = 1 },
        { RESERVED,
          .offset_words = 12,
          .offset_bits  = 9,
          .size_bits    = 23 },
};
#define SERVICE_REC_FIELD(field) \
        .struct_offset_bytes = offsetof(struct ib_sa_service_rec, field),     \
        .struct_size_bytes   = sizeof ((struct ib_sa_service_rec *) 0)->field, \
        .field_name          = "sa_service_rec:" #field

static const struct ib_field service_rec_table[] = {
        { SERVICE_REC_FIELD(id),
          .offset_words = 0,
          .offset_bits  = 0,
          .size_bits    = 64 },
        { SERVICE_REC_FIELD(gid),
          .offset_words = 2,
          .offset_bits  = 0,
          .size_bits    = 128 },
        { SERVICE_REC_FIELD(pkey),
          .offset_words = 6,
          .offset_bits  = 0,
          .size_bits    = 16 },
        { SERVICE_REC_FIELD(lease),
          .offset_words = 7,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { SERVICE_REC_FIELD(key),
          .offset_words = 8,
          .offset_bits  = 0,
          .size_bits    = 128 },
        { SERVICE_REC_FIELD(name),
          .offset_words = 12,
          .offset_bits  = 0,
          .size_bits    = 64*8 },
        { SERVICE_REC_FIELD(data8),
          .offset_words = 28,
          .offset_bits  = 0,
          .size_bits    = 16*8 },
        { SERVICE_REC_FIELD(data16),
          .offset_words = 32,
          .offset_bits  = 0,
          .size_bits    = 8*16 },
        { SERVICE_REC_FIELD(data32),
          .offset_words = 36,
          .offset_bits  = 0,
          .size_bits    = 4*32 },
        { SERVICE_REC_FIELD(data64),
          .offset_words = 40,
          .offset_bits  = 0,
          .size_bits    = 2*64 },
};
#define CLASSPORTINFO_REC_FIELD(field) \
        .struct_offset_bytes = offsetof(struct ib_class_port_info, field),  \
        .struct_size_bytes   = sizeof((struct ib_class_port_info *)0)->field, \
        .field_name          = "ib_class_port_info:" #field

static const struct ib_field classport_info_rec_table[] = {
        { CLASSPORTINFO_REC_FIELD(base_version),
          .offset_words = 0,
          .offset_bits  = 0,
          .size_bits    = 8 },
        { CLASSPORTINFO_REC_FIELD(class_version),
          .offset_words = 0,
          .offset_bits  = 8,
          .size_bits    = 8 },
        { CLASSPORTINFO_REC_FIELD(capability_mask),
          .offset_words = 0,
          .offset_bits  = 16,
          .size_bits    = 16 },
        { CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
          .offset_words = 1,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { CLASSPORTINFO_REC_FIELD(redirect_gid),
          .offset_words = 2,
          .offset_bits  = 0,
          .size_bits    = 128 },
        { CLASSPORTINFO_REC_FIELD(redirect_tcslfl),
          .offset_words = 6,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { CLASSPORTINFO_REC_FIELD(redirect_lid),
          .offset_words = 7,
          .offset_bits  = 0,
          .size_bits    = 16 },
        { CLASSPORTINFO_REC_FIELD(redirect_pkey),
          .offset_words = 7,
          .offset_bits  = 16,
          .size_bits    = 16 },

        { CLASSPORTINFO_REC_FIELD(redirect_qp),
          .offset_words = 8,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { CLASSPORTINFO_REC_FIELD(redirect_qkey),
          .offset_words = 9,
          .offset_bits  = 0,
          .size_bits    = 32 },

        { CLASSPORTINFO_REC_FIELD(trap_gid),
          .offset_words = 10,
          .offset_bits  = 0,
          .size_bits    = 128 },
        { CLASSPORTINFO_REC_FIELD(trap_tcslfl),
          .offset_words = 14,
          .offset_bits  = 0,
          .size_bits    = 32 },

        { CLASSPORTINFO_REC_FIELD(trap_lid),
          .offset_words = 15,
          .offset_bits  = 0,
          .size_bits    = 16 },
        { CLASSPORTINFO_REC_FIELD(trap_pkey),
          .offset_words = 15,
          .offset_bits  = 16,
          .size_bits    = 16 },

        { CLASSPORTINFO_REC_FIELD(trap_hlqp),
          .offset_words = 16,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { CLASSPORTINFO_REC_FIELD(trap_qkey),
          .offset_words = 17,
          .offset_bits  = 0,
          .size_bits    = 32 },
};
#define GUIDINFO_REC_FIELD(field) \
        .struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field),  \
        .struct_size_bytes   = sizeof((struct ib_sa_guidinfo_rec *) 0)->field, \
        .field_name          = "sa_guidinfo_rec:" #field

static const struct ib_field guidinfo_rec_table[] = {
        { GUIDINFO_REC_FIELD(lid),
          .offset_words = 0,
          .offset_bits  = 0,
          .size_bits    = 16 },
        { GUIDINFO_REC_FIELD(block_num),
          .offset_words = 0,
          .offset_bits  = 16,
          .size_bits    = 8 },
        { GUIDINFO_REC_FIELD(res1),
          .offset_words = 0,
          .offset_bits  = 24,
          .size_bits    = 8 },
        { GUIDINFO_REC_FIELD(res2),
          .offset_words = 1,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { GUIDINFO_REC_FIELD(guid_info_list),
          .offset_words = 2,
          .offset_bits  = 0,
          .size_bits    = 512 },
};
static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
{
        query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
}

static inline int ib_sa_query_cancelled(struct ib_sa_query *query)
{
        return (query->flags & IB_SA_CANCEL);
}
static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
                                     struct ib_sa_query *query)
{
        struct ib_sa_path_rec *sa_rec = query->mad_buf->context[1];
        struct ib_sa_mad *mad = query->mad_buf->mad;
        ib_sa_comp_mask comp_mask = mad->sa_hdr.comp_mask;
        u16 val16;
        u64 val64;
        struct rdma_ls_resolve_header *header;

        query->mad_buf->context[1] = NULL;

        /* Construct the family header first */
        header = (struct rdma_ls_resolve_header *)
                skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
        memcpy(header->device_name, query->port->agent->device->name,
               LS_DEVICE_NAME_MAX);
        header->port_num = query->port->port_num;

        if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
            sa_rec->reversible != 0)
                query->path_use = LS_RESOLVE_PATH_USE_GMP;
        else
                query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL;
        header->path_use = query->path_use;

        /* Now build the attributes */
        if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
                val64 = be64_to_cpu(sa_rec->service_id);
                nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
                        sizeof(val64), &val64);
        }
        if (comp_mask & IB_SA_PATH_REC_DGID)
                nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_DGID,
                        sizeof(sa_rec->dgid), &sa_rec->dgid);
        if (comp_mask & IB_SA_PATH_REC_SGID)
                nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SGID,
                        sizeof(sa_rec->sgid), &sa_rec->sgid);
        if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
                nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_TCLASS,
                        sizeof(sa_rec->traffic_class), &sa_rec->traffic_class);

        if (comp_mask & IB_SA_PATH_REC_PKEY) {
                val16 = be16_to_cpu(sa_rec->pkey);
                nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_PKEY,
                        sizeof(val16), &val16);
        }
        if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) {
                val16 = be16_to_cpu(sa_rec->qos_class);
                nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_QOS_CLASS,
                        sizeof(val16), &val16);
        }
}
static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
{
        int len = 0;

        if (comp_mask & IB_SA_PATH_REC_SERVICE_ID)
                len += nla_total_size(sizeof(u64));
        if (comp_mask & IB_SA_PATH_REC_DGID)
                len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
        if (comp_mask & IB_SA_PATH_REC_SGID)
                len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
        if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
                len += nla_total_size(sizeof(u8));
        if (comp_mask & IB_SA_PATH_REC_PKEY)
                len += nla_total_size(sizeof(u16));
        if (comp_mask & IB_SA_PATH_REC_QOS_CLASS)
                len += nla_total_size(sizeof(u16));

        /*
         * Make sure that at least some of the required comp_mask bits are
         * set.
         */
        if (WARN_ON(len == 0))
                return -EINVAL;

        /* Add the family header */
        len += NLMSG_ALIGN(sizeof(struct rdma_ls_resolve_header));

        return len;
}
static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
{
        struct sk_buff *skb = NULL;
        struct nlmsghdr *nlh;
        void *data;
        int ret = 0;
        struct ib_sa_mad *mad;
        int len;

        mad = query->mad_buf->mad;
        len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
        if (len <= 0)
                return -EMSGSIZE;

        skb = nlmsg_new(len, gfp_mask);
        if (!skb)
                return -ENOMEM;

        /* Put nlmsg header only for now */
        data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS,
                            RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST);
        if (!data) {
                nlmsg_free(skb);
                return -EMSGSIZE;
        }

        /* Now put the attributes */
        ib_nl_set_path_rec_attrs(skb, query);

        /* Repair the nlmsg header length */
        nlmsg_end(skb, nlh);

        ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, gfp_mask);
        if (!ret)
                ret = len;
        else
                ret = 0;

        return ret;
}
static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
{
        unsigned long flags;
        unsigned long delay;
        int ret;

        INIT_LIST_HEAD(&query->list);
        query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);

        /* Put the request on the list first. */
        spin_lock_irqsave(&ib_nl_request_lock, flags);
        delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
        query->timeout = delay + jiffies;
        list_add_tail(&query->list, &ib_nl_request_list);
        /* Start the timeout if this is the only request */
        if (ib_nl_request_list.next == &query->list)
                queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
        spin_unlock_irqrestore(&ib_nl_request_lock, flags);

        ret = ib_nl_send_msg(query, gfp_mask);
        if (ret <= 0) {
                ret = -EIO;
                /* Remove the request */
                spin_lock_irqsave(&ib_nl_request_lock, flags);
                list_del(&query->list);
                spin_unlock_irqrestore(&ib_nl_request_lock, flags);
        } else {
                ret = 0;
        }

        return ret;
}
static int ib_nl_cancel_request(struct ib_sa_query *query)
{
        unsigned long flags;
        struct ib_sa_query *wait_query;
        int found = 0;

        spin_lock_irqsave(&ib_nl_request_lock, flags);
        list_for_each_entry(wait_query, &ib_nl_request_list, list) {
                /* Let the timeout routine take care of the callback */
                if (query == wait_query) {
                        query->flags |= IB_SA_CANCEL;
                        query->timeout = jiffies;
                        list_move(&query->list, &ib_nl_request_list);
                        found = 1;
                        mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, 1);
                        break;
                }
        }
        spin_unlock_irqrestore(&ib_nl_request_lock, flags);

        return found;
}
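
/*
 * Note: ib_nl_cancel_request() returns nonzero only when the query was
 * still waiting on the netlink request list; in that case the timeout
 * worker (rescheduled above with a one-jiffy delay) performs the actual
 * completion. A zero return tells the caller that the query has already
 * been handed to the MAD layer and must be canceled there instead.
 */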
static void send_handler(struct ib_mad_agent *agent,
                         struct ib_mad_send_wc *mad_send_wc);
static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
                                           const struct nlmsghdr *nlh)
{
        struct ib_mad_send_wc mad_send_wc;
        struct ib_sa_mad *mad = NULL;
        const struct nlattr *head, *curr;
        struct ib_path_rec_data *rec;
        int len, rem;
        u32 mask = 0;
        int status = -EIO;

        if (query->callback) {
                head = (const struct nlattr *) nlmsg_data(nlh);
                len = nlmsg_len(nlh);
                switch (query->path_use) {
                case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
                        mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
                        break;

                case LS_RESOLVE_PATH_USE_ALL:
                case LS_RESOLVE_PATH_USE_GMP:
                default:
                        mask = IB_PATH_PRIMARY | IB_PATH_GMP |
                                IB_PATH_BIDIRECTIONAL;
                        break;
                }
                nla_for_each_attr(curr, head, len, rem) {
                        if (curr->nla_type == LS_NLA_TYPE_PATH_RECORD) {
                                rec = nla_data(curr);
                                /*
                                 * Get the first one. In the future, we may
                                 * need to get up to 6 pathrecords.
                                 */
                                if ((rec->flags & mask) == mask) {
                                        mad = query->mad_buf->mad;
                                        mad->mad_hdr.method |=
                                                IB_MGMT_METHOD_RESP;
                                        memcpy(mad->data, rec->path_rec,
                                               sizeof(rec->path_rec));
                                        status = 0;
                                        break;
                                }
                        }
                }
                query->callback(query, status, mad);
        }

        mad_send_wc.send_buf = query->mad_buf;
        mad_send_wc.status = IB_WC_SUCCESS;
        send_handler(query->mad_buf->mad_agent, &mad_send_wc);
}
static void ib_nl_request_timeout(struct work_struct *work)
{
        unsigned long flags;
        struct ib_sa_query *query;
        unsigned long delay;
        struct ib_mad_send_wc mad_send_wc;
        int ret;

        spin_lock_irqsave(&ib_nl_request_lock, flags);
        while (!list_empty(&ib_nl_request_list)) {
                query = list_entry(ib_nl_request_list.next,
                                   struct ib_sa_query, list);

                if (time_after(query->timeout, jiffies)) {
                        delay = query->timeout - jiffies;
                        if ((long)delay <= 0)
                                delay = 1;
                        queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
                        break;
                }

                list_del(&query->list);
                ib_sa_disable_local_svc(query);
                /* Hold the lock to protect against query cancellation */
                if (ib_sa_query_cancelled(query))
                        ret = -1;
                else
                        ret = ib_post_send_mad(query->mad_buf, NULL);
                if (ret) {
                        mad_send_wc.send_buf = query->mad_buf;
                        mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
                        spin_unlock_irqrestore(&ib_nl_request_lock, flags);
                        send_handler(query->port->agent, &mad_send_wc);
                        spin_lock_irqsave(&ib_nl_request_lock, flags);
                }
        }
        spin_unlock_irqrestore(&ib_nl_request_lock, flags);
}
static int ib_nl_handle_set_timeout(struct sk_buff *skb,
                                    struct netlink_callback *cb)
{
        const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
        int timeout, delta, abs_delta;
        const struct nlattr *attr;
        unsigned long flags;
        struct ib_sa_query *query;
        long delay = 0;
        struct nlattr *tb[LS_NLA_TYPE_MAX];
        int ret;

        if (!(nlh->nlmsg_flags & NLM_F_REQUEST) ||
            !(NETLINK_CB(skb).sk) ||
            !netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;

        ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
                        nlmsg_len(nlh), ib_nl_policy);
        attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
        if (ret || !attr)
                goto settimeout_out;

        timeout = *(int *) nla_data(attr);
        if (timeout < IB_SA_LOCAL_SVC_TIMEOUT_MIN)
                timeout = IB_SA_LOCAL_SVC_TIMEOUT_MIN;
        if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
                timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;

        delta = timeout - sa_local_svc_timeout_ms;
        if (delta < 0)
                abs_delta = -delta;
        else
                abs_delta = delta;

        if (delta != 0) {
                spin_lock_irqsave(&ib_nl_request_lock, flags);
                sa_local_svc_timeout_ms = timeout;
                list_for_each_entry(query, &ib_nl_request_list, list) {
                        if (delta < 0 && abs_delta > query->timeout)
                                query->timeout = 0;
                        else
                                query->timeout += delta;

                        /* Get the new delay from the first entry */
                        if (!delay) {
                                delay = query->timeout - jiffies;
                                if (delay <= 0)
                                        delay = 1;
                        }
                }
                if (delay)
                        mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
                                         (unsigned long)delay);
                spin_unlock_irqrestore(&ib_nl_request_lock, flags);
        }

settimeout_out:
        return skb->len;
}
static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
{
        struct nlattr *tb[LS_NLA_TYPE_MAX];
        int ret;

        if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
                return 0;

        ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
                        nlmsg_len(nlh), ib_nl_policy);
        if (ret)
                return 0;

        return 1;
}
static int ib_nl_handle_resolve_resp(struct sk_buff *skb,
                                     struct netlink_callback *cb)
{
        const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
        unsigned long flags;
        struct ib_sa_query *query;
        struct ib_mad_send_buf *send_buf;
        struct ib_mad_send_wc mad_send_wc;
        int found = 0;
        int ret;

        if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
            !(NETLINK_CB(skb).sk) ||
            !netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;

        spin_lock_irqsave(&ib_nl_request_lock, flags);
        list_for_each_entry(query, &ib_nl_request_list, list) {
                /*
                 * If the query is cancelled, let the timeout routine
                 * take care of it.
                 */
                if (nlh->nlmsg_seq == query->seq) {
                        found = !ib_sa_query_cancelled(query);
                        if (found)
                                list_del(&query->list);
                        break;
                }
        }

        if (!found) {
                spin_unlock_irqrestore(&ib_nl_request_lock, flags);
                goto resp_out;
        }

        send_buf = query->mad_buf;

        if (!ib_nl_is_good_resolve_resp(nlh)) {
                /* If the result is a failure, send out the packet via IB */
                ib_sa_disable_local_svc(query);
                ret = ib_post_send_mad(query->mad_buf, NULL);
                spin_unlock_irqrestore(&ib_nl_request_lock, flags);
                if (ret) {
                        mad_send_wc.send_buf = send_buf;
                        mad_send_wc.status = IB_WC_GENERAL_ERR;
                        send_handler(query->port->agent, &mad_send_wc);
                }
        } else {
                spin_unlock_irqrestore(&ib_nl_request_lock, flags);
                ib_nl_process_good_resolve_rsp(query, nlh);
        }

resp_out:
        return skb->len;
}
static struct ibnl_client_cbs ib_sa_cb_table[] = {
        [RDMA_NL_LS_OP_RESOLVE] = {
                .dump = ib_nl_handle_resolve_resp,
                .module = THIS_MODULE },
        [RDMA_NL_LS_OP_SET_TIMEOUT] = {
                .dump = ib_nl_handle_set_timeout,
                .module = THIS_MODULE },
};
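
/*
 * The table above maps RDMA_NL_LS netlink operations to their handlers;
 * it is registered with ibnl_add_client() in ib_sa_init() below, and
 * userspace responses then arrive through the ->dump callbacks.
 */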
static void free_sm_ah(struct kref *kref)
{
        struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);

        ib_destroy_ah(sm_ah->ah);
        kfree(sm_ah);
}
static void update_sm_ah(struct work_struct *work)
{
        struct ib_sa_port *port =
                container_of(work, struct ib_sa_port, update_task);
        struct ib_sa_sm_ah *new_ah;
        struct ib_port_attr port_attr;
        struct ib_ah_attr   ah_attr;

        if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
                pr_warn("Couldn't query port\n");
                return;
        }

        new_ah = kmalloc(sizeof *new_ah, GFP_KERNEL);
        if (!new_ah) {
                return;
        }

        kref_init(&new_ah->ref);
        new_ah->src_path_mask = (1 << port_attr.lmc) - 1;

        new_ah->pkey_index = 0;
        if (ib_find_pkey(port->agent->device, port->port_num,
                         IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
                pr_err("Couldn't find index for default PKey\n");

        memset(&ah_attr, 0, sizeof ah_attr);
        ah_attr.dlid     = port_attr.sm_lid;
        ah_attr.sl       = port_attr.sm_sl;
        ah_attr.port_num = port->port_num;
        if (port_attr.grh_required) {
                ah_attr.ah_flags = IB_AH_GRH;
                ah_attr.grh.dgid.global.subnet_prefix = cpu_to_be64(port_attr.subnet_prefix);
                ah_attr.grh.dgid.global.interface_id = cpu_to_be64(IB_SA_WELL_KNOWN_GUID);
        }

        new_ah->ah = ib_create_ah(port->agent->qp->pd, &ah_attr);
        if (IS_ERR(new_ah->ah)) {
                pr_warn("Couldn't create new SM AH\n");
                kfree(new_ah);
                return;
        }

        spin_lock_irq(&port->ah_lock);
        if (port->sm_ah)
                kref_put(&port->sm_ah->ref, free_sm_ah);
        port->sm_ah = new_ah;
        spin_unlock_irq(&port->ah_lock);
}
static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event)
{
        if (event->event == IB_EVENT_PORT_ERR    ||
            event->event == IB_EVENT_PORT_ACTIVE ||
            event->event == IB_EVENT_LID_CHANGE  ||
            event->event == IB_EVENT_PKEY_CHANGE ||
            event->event == IB_EVENT_SM_CHANGE   ||
            event->event == IB_EVENT_CLIENT_REREGISTER) {
                unsigned long flags;
                struct ib_sa_device *sa_dev =
                        container_of(handler, typeof(*sa_dev), event_handler);
                struct ib_sa_port *port =
                        &sa_dev->port[event->element.port_num - sa_dev->start_port];

                if (!rdma_cap_ib_sa(handler->device, port->port_num))
                        return;

                spin_lock_irqsave(&port->ah_lock, flags);
                if (port->sm_ah)
                        kref_put(&port->sm_ah->ref, free_sm_ah);
                port->sm_ah = NULL;
                spin_unlock_irqrestore(&port->ah_lock, flags);

                queue_work(ib_wq, &sa_dev->port[event->element.port_num -
                                                sa_dev->start_port].update_task);
        }
}
void ib_sa_register_client(struct ib_sa_client *client)
{
        atomic_set(&client->users, 1);
        init_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_register_client);

void ib_sa_unregister_client(struct ib_sa_client *client)
{
        ib_sa_client_put(client);
        wait_for_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_unregister_client);
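
/*
 * Typical usage (illustrative sketch, not taken from this file): an SA
 * consumer pairs these calls around its queries, e.g.
 *
 *      static struct ib_sa_client my_sa_client;        // hypothetical name
 *
 *      ib_sa_register_client(&my_sa_client);
 *      ...issue queries such as ib_sa_path_rec_get()...
 *      ib_sa_unregister_client(&my_sa_client);
 *
 * ib_sa_unregister_client() blocks until every outstanding query holding
 * a reference on the client has completed and dropped it.
 */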
/**
 * ib_sa_cancel_query - try to cancel an SA query
 * @id: ID of query to cancel
 * @query: query pointer to cancel
 *
 * Try to cancel an SA query.  If the id and query don't match up or
 * the query has already completed, nothing is done.  Otherwise the
 * query is canceled and will complete with a status of -EINTR.
 */
void ib_sa_cancel_query(int id, struct ib_sa_query *query)
{
        unsigned long flags;
        struct ib_mad_agent *agent;
        struct ib_mad_send_buf *mad_buf;

        spin_lock_irqsave(&idr_lock, flags);
        if (idr_find(&query_idr, id) != query) {
                spin_unlock_irqrestore(&idr_lock, flags);
                return;
        }
        agent = query->port->agent;
        mad_buf = query->mad_buf;
        spin_unlock_irqrestore(&idr_lock, flags);

        /*
         * If the query is still on the netlink request list, schedule
         * it to be cancelled by the timeout routine. Otherwise, it has been
         * sent to the MAD layer and has to be cancelled from there.
         */
        if (!ib_nl_cancel_request(query))
                ib_cancel_mad(agent, mad_buf);
}
EXPORT_SYMBOL(ib_sa_cancel_query);
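
/*
 * Cancellation is asynchronous: a canceled query still completes through
 * send_handler(), which invokes the user callback with -EINTR (via an
 * IB_WC_WR_FLUSH_ERR completion) and then releases the query. Callers
 * therefore must not free their callback context until the callback has
 * actually run.
 */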
static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
{
        struct ib_sa_device *sa_dev;
        struct ib_sa_port   *port;
        unsigned long flags;
        u8 src_path_mask;

        sa_dev = ib_get_client_data(device, &sa_client);
        if (!sa_dev)
                return 0x7f;

        port  = &sa_dev->port[port_num - sa_dev->start_port];
        spin_lock_irqsave(&port->ah_lock, flags);
        src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
        spin_unlock_irqrestore(&port->ah_lock, flags);

        return src_path_mask;
}
int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
                         struct ib_sa_path_rec *rec, struct ib_ah_attr *ah_attr)
{
        int ret;
        u16 gid_index;
        int use_roce;
        struct net_device *ndev = NULL;

        memset(ah_attr, 0, sizeof *ah_attr);
        ah_attr->dlid = be16_to_cpu(rec->dlid);
        ah_attr->sl = rec->sl;
        ah_attr->src_path_bits = be16_to_cpu(rec->slid) &
                                 get_src_path_mask(device, port_num);
        ah_attr->port_num = port_num;
        ah_attr->static_rate = rec->rate;

        use_roce = rdma_cap_eth_ah(device, port_num);

        if (use_roce) {
                struct net_device *idev;
                struct net_device *resolved_dev;
                struct rdma_dev_addr dev_addr = {.bound_dev_if = rec->ifindex,
                                                 .net = rec->net ? rec->net :
                                                        &init_net};
                union {
                        struct sockaddr     _sockaddr;
                        struct sockaddr_in  _sockaddr_in;
                        struct sockaddr_in6 _sockaddr_in6;
                } sgid_addr, dgid_addr;

                if (!device->get_netdev)
                        return -EOPNOTSUPP;

                rdma_gid2ip(&sgid_addr._sockaddr, &rec->sgid);
                rdma_gid2ip(&dgid_addr._sockaddr, &rec->dgid);

                /* validate the route */
                ret = rdma_resolve_ip_route(&sgid_addr._sockaddr,
                                            &dgid_addr._sockaddr, &dev_addr);
                if (ret)
                        return ret;

                if ((dev_addr.network == RDMA_NETWORK_IPV4 ||
                     dev_addr.network == RDMA_NETWORK_IPV6) &&
                    rec->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
                        return -EINVAL;

                idev = device->get_netdev(device, port_num);
                if (!idev)
                        return -ENODEV;

                resolved_dev = dev_get_by_index(dev_addr.net,
                                                dev_addr.bound_dev_if);
                if (resolved_dev->flags & IFF_LOOPBACK) {
                        dev_put(resolved_dev);
                        resolved_dev = idev;
                        dev_hold(resolved_dev);
                }
                ndev = ib_get_ndev_from_path(rec);
                rcu_read_lock();
                if ((ndev && ndev != resolved_dev) ||
                    (resolved_dev != idev &&
                     !rdma_is_upper_dev_rcu(idev, resolved_dev)))
                        ret = -EHOSTUNREACH;
                rcu_read_unlock();
                dev_put(idev);
                dev_put(resolved_dev);
                if (ret) {
                        if (ndev)
                                dev_put(ndev);
                        return ret;
                }
        }

        if (rec->hop_limit > 0 || use_roce) {
                ah_attr->ah_flags = IB_AH_GRH;
                ah_attr->grh.dgid = rec->dgid;

                ret = ib_find_cached_gid_by_port(device, &rec->sgid,
                                                 rec->gid_type, port_num, ndev,
                                                 &gid_index);
                if (ret) {
                        if (ndev)
                                dev_put(ndev);
                        return ret;
                }

                ah_attr->grh.sgid_index    = gid_index;
                ah_attr->grh.flow_label    = be32_to_cpu(rec->flow_label);
                ah_attr->grh.hop_limit     = rec->hop_limit;
                ah_attr->grh.traffic_class = rec->traffic_class;
                if (ndev)
                        dev_put(ndev);
        }

        if (use_roce)
                memcpy(ah_attr->dmac, rec->dmac, ETH_ALEN);

        return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_path);
static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
{
        unsigned long flags;

        spin_lock_irqsave(&query->port->ah_lock, flags);
        if (!query->port->sm_ah) {
                spin_unlock_irqrestore(&query->port->ah_lock, flags);
                return -EAGAIN;
        }
        kref_get(&query->port->sm_ah->ref);
        query->sm_ah = query->port->sm_ah;
        spin_unlock_irqrestore(&query->port->ah_lock, flags);

        query->mad_buf = ib_create_send_mad(query->port->agent, 1,
                                            query->sm_ah->pkey_index,
                                            0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
                                            gfp_mask,
                                            IB_MGMT_BASE_VERSION);
        if (IS_ERR(query->mad_buf)) {
                kref_put(&query->sm_ah->ref, free_sm_ah);
                return -ENOMEM;
        }

        query->mad_buf->ah = query->sm_ah->ah;

        return 0;
}

static void free_mad(struct ib_sa_query *query)
{
        ib_free_send_mad(query->mad_buf);
        kref_put(&query->sm_ah->ref, free_sm_ah);
}
static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
{
        unsigned long flags;

        memset(mad, 0, sizeof *mad);

        mad->mad_hdr.base_version  = IB_MGMT_BASE_VERSION;
        mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_SUBN_ADM;
        mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;

        spin_lock_irqsave(&tid_lock, flags);
        mad->mad_hdr.tid           =
                cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
        spin_unlock_irqrestore(&tid_lock, flags);
}
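
/*
 * Worked example of the TID construction above (values are illustrative):
 * with agent->hi_tid == 0x00000005 and the per-module counter tid ==
 * 0x000000f0, the transaction ID becomes cpu_to_be64(0x00000005000000f0).
 * The MAD layer's hi_tid in the upper 32 bits keeps TIDs from different
 * agents disjoint, while tid_lock serializes the counter increment.
 */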
static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
{
        bool preload = gfpflags_allow_blocking(gfp_mask);
        unsigned long flags;
        int ret, id;

        if (preload)
                idr_preload(gfp_mask);
        spin_lock_irqsave(&idr_lock, flags);

        id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);

        spin_unlock_irqrestore(&idr_lock, flags);
        if (preload)
                idr_preload_end();
        if (id < 0)
                return id;

        query->mad_buf->timeout_ms  = timeout_ms;
        query->mad_buf->context[0] = query;
        query->id = id;

        if (query->flags & IB_SA_ENABLE_LOCAL_SERVICE) {
                if (!ibnl_chk_listeners(RDMA_NL_GROUP_LS)) {
                        if (!ib_nl_make_request(query, gfp_mask))
                                return id;
                }
                ib_sa_disable_local_svc(query);
        }

        ret = ib_post_send_mad(query->mad_buf, NULL);
        if (ret) {
                spin_lock_irqsave(&idr_lock, flags);
                idr_remove(&query_idr, id);
                spin_unlock_irqrestore(&idr_lock, flags);
        }

        /*
         * It's not safe to dereference query any more, because the
         * send may already have completed and freed the query in
         * another context.
         */
        return ret ? ret : id;
}
void ib_sa_unpack_path(void *attribute, struct ib_sa_path_rec *rec)
{
        ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
}
EXPORT_SYMBOL(ib_sa_unpack_path);

void ib_sa_pack_path(struct ib_sa_path_rec *rec, void *attribute)
{
        ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute);
}
EXPORT_SYMBOL(ib_sa_pack_path);
static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
                                    int status,
                                    struct ib_sa_mad *mad)
{
        struct ib_sa_path_query *query =
                container_of(sa_query, struct ib_sa_path_query, sa_query);

        if (mad) {
                struct ib_sa_path_rec rec;

                ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
                          mad->data, &rec);
                rec.net = NULL;
                rec.ifindex = 0;
                rec.gid_type = IB_GID_TYPE_IB;
                eth_zero_addr(rec.dmac);
                query->callback(status, &rec, query->context);
        } else
                query->callback(status, NULL, query->context);
}

static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
        kfree(container_of(sa_query, struct ib_sa_path_query, sa_query));
}
/**
 * ib_sa_path_rec_get - Start a Path get query
 * @client: SA client
 * @device: device to send query on
 * @port_num: port number to send query on
 * @rec: Path Record to send in query
 * @comp_mask: component mask to send in query
 * @timeout_ms: time to wait for response
 * @gfp_mask: GFP mask to use for internal allocations
 * @callback: function called when query completes, times out or is
 * canceled
 * @context: opaque user context passed to callback
 * @sa_query: query context, used to cancel query
 *
 * Send a Path Record Get query to the SA to look up a path.  The
 * callback function will be called when the query completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query.  The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_path_rec_get() is negative, it is an
 * error code.  Otherwise it is a query ID that can be used to cancel
 * the query.
 */
int ib_sa_path_rec_get(struct ib_sa_client *client,
                       struct ib_device *device, u8 port_num,
                       struct ib_sa_path_rec *rec,
                       ib_sa_comp_mask comp_mask,
                       int timeout_ms, gfp_t gfp_mask,
                       void (*callback)(int status,
                                        struct ib_sa_path_rec *resp,
                                        void *context),
                       void *context,
                       struct ib_sa_query **sa_query)
{
        struct ib_sa_path_query *query;
        struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
        struct ib_sa_port   *port;
        struct ib_mad_agent *agent;
        struct ib_sa_mad *mad;
        int ret;

        if (!sa_dev)
                return -ENODEV;

        port  = &sa_dev->port[port_num - sa_dev->start_port];
        agent = port->agent;

        query = kzalloc(sizeof(*query), gfp_mask);
        if (!query)
                return -ENOMEM;

        query->sa_query.port = port;
        ret = alloc_mad(&query->sa_query, gfp_mask);
        if (ret)
                goto err1;

        ib_sa_client_get(client);
        query->sa_query.client = client;
        query->callback        = callback;
        query->context         = context;

        mad = query->sa_query.mad_buf->mad;
        init_mad(mad, agent);

        query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
        query->sa_query.release  = ib_sa_path_rec_release;
        mad->mad_hdr.method      = IB_MGMT_METHOD_GET;
        mad->mad_hdr.attr_id     = cpu_to_be16(IB_SA_ATTR_PATH_REC);
        mad->sa_hdr.comp_mask    = comp_mask;

        ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, mad->data);

        *sa_query = &query->sa_query;

        query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE;
        query->sa_query.mad_buf->context[1] = rec;

        ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
        if (ret < 0)
                goto err2;

        return ret;

err2:
        *sa_query = NULL;
        ib_sa_client_put(query->sa_query.client);
        free_mad(&query->sa_query);

err1:
        kfree(query);
        return ret;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);
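
/*
 * Example caller (illustrative sketch; the names below are hypothetical
 * and not part of this file):
 *
 *      static void my_path_cb(int status, struct ib_sa_path_rec *resp,
 *                             void *context)
 *      {
 *              if (!status)
 *                      pr_info("path resolved, dlid 0x%x\n",
 *                              be16_to_cpu(resp->dlid));
 *      }
 *
 *      struct ib_sa_path_rec rec = {
 *              .sgid      = my_sgid,   // hypothetical
 *              .dgid      = my_dgid,   // hypothetical
 *              .numb_path = 1,
 *      };
 *      struct ib_sa_query *query;
 *      int id = ib_sa_path_rec_get(&my_sa_client, device, port_num, &rec,
 *                                  IB_SA_PATH_REC_SGID | IB_SA_PATH_REC_DGID |
 *                                  IB_SA_PATH_REC_NUMB_PATH,
 *                                  1000, GFP_KERNEL, my_path_cb, NULL,
 *                                  &query);
 *
 * A negative id is an error; otherwise it can later be passed to
 * ib_sa_cancel_query(id, query).
 */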
static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
                                       int status,
                                       struct ib_sa_mad *mad)
{
        struct ib_sa_service_query *query =
                container_of(sa_query, struct ib_sa_service_query, sa_query);

        if (mad) {
                struct ib_sa_service_rec rec;

                ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
                          mad->data, &rec);
                query->callback(status, &rec, query->context);
        } else
                query->callback(status, NULL, query->context);
}

static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
{
        kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
}
/**
 * ib_sa_service_rec_query - Start Service Record operation
 * @client: SA client
 * @device: device to send request on
 * @port_num: port number to send request on
 * @method: SA method - should be get, set, or delete
 * @rec: Service Record to send in request
 * @comp_mask: component mask to send in request
 * @timeout_ms: time to wait for response
 * @gfp_mask: GFP mask to use for internal allocations
 * @callback: function called when request completes, times out or is
 * canceled
 * @context: opaque user context passed to callback
 * @sa_query: request context, used to cancel request
 *
 * Send a Service Record set/get/delete to the SA to register,
 * unregister or query a service record.
 * The callback function will be called when the request completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query.  The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_service_rec_query() is negative, it is an
 * error code.  Otherwise it is a request ID that can be used to cancel
 * the query.
 */
int ib_sa_service_rec_query(struct ib_sa_client *client,
                            struct ib_device *device, u8 port_num, u8 method,
                            struct ib_sa_service_rec *rec,
                            ib_sa_comp_mask comp_mask,
                            int timeout_ms, gfp_t gfp_mask,
                            void (*callback)(int status,
                                             struct ib_sa_service_rec *resp,
                                             void *context),
                            void *context,
                            struct ib_sa_query **sa_query)
{
        struct ib_sa_service_query *query;
        struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
        struct ib_sa_port   *port;
        struct ib_mad_agent *agent;
        struct ib_sa_mad *mad;
        int ret;

        if (!sa_dev)
                return -ENODEV;

        port  = &sa_dev->port[port_num - sa_dev->start_port];
        agent = port->agent;

        if (method != IB_MGMT_METHOD_GET &&
            method != IB_MGMT_METHOD_SET &&
            method != IB_SA_METHOD_DELETE)
                return -EINVAL;

        query = kzalloc(sizeof(*query), gfp_mask);
        if (!query)
                return -ENOMEM;

        query->sa_query.port = port;
        ret = alloc_mad(&query->sa_query, gfp_mask);
        if (ret)
                goto err1;

        ib_sa_client_get(client);
        query->sa_query.client = client;
        query->callback        = callback;
        query->context         = context;

        mad = query->sa_query.mad_buf->mad;
        init_mad(mad, agent);

        query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
        query->sa_query.release  = ib_sa_service_rec_release;
        mad->mad_hdr.method      = method;
        mad->mad_hdr.attr_id     = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
        mad->sa_hdr.comp_mask    = comp_mask;

        ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
                rec, mad->data);

        *sa_query = &query->sa_query;

        ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
        if (ret < 0)
                goto err2;

        return ret;

err2:
        *sa_query = NULL;
        ib_sa_client_put(query->sa_query.client);
        free_mad(&query->sa_query);

err1:
        kfree(query);
        return ret;
}
EXPORT_SYMBOL(ib_sa_service_rec_query);
static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
                                        int status,
                                        struct ib_sa_mad *mad)
{
        struct ib_sa_mcmember_query *query =
                container_of(sa_query, struct ib_sa_mcmember_query, sa_query);

        if (mad) {
                struct ib_sa_mcmember_rec rec;

                ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
                          mad->data, &rec);
                query->callback(status, &rec, query->context);
        } else
                query->callback(status, NULL, query->context);
}

static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
{
        kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
}
int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
                             struct ib_device *device, u8 port_num,
                             u8 method,
                             struct ib_sa_mcmember_rec *rec,
                             ib_sa_comp_mask comp_mask,
                             int timeout_ms, gfp_t gfp_mask,
                             void (*callback)(int status,
                                              struct ib_sa_mcmember_rec *resp,
                                              void *context),
                             void *context,
                             struct ib_sa_query **sa_query)
{
        struct ib_sa_mcmember_query *query;
        struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
        struct ib_sa_port   *port;
        struct ib_mad_agent *agent;
        struct ib_sa_mad *mad;
        int ret;

        if (!sa_dev)
                return -ENODEV;

        port  = &sa_dev->port[port_num - sa_dev->start_port];
        agent = port->agent;

        query = kzalloc(sizeof(*query), gfp_mask);
        if (!query)
                return -ENOMEM;

        query->sa_query.port = port;
        ret = alloc_mad(&query->sa_query, gfp_mask);
        if (ret)
                goto err1;

        ib_sa_client_get(client);
        query->sa_query.client = client;
        query->callback        = callback;
        query->context         = context;

        mad = query->sa_query.mad_buf->mad;
        init_mad(mad, agent);

        query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
        query->sa_query.release  = ib_sa_mcmember_rec_release;
        mad->mad_hdr.method      = method;
        mad->mad_hdr.attr_id     = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
        mad->sa_hdr.comp_mask    = comp_mask;

        ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
                rec, mad->data);

        *sa_query = &query->sa_query;

        ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
        if (ret < 0)
                goto err2;

        return ret;

err2:
        *sa_query = NULL;
        ib_sa_client_put(query->sa_query.client);
        free_mad(&query->sa_query);

err1:
        kfree(query);
        return ret;
}
/* Support GuidInfoRecord */
static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
                                        int status,
                                        struct ib_sa_mad *mad)
{
        struct ib_sa_guidinfo_query *query =
                container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);

        if (mad) {
                struct ib_sa_guidinfo_rec rec;

                ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table),
                          mad->data, &rec);
                query->callback(status, &rec, query->context);
        } else
                query->callback(status, NULL, query->context);
}

static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
{
        kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query));
}
int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
                              struct ib_device *device, u8 port_num,
                              struct ib_sa_guidinfo_rec *rec,
                              ib_sa_comp_mask comp_mask, u8 method,
                              int timeout_ms, gfp_t gfp_mask,
                              void (*callback)(int status,
                                               struct ib_sa_guidinfo_rec *resp,
                                               void *context),
                              void *context,
                              struct ib_sa_query **sa_query)
{
        struct ib_sa_guidinfo_query *query;
        struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
        struct ib_sa_port *port;
        struct ib_mad_agent *agent;
        struct ib_sa_mad *mad;
        int ret;

        if (!sa_dev)
                return -ENODEV;

        if (method != IB_MGMT_METHOD_GET &&
            method != IB_MGMT_METHOD_SET &&
            method != IB_SA_METHOD_DELETE) {
                return -EINVAL;
        }

        port = &sa_dev->port[port_num - sa_dev->start_port];
        agent = port->agent;

        query = kzalloc(sizeof(*query), gfp_mask);
        if (!query)
                return -ENOMEM;

        query->sa_query.port = port;
        ret = alloc_mad(&query->sa_query, gfp_mask);
        if (ret)
                goto err1;

        ib_sa_client_get(client);
        query->sa_query.client = client;
        query->callback        = callback;
        query->context         = context;

        mad = query->sa_query.mad_buf->mad;
        init_mad(mad, agent);

        query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
        query->sa_query.release  = ib_sa_guidinfo_rec_release;

        mad->mad_hdr.method      = method;
        mad->mad_hdr.attr_id     = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC);
        mad->sa_hdr.comp_mask    = comp_mask;

        ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec,
                mad->data);

        *sa_query = &query->sa_query;

        ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
        if (ret < 0)
                goto err2;

        return ret;

err2:
        *sa_query = NULL;
        ib_sa_client_put(query->sa_query.client);
        free_mad(&query->sa_query);

err1:
        kfree(query);
        return ret;
}
EXPORT_SYMBOL(ib_sa_guid_info_rec_query);
/* Support get SA ClassPortInfo */
static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
                                              int status,
                                              struct ib_sa_mad *mad)
{
        struct ib_sa_classport_info_query *query =
                container_of(sa_query, struct ib_sa_classport_info_query, sa_query);

        if (mad) {
                struct ib_class_port_info rec;

                ib_unpack(classport_info_rec_table,
                          ARRAY_SIZE(classport_info_rec_table),
                          mad->data, &rec);
                query->callback(status, &rec, query->context);
        } else
                query->callback(status, NULL, query->context);
}

static void ib_sa_portclass_info_rec_release(struct ib_sa_query *sa_query)
{
        kfree(container_of(sa_query, struct ib_sa_classport_info_query,
                           sa_query));
}
int ib_sa_classport_info_rec_query(struct ib_sa_client *client,
                                   struct ib_device *device, u8 port_num,
                                   int timeout_ms, gfp_t gfp_mask,
                                   void (*callback)(int status,
                                                    struct ib_class_port_info *resp,
                                                    void *context),
                                   void *context,
                                   struct ib_sa_query **sa_query)
{
        struct ib_sa_classport_info_query *query;
        struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
        struct ib_sa_port *port;
        struct ib_mad_agent *agent;
        struct ib_sa_mad *mad;
        int ret;

        if (!sa_dev)
                return -ENODEV;

        port = &sa_dev->port[port_num - sa_dev->start_port];
        agent = port->agent;

        query = kzalloc(sizeof(*query), gfp_mask);
        if (!query)
                return -ENOMEM;

        query->sa_query.port = port;
        ret = alloc_mad(&query->sa_query, gfp_mask);
        if (ret)
                goto err1;

        ib_sa_client_get(client);
        query->sa_query.client = client;
        query->callback        = callback;
        query->context         = context;

        mad = query->sa_query.mad_buf->mad;
        init_mad(mad, agent);

        query->sa_query.callback = callback ? ib_sa_classport_info_rec_callback : NULL;

        query->sa_query.release  = ib_sa_portclass_info_rec_release;
        /* support GET only */
        mad->mad_hdr.method      = IB_MGMT_METHOD_GET;
        mad->mad_hdr.attr_id     = cpu_to_be16(IB_SA_ATTR_CLASS_PORTINFO);
        mad->sa_hdr.comp_mask    = 0;
        *sa_query = &query->sa_query;

        ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
        if (ret < 0)
                goto err2;

        return ret;

err2:
        *sa_query = NULL;
        ib_sa_client_put(query->sa_query.client);
        free_mad(&query->sa_query);

err1:
        kfree(query);
        return ret;
}
EXPORT_SYMBOL(ib_sa_classport_info_rec_query);
static void send_handler(struct ib_mad_agent *agent,
                         struct ib_mad_send_wc *mad_send_wc)
{
        struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
        unsigned long flags;

        if (query->callback)
                switch (mad_send_wc->status) {
                case IB_WC_SUCCESS:
                        /* No callback -- already got recv */
                        break;
                case IB_WC_RESP_TIMEOUT_ERR:
                        query->callback(query, -ETIMEDOUT, NULL);
                        break;
                case IB_WC_WR_FLUSH_ERR:
                        query->callback(query, -EINTR, NULL);
                        break;
                default:
                        query->callback(query, -EIO, NULL);
                        break;
                }

        spin_lock_irqsave(&idr_lock, flags);
        idr_remove(&query_idr, query->id);
        spin_unlock_irqrestore(&idr_lock, flags);

        free_mad(query);
        ib_sa_client_put(query->client);
        query->release(query);
}
static void recv_handler(struct ib_mad_agent *mad_agent,
                         struct ib_mad_send_buf *send_buf,
                         struct ib_mad_recv_wc *mad_recv_wc)
{
        struct ib_sa_query *query;

        if (!send_buf)
                return;

        query = send_buf->context[0];
        if (query->callback) {
                if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
                        query->callback(query,
                                        mad_recv_wc->recv_buf.mad->mad_hdr.status ?
                                        -EINVAL : 0,
                                        (struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
                else
                        query->callback(query, -EIO, NULL);
        }

        ib_free_recv_mad(mad_recv_wc);
}
static void ib_sa_add_one(struct ib_device *device)
{
        struct ib_sa_device *sa_dev;
        int s, e, i;
        int count = 0;

        s = rdma_start_port(device);
        e = rdma_end_port(device);

        sa_dev = kzalloc(sizeof *sa_dev +
                         (e - s + 1) * sizeof (struct ib_sa_port),
                         GFP_KERNEL);
        if (!sa_dev)
                return;

        sa_dev->start_port = s;
        sa_dev->end_port   = e;

        for (i = 0; i <= e - s; ++i) {
                spin_lock_init(&sa_dev->port[i].ah_lock);
                if (!rdma_cap_ib_sa(device, i + 1))
                        continue;

                sa_dev->port[i].sm_ah    = NULL;
                sa_dev->port[i].port_num = i + s;

                sa_dev->port[i].agent =
                        ib_register_mad_agent(device, i + s, IB_QPT_GSI,
                                              NULL, 0, send_handler,
                                              recv_handler, sa_dev, 0);
                if (IS_ERR(sa_dev->port[i].agent))
                        goto err;

                INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);

                count++;
        }

        if (!count)
                goto free;

        ib_set_client_data(device, &sa_client, sa_dev);

        /*
         * We register our event handler after everything is set up,
         * and then update our cached info after the event handler is
         * registered to avoid any problems if a port changes state
         * during our initialization.
         */

        INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
        if (ib_register_event_handler(&sa_dev->event_handler))
                goto err;

        for (i = 0; i <= e - s; ++i) {
                if (rdma_cap_ib_sa(device, i + 1))
                        update_sm_ah(&sa_dev->port[i].update_task);
        }

        return;

err:
        while (--i >= 0) {
                if (rdma_cap_ib_sa(device, i + 1))
                        ib_unregister_mad_agent(sa_dev->port[i].agent);
        }
free:
        kfree(sa_dev);
        return;
}
static void ib_sa_remove_one(struct ib_device *device, void *client_data)
{
        struct ib_sa_device *sa_dev = client_data;
        int i;

        if (!sa_dev)
                return;

        ib_unregister_event_handler(&sa_dev->event_handler);

        flush_workqueue(ib_wq);

        for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
                if (rdma_cap_ib_sa(device, i + 1)) {
                        ib_unregister_mad_agent(sa_dev->port[i].agent);
                        if (sa_dev->port[i].sm_ah)
                                kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
                }
        }

        kfree(sa_dev);
}
static int __init ib_sa_init(void)
{
        int ret;

        get_random_bytes(&tid, sizeof tid);

        atomic_set(&ib_nl_sa_request_seq, 0);

        ret = ib_register_client(&sa_client);
        if (ret) {
                pr_err("Couldn't register ib_sa client\n");
                goto err1;
        }

        ret = mcast_init();
        if (ret) {
                pr_err("Couldn't initialize multicast handling\n");
                goto err2;
        }

        ib_nl_wq = create_singlethread_workqueue("ib_nl_sa_wq");
        if (!ib_nl_wq) {
                ret = -ENOMEM;
                goto err3;
        }

        if (ibnl_add_client(RDMA_NL_LS, ARRAY_SIZE(ib_sa_cb_table),
                            ib_sa_cb_table)) {
                pr_err("Failed to add netlink callback\n");
                ret = -EINVAL;
                goto err4;
        }
        INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout);

        return 0;

err4:
        destroy_workqueue(ib_nl_wq);
err3:
        mcast_cleanup();
err2:
        ib_unregister_client(&sa_client);
err1:
        return ret;
}
static void __exit ib_sa_cleanup(void)
{
        ibnl_remove_client(RDMA_NL_LS);
        cancel_delayed_work(&ib_nl_timed_work);
        flush_workqueue(ib_nl_wq);
        destroy_workqueue(ib_nl_wq);
        mcast_cleanup();
        ib_unregister_client(&sa_client);
        idr_destroy(&query_idr);
}

module_init(ib_sa_init);
module_exit(ib_sa_cleanup);