/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <net/addrconf.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>

#include "core_priv.h"
static const char * const ib_events[] = {
	[IB_EVENT_CQ_ERR]		= "CQ error",
	[IB_EVENT_QP_FATAL]		= "QP fatal error",
	[IB_EVENT_QP_REQ_ERR]		= "QP request error",
	[IB_EVENT_QP_ACCESS_ERR]	= "QP access error",
	[IB_EVENT_COMM_EST]		= "communication established",
	[IB_EVENT_SQ_DRAINED]		= "send queue drained",
	[IB_EVENT_PATH_MIG]		= "path migration successful",
	[IB_EVENT_PATH_MIG_ERR]		= "path migration error",
	[IB_EVENT_DEVICE_FATAL]		= "device fatal error",
	[IB_EVENT_PORT_ACTIVE]		= "port active",
	[IB_EVENT_PORT_ERR]		= "port error",
	[IB_EVENT_LID_CHANGE]		= "LID change",
	[IB_EVENT_PKEY_CHANGE]		= "P_key change",
	[IB_EVENT_SM_CHANGE]		= "SM change",
	[IB_EVENT_SRQ_ERR]		= "SRQ error",
	[IB_EVENT_SRQ_LIMIT_REACHED]	= "SRQ limit reached",
	[IB_EVENT_QP_LAST_WQE_REACHED]	= "last WQE reached",
	[IB_EVENT_CLIENT_REREGISTER]	= "client reregister",
	[IB_EVENT_GID_CHANGE]		= "GID changed",
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event)
{
	size_t index = event;

	return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
			ib_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(ib_event_msg);
static const char * const wc_statuses[] = {
	[IB_WC_SUCCESS]			= "success",
	[IB_WC_LOC_LEN_ERR]		= "local length error",
	[IB_WC_LOC_QP_OP_ERR]		= "local QP operation error",
	[IB_WC_LOC_EEC_OP_ERR]		= "local EE context operation error",
	[IB_WC_LOC_PROT_ERR]		= "local protection error",
	[IB_WC_WR_FLUSH_ERR]		= "WR flushed",
	[IB_WC_MW_BIND_ERR]		= "memory management operation error",
	[IB_WC_BAD_RESP_ERR]		= "bad response error",
	[IB_WC_LOC_ACCESS_ERR]		= "local access error",
	[IB_WC_REM_INV_REQ_ERR]		= "invalid request error",
	[IB_WC_REM_ACCESS_ERR]		= "remote access error",
	[IB_WC_REM_OP_ERR]		= "remote operation error",
	[IB_WC_RETRY_EXC_ERR]		= "transport retry counter exceeded",
	[IB_WC_RNR_RETRY_EXC_ERR]	= "RNR retry counter exceeded",
	[IB_WC_LOC_RDD_VIOL_ERR]	= "local RDD violation error",
	[IB_WC_REM_INV_RD_REQ_ERR]	= "remote invalid RD request",
	[IB_WC_REM_ABORT_ERR]		= "operation aborted",
	[IB_WC_INV_EECN_ERR]		= "invalid EE context number",
	[IB_WC_INV_EEC_STATE_ERR]	= "invalid EE context state",
	[IB_WC_FATAL_ERR]		= "fatal error",
	[IB_WC_RESP_TIMEOUT_ERR]	= "response timeout error",
	[IB_WC_GENERAL_ERR]		= "general error",
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status)
{
	size_t index = status;

	return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
			wc_statuses[index] : "unrecognized status";
}
EXPORT_SYMBOL(ib_wc_status_msg);
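
/*
 * Example (illustrative sketch, not part of this file): a consumer can use
 * ib_wc_status_msg() to log failed completions in human-readable form while
 * draining a CQ.  The cq and the surrounding function are assumptions from
 * the caller's context.
 *
 *	struct ib_wc wc[16];
 *	int i, n;
 *
 *	while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0) {
 *		for (i = 0; i < n; i++) {
 *			if (wc[i].status != IB_WC_SUCCESS)
 *				pr_err("wr_id %llu failed: %s\n",
 *				       wc[i].wr_id,
 *				       ib_wc_status_msg(wc[i].status));
 *		}
 *	}
 */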
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return  1;
	case IB_RATE_5_GBPS:   return  2;
	case IB_RATE_10_GBPS:  return  4;
	case IB_RATE_20_GBPS:  return  8;
	case IB_RATE_30_GBPS:  return 12;
	case IB_RATE_40_GBPS:  return 16;
	case IB_RATE_60_GBPS:  return 24;
	case IB_RATE_80_GBPS:  return 32;
	case IB_RATE_120_GBPS: return 48;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:  return IB_RATE_2_5_GBPS;
	case 2:  return IB_RATE_5_GBPS;
	case 4:  return IB_RATE_10_GBPS;
	case 8:  return IB_RATE_20_GBPS;
	case 12: return IB_RATE_30_GBPS;
	case 16: return IB_RATE_40_GBPS;
	case 24: return IB_RATE_60_GBPS;
	case 32: return IB_RATE_80_GBPS;
	case 48: return IB_RATE_120_GBPS;
	default: return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return 2500;
	case IB_RATE_5_GBPS:   return 5000;
	case IB_RATE_10_GBPS:  return 10000;
	case IB_RATE_20_GBPS:  return 20000;
	case IB_RATE_30_GBPS:  return 30000;
	case IB_RATE_40_GBPS:  return 40000;
	case IB_RATE_60_GBPS:  return 60000;
	case IB_RATE_80_GBPS:  return 80000;
	case IB_RATE_120_GBPS: return 120000;
	case IB_RATE_14_GBPS:  return 14062;
	case IB_RATE_56_GBPS:  return 56250;
	case IB_RATE_112_GBPS: return 112500;
	case IB_RATE_168_GBPS: return 168750;
	case IB_RATE_25_GBPS:  return 25781;
	case IB_RATE_100_GBPS: return 103125;
	case IB_RATE_200_GBPS: return 206250;
	case IB_RATE_300_GBPS: return 309375;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mbps);
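
/*
 * Example (illustrative sketch, not part of this file): the rate helpers
 * convert between the IBTA rate encoding, the 2.5 Gb/s "multiplier" form
 * and an absolute Mb/s value.  The path_rec below is an assumption.
 *
 *	enum ib_rate rate = path_rec->rate;	// e.g. IB_RATE_10_GBPS
 *	int mult = ib_rate_to_mult(rate);	// 4, i.e. 4 x 2.5 Gb/s
 *	int mbps = ib_rate_to_mbps(rate);	// 10000
 *
 *	WARN_ON(mult_to_ib_rate(mult) != rate);
 */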
__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
	switch (node_type) {
	case RDMA_NODE_IB_CA:
	case RDMA_NODE_IB_SWITCH:
	case RDMA_NODE_IB_ROUTER:
		return RDMA_TRANSPORT_IB;
	case RDMA_NODE_RNIC:
		return RDMA_TRANSPORT_IWARP;
	case RDMA_NODE_USNIC:
		return RDMA_TRANSPORT_USNIC;
	case RDMA_NODE_USNIC_UDP:
		return RDMA_TRANSPORT_USNIC_UDP;
	default:
		BUG();
		return 0;
	}
}
EXPORT_SYMBOL(rdma_node_get_transport);
enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
{
	if (device->get_link_layer)
		return device->get_link_layer(device, port_num);

	switch (rdma_node_get_transport(device->node_type)) {
	case RDMA_TRANSPORT_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case RDMA_TRANSPORT_IWARP:
	case RDMA_TRANSPORT_USNIC:
	case RDMA_TRANSPORT_USNIC_UDP:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}
EXPORT_SYMBOL(rdma_port_get_link_layer);
/* Protection domains */

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 *
 * Every PD has a local_dma_lkey which can be used as the lkey value for local
 * memory operations.
 */
struct ib_pd *ib_alloc_pd(struct ib_device *device)
233 pd = device->alloc_pd(device, NULL, NULL);
240 atomic_set(&pd->usecnt, 0);
242 if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
243 pd->local_dma_lkey = device->local_dma_lkey;
247 mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
250 return (struct ib_pd *)mr;
254 pd->local_dma_lkey = pd->local_mr->lkey;
258 EXPORT_SYMBOL(ib_alloc_pd);
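
/*
 * Example (illustrative sketch, not part of this file): a kernel ULP
 * typically allocates one PD per device it attaches to and uses
 * pd->local_dma_lkey for its local SGEs.  The device pointer and the sge
 * below are assumptions from the caller's context.
 *
 *	struct ib_pd *pd;
 *
 *	pd = ib_alloc_pd(device);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *
 *	sge.lkey = pd->local_dma_lkey;
 *	...
 *	ib_dealloc_pd(pd);	// only after all PD resources are destroyed
 */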
/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 *
 * It is an error to call this function while any resources in the pd still
 * exist.  The caller is responsible for destroying them synchronously and
 * for guaranteeing that no new allocations will happen.
 */
void ib_dealloc_pd(struct ib_pd *pd)
273 ret = ib_dereg_mr(pd->local_mr);
278 /* uverbs manipulates usecnt with proper locking, while the kabi
279 requires the caller to guarantee we can't race here. */
280 WARN_ON(atomic_read(&pd->usecnt));
	/* Making dealloc_pd a void return is a WIP; no driver should return
	   an error here. */
284 ret = pd->device->dealloc_pd(pd);
285 WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
287 EXPORT_SYMBOL(ib_dealloc_pd);
289 /* Address handles */
291 struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
295 ah = pd->device->create_ah(pd, ah_attr);
298 ah->device = pd->device;
301 atomic_inc(&pd->usecnt);
306 EXPORT_SYMBOL(ib_create_ah);
static int ib_get_header_version(const union rdma_network_hdr *hdr)
{
	const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
	struct iphdr ip4h_checked;
	const struct ipv6hdr *ip6h = (struct ipv6hdr *)&hdr->ibgrh;

	/* If it's IPv6, the version must be 6, otherwise, the first
	 * 20 bytes (before the IPv4 header) are garbled.
	 */
	if (ip6h->version != 6)
		return (ip4h->version == 4) ? 4 : 0;
	/* version may be 6 or 4 because the first 20 bytes could be garbled */

	/* RoCE v2 requires no options, thus header length
	 * must be 5 words
	 */
	if (ip4h->ihl != 5)
		return 6;

	/* Verify checksum.
	 * We can't write on scattered buffers so we need to copy to
	 * temp buffer.
	 */
	memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked));
	ip4h_checked.check = 0;
	ip4h_checked.check = ip_fast_csum((u8 *)&ip4h_checked, 5);
	/* if IPv4 header checksum is OK, believe it */
	if (ip4h->check == ip4h_checked.check)
		return 4;
	return 6;
}
static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
						     u8 port_num,
						     const struct ib_grh *grh)
{
	int grh_version;

	if (rdma_protocol_ib(device, port_num))
		return RDMA_NETWORK_IB;

	grh_version = ib_get_header_version((union rdma_network_hdr *)grh);

	if (grh_version == 4)
		return RDMA_NETWORK_IPV4;

	if (grh->next_hdr == IPPROTO_UDP)
		return RDMA_NETWORK_IPV6;

	return RDMA_NETWORK_ROCE_V1;
}
360 struct find_gid_index_context {
362 enum ib_gid_type gid_type;
365 static bool find_gid_index(const union ib_gid *gid,
366 const struct ib_gid_attr *gid_attr,
369 struct find_gid_index_context *ctx =
370 (struct find_gid_index_context *)context;
372 if (ctx->gid_type != gid_attr->gid_type)
375 if ((!!(ctx->vlan_id != 0xffff) == !is_vlan_dev(gid_attr->ndev)) ||
376 (is_vlan_dev(gid_attr->ndev) &&
377 vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id))
383 static int get_sgid_index_from_eth(struct ib_device *device, u8 port_num,
384 u16 vlan_id, const union ib_gid *sgid,
385 enum ib_gid_type gid_type,
388 struct find_gid_index_context context = {.vlan_id = vlan_id,
389 .gid_type = gid_type};
391 return ib_find_gid_by_filter(device, sgid, port_num, find_gid_index,
392 &context, gid_index);
395 static int get_gids_from_rdma_hdr(union rdma_network_hdr *hdr,
396 enum rdma_network_type net_type,
397 union ib_gid *sgid, union ib_gid *dgid)
399 struct sockaddr_in src_in;
400 struct sockaddr_in dst_in;
401 __be32 src_saddr, dst_saddr;
406 if (net_type == RDMA_NETWORK_IPV4) {
407 memcpy(&src_in.sin_addr.s_addr,
408 &hdr->roce4grh.saddr, 4);
409 memcpy(&dst_in.sin_addr.s_addr,
410 &hdr->roce4grh.daddr, 4);
411 src_saddr = src_in.sin_addr.s_addr;
412 dst_saddr = dst_in.sin_addr.s_addr;
413 ipv6_addr_set_v4mapped(src_saddr,
414 (struct in6_addr *)sgid);
415 ipv6_addr_set_v4mapped(dst_saddr,
416 (struct in6_addr *)dgid);
418 } else if (net_type == RDMA_NETWORK_IPV6 ||
419 net_type == RDMA_NETWORK_IB) {
420 *dgid = hdr->ibgrh.dgid;
421 *sgid = hdr->ibgrh.sgid;
428 int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
429 const struct ib_wc *wc, const struct ib_grh *grh,
430 struct ib_ah_attr *ah_attr)
435 enum rdma_network_type net_type = RDMA_NETWORK_IB;
436 enum ib_gid_type gid_type = IB_GID_TYPE_IB;
440 memset(ah_attr, 0, sizeof *ah_attr);
441 if (rdma_cap_eth_ah(device, port_num)) {
442 if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE)
443 net_type = wc->network_hdr_type;
445 net_type = ib_get_net_type_by_grh(device, port_num, grh);
446 gid_type = ib_network_to_gid_type(net_type);
448 ret = get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
453 if (rdma_protocol_roce(device, port_num)) {
455 u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
456 wc->vlan_id : 0xffff;
457 struct net_device *idev;
458 struct net_device *resolved_dev;
460 if (!(wc->wc_flags & IB_WC_GRH))
463 if (!device->get_netdev)
466 idev = device->get_netdev(device, port_num);
470 ret = rdma_addr_find_dmac_by_grh(&dgid, &sgid,
472 wc->wc_flags & IB_WC_WITH_VLAN ?
480 resolved_dev = dev_get_by_index(&init_net, if_index);
481 if (resolved_dev->flags & IFF_LOOPBACK) {
482 dev_put(resolved_dev);
484 dev_hold(resolved_dev);
487 if (resolved_dev != idev && !rdma_is_upper_dev_rcu(idev,
492 dev_put(resolved_dev);
496 ret = get_sgid_index_from_eth(device, port_num, vlan_id,
497 &dgid, gid_type, &gid_index);
502 ah_attr->dlid = wc->slid;
503 ah_attr->sl = wc->sl;
504 ah_attr->src_path_bits = wc->dlid_path_bits;
505 ah_attr->port_num = port_num;
507 if (wc->wc_flags & IB_WC_GRH) {
508 ah_attr->ah_flags = IB_AH_GRH;
509 ah_attr->grh.dgid = sgid;
511 if (!rdma_cap_eth_ah(device, port_num)) {
512 ret = ib_find_cached_gid_by_port(device, &dgid,
520 ah_attr->grh.sgid_index = (u8) gid_index;
521 flow_class = be32_to_cpu(grh->version_tclass_flow);
522 ah_attr->grh.flow_label = flow_class & 0xFFFFF;
523 ah_attr->grh.hop_limit = 0xFF;
524 ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
528 EXPORT_SYMBOL(ib_init_ah_from_wc);
530 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
531 const struct ib_grh *grh, u8 port_num)
533 struct ib_ah_attr ah_attr;
536 ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
540 return ib_create_ah(pd, &ah_attr);
542 EXPORT_SYMBOL(ib_create_ah_from_wc);
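
/*
 * Example (illustrative sketch, not part of this file): a UD service that
 * wants to reply to an incoming datagram can build an address handle
 * directly from the receive completion.  pd, qp, wc, recv_buf and port_num
 * are assumptions from the caller's context; on UD QPs the GRH, if present,
 * occupies the first 40 bytes of the receive buffer.
 *
 *	struct ib_send_wr *bad_wr;
 *	struct ib_ud_wr ud_wr = {};
 *	struct ib_ah *ah;
 *
 *	ah = ib_create_ah_from_wc(pd, &wc, (struct ib_grh *)recv_buf,
 *				  port_num);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *
 *	ud_wr.ah	  = ah;
 *	ud_wr.remote_qpn  = wc.src_qp;
 *	ud_wr.remote_qkey = MY_QKEY;		// assumption
 *	ud_wr.wr.opcode	  = IB_WR_SEND;
 *	...
 *	ib_post_send(qp, &ud_wr.wr, &bad_wr);
 *	...
 *	ib_destroy_ah(ah);
 */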
544 int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
546 return ah->device->modify_ah ?
547 ah->device->modify_ah(ah, ah_attr) :
550 EXPORT_SYMBOL(ib_modify_ah);
552 int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
554 return ah->device->query_ah ?
555 ah->device->query_ah(ah, ah_attr) :
558 EXPORT_SYMBOL(ib_query_ah);
560 int ib_destroy_ah(struct ib_ah *ah)
566 ret = ah->device->destroy_ah(ah);
568 atomic_dec(&pd->usecnt);
572 EXPORT_SYMBOL(ib_destroy_ah);
574 /* Shared receive queues */
576 struct ib_srq *ib_create_srq(struct ib_pd *pd,
577 struct ib_srq_init_attr *srq_init_attr)
581 if (!pd->device->create_srq)
582 return ERR_PTR(-ENOSYS);
584 srq = pd->device->create_srq(pd, srq_init_attr, NULL);
587 srq->device = pd->device;
590 srq->event_handler = srq_init_attr->event_handler;
591 srq->srq_context = srq_init_attr->srq_context;
592 srq->srq_type = srq_init_attr->srq_type;
593 if (srq->srq_type == IB_SRQT_XRC) {
594 srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
595 srq->ext.xrc.cq = srq_init_attr->ext.xrc.cq;
596 atomic_inc(&srq->ext.xrc.xrcd->usecnt);
597 atomic_inc(&srq->ext.xrc.cq->usecnt);
599 atomic_inc(&pd->usecnt);
600 atomic_set(&srq->usecnt, 0);
605 EXPORT_SYMBOL(ib_create_srq);
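
/*
 * Example (illustrative sketch, not part of this file): a basic (non-XRC)
 * SRQ that several QPs can share.  The pd and the limit-event handler are
 * assumptions from the caller's context.
 *
 *	struct ib_srq_init_attr srq_attr = {
 *		.event_handler	= my_srq_event_handler,	// assumption
 *		.srq_type	= IB_SRQT_BASIC,
 *		.attr = {
 *			.max_wr	 = 1024,
 *			.max_sge = 1,
 *		},
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &srq_attr);
 *
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 */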
607 int ib_modify_srq(struct ib_srq *srq,
608 struct ib_srq_attr *srq_attr,
609 enum ib_srq_attr_mask srq_attr_mask)
611 return srq->device->modify_srq ?
612 srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
615 EXPORT_SYMBOL(ib_modify_srq);
617 int ib_query_srq(struct ib_srq *srq,
618 struct ib_srq_attr *srq_attr)
620 return srq->device->query_srq ?
621 srq->device->query_srq(srq, srq_attr) : -ENOSYS;
623 EXPORT_SYMBOL(ib_query_srq);
625 int ib_destroy_srq(struct ib_srq *srq)
628 enum ib_srq_type srq_type;
629 struct ib_xrcd *uninitialized_var(xrcd);
630 struct ib_cq *uninitialized_var(cq);
633 if (atomic_read(&srq->usecnt))
637 srq_type = srq->srq_type;
638 if (srq_type == IB_SRQT_XRC) {
639 xrcd = srq->ext.xrc.xrcd;
640 cq = srq->ext.xrc.cq;
643 ret = srq->device->destroy_srq(srq);
645 atomic_dec(&pd->usecnt);
646 if (srq_type == IB_SRQT_XRC) {
647 atomic_dec(&xrcd->usecnt);
648 atomic_dec(&cq->usecnt);
654 EXPORT_SYMBOL(ib_destroy_srq);
658 static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
660 struct ib_qp *qp = context;
663 spin_lock_irqsave(&qp->device->event_handler_lock, flags);
664 list_for_each_entry(event->element.qp, &qp->open_list, open_list)
665 if (event->element.qp->event_handler)
666 event->element.qp->event_handler(event, event->element.qp->qp_context);
667 spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
670 static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
672 mutex_lock(&xrcd->tgt_qp_mutex);
673 list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
674 mutex_unlock(&xrcd->tgt_qp_mutex);
677 static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
678 void (*event_handler)(struct ib_event *, void *),
684 qp = kzalloc(sizeof *qp, GFP_KERNEL);
686 return ERR_PTR(-ENOMEM);
688 qp->real_qp = real_qp;
689 atomic_inc(&real_qp->usecnt);
690 qp->device = real_qp->device;
691 qp->event_handler = event_handler;
692 qp->qp_context = qp_context;
693 qp->qp_num = real_qp->qp_num;
694 qp->qp_type = real_qp->qp_type;
696 spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
697 list_add(&qp->open_list, &real_qp->open_list);
698 spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
703 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
704 struct ib_qp_open_attr *qp_open_attr)
706 struct ib_qp *qp, *real_qp;
708 if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
709 return ERR_PTR(-EINVAL);
711 qp = ERR_PTR(-EINVAL);
712 mutex_lock(&xrcd->tgt_qp_mutex);
713 list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
714 if (real_qp->qp_num == qp_open_attr->qp_num) {
715 qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
716 qp_open_attr->qp_context);
720 mutex_unlock(&xrcd->tgt_qp_mutex);
723 EXPORT_SYMBOL(ib_open_qp);
725 struct ib_qp *ib_create_qp(struct ib_pd *pd,
726 struct ib_qp_init_attr *qp_init_attr)
728 struct ib_qp *qp, *real_qp;
729 struct ib_device *device;
731 device = pd ? pd->device : qp_init_attr->xrcd->device;
732 qp = device->create_qp(pd, qp_init_attr, NULL);
738 qp->qp_type = qp_init_attr->qp_type;
740 atomic_set(&qp->usecnt, 0);
741 if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
742 qp->event_handler = __ib_shared_qp_event_handler;
745 qp->send_cq = qp->recv_cq = NULL;
747 qp->xrcd = qp_init_attr->xrcd;
748 atomic_inc(&qp_init_attr->xrcd->usecnt);
749 INIT_LIST_HEAD(&qp->open_list);
752 qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
753 qp_init_attr->qp_context);
755 __ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
757 real_qp->device->destroy_qp(real_qp);
759 qp->event_handler = qp_init_attr->event_handler;
760 qp->qp_context = qp_init_attr->qp_context;
761 if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
765 qp->recv_cq = qp_init_attr->recv_cq;
766 atomic_inc(&qp_init_attr->recv_cq->usecnt);
767 qp->srq = qp_init_attr->srq;
769 atomic_inc(&qp_init_attr->srq->usecnt);
773 qp->send_cq = qp_init_attr->send_cq;
776 atomic_inc(&pd->usecnt);
777 atomic_inc(&qp_init_attr->send_cq->usecnt);
783 EXPORT_SYMBOL(ib_create_qp);
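
/*
 * Example (illustrative sketch, not part of this file): creating an RC QP
 * that shares one CQ for send and receive completions.  pd, cq and the
 * queue depths are assumptions from the caller's context.
 *
 *	struct ib_qp_init_attr qp_attr = {
 *		.send_cq     = cq,
 *		.recv_cq     = cq,
 *		.qp_type     = IB_QPT_RC,
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.cap = {
 *			.max_send_wr  = 128,
 *			.max_recv_wr  = 128,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &qp_attr);
 *
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */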
785 static const struct {
787 enum ib_qp_attr_mask req_param[IB_QPT_MAX];
788 enum ib_qp_attr_mask opt_param[IB_QPT_MAX];
789 } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
791 [IB_QPS_RESET] = { .valid = 1 },
795 [IB_QPT_UD] = (IB_QP_PKEY_INDEX |
798 [IB_QPT_RAW_PACKET] = IB_QP_PORT,
799 [IB_QPT_UC] = (IB_QP_PKEY_INDEX |
802 [IB_QPT_RC] = (IB_QP_PKEY_INDEX |
805 [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
808 [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
811 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
813 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
819 [IB_QPS_RESET] = { .valid = 1 },
820 [IB_QPS_ERR] = { .valid = 1 },
824 [IB_QPT_UD] = (IB_QP_PKEY_INDEX |
827 [IB_QPT_UC] = (IB_QP_PKEY_INDEX |
830 [IB_QPT_RC] = (IB_QP_PKEY_INDEX |
833 [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
836 [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
839 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
841 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
848 [IB_QPT_UC] = (IB_QP_AV |
852 [IB_QPT_RC] = (IB_QP_AV |
856 IB_QP_MAX_DEST_RD_ATOMIC |
857 IB_QP_MIN_RNR_TIMER),
858 [IB_QPT_XRC_INI] = (IB_QP_AV |
862 [IB_QPT_XRC_TGT] = (IB_QP_AV |
866 IB_QP_MAX_DEST_RD_ATOMIC |
867 IB_QP_MIN_RNR_TIMER),
870 [IB_QPT_UD] = (IB_QP_PKEY_INDEX |
872 [IB_QPT_UC] = (IB_QP_ALT_PATH |
875 [IB_QPT_RC] = (IB_QP_ALT_PATH |
878 [IB_QPT_XRC_INI] = (IB_QP_ALT_PATH |
881 [IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH |
884 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
886 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
892 [IB_QPS_RESET] = { .valid = 1 },
893 [IB_QPS_ERR] = { .valid = 1 },
897 [IB_QPT_UD] = IB_QP_SQ_PSN,
898 [IB_QPT_UC] = IB_QP_SQ_PSN,
899 [IB_QPT_RC] = (IB_QP_TIMEOUT |
903 IB_QP_MAX_QP_RD_ATOMIC),
904 [IB_QPT_XRC_INI] = (IB_QP_TIMEOUT |
908 IB_QP_MAX_QP_RD_ATOMIC),
909 [IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT |
911 [IB_QPT_SMI] = IB_QP_SQ_PSN,
912 [IB_QPT_GSI] = IB_QP_SQ_PSN,
915 [IB_QPT_UD] = (IB_QP_CUR_STATE |
917 [IB_QPT_UC] = (IB_QP_CUR_STATE |
920 IB_QP_PATH_MIG_STATE),
921 [IB_QPT_RC] = (IB_QP_CUR_STATE |
924 IB_QP_MIN_RNR_TIMER |
925 IB_QP_PATH_MIG_STATE),
926 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
929 IB_QP_PATH_MIG_STATE),
930 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
933 IB_QP_MIN_RNR_TIMER |
934 IB_QP_PATH_MIG_STATE),
935 [IB_QPT_SMI] = (IB_QP_CUR_STATE |
937 [IB_QPT_GSI] = (IB_QP_CUR_STATE |
943 [IB_QPS_RESET] = { .valid = 1 },
944 [IB_QPS_ERR] = { .valid = 1 },
948 [IB_QPT_UD] = (IB_QP_CUR_STATE |
950 [IB_QPT_UC] = (IB_QP_CUR_STATE |
953 IB_QP_PATH_MIG_STATE),
954 [IB_QPT_RC] = (IB_QP_CUR_STATE |
957 IB_QP_PATH_MIG_STATE |
958 IB_QP_MIN_RNR_TIMER),
959 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
962 IB_QP_PATH_MIG_STATE),
963 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
966 IB_QP_PATH_MIG_STATE |
967 IB_QP_MIN_RNR_TIMER),
968 [IB_QPT_SMI] = (IB_QP_CUR_STATE |
970 [IB_QPT_GSI] = (IB_QP_CUR_STATE |
977 [IB_QPT_UD] = IB_QP_EN_SQD_ASYNC_NOTIFY,
978 [IB_QPT_UC] = IB_QP_EN_SQD_ASYNC_NOTIFY,
979 [IB_QPT_RC] = IB_QP_EN_SQD_ASYNC_NOTIFY,
980 [IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
981 [IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
982 [IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
983 [IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
988 [IB_QPS_RESET] = { .valid = 1 },
989 [IB_QPS_ERR] = { .valid = 1 },
993 [IB_QPT_UD] = (IB_QP_CUR_STATE |
995 [IB_QPT_UC] = (IB_QP_CUR_STATE |
998 IB_QP_PATH_MIG_STATE),
999 [IB_QPT_RC] = (IB_QP_CUR_STATE |
1001 IB_QP_ACCESS_FLAGS |
1002 IB_QP_MIN_RNR_TIMER |
1003 IB_QP_PATH_MIG_STATE),
1004 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
1006 IB_QP_ACCESS_FLAGS |
1007 IB_QP_PATH_MIG_STATE),
1008 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
1010 IB_QP_ACCESS_FLAGS |
1011 IB_QP_MIN_RNR_TIMER |
1012 IB_QP_PATH_MIG_STATE),
1013 [IB_QPT_SMI] = (IB_QP_CUR_STATE |
1015 [IB_QPT_GSI] = (IB_QP_CUR_STATE |
1022 [IB_QPT_UD] = (IB_QP_PKEY_INDEX |
1024 [IB_QPT_UC] = (IB_QP_AV |
1026 IB_QP_ACCESS_FLAGS |
1028 IB_QP_PATH_MIG_STATE),
1029 [IB_QPT_RC] = (IB_QP_PORT |
1034 IB_QP_MAX_QP_RD_ATOMIC |
1035 IB_QP_MAX_DEST_RD_ATOMIC |
1037 IB_QP_ACCESS_FLAGS |
1039 IB_QP_MIN_RNR_TIMER |
1040 IB_QP_PATH_MIG_STATE),
1041 [IB_QPT_XRC_INI] = (IB_QP_PORT |
1046 IB_QP_MAX_QP_RD_ATOMIC |
1048 IB_QP_ACCESS_FLAGS |
1050 IB_QP_PATH_MIG_STATE),
1051 [IB_QPT_XRC_TGT] = (IB_QP_PORT |
1054 IB_QP_MAX_DEST_RD_ATOMIC |
1056 IB_QP_ACCESS_FLAGS |
1058 IB_QP_MIN_RNR_TIMER |
1059 IB_QP_PATH_MIG_STATE),
1060 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
1062 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
1068 [IB_QPS_RESET] = { .valid = 1 },
1069 [IB_QPS_ERR] = { .valid = 1 },
1073 [IB_QPT_UD] = (IB_QP_CUR_STATE |
1075 [IB_QPT_UC] = (IB_QP_CUR_STATE |
1076 IB_QP_ACCESS_FLAGS),
1077 [IB_QPT_SMI] = (IB_QP_CUR_STATE |
1079 [IB_QPT_GSI] = (IB_QP_CUR_STATE |
1085 [IB_QPS_RESET] = { .valid = 1 },
1086 [IB_QPS_ERR] = { .valid = 1 }
1090 int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
1091 enum ib_qp_type type, enum ib_qp_attr_mask mask,
1092 enum rdma_link_layer ll)
1094 enum ib_qp_attr_mask req_param, opt_param;
1096 if (cur_state < 0 || cur_state > IB_QPS_ERR ||
1097 next_state < 0 || next_state > IB_QPS_ERR)
1100 if (mask & IB_QP_CUR_STATE &&
1101 cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
1102 cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
1105 if (!qp_state_table[cur_state][next_state].valid)
1108 req_param = qp_state_table[cur_state][next_state].req_param[type];
1109 opt_param = qp_state_table[cur_state][next_state].opt_param[type];
1111 if ((mask & req_param) != req_param)
1114 if (mask & ~(req_param | opt_param | IB_QP_STATE))
1119 EXPORT_SYMBOL(ib_modify_qp_is_ok);
1121 int ib_resolve_eth_dmac(struct ib_qp *qp,
1122 struct ib_qp_attr *qp_attr, int *qp_attr_mask)
1126 if (*qp_attr_mask & IB_QP_AV) {
1127 if (qp_attr->ah_attr.port_num < rdma_start_port(qp->device) ||
1128 qp_attr->ah_attr.port_num > rdma_end_port(qp->device))
1131 if (!rdma_cap_eth_ah(qp->device, qp_attr->ah_attr.port_num))
1134 if (rdma_link_local_addr((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw)) {
1135 rdma_get_ll_mac((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw,
1136 qp_attr->ah_attr.dmac);
1139 struct ib_gid_attr sgid_attr;
1142 ret = ib_query_gid(qp->device,
1143 qp_attr->ah_attr.port_num,
1144 qp_attr->ah_attr.grh.sgid_index,
1147 if (ret || !sgid_attr.ndev) {
1152 if (sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
1153 /* TODO: get the hoplimit from the inet/inet6
1156 qp_attr->ah_attr.grh.hop_limit =
1157 IPV6_DEFAULT_HOPLIMIT;
1159 ifindex = sgid_attr.ndev->ifindex;
1161 ret = rdma_addr_find_dmac_by_grh(&sgid,
1162 &qp_attr->ah_attr.grh.dgid,
1163 qp_attr->ah_attr.dmac,
1166 dev_put(sgid_attr.ndev);
1172 EXPORT_SYMBOL(ib_resolve_eth_dmac);
1175 int ib_modify_qp(struct ib_qp *qp,
1176 struct ib_qp_attr *qp_attr,
1181 ret = ib_resolve_eth_dmac(qp, qp_attr, &qp_attr_mask);
1185 return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
1187 EXPORT_SYMBOL(ib_modify_qp);
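
/*
 * Example (illustrative sketch, not part of this file): the first step of
 * the usual RESET -> INIT -> RTR -> RTS sequence for an RC QP.  The mask
 * passed here must satisfy ib_modify_qp_is_ok() for the transition; the
 * port number and P_Key index are assumptions.
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state	 = IB_QPS_INIT,
 *		.pkey_index	 = 0,
 *		.port_num	 = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_READ |
 *				   IB_ACCESS_REMOTE_WRITE,
 *	};
 *	int ret = ib_modify_qp(qp, &attr,
 *			       IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			       IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */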
1189 int ib_query_qp(struct ib_qp *qp,
1190 struct ib_qp_attr *qp_attr,
1192 struct ib_qp_init_attr *qp_init_attr)
1194 return qp->device->query_qp ?
1195 qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
1198 EXPORT_SYMBOL(ib_query_qp);
1200 int ib_close_qp(struct ib_qp *qp)
1202 struct ib_qp *real_qp;
1203 unsigned long flags;
1205 real_qp = qp->real_qp;
1209 spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
1210 list_del(&qp->open_list);
1211 spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
1213 atomic_dec(&real_qp->usecnt);
1218 EXPORT_SYMBOL(ib_close_qp);
1220 static int __ib_destroy_shared_qp(struct ib_qp *qp)
1222 struct ib_xrcd *xrcd;
1223 struct ib_qp *real_qp;
1226 real_qp = qp->real_qp;
1227 xrcd = real_qp->xrcd;
1229 mutex_lock(&xrcd->tgt_qp_mutex);
1231 if (atomic_read(&real_qp->usecnt) == 0)
1232 list_del(&real_qp->xrcd_list);
1235 mutex_unlock(&xrcd->tgt_qp_mutex);
1238 ret = ib_destroy_qp(real_qp);
1240 atomic_dec(&xrcd->usecnt);
1242 __ib_insert_xrcd_qp(xrcd, real_qp);
1248 int ib_destroy_qp(struct ib_qp *qp)
1251 struct ib_cq *scq, *rcq;
1255 if (atomic_read(&qp->usecnt))
1258 if (qp->real_qp != qp)
1259 return __ib_destroy_shared_qp(qp);
1266 ret = qp->device->destroy_qp(qp);
1269 atomic_dec(&pd->usecnt);
1271 atomic_dec(&scq->usecnt);
1273 atomic_dec(&rcq->usecnt);
1275 atomic_dec(&srq->usecnt);
1280 EXPORT_SYMBOL(ib_destroy_qp);
1282 /* Completion queues */
1284 struct ib_cq *ib_create_cq(struct ib_device *device,
1285 ib_comp_handler comp_handler,
1286 void (*event_handler)(struct ib_event *, void *),
1288 const struct ib_cq_init_attr *cq_attr)
1292 cq = device->create_cq(device, cq_attr, NULL, NULL);
1295 cq->device = device;
1297 cq->comp_handler = comp_handler;
1298 cq->event_handler = event_handler;
1299 cq->cq_context = cq_context;
1300 atomic_set(&cq->usecnt, 0);
1305 EXPORT_SYMBOL(ib_create_cq);
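
/*
 * Example (illustrative sketch, not part of this file): creating a CQ and
 * arming it for a completion interrupt on the next entry.  The completion
 * handler name and context pointer are assumptions.
 *
 *	struct ib_cq_init_attr cq_attr = {
 *		.cqe	     = 256,
 *		.comp_vector = 0,
 *	};
 *	struct ib_cq *cq;
 *
 *	cq = ib_create_cq(device, my_comp_handler, NULL, my_ctx, &cq_attr);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *
 *	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 */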
1307 int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
1309 return cq->device->modify_cq ?
1310 cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
1312 EXPORT_SYMBOL(ib_modify_cq);
1314 int ib_destroy_cq(struct ib_cq *cq)
1316 if (atomic_read(&cq->usecnt))
1319 return cq->device->destroy_cq(cq);
1321 EXPORT_SYMBOL(ib_destroy_cq);
1323 int ib_resize_cq(struct ib_cq *cq, int cqe)
1325 return cq->device->resize_cq ?
1326 cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
1328 EXPORT_SYMBOL(ib_resize_cq);
1330 /* Memory regions */
1332 struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
1337 err = ib_check_mr_access(mr_access_flags);
1339 return ERR_PTR(err);
1341 mr = pd->device->get_dma_mr(pd, mr_access_flags);
1344 mr->device = pd->device;
1347 atomic_inc(&pd->usecnt);
1352 EXPORT_SYMBOL(ib_get_dma_mr);
1354 int ib_dereg_mr(struct ib_mr *mr)
1356 struct ib_pd *pd = mr->pd;
1359 ret = mr->device->dereg_mr(mr);
1361 atomic_dec(&pd->usecnt);
1365 EXPORT_SYMBOL(ib_dereg_mr);
/**
 * ib_alloc_mr() - Allocates a memory region
 * @pd:            protection domain associated with the region
 * @mr_type:       memory region type
 * @max_num_sg:    maximum sg entries available for registration.
 *
 * Notes:
 * Memory registration page/sg lists must not exceed max_num_sg.
 * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
 * max_num_sg * used_page_size.
 */
struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
1380 enum ib_mr_type mr_type,
1385 if (!pd->device->alloc_mr)
1386 return ERR_PTR(-ENOSYS);
1388 mr = pd->device->alloc_mr(pd, mr_type, max_num_sg);
1390 mr->device = pd->device;
1393 atomic_inc(&pd->usecnt);
1398 EXPORT_SYMBOL(ib_alloc_mr);
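
/*
 * Example (illustrative sketch, not part of this file): allocating an MR
 * suitable for fast registration of up to 32 pages; pd is an assumption.
 *
 *	struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 32);
 *
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 */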
1400 /* "Fast" memory regions */
1402 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
1403 int mr_access_flags,
1404 struct ib_fmr_attr *fmr_attr)
1408 if (!pd->device->alloc_fmr)
1409 return ERR_PTR(-ENOSYS);
1411 fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
1413 fmr->device = pd->device;
1415 atomic_inc(&pd->usecnt);
1420 EXPORT_SYMBOL(ib_alloc_fmr);
1422 int ib_unmap_fmr(struct list_head *fmr_list)
1426 if (list_empty(fmr_list))
1429 fmr = list_entry(fmr_list->next, struct ib_fmr, list);
1430 return fmr->device->unmap_fmr(fmr_list);
1432 EXPORT_SYMBOL(ib_unmap_fmr);
1434 int ib_dealloc_fmr(struct ib_fmr *fmr)
1440 ret = fmr->device->dealloc_fmr(fmr);
1442 atomic_dec(&pd->usecnt);
1446 EXPORT_SYMBOL(ib_dealloc_fmr);
1448 /* Multicast groups */
1450 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
1454 if (!qp->device->attach_mcast)
1456 if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
1459 ret = qp->device->attach_mcast(qp, gid, lid);
1461 atomic_inc(&qp->usecnt);
1464 EXPORT_SYMBOL(ib_attach_mcast);
1466 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
1470 if (!qp->device->detach_mcast)
1472 if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
1475 ret = qp->device->detach_mcast(qp, gid, lid);
1477 atomic_dec(&qp->usecnt);
1480 EXPORT_SYMBOL(ib_detach_mcast);
1482 struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
1484 struct ib_xrcd *xrcd;
1486 if (!device->alloc_xrcd)
1487 return ERR_PTR(-ENOSYS);
1489 xrcd = device->alloc_xrcd(device, NULL, NULL);
1490 if (!IS_ERR(xrcd)) {
1491 xrcd->device = device;
1493 atomic_set(&xrcd->usecnt, 0);
1494 mutex_init(&xrcd->tgt_qp_mutex);
1495 INIT_LIST_HEAD(&xrcd->tgt_qp_list);
1500 EXPORT_SYMBOL(ib_alloc_xrcd);
1502 int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
1507 if (atomic_read(&xrcd->usecnt))
1510 while (!list_empty(&xrcd->tgt_qp_list)) {
1511 qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
1512 ret = ib_destroy_qp(qp);
1517 return xrcd->device->dealloc_xrcd(xrcd);
1519 EXPORT_SYMBOL(ib_dealloc_xrcd);
1521 struct ib_flow *ib_create_flow(struct ib_qp *qp,
1522 struct ib_flow_attr *flow_attr,
1525 struct ib_flow *flow_id;
1526 if (!qp->device->create_flow)
1527 return ERR_PTR(-ENOSYS);
1529 flow_id = qp->device->create_flow(qp, flow_attr, domain);
1530 if (!IS_ERR(flow_id))
1531 atomic_inc(&qp->usecnt);
1534 EXPORT_SYMBOL(ib_create_flow);
1536 int ib_destroy_flow(struct ib_flow *flow_id)
1539 struct ib_qp *qp = flow_id->qp;
1541 err = qp->device->destroy_flow(flow_id);
1543 atomic_dec(&qp->usecnt);
1546 EXPORT_SYMBOL(ib_destroy_flow);
1548 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
1549 struct ib_mr_status *mr_status)
1551 return mr->device->check_mr_status ?
1552 mr->device->check_mr_status(mr, check_mask, mr_status) : -ENOSYS;
1554 EXPORT_SYMBOL(ib_check_mr_status);
/**
 * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
 *     and set it as the memory region.
 * @mr:            memory region
 * @sg:            dma mapped scatterlist
 * @sg_nents:      number of entries in sg
 * @page_size:     page vector desired page size
 *
 * Constraints:
 * - The first sg element is allowed to have an offset.
 * - Each sg element must be aligned to page_size (or physically
 *   contiguous to the previous element). In case an sg element has a
 *   non contiguous offset, the mapping prefix will not include it.
 * - The last sg element is allowed to have length less than page_size.
 * - If sg_nents total byte length exceeds the mr max_num_sg * page_size
 *   then only max_num_sg entries will be mapped.
 *
 * Returns the number of sg elements that were mapped to the memory region.
 *
 * After this completes successfully, the memory region
 * is ready for registration.
 */
int ib_map_mr_sg(struct ib_mr *mr,
		 struct scatterlist *sg,
		 int sg_nents,
		 unsigned int page_size)
{
	if (unlikely(!mr->device->map_mr_sg))
		return -ENOSYS;

	mr->page_size = page_size;

	return mr->device->map_mr_sg(mr, sg, sg_nents);
}
EXPORT_SYMBOL(ib_map_mr_sg);
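
/*
 * Example (illustrative sketch, not part of this file): the typical fast
 * registration flow maps a DMA-mapped scatterlist into the MR and then
 * posts an IB_WR_REG_MR work request so the HCA picks up the new
 * translation.  mr, qp, sg and sg_nents are assumptions from the caller.
 *
 *	struct ib_send_wr *bad_wr;
 *	struct ib_reg_wr reg_wr = {};
 *	int n, ret;
 *
 *	n = ib_map_mr_sg(mr, sg, sg_nents, PAGE_SIZE);
 *	if (n < sg_nents)
 *		return -EINVAL;		// could not map the whole list
 *
 *	reg_wr.wr.opcode     = IB_WR_REG_MR;
 *	reg_wr.wr.send_flags = IB_SEND_SIGNALED;
 *	reg_wr.mr	     = mr;
 *	reg_wr.key	     = mr->rkey;
 *	reg_wr.access	     = IB_ACCESS_LOCAL_WRITE |
 *			       IB_ACCESS_REMOTE_READ;
 *
 *	ret = ib_post_send(qp, &reg_wr.wr, &bad_wr);
 */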
/**
 * ib_sg_to_pages() - Convert the largest prefix of a sg list
 *     to a page vector
 * @mr:            memory region
 * @sgl:           dma mapped scatterlist
 * @sg_nents:      number of entries in sg
 * @set_page:      driver page assignment function pointer
 *
 * Core service helper for drivers to convert the largest
 * prefix of given sg list to a page vector. The sg list
 * prefix converted is the prefix that meets the requirements
 * of ib_map_mr_sg().
 *
 * Returns the number of sg elements that were assigned to
 * a page vector.
 */
int ib_sg_to_pages(struct ib_mr *mr,
		   struct scatterlist *sgl,
		   int sg_nents,
		   int (*set_page)(struct ib_mr *, u64))
{
	struct scatterlist *sg;
	u64 last_end_dma_addr = 0;
	unsigned int last_page_off = 0;
	u64 page_mask = ~((u64)mr->page_size - 1);
	int i, ret;

	mr->iova = sg_dma_address(&sgl[0]);
	mr->length = 0;

	for_each_sg(sgl, sg, sg_nents, i) {
		u64 dma_addr = sg_dma_address(sg);
		unsigned int dma_len = sg_dma_len(sg);
		u64 end_dma_addr = dma_addr + dma_len;
		u64 page_addr = dma_addr & page_mask;

		/*
		 * For the second and later elements, check whether either the
		 * end of element i-1 or the start of element i is not aligned
		 * on a page boundary.
		 */
		if (i && (last_page_off != 0 || page_addr != dma_addr)) {
			/* Stop mapping if there is a gap. */
			if (last_end_dma_addr != dma_addr)
				break;

			/*
			 * Coalesce this element with the last. If it is small
			 * enough just update mr->length. Otherwise start
			 * mapping from the next page.
			 */
			goto next_page;
		}

		do {
			ret = set_page(mr, page_addr);
			if (unlikely(ret < 0))
				return i ? : ret;
next_page:
			page_addr += mr->page_size;
		} while (page_addr < end_dma_addr);

		mr->length += dma_len;
		last_end_dma_addr = end_dma_addr;
		last_page_off = end_dma_addr & ~page_mask;
	}

	return i;
}
EXPORT_SYMBOL(ib_sg_to_pages);
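
/*
 * Example (illustrative sketch, not part of this file): a low-level driver
 * typically implements its map_mr_sg hook on top of ib_sg_to_pages(),
 * supplying a set_page callback that stores each page address in a
 * driver-private array.  The my_mr container and to_my_mr() helper are
 * hypothetical.
 *
 *	static int my_set_page(struct ib_mr *ibmr, u64 addr)
 *	{
 *		struct my_mr *mr = to_my_mr(ibmr);
 *
 *		if (unlikely(mr->npages == mr->max_pages))
 *			return -ENOMEM;
 *
 *		mr->pages[mr->npages++] = addr;
 *		return 0;
 *	}
 *
 *	static int my_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
 *				int sg_nents)
 *	{
 *		struct my_mr *mr = to_my_mr(ibmr);
 *
 *		mr->npages = 0;
 *		return ib_sg_to_pages(ibmr, sg, sg_nents, my_set_page);
 *	}
 */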