/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_cache.h>

#include <linux/mlx4/cmd.h>
#include <linux/gfp.h>
#include <rdma/ib_pma.h>
enum {
    MLX4_IB_VENDOR_CLASS1 = 0x9,
    MLX4_IB_VENDOR_CLASS2 = 0xa
};
#define MLX4_TUN_SEND_WRID_SHIFT 34
#define MLX4_TUN_QPN_SHIFT 32
#define MLX4_TUN_WRID_RECV (((u64) 1) << MLX4_TUN_SEND_WRID_SHIFT)
#define MLX4_TUN_SET_WRID_QPN(a) (((u64) ((a) & 0x3)) << MLX4_TUN_QPN_SHIFT)

#define MLX4_TUN_IS_RECV(a) (((a) >> MLX4_TUN_SEND_WRID_SHIFT) & 0x1)
#define MLX4_TUN_WRID_QPN(a) (((a) >> MLX4_TUN_QPN_SHIFT) & 0x3)
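/*
 * Work request IDs for the tunnel/proxy QPs pack three fields into one
 * u64: bit 34 flags a receive completion, bits 32-33 carry the proxy QP
 * type, and the low bits hold the ring buffer index.
 */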
struct mlx4_mad_rcv_buf {
    struct ib_grh grh;
    u8 payload[256];
} __packed;

struct mlx4_mad_snd_buf {
    u8 payload[256];
} __packed;

struct mlx4_tunnel_mad {
    struct ib_grh grh;
    struct mlx4_ib_tunnel_header hdr;
    struct ib_mad mad;
} __packed;

struct mlx4_rcv_tunnel_mad {
    struct mlx4_rcv_tunnel_hdr hdr;
    struct ib_grh grh;
    struct ib_mad mad;
} __packed;
static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num);

__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
    return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
        cpu_to_be64(0xff00000000000000LL);
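/*
 * Execute a MAD through the MAD_IFC firmware command: the request MAD is
 * copied into a command mailbox, the optional work completion and GRH are
 * packed into the mailbox extension, and the response is copied back out.
 */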
int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
        int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
        void *in_mad, void *response_mad)
    struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
    u32 in_modifier = port;

    inmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
    if (IS_ERR(inmailbox))
        return PTR_ERR(inmailbox);
    inbox = inmailbox->buf;

    outmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
    if (IS_ERR(outmailbox)) {
        mlx4_free_cmd_mailbox(dev->dev, inmailbox);
        return PTR_ERR(outmailbox);

    memcpy(inbox, in_mad, 256);

    /*
     * Key check traps can't be generated unless we have in_wc to
     * tell us where to send the trap.
     */
    if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_MKEY) || !in_wc)
    if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_BKEY) || !in_wc)
    if (mlx4_is_mfunc(dev->dev) &&
        (mad_ifc_flags & MLX4_MAD_IFC_NET_VIEW || in_wc))

    memset(inbox + 256, 0, 256);
    ext_info = inbox + 256;

    ext_info->my_qpn = cpu_to_be32(in_wc->qp->qp_num);
    ext_info->rqpn = cpu_to_be32(in_wc->src_qp);
    ext_info->sl = in_wc->sl << 4;
    ext_info->g_path = in_wc->dlid_path_bits |
        (in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0);
    ext_info->pkey = cpu_to_be16(in_wc->pkey_index);

    memcpy(ext_info->grh, in_grh, 40);

    in_modifier |= in_wc->slid << 16;

    err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma, in_modifier,
            mlx4_is_master(dev->dev) ? (op_modifier & ~0x8) : op_modifier,
            MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
            (op_modifier & 0x8) ? MLX4_CMD_NATIVE : MLX4_CMD_WRAPPED);

    memcpy(response_mad, outmailbox->buf, 256);

    mlx4_free_cmd_mailbox(dev->dev, inmailbox);
    mlx4_free_cmd_mailbox(dev->dev, outmailbox);
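/*
 * Cache an address handle for the subnet manager of the given port,
 * replacing any previously cached one under sm_lock.
 */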
static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
    struct ib_ah *new_ah;
    struct ib_ah_attr ah_attr;

    if (!dev->send_agent[port_num - 1][0])

    memset(&ah_attr, 0, sizeof ah_attr);
    ah_attr.port_num = port_num;

    new_ah = ib_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,

    spin_lock_irqsave(&dev->sm_lock, flags);
    if (dev->sm_ah[port_num - 1])
        ib_destroy_ah(dev->sm_ah[port_num - 1]);
    dev->sm_ah[port_num - 1] = new_ah;
    spin_unlock_irqrestore(&dev->sm_lock, flags);
/*
 * Snoop SM MADs for port info, GUID info, and P_Key table sets, so we can
 * synthesize LID change, Client-Rereg, GID change, and P_Key change events.
 */
static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
    struct ib_port_info *pinfo;
    u32 bn, pkey_change_bitmap;
    struct mlx4_ib_dev *dev = to_mdev(ibdev);

    if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
         mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
        mad->mad_hdr.method == IB_MGMT_METHOD_SET)
        switch (mad->mad_hdr.attr_id) {
        case IB_SMP_ATTR_PORT_INFO:
            pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data;
            lid = be16_to_cpu(pinfo->lid);

            update_sm_ah(dev, port_num,
                    be16_to_cpu(pinfo->sm_lid),
                    pinfo->neighbormtu_mastersmsl & 0xf);

            if (pinfo->clientrereg_resv_subnetto & 0x80)
                handle_client_rereg_event(dev, port_num);

            mlx4_ib_dispatch_event(dev, port_num,
                    IB_EVENT_LID_CHANGE);

        case IB_SMP_ATTR_PKEY_TABLE:
            if (!mlx4_is_mfunc(dev->dev)) {
                mlx4_ib_dispatch_event(dev, port_num,
                        IB_EVENT_PKEY_CHANGE);

            bn = be32_to_cpu(((struct ib_smp *)mad)->attr_mod) & 0xFFFF;
            base = (__be16 *) &(((struct ib_smp *)mad)->data[0]);
            pkey_change_bitmap = 0;
            for (i = 0; i < 32; i++) {
                pr_debug("PKEY[%d] = x%x\n",
                        i + bn*32, be16_to_cpu(base[i]));
                if (be16_to_cpu(base[i]) !=
                    dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32]) {
                    pkey_change_bitmap |= (1 << i);
                    dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32] =
                        be16_to_cpu(base[i]);

            pr_debug("PKEY Change event: port=%d, "
                    "block=0x%x, change_bitmap=0x%x\n",
                    port_num, bn, pkey_change_bitmap);

            if (pkey_change_bitmap)
                mlx4_ib_dispatch_event(dev, port_num,
                        IB_EVENT_PKEY_CHANGE);

        case IB_SMP_ATTR_GUID_INFO:
            /* paravirtualized master's guid is guid 0 -- does not change */
            if (!mlx4_is_master(dev->dev))
                mlx4_ib_dispatch_event(dev, port_num,
                        IB_EVENT_GID_CHANGE);
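/*
 * Replace the NodeDescription data in outgoing GetResp SMPs with the
 * locally configured node_desc, under sm_lock.
 */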
static void node_desc_override(struct ib_device *dev,
    if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
         mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
        mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
        mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
        spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags);
        memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64);
        spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags);
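/*
 * Forward an unsolicited trap MAD to the cached SM address handle via the
 * port's MAD send agent.
 */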
static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *mad)
    int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
    struct ib_mad_send_buf *send_buf;
    struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];

    send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
            IB_MGMT_MAD_DATA, GFP_ATOMIC);
    if (IS_ERR(send_buf))

    /*
     * We rely here on the fact that MLX QPs don't use the
     * address handle after the send is posted (this is
     * wrong following the IB spec strictly, but we know
     * it's OK for our devices).
     */
    spin_lock_irqsave(&dev->sm_lock, flags);
    memcpy(send_buf->mad, mad, sizeof *mad);
    if ((send_buf->ah = dev->sm_ah[port_num - 1]))
        ret = ib_post_send_mad(send_buf, NULL);

    spin_unlock_irqrestore(&dev->sm_lock, flags);

        ib_free_send_mad(send_buf);
static int mlx4_ib_demux_sa_handler(struct ib_device *ibdev, int port, int slave,
        struct ib_sa_mad *sa_mad)
    /* dispatch to different sa handlers */
    switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
    case IB_SA_ATTR_MC_MEMBER_REC:
        ret = mlx4_ib_mcg_demux_handler(ibdev, port, slave, sa_mad);

int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid)
    struct mlx4_ib_dev *dev = to_mdev(ibdev);

    for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
        if (dev->sriov.demux[port - 1].guid_cache[i] == guid)
static int get_pkey_phys_indices(struct mlx4_ib_dev *ibdev, u8 port, u8 ph_pkey_ix,
        u8 *full_pk_ix, u8 *partial_pk_ix,
    err = ib_get_cached_pkey(&ibdev->ib_dev, port, ph_pkey_ix, &search_pkey);

    fm = (search_pkey & 0x8000) ? 1 : 0;
        *full_pk_ix = ph_pkey_ix;
        search_pkey &= 0x7FFF;
        *partial_pk_ix = ph_pkey_ix;
        search_pkey |= 0x8000;

    if (ib_find_exact_cached_pkey(&ibdev->ib_dev, port, search_pkey, &pk))

        *partial_pk_ix = (pk & 0xFF);
        *full_pk_ix = (pk & 0xFF);

    *is_full_member = fm;
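/*
 * Tunnel a MAD that arrived from the wire to the destination slave's proxy
 * (tunnel) QP, wrapping it in a mlx4_rcv_tunnel_mad that preserves the
 * original work completion metadata (pkey index, SL, SLID, source QP).
 */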
int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
        enum ib_qp_type dest_qpt, struct ib_wc *wc,
        struct ib_grh *grh, struct ib_mad *mad)
    struct ib_send_wr wr, *bad_wr;
    struct mlx4_ib_demux_pv_ctx *tun_ctx;
    struct mlx4_ib_demux_pv_qp *tun_qp;
    struct mlx4_rcv_tunnel_mad *tun_mad;
    struct ib_ah_attr attr;
    struct ib_qp *src_qp = NULL;
    unsigned tun_tx_ix = 0;
    int is_full_member = 0;
    u8 ph_pkey_ix, full_pk_ix = 0, partial_pk_ix = 0;

    if (dest_qpt > IB_QPT_GSI)

    tun_ctx = dev->sriov.demux[port-1].tun[slave];

    /* check if proxy qp created */
    if (!tun_ctx || tun_ctx->state != DEMUX_PV_STATE_ACTIVE)

    /* QP0 forwarding only for Dom0 */
    if (!dest_qpt && (mlx4_master_func_num(dev->dev) != slave))

        tun_qp = &tun_ctx->qp[0];
        tun_qp = &tun_ctx->qp[1];

    /* compute pkey index for slave */
    /* get physical pkey -- virtualized Dom0 pkey to phys */
        dev->pkeys.virt2phys_pkey[mlx4_master_func_num(dev->dev)][port - 1][wc->pkey_index];

    /* now, translate this to the slave pkey index */
    ret = get_pkey_phys_indices(dev, port, ph_pkey_ix, &full_pk_ix,
            &partial_pk_ix, &is_full_member);

    for (i = 0; i < dev->dev->caps.pkey_table_len[port]; i++) {
        if ((dev->pkeys.virt2phys_pkey[slave][port - 1][i] == full_pk_ix) ||
             (dev->pkeys.virt2phys_pkey[slave][port - 1][i] == partial_pk_ix)))
    if (i == dev->dev->caps.pkey_table_len[port])

    tun_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];

    dqpn = dev->dev->caps.sqp_start + 8 * slave + port + (dest_qpt * 2) - 1;

    /* get tunnel tx data buf for slave */

    /* create ah. Just need an empty one with the port num for the post send.
     * The driver will set the force loopback bit in post_send */
    memset(&attr, 0, sizeof attr);
    attr.port_num = port;
    ah = ib_create_ah(tun_ctx->pd, &attr);

    /* allocate the tunnel tx buffer only after all failure-return checks have passed */
    spin_lock(&tun_qp->tx_lock);
    if (tun_qp->tx_ix_head - tun_qp->tx_ix_tail >=
        (MLX4_NUM_TUNNEL_BUFS - 1))
    tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
    spin_unlock(&tun_qp->tx_lock);

    tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
    if (tun_qp->tx_ring[tun_tx_ix].ah)
        ib_destroy_ah(tun_qp->tx_ring[tun_tx_ix].ah);
    tun_qp->tx_ring[tun_tx_ix].ah = ah;
    ib_dma_sync_single_for_cpu(&dev->ib_dev,
            tun_qp->tx_ring[tun_tx_ix].buf.map,
            sizeof (struct mlx4_rcv_tunnel_mad),

    /* copy over to tunnel buffer */
    memcpy(&tun_mad->grh, grh, sizeof *grh);
    memcpy(&tun_mad->mad, mad, sizeof *mad);

    /* adjust tunnel data */
    tun_mad->hdr.pkey_index = cpu_to_be16(tun_pkey_ix);
    tun_mad->hdr.sl_vid = cpu_to_be16(((u16)(wc->sl)) << 12);
    tun_mad->hdr.slid_mac_47_32 = cpu_to_be16(wc->slid);
    tun_mad->hdr.flags_src_qp = cpu_to_be32(wc->src_qp & 0xFFFFFF);
    tun_mad->hdr.g_ml_path = (grh && (wc->wc_flags & IB_WC_GRH)) ? 0x80 : 0;

    ib_dma_sync_single_for_device(&dev->ib_dev,
            tun_qp->tx_ring[tun_tx_ix].buf.map,
            sizeof (struct mlx4_rcv_tunnel_mad),

    list.addr = tun_qp->tx_ring[tun_tx_ix].buf.map;
    list.length = sizeof (struct mlx4_rcv_tunnel_mad);
    list.lkey = tun_ctx->mr->lkey;

    wr.wr.ud.port_num = port;
    wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
    wr.wr.ud.remote_qpn = dqpn;
    wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt);
    wr.opcode = IB_WR_SEND;
    wr.send_flags = IB_SEND_SIGNALED;

    ret = ib_post_send(src_qp, &wr, &bad_wr);
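/*
 * Decide which slave an incoming MAD is destined for -- from the TID for
 * response MADs, from the GRH destination GID otherwise -- and tunnel it
 * to that slave via mlx4_ib_send_to_slave().
 */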
static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
        struct ib_wc *wc, struct ib_grh *grh,
    struct mlx4_ib_dev *dev = to_mdev(ibdev);

    /* Initially assume that this mad is for us */
    slave = mlx4_master_func_num(dev->dev);

    /* See if the slave id is encoded in a response mad */
    if (mad->mad_hdr.method & 0x80) {
        slave_id = (u8 *) &mad->mad_hdr.tid;
        if (slave != 255) /* 255 indicates the dom0 */
            *slave_id = 0; /* remap tid */

    /* If a grh is present, we demux according to it */
    if (wc->wc_flags & IB_WC_GRH) {
        slave = mlx4_ib_find_real_gid(ibdev, port, grh->dgid.global.interface_id);
            mlx4_ib_warn(ibdev, "failed matching grh\n");

    /* Class-specific handling */
    switch (mad->mad_hdr.mgmt_class) {
    case IB_MGMT_CLASS_SUBN_ADM:
        if (mlx4_ib_demux_sa_handler(ibdev, port, slave,
                (struct ib_sa_mad *) mad))
    case IB_MGMT_CLASS_DEVICE_MGMT:
        if (mad->mad_hdr.method != IB_MGMT_METHOD_GET_RESP)
        /* Drop unsupported classes for slaves in tunnel mode */
        if (slave != mlx4_master_func_num(dev->dev)) {
            pr_debug("dropping unsupported ingress mad from class:%d "
                    "for slave:%d\n", mad->mad_hdr.mgmt_class, slave);

    /* make sure a slave value of 255 (not yet resolved) was not left unhandled */
    if (slave >= dev->dev->caps.sqp_demux) {
        mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
                slave, dev->dev->caps.sqp_demux);

    err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
        pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
        struct ib_wc *in_wc, struct ib_grh *in_grh,
        struct ib_mad *in_mad, struct ib_mad *out_mad)
    u16 slid, prev_lid = 0;
    struct ib_port_attr pattr;

    if (in_wc && in_wc->qp->qp_num) {
        pr_debug("received MAD: slid:%d sqpn:%d "
                "dlid_bits:%d dqpn:%d wc_flags:0x%x, cls %x, mtd %x, atr %x\n",
                in_wc->slid, in_wc->src_qp,
                in_wc->dlid_path_bits,
                in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.method,
                be16_to_cpu(in_mad->mad_hdr.attr_id));
        if (in_wc->wc_flags & IB_WC_GRH) {
            pr_debug("sgid_hi:0x%016llx sgid_lo:0x%016llx\n",
                    be64_to_cpu(in_grh->sgid.global.subnet_prefix),
                    be64_to_cpu(in_grh->sgid.global.interface_id));
            pr_debug("dgid_hi:0x%016llx dgid_lo:0x%016llx\n",
                    be64_to_cpu(in_grh->dgid.global.subnet_prefix),
                    be64_to_cpu(in_grh->dgid.global.interface_id));

    slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);

    if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) {
        forward_trap(to_mdev(ibdev), port_num, in_mad);
        return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

    if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
        in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
        if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
            in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
            in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
            return IB_MAD_RESULT_SUCCESS;

        /*
         * Don't process SMInfo queries -- the SMA can't handle them.
         */
        if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
            return IB_MAD_RESULT_SUCCESS;
    } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
            in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1 ||
            in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS2 ||
            in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
        if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
            in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
            return IB_MAD_RESULT_SUCCESS;
        return IB_MAD_RESULT_SUCCESS;

    if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
         in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
        in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
        in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
        !ib_query_port(ibdev, port_num, &pattr))
        prev_lid = pattr.lid;

    err = mlx4_MAD_IFC(to_mdev(ibdev),
            (mad_flags & IB_MAD_IGNORE_MKEY ? MLX4_MAD_IFC_IGNORE_MKEY : 0) |
            (mad_flags & IB_MAD_IGNORE_BKEY ? MLX4_MAD_IFC_IGNORE_BKEY : 0) |
            MLX4_MAD_IFC_NET_VIEW,
            port_num, in_wc, in_grh, in_mad, out_mad);
        return IB_MAD_RESULT_FAILURE;

    if (!out_mad->mad_hdr.status) {
        if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV))
            smp_snoop(ibdev, port_num, in_mad, prev_lid);
        node_desc_override(ibdev, out_mad);

    /* set return bit in status of directed route responses */
    if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
        out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);

    if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
        /* no response for trap repress */
        return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

    return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
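/*
 * Translate a mlx4 flow counter set into PMA PortCounters fields; byte
 * counts are converted to the 32-bit dword units the PMA format expects.
 */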
static void edit_counter(struct mlx4_counter *cnt,
        struct ib_pma_portcounters *pma_cnt)
    pma_cnt->port_xmit_data = cpu_to_be32((be64_to_cpu(cnt->tx_bytes) >> 2));
    pma_cnt->port_rcv_data = cpu_to_be32((be64_to_cpu(cnt->rx_bytes) >> 2));
    pma_cnt->port_xmit_packets = cpu_to_be32(be64_to_cpu(cnt->tx_frames));
    pma_cnt->port_rcv_packets = cpu_to_be32(be64_to_cpu(cnt->rx_frames));
static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
        struct ib_wc *in_wc, struct ib_grh *in_grh,
        struct ib_mad *in_mad, struct ib_mad *out_mad)
    struct mlx4_cmd_mailbox *mailbox;
    struct mlx4_ib_dev *dev = to_mdev(ibdev);
    u32 inmod = dev->counters[port_num - 1] & 0xffff;

    if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)

    mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
        return IB_MAD_RESULT_FAILURE;

    err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0,
            MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
        err = IB_MAD_RESULT_FAILURE;

    memset(out_mad->data, 0, sizeof out_mad->data);
    mode = ((struct mlx4_counter *)mailbox->buf)->counter_mode;
    switch (mode & 0xf) {
        edit_counter(mailbox->buf,
                (void *)(out_mad->data + 40));
        err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
        err = IB_MAD_RESULT_FAILURE;

    mlx4_free_cmd_mailbox(dev->dev, mailbox);
int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
        struct ib_wc *in_wc, struct ib_grh *in_grh,
        struct ib_mad *in_mad, struct ib_mad *out_mad)
    switch (rdma_port_get_link_layer(ibdev, port_num)) {
    case IB_LINK_LAYER_INFINIBAND:
        return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
                in_grh, in_mad, out_mad);
    case IB_LINK_LAYER_ETHERNET:
        return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
                in_grh, in_mad, out_mad);
static void send_handler(struct ib_mad_agent *agent,
        struct ib_mad_send_wc *mad_send_wc)
    ib_free_send_mad(mad_send_wc->send_buf);
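/*
 * Register one MAD send agent per (port, QP0/QP1) pair for IB link-layer
 * ports; the agent slots stay NULL for Ethernet ports.
 */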
int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
    struct ib_mad_agent *agent;
    enum rdma_link_layer ll;

    for (p = 0; p < dev->num_ports; ++p) {
        ll = rdma_port_get_link_layer(&dev->ib_dev, p + 1);
        for (q = 0; q <= 1; ++q) {
            if (ll == IB_LINK_LAYER_INFINIBAND) {
                agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
                        q ? IB_QPT_GSI : IB_QPT_SMI,
                        NULL, 0, send_handler,
                    ret = PTR_ERR(agent);
                dev->send_agent[p][q] = agent;
                dev->send_agent[p][q] = NULL;

    for (p = 0; p < dev->num_ports; ++p)
        for (q = 0; q <= 1; ++q)
            if (dev->send_agent[p][q])
                ib_unregister_mad_agent(dev->send_agent[p][q]);
void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
    struct ib_mad_agent *agent;

    for (p = 0; p < dev->num_ports; ++p) {
        for (q = 0; q <= 1; ++q) {
            agent = dev->send_agent[p][q];
                dev->send_agent[p][q] = NULL;
                ib_unregister_mad_agent(agent);

            ib_destroy_ah(dev->sm_ah[p]);
static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num)
    /* re-configure the mcg's */
    if (mlx4_is_master(dev->dev)) {
        if (!dev->sriov.is_going_down)
            mlx4_ib_mcg_port_cleanup(&dev->sriov.demux[port_num - 1], 0);

    mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_CLIENT_REREGISTER);
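/*
 * Handle a PORT_MGMT_CHANGE event queue entry: refresh the SM address
 * handle and dispatch LID/GID/P_Key change and client-reregister events
 * according to the event subtype.
 */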
void handle_port_mgmt_change_event(struct work_struct *work)
    struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
    struct mlx4_ib_dev *dev = ew->ib_dev;
    struct mlx4_eqe *eqe = &(ew->ib_eqe);
    u8 port = eqe->event.port_mgmt_change.port;

    switch (eqe->subtype) {
    case MLX4_DEV_PMC_SUBTYPE_PORT_INFO:
        changed_attr = be32_to_cpu(eqe->event.port_mgmt_change.params.port_info.changed_attr);

        /* Update the SM ah - This should be done before handling
           the other changed attributes so that MADs can be sent to the SM */
        if (changed_attr & MSTR_SM_CHANGE_MASK) {
            u16 lid = be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.mstr_sm_lid);
            u8 sl = eqe->event.port_mgmt_change.params.port_info.mstr_sm_sl & 0xf;
            update_sm_ah(dev, port, lid, sl);

        /* Check if it is a lid change event */
        if (changed_attr & MLX4_EQ_PORT_INFO_LID_CHANGE_MASK)
            mlx4_ib_dispatch_event(dev, port, IB_EVENT_LID_CHANGE);

        /* Generate GUID changed event */
        if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK)
            mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);

        if (changed_attr & MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK)
            handle_client_rereg_event(dev, port);

    case MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE:
        mlx4_ib_dispatch_event(dev, port, IB_EVENT_PKEY_CHANGE);
    case MLX4_DEV_PMC_SUBTYPE_GUID_INFO:
        /* paravirtualized master's guid is guid 0 -- does not change */
        if (!mlx4_is_master(dev->dev))
            mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
        pr_warn("Unsupported subtype 0x%x for "
                "Port Management Change event\n", eqe->subtype);
void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
        enum ib_event_type type)
    struct ib_event event;

    event.device = &dev->ib_dev;
    event.element.port_num = port_num;

    ib_dispatch_event(&event);
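/*
 * CQ completion callback for the tunnel/proxy QPs: queue the demux worker
 * unless the context is shutting down.
 */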
static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg)
    struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
    struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);

    spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
    if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
        queue_work(ctx->wq, &ctx->work);
    spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
        struct mlx4_ib_demux_pv_qp *tun_qp,
    struct ib_sge sg_list;
    struct ib_recv_wr recv_wr, *bad_recv_wr;

    size = (tun_qp->qp->qp_type == IB_QPT_UD) ?
        sizeof (struct mlx4_tunnel_mad) : sizeof (struct mlx4_mad_rcv_buf);

    sg_list.addr = tun_qp->ring[index].map;
    sg_list.length = size;
    sg_list.lkey = ctx->mr->lkey;

    recv_wr.sg_list = &sg_list;
    recv_wr.wr_id = (u64) index | MLX4_TUN_WRID_RECV |
        MLX4_TUN_SET_WRID_QPN(tun_qp->proxy_qpt);
    ib_dma_sync_single_for_device(ctx->ib_dev, tun_qp->ring[index].map,
            size, DMA_FROM_DEVICE);
    return ib_post_recv(tun_qp->qp, &recv_wr, &bad_recv_wr);
static int mlx4_ib_multiplex_sa_handler(struct ib_device *ibdev, int port,
        int slave, struct ib_sa_mad *sa_mad)
    /* dispatch to different sa handlers */
    switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
    case IB_SA_ATTR_MC_MEMBER_REC:
        ret = mlx4_ib_mcg_multiplex_handler(ibdev, port, slave, sa_mad);

static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave)
    int slave_start = dev->dev->caps.sqp_start + 8 * slave;

    return (qpn >= slave_start && qpn <= slave_start + 1);
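/*
 * Send a MAD on behalf of a slave out the real SQP of the port, using a
 * freshly created address handle and a slot in the per-QP tx ring.
 */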
int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
        enum ib_qp_type dest_qpt, u16 pkey_index, u32 remote_qpn,
        u32 qkey, struct ib_ah_attr *attr, struct ib_mad *mad)
    struct ib_send_wr wr, *bad_wr;
    struct mlx4_ib_demux_pv_ctx *sqp_ctx;
    struct mlx4_ib_demux_pv_qp *sqp;
    struct mlx4_mad_snd_buf *sqp_mad;
    struct ib_qp *send_qp = NULL;
    unsigned wire_tx_ix = 0;

    sqp_ctx = dev->sriov.sqps[port-1];

    /* check if proxy qp created */
    if (!sqp_ctx || sqp_ctx->state != DEMUX_PV_STATE_ACTIVE)

    /* QP0 forwarding only for Dom0 */
    if (dest_qpt == IB_QPT_SMI && (mlx4_master_func_num(dev->dev) != slave))

    if (dest_qpt == IB_QPT_SMI) {
        sqp = &sqp_ctx->qp[0];
        wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];
        sqp = &sqp_ctx->qp[1];
        wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][pkey_index];

    sgid_index = attr->grh.sgid_index;
    attr->grh.sgid_index = 0;
    ah = ib_create_ah(sqp_ctx->pd, attr);
    attr->grh.sgid_index = sgid_index;
    to_mah(ah)->av.ib.gid_index = sgid_index;
    /* get rid of force-loopback bit */
    to_mah(ah)->av.ib.port_pd &= cpu_to_be32(0x7FFFFFFF);
    spin_lock(&sqp->tx_lock);
    if (sqp->tx_ix_head - sqp->tx_ix_tail >=
        (MLX4_NUM_TUNNEL_BUFS - 1))
    wire_tx_ix = (++sqp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
    spin_unlock(&sqp->tx_lock);

    sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr);
    if (sqp->tx_ring[wire_tx_ix].ah)
        ib_destroy_ah(sqp->tx_ring[wire_tx_ix].ah);
    sqp->tx_ring[wire_tx_ix].ah = ah;
    ib_dma_sync_single_for_cpu(&dev->ib_dev,
            sqp->tx_ring[wire_tx_ix].buf.map,
            sizeof (struct mlx4_mad_snd_buf),

    memcpy(&sqp_mad->payload, mad, sizeof *mad);

    ib_dma_sync_single_for_device(&dev->ib_dev,
            sqp->tx_ring[wire_tx_ix].buf.map,
            sizeof (struct mlx4_mad_snd_buf),

    list.addr = sqp->tx_ring[wire_tx_ix].buf.map;
    list.length = sizeof (struct mlx4_mad_snd_buf);
    list.lkey = sqp_ctx->mr->lkey;

    wr.wr.ud.port_num = port;
    wr.wr.ud.pkey_index = wire_pkey_ix;
    wr.wr.ud.remote_qkey = qkey;
    wr.wr.ud.remote_qpn = remote_qpn;
    wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum);
    wr.opcode = IB_WR_SEND;
    wr.send_flags = IB_SEND_SIGNALED;

    ret = ib_post_send(send_qp, &wr, &bad_wr);
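/*
 * Multiplex a MAD received on a tunnel QP back onto the wire: validate
 * that it really came from the owning slave's proxy QP, check the TID and
 * class, rebuild an address handle from the tunnelled mlx4_av, and resend
 * it via mlx4_ib_send_to_wire().
 */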
static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc *wc)
    struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
    struct mlx4_ib_demux_pv_qp *tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc->wr_id)];
    int wr_ix = wc->wr_id & (MLX4_NUM_TUNNEL_BUFS - 1);
    struct mlx4_tunnel_mad *tunnel = tun_qp->ring[wr_ix].addr;
    struct mlx4_ib_ah ah;
    struct ib_ah_attr ah_attr;

    /* Get the slave that sent this packet */
    if (wc->src_qp < dev->dev->caps.sqp_start ||
        wc->src_qp >= dev->dev->caps.base_tunnel_sqpn ||
        (wc->src_qp & 0x1) != ctx->port - 1 ||
        mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d\n", wc->src_qp);

    slave = ((wc->src_qp & ~0x7) - dev->dev->caps.sqp_start) / 8;
    if (slave != ctx->slave) {
        mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: "
                "belongs to another slave\n", wc->src_qp);

    if (slave != mlx4_master_func_num(dev->dev) && !(wc->src_qp & 0x2)) {
        mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: "
                "non-master trying to send QP0 packets\n", wc->src_qp);

    /* Map transaction ID */
    ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map,
            sizeof (struct mlx4_tunnel_mad),
    switch (tunnel->mad.mad_hdr.method) {
    case IB_MGMT_METHOD_SET:
    case IB_MGMT_METHOD_GET:
    case IB_MGMT_METHOD_REPORT:
    case IB_SA_METHOD_GET_TABLE:
    case IB_SA_METHOD_DELETE:
    case IB_SA_METHOD_GET_MULTI:
    case IB_SA_METHOD_GET_TRACE_TBL:
        slave_id = (u8 *) &tunnel->mad.mad_hdr.tid;
            mlx4_ib_warn(ctx->ib_dev, "egress mad has non-null tid msb:%d "
                    "class:%d slave:%d\n", *slave_id,
                    tunnel->mad.mad_hdr.mgmt_class, slave);

    /* Class-specific handling */
    switch (tunnel->mad.mad_hdr.mgmt_class) {
    case IB_MGMT_CLASS_SUBN_ADM:
        if (mlx4_ib_multiplex_sa_handler(ctx->ib_dev, ctx->port, slave,
                (struct ib_sa_mad *) &tunnel->mad))
    case IB_MGMT_CLASS_DEVICE_MGMT:
        if (tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_GET &&
            tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_SET)
        /* Drop unsupported classes for slaves in tunnel mode */
        if (slave != mlx4_master_func_num(dev->dev)) {
            mlx4_ib_warn(ctx->ib_dev, "dropping unsupported egress mad from class:%d "
                    "for slave:%d\n", tunnel->mad.mad_hdr.mgmt_class, slave);

    /* We are using standard ib_core services to send the mad, so generate a
     * standard address handle by decoding the tunnelled mlx4_ah fields */
    memcpy(&ah.av, &tunnel->hdr.av, sizeof (struct mlx4_av));
    ah.ibah.device = ctx->ib_dev;
    mlx4_ib_query_ah(&ah.ibah, &ah_attr);
    if ((ah_attr.ah_flags & IB_AH_GRH) &&
        (ah_attr.grh.sgid_index != slave)) {
        mlx4_ib_warn(ctx->ib_dev, "slave:%d accessed invalid sgid_index:%d\n",
                slave, ah_attr.grh.sgid_index);

    mlx4_ib_send_to_wire(dev, slave, ctx->port,
            is_proxy_qp0(dev, wc->src_qp, slave) ?
            IB_QPT_SMI : IB_QPT_GSI,
            be16_to_cpu(tunnel->hdr.pkey_index),
            be32_to_cpu(tunnel->hdr.remote_qpn),
            be32_to_cpu(tunnel->hdr.qkey),
            &ah_attr, &tunnel->mad);
static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
        enum ib_qp_type qp_type, int is_tun)
    struct mlx4_ib_demux_pv_qp *tun_qp;
    int rx_buf_size, tx_buf_size;

    if (qp_type > IB_QPT_GSI)

    tun_qp = &ctx->qp[qp_type];

    tun_qp->ring = kzalloc(sizeof (struct mlx4_ib_buf) * MLX4_NUM_TUNNEL_BUFS,

    tun_qp->tx_ring = kcalloc(MLX4_NUM_TUNNEL_BUFS,
            sizeof (struct mlx4_ib_tun_tx_buf),
    if (!tun_qp->tx_ring) {
        kfree(tun_qp->ring);
        tun_qp->ring = NULL;

        rx_buf_size = sizeof (struct mlx4_tunnel_mad);
        tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
        rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
        tx_buf_size = sizeof (struct mlx4_mad_snd_buf);

    for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
        tun_qp->ring[i].addr = kmalloc(rx_buf_size, GFP_KERNEL);
        if (!tun_qp->ring[i].addr)
        tun_qp->ring[i].map = ib_dma_map_single(ctx->ib_dev,
                tun_qp->ring[i].addr,

    for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
        tun_qp->tx_ring[i].buf.addr =
            kmalloc(tx_buf_size, GFP_KERNEL);
        if (!tun_qp->tx_ring[i].buf.addr)
        tun_qp->tx_ring[i].buf.map =
            ib_dma_map_single(ctx->ib_dev,
                    tun_qp->tx_ring[i].buf.addr,
        tun_qp->tx_ring[i].ah = NULL;

    spin_lock_init(&tun_qp->tx_lock);
    tun_qp->tx_ix_head = 0;
    tun_qp->tx_ix_tail = 0;
    tun_qp->proxy_qpt = qp_type;

        ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
                tx_buf_size, DMA_TO_DEVICE);
        kfree(tun_qp->tx_ring[i].buf.addr);
    kfree(tun_qp->tx_ring);
    tun_qp->tx_ring = NULL;
    i = MLX4_NUM_TUNNEL_BUFS;
        ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
                rx_buf_size, DMA_FROM_DEVICE);
        kfree(tun_qp->ring[i].addr);
    kfree(tun_qp->ring);
    tun_qp->ring = NULL;
static void mlx4_ib_free_pv_qp_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
        enum ib_qp_type qp_type, int is_tun)
    struct mlx4_ib_demux_pv_qp *tun_qp;
    int rx_buf_size, tx_buf_size;

    if (qp_type > IB_QPT_GSI)

    tun_qp = &ctx->qp[qp_type];
        rx_buf_size = sizeof (struct mlx4_tunnel_mad);
        tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
        rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
        tx_buf_size = sizeof (struct mlx4_mad_snd_buf);

    for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
        ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
                rx_buf_size, DMA_FROM_DEVICE);
        kfree(tun_qp->ring[i].addr);

    for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
        ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
                tx_buf_size, DMA_TO_DEVICE);
        kfree(tun_qp->tx_ring[i].buf.addr);
        if (tun_qp->tx_ring[i].ah)
            ib_destroy_ah(tun_qp->tx_ring[i].ah);

    kfree(tun_qp->tx_ring);
    kfree(tun_qp->ring);
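/*
 * Work handler draining the tunnel QP CQ: received MADs are multiplexed
 * to the wire, send completions release their address handle and tx slot.
 */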
static void mlx4_ib_tunnel_comp_worker(struct work_struct *work)
    struct mlx4_ib_demux_pv_ctx *ctx;
    struct mlx4_ib_demux_pv_qp *tun_qp;

    ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
    ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);

    while (ib_poll_cq(ctx->cq, 1, &wc) == 1) {
        tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
        if (wc.status == IB_WC_SUCCESS) {
            switch (wc.opcode) {
                mlx4_ib_multiplex_mad(ctx, &wc);
                ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp,
                        (MLX4_NUM_TUNNEL_BUFS - 1));
                    pr_err("Failed reposting tunnel "
                            "buf:%lld\n", wc.wr_id);
                pr_debug("received tunnel send completion:"
                        "wrid=0x%llx, status=0x%x\n",
                        wc.wr_id, wc.status);
                ib_destroy_ah(tun_qp->tx_ring[wc.wr_id &
                        (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
                tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
                spin_lock(&tun_qp->tx_lock);
                tun_qp->tx_ix_tail++;
                spin_unlock(&tun_qp->tx_lock);

            pr_debug("mlx4_ib: completion error in tunnel: %d."
                    " status = %d, wrid = 0x%llx\n",
                    ctx->slave, wc.status, wc.wr_id);
            if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
                ib_destroy_ah(tun_qp->tx_ring[wc.wr_id &
                        (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
                tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
                spin_lock(&tun_qp->tx_lock);
                tun_qp->tx_ix_tail++;
                spin_unlock(&tun_qp->tx_lock);
static void pv_qp_event_handler(struct ib_event *event, void *qp_context)
    struct mlx4_ib_demux_pv_ctx *sqp = qp_context;

    /* It's worse than that! He's dead, Jim! */
    pr_err("Fatal error (%d) on a MAD QP on port %d\n",
            event->event, sqp->port);
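/*
 * Create a tunnel or special (proxy) QP for the context and move it
 * through INIT, RTR and RTS before posting its receive buffers.
 */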
static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
        enum ib_qp_type qp_type, int create_tun)
    struct mlx4_ib_demux_pv_qp *tun_qp;
    struct mlx4_ib_qp_tunnel_init_attr qp_init_attr;
    struct ib_qp_attr attr;
    int qp_attr_mask_INIT;

    if (qp_type > IB_QPT_GSI)

    tun_qp = &ctx->qp[qp_type];

    memset(&qp_init_attr, 0, sizeof qp_init_attr);
    qp_init_attr.init_attr.send_cq = ctx->cq;
    qp_init_attr.init_attr.recv_cq = ctx->cq;
    qp_init_attr.init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
    qp_init_attr.init_attr.cap.max_send_wr = MLX4_NUM_TUNNEL_BUFS;
    qp_init_attr.init_attr.cap.max_recv_wr = MLX4_NUM_TUNNEL_BUFS;
    qp_init_attr.init_attr.cap.max_send_sge = 1;
    qp_init_attr.init_attr.cap.max_recv_sge = 1;
        qp_init_attr.init_attr.qp_type = IB_QPT_UD;
        qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_TUNNEL_QP;
        qp_init_attr.port = ctx->port;
        qp_init_attr.slave = ctx->slave;
        qp_init_attr.proxy_qp_type = qp_type;
        qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX |
                IB_QP_QKEY | IB_QP_PORT;
        qp_init_attr.init_attr.qp_type = qp_type;
        qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_SQP;
        qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY;

    qp_init_attr.init_attr.port_num = ctx->port;
    qp_init_attr.init_attr.qp_context = ctx;
    qp_init_attr.init_attr.event_handler = pv_qp_event_handler;
    tun_qp->qp = ib_create_qp(ctx->pd, &qp_init_attr.init_attr);
    if (IS_ERR(tun_qp->qp)) {
        ret = PTR_ERR(tun_qp->qp);
        pr_err("Couldn't create %s QP (%d)\n",
                create_tun ? "tunnel" : "special", ret);

    memset(&attr, 0, sizeof attr);
    attr.qp_state = IB_QPS_INIT;
        to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0];
    attr.qkey = IB_QP1_QKEY;
    attr.port_num = ctx->port;
    ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT);
        pr_err("Couldn't change %s qp state to INIT (%d)\n",
                create_tun ? "tunnel" : "special", ret);
    attr.qp_state = IB_QPS_RTR;
    ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE);
        pr_err("Couldn't change %s qp state to RTR (%d)\n",
                create_tun ? "tunnel" : "special", ret);

    attr.qp_state = IB_QPS_RTS;
    ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
        pr_err("Couldn't change %s qp state to RTS (%d)\n",
                create_tun ? "tunnel" : "special", ret);

    for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
        ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp, i);
            pr_err(" mlx4_ib_post_pv_buf error"
                    " (err = %d, i = %d)\n", ret, i);

    ib_destroy_qp(tun_qp->qp);
/*
 * IB MAD completion callback for real SQPs
 */
static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
    struct mlx4_ib_demux_pv_ctx *ctx;
    struct mlx4_ib_demux_pv_qp *sqp;

    ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
    ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);

    while (mlx4_ib_poll_cq(ctx->cq, 1, &wc) == 1) {
        sqp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
        if (wc.status == IB_WC_SUCCESS) {
            switch (wc.opcode) {
                ib_destroy_ah(sqp->tx_ring[wc.wr_id &
                        (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
                sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
                spin_lock(&sqp->tx_lock);
                spin_unlock(&sqp->tx_lock);
                mad = (struct ib_mad *) &(((struct mlx4_mad_rcv_buf *)
                        (sqp->ring[wc.wr_id &
                        (MLX4_NUM_TUNNEL_BUFS - 1)].addr))->payload);
                grh = &(((struct mlx4_mad_rcv_buf *)
                        (sqp->ring[wc.wr_id &
                        (MLX4_NUM_TUNNEL_BUFS - 1)].addr))->grh);
                mlx4_ib_demux_mad(ctx->ib_dev, ctx->port, &wc, grh, mad);
                if (mlx4_ib_post_pv_qp_buf(ctx, sqp, wc.wr_id &
                        (MLX4_NUM_TUNNEL_BUFS - 1)))
                    pr_err("Failed reposting SQP "
                            "buf:%lld\n", wc.wr_id);

            pr_debug("mlx4_ib: completion error in tunnel: %d."
                    " status = %d, wrid = 0x%llx\n",
                    ctx->slave, wc.status, wc.wr_id);
            if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
                ib_destroy_ah(sqp->tx_ring[wc.wr_id &
                        (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
                sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
                spin_lock(&sqp->tx_lock);
                spin_unlock(&sqp->tx_lock);
static int alloc_pv_object(struct mlx4_ib_dev *dev, int slave, int port,
        struct mlx4_ib_demux_pv_ctx **ret_ctx)
    struct mlx4_ib_demux_pv_ctx *ctx;

    ctx = kzalloc(sizeof (struct mlx4_ib_demux_pv_ctx), GFP_KERNEL);
        pr_err("failed allocating pv resource context "
                "for port %d, slave %d\n", port, slave);

    ctx->ib_dev = &dev->ib_dev;

static void free_pv_object(struct mlx4_ib_dev *dev, int slave, int port)
    if (dev->sriov.demux[port - 1].tun[slave]) {
        kfree(dev->sriov.demux[port - 1].tun[slave]);
        dev->sriov.demux[port - 1].tun[slave] = NULL;
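/*
 * Create the per-slave paravirtualization resources (buffers, CQ, PD, MR
 * and QPs) for either the tunnel QPs or, for the master, the real SQPs.
 */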
static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
        int create_tun, struct mlx4_ib_demux_pv_ctx *ctx)
    ctx->state = DEMUX_PV_STATE_STARTING;
    /* have QP0 only on port owner, and only if link layer is IB */
    if (ctx->slave == mlx4_master_func_num(to_mdev(ctx->ib_dev)->dev) &&
        rdma_port_get_link_layer(ibdev, ctx->port) == IB_LINK_LAYER_INFINIBAND)

        ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_SMI, create_tun);
            pr_err("Failed allocating qp0 tunnel bufs (%d)\n", ret);

    ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_GSI, create_tun);
        pr_err("Failed allocating qp1 tunnel bufs (%d)\n", ret);

    cq_size = 2 * MLX4_NUM_TUNNEL_BUFS;

    ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler,
            NULL, ctx, cq_size, 0);
    if (IS_ERR(ctx->cq)) {
        ret = PTR_ERR(ctx->cq);
        pr_err("Couldn't create tunnel CQ (%d)\n", ret);

    ctx->pd = ib_alloc_pd(ctx->ib_dev);
    if (IS_ERR(ctx->pd)) {
        ret = PTR_ERR(ctx->pd);
        pr_err("Couldn't create tunnel PD (%d)\n", ret);

    ctx->mr = ib_get_dma_mr(ctx->pd, IB_ACCESS_LOCAL_WRITE);
    if (IS_ERR(ctx->mr)) {
        ret = PTR_ERR(ctx->mr);
        pr_err("Couldn't get tunnel DMA MR (%d)\n", ret);

        ret = create_pv_sqp(ctx, IB_QPT_SMI, create_tun);
            pr_err("Couldn't create %s QP0 (%d)\n",
                    create_tun ? "tunnel for" : "", ret);

    ret = create_pv_sqp(ctx, IB_QPT_GSI, create_tun);
        pr_err("Couldn't create %s QP1 (%d)\n",
                create_tun ? "tunnel for" : "", ret);

        INIT_WORK(&ctx->work, mlx4_ib_tunnel_comp_worker);
        INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker);

    ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq;

    ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
        pr_err("Couldn't arm tunnel cq (%d)\n", ret);
    ctx->state = DEMUX_PV_STATE_ACTIVE;

    ib_destroy_qp(ctx->qp[1].qp);
    ctx->qp[1].qp = NULL;

    ib_destroy_qp(ctx->qp[0].qp);
    ctx->qp[0].qp = NULL;

    ib_dereg_mr(ctx->mr);

    ib_dealloc_pd(ctx->pd);

    ib_destroy_cq(ctx->cq);

    mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, create_tun);

    mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, create_tun);

    ctx->state = DEMUX_PV_STATE_DOWN;
static void destroy_pv_resources(struct mlx4_ib_dev *dev, int slave, int port,
        struct mlx4_ib_demux_pv_ctx *ctx, int flush)
    if (ctx->state > DEMUX_PV_STATE_DOWN) {
        ctx->state = DEMUX_PV_STATE_DOWNING;
            flush_workqueue(ctx->wq);
        ib_destroy_qp(ctx->qp[0].qp);
        ctx->qp[0].qp = NULL;
        mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, 1);
        ib_destroy_qp(ctx->qp[1].qp);
        ctx->qp[1].qp = NULL;
        mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, 1);
        ib_dereg_mr(ctx->mr);
        ib_dealloc_pd(ctx->pd);
        ib_destroy_cq(ctx->cq);
        ctx->state = DEMUX_PV_STATE_DOWN;
static int mlx4_ib_tunnels_update(struct mlx4_ib_dev *dev, int slave,
        int port, int do_init)
        clean_vf_mcast(&dev->sriov.demux[port - 1], slave);
        /* for master, destroy real sqp resources */
        if (slave == mlx4_master_func_num(dev->dev))
            destroy_pv_resources(dev, slave, port,
                    dev->sriov.sqps[port - 1], 1);
        /* destroy the tunnel qp resources */
        destroy_pv_resources(dev, slave, port,
                dev->sriov.demux[port - 1].tun[slave], 1);

    /* create the tunnel qp resources */
    ret = create_pv_resources(&dev->ib_dev, slave, port, 1,
            dev->sriov.demux[port - 1].tun[slave]);

    /* for master, create the real sqp resources */
    if (!ret && slave == mlx4_master_func_num(dev->dev))
        ret = create_pv_resources(&dev->ib_dev, slave, port, 0,
                dev->sriov.sqps[port - 1]);
void mlx4_ib_tunnels_update_work(struct work_struct *work)
    struct mlx4_ib_demux_work *dmxw;

    dmxw = container_of(work, struct mlx4_ib_demux_work, work);
    mlx4_ib_tunnels_update(dmxw->dev, dmxw->slave, (int) dmxw->port,
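/*
 * Allocate the per-port demux context: one pv context slot per slave plus
 * the mcg para-virt state and the tunnelling and up/down workqueues.
 */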
static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
        struct mlx4_ib_demux_ctx *ctx,
    ctx->tun = kcalloc(dev->dev->caps.sqp_demux,
            sizeof (struct mlx4_ib_demux_pv_ctx *), GFP_KERNEL);

    ctx->ib_dev = &dev->ib_dev;

    for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
        ret = alloc_pv_object(dev, i, port, &ctx->tun[i]);

    ret = mlx4_ib_mcg_port_init(ctx);
        pr_err("Failed initializing mcg para-virt (%d)\n", ret);

    snprintf(name, sizeof name, "mlx4_ibt%d", port);
    ctx->wq = create_singlethread_workqueue(name);
        pr_err("Failed to create tunnelling WQ for port %d\n", port);

    snprintf(name, sizeof name, "mlx4_ibud%d", port);
    ctx->ud_wq = create_singlethread_workqueue(name);
        pr_err("Failed to create up/down WQ for port %d\n", port);

    destroy_workqueue(ctx->wq);

    mlx4_ib_mcg_port_cleanup(ctx, 1);

    for (i = 0; i < dev->dev->caps.sqp_demux; i++)
        free_pv_object(dev, i, port);
static void mlx4_ib_free_sqp_ctx(struct mlx4_ib_demux_pv_ctx *sqp_ctx)
    if (sqp_ctx->state > DEMUX_PV_STATE_DOWN) {
        sqp_ctx->state = DEMUX_PV_STATE_DOWNING;
        flush_workqueue(sqp_ctx->wq);
        if (sqp_ctx->has_smi) {
            ib_destroy_qp(sqp_ctx->qp[0].qp);
            sqp_ctx->qp[0].qp = NULL;
            mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_SMI, 0);

        ib_destroy_qp(sqp_ctx->qp[1].qp);
        sqp_ctx->qp[1].qp = NULL;
        mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_GSI, 0);
        ib_dereg_mr(sqp_ctx->mr);
        ib_dealloc_pd(sqp_ctx->pd);
        ib_destroy_cq(sqp_ctx->cq);
        sqp_ctx->state = DEMUX_PV_STATE_DOWN;
static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx)
        struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);

        mlx4_ib_mcg_port_cleanup(ctx, 1);
        for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
            if (ctx->tun[i]->state > DEMUX_PV_STATE_DOWN)
                ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING;

        flush_workqueue(ctx->wq);
        for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
            destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0);
            free_pv_object(dev, i, ctx->port);

        destroy_workqueue(ctx->ud_wq);
        destroy_workqueue(ctx->wq);
static void mlx4_ib_master_tunnels(struct mlx4_ib_dev *dev, int do_init)
    if (!mlx4_is_master(dev->dev))

    /* initialize or tear down tunnel QPs for the master */
    for (i = 0; i < dev->dev->caps.num_ports; i++)
        mlx4_ib_tunnels_update(dev, mlx4_master_func_num(dev->dev), i + 1, do_init);
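/*
 * Initialize SR-IOV paravirtualization: on the master, set up the per-port
 * demux and SQP contexts and bring up the tunnel QPs; slaves only report
 * that they operate in qp1 tunnel mode.
 */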
int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
    if (!mlx4_is_mfunc(dev->dev))

    dev->sriov.is_going_down = 0;
    spin_lock_init(&dev->sriov.going_down_lock);

    mlx4_ib_warn(&dev->ib_dev, "multi-function enabled\n");

    if (mlx4_is_slave(dev->dev)) {
        mlx4_ib_warn(&dev->ib_dev, "operating in qp1 tunnel mode\n");

    mlx4_ib_warn(&dev->ib_dev, "initializing demux service for %d qp1 clients\n",
            dev->dev->caps.sqp_demux);
    for (i = 0; i < dev->num_ports; i++) {
        err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1,
                &dev->sriov.sqps[i]);
        err = mlx4_ib_alloc_demux_ctx(dev, &dev->sriov.demux[i], i + 1);

    mlx4_ib_master_tunnels(dev, 1);

    free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
    mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev)
    unsigned long flags;

    if (!mlx4_is_mfunc(dev->dev))

    spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
    dev->sriov.is_going_down = 1;
    spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
    if (mlx4_is_master(dev->dev))
        for (i = 0; i < dev->num_ports; i++) {
            flush_workqueue(dev->sriov.demux[i].ud_wq);
            mlx4_ib_free_sqp_ctx(dev->sriov.sqps[i]);
            kfree(dev->sriov.sqps[i]);
            dev->sriov.sqps[i] = NULL;
            mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);