Merge branches 'core', 'cxgb4', 'ipath', 'iser', 'lockdep', 'mlx4', 'nes', 'ocrdma...

diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 3a78489..ceb3332 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -84,6 +84,11 @@ enum {
        MLX4_IB_CACHE_LINE_SIZE = 64,
 };
 
+enum {
+       MLX4_RAW_QP_MTU         = 7,
+       MLX4_RAW_QP_MSGMAX      = 31,
+};
+
 static const __be32 mlx4_ib_opcode[] = {
        [IB_WR_SEND]                            = cpu_to_be32(MLX4_OPCODE_SEND),
        [IB_WR_LSO]                             = cpu_to_be32(MLX4_OPCODE_LSO),
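
The two new constants feed the QP context's mtu_msgmax byte, which packs the MTU code into bits 7:5 and log2 of the maximum message size into bits 4:0 (see the __mlx4_ib_modify_qp hunk below, where UD QPs use (IB_MTU_4096 << 5) | 12). A minimal sketch of the packing:

	/* MTU code in bits 7:5, log2(max message size) in bits 4:0 */
	u8 mtu_msgmax = (MLX4_RAW_QP_MTU << 5) | MLX4_RAW_QP_MSGMAX;	/* 0xff */
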
@@ -256,7 +261,7 @@ static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
                        event.event = IB_EVENT_QP_ACCESS_ERR;
                        break;
                default:
-                       printk(KERN_WARNING "mlx4_ib: Unexpected event type %d "
+                       pr_warn("Unexpected event type %d "
                               "on QP %06x\n", type, qp->qpn);
                        return;
                }
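
For context: the strings can drop the "mlx4_ib: " prefix because the companion change defines pr_fmt() in mlx4_ib.h, which pr_warn() and friends prepend automatically. From memory, the definition is along these lines (the exact format string is an assumption, not verified against this tree):

	/* in mlx4_ib.h; recollection only */
	#define pr_fmt(fmt)	"<" MLX4_IB_DRV_NAME "> %s: " fmt, __func__
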
@@ -573,7 +578,12 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
        if (sqpn) {
                qpn = sqpn;
        } else {
-               err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn);
+               /* Raw packet QPNs must be aligned to 8 bits. If not, the WQE
+                * BlueFlame setup flow wrongly causes VLAN insertion. */
+               if (init_attr->qp_type == IB_QPT_RAW_PACKET)
+                       err = mlx4_qp_reserve_range(dev->dev, 1, 1 << 8, &qpn);
+               else
+                       err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn);
                if (err)
                        goto err_wrid;
        }
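
The third argument of mlx4_qp_reserve_range() is the alignment of the reserved range, so requesting alignment 1 << 8 guarantees a base QPN whose low eight bits are clear, which is the property the BlueFlame comment is about. A sketch of the invariant being bought here:

	/* with align = 1 << 8 the reserved base QPN is a multiple of 256 */
	WARN_ON(qpn & ((1 << 8) - 1));	/* low 8 bits must be zero */
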
@@ -715,7 +725,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
        if (qp->state != IB_QPS_RESET)
                if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
                                   MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
-                       printk(KERN_WARNING "mlx4_ib: modify QP %06x to RESET failed.\n",
+                       pr_warn("modify QP %06x to RESET failed.\n",
                               qp->mqp.qpn);
 
        get_cqs(qp, &send_cq, &recv_cq);
@@ -791,6 +801,7 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
        case IB_QPT_RC:
        case IB_QPT_UC:
        case IB_QPT_UD:
+       case IB_QPT_RAW_PACKET:
        {
                qp = kzalloc(sizeof *qp, GFP_KERNEL);
                if (!qp)
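
Adding IB_QPT_RAW_PACKET to this case list routes raw packet QPs through the same create_qp_common() path as RC/UC/UD, with no special sqpn. A hypothetical caller sketch through the core verbs (the CQ names and capability values are made up for illustration):

	struct ib_qp_init_attr init_attr = {
		.qp_type = IB_QPT_RAW_PACKET,
		.send_cq = send_cq,	/* assumed pre-created CQs */
		.recv_cq = recv_cq,
		.cap	 = { .max_send_wr = 64, .max_recv_wr = 64,
			     .max_send_sge = 1, .max_recv_sge = 1 },
	};
	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
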
@@ -872,7 +883,8 @@ static int to_mlx4_st(enum ib_qp_type type)
        case IB_QPT_XRC_INI:
        case IB_QPT_XRC_TGT:    return MLX4_QP_ST_XRC;
        case IB_QPT_SMI:
-       case IB_QPT_GSI:        return MLX4_QP_ST_MLX;
+       case IB_QPT_GSI:
+       case IB_QPT_RAW_PACKET: return MLX4_QP_ST_MLX;
        default:                return -1;
        }
 }
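
Raw packet QPs reuse MLX4_QP_ST_MLX, the service type already used for the special SMI/GSI QPs. The value returned here is packed into the QP context flags by the caller, roughly as follows (a sketch of this era's __mlx4_ib_modify_qp):

	/* next state in bits 31:28, service type in bits 23:16 */
	context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) |
				     (to_mlx4_st(ibqp->qp_type) << 16));
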
@@ -946,7 +958,7 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
 
        if (ah->ah_flags & IB_AH_GRH) {
                if (ah->grh.sgid_index >= dev->dev->caps.gid_table_len[port]) {
-                       printk(KERN_ERR "sgid_index (%u) too large. max is %d\n",
+                       pr_err("sgid_index (%u) too large. max is %d\n",
                               ah->grh.sgid_index, dev->dev->caps.gid_table_len[port] - 1);
                        return -1;
                }
@@ -1042,6 +1054,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 
        if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
                context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
+       else if (ibqp->qp_type == IB_QPT_RAW_PACKET)
+               context->mtu_msgmax = (MLX4_RAW_QP_MTU << 5) | MLX4_RAW_QP_MSGMAX;
        else if (ibqp->qp_type == IB_QPT_UD) {
                if (qp->flags & MLX4_IB_QP_LSO)
                        context->mtu_msgmax = (IB_MTU_4096 << 5) |
@@ -1050,7 +1064,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
                        context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
        } else if (attr_mask & IB_QP_PATH_MTU) {
                if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
-                       printk(KERN_ERR "path MTU (%u) is invalid\n",
+                       pr_err("path MTU (%u) is invalid\n",
                               attr->path_mtu);
                        goto out;
                }
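
For reference, enum ib_mtu encodes IB_MTU_256..IB_MTU_4096 as 1..5, which is why the range check above is expressed on the enum values and why the raw packet MTU code of 7 sits safely outside that namespace:

	enum ib_mtu {		/* from <rdma/ib_verbs.h> */
		IB_MTU_256  = 1,
		IB_MTU_512  = 2,
		IB_MTU_1024 = 3,
		IB_MTU_2048 = 4,
		IB_MTU_4096 = 5
	};
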
@@ -1200,7 +1214,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
        if (cur_state == IB_QPS_INIT &&
            new_state == IB_QPS_RTR  &&
            (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
-            ibqp->qp_type == IB_QPT_UD)) {
+            ibqp->qp_type == IB_QPT_UD ||
+            ibqp->qp_type == IB_QPT_RAW_PACKET)) {
                context->pri_path.sched_queue = (qp->port - 1) << 6;
                if (is_qp0(dev, qp))
                        context->pri_path.sched_queue |= MLX4_IB_DEFAULT_QP0_SCHED_QUEUE;
@@ -1266,7 +1281,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
        if (is_qp0(dev, qp)) {
                if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR)
                        if (mlx4_INIT_PORT(dev->dev, qp->port))
-                               printk(KERN_WARNING "INIT_PORT failed for port %d\n",
+                               pr_warn("INIT_PORT failed for port %d\n",
                                       qp->port);
 
                if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
@@ -1319,6 +1334,11 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                goto out;
        }
 
+       if ((attr_mask & IB_QP_PORT) && (ibqp->qp_type == IB_QPT_RAW_PACKET) &&
+           (rdma_port_get_link_layer(&dev->ib_dev, attr->port_num) !=
+            IB_LINK_LAYER_ETHERNET))
+               goto out;
+
        if (attr_mask & IB_QP_PKEY_INDEX) {
                int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
                if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p])
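
Note that the new guard exits with a bare goto out; it relies on err having been initialized to -EINVAL at the top of mlx4_ib_modify_qp(), so a raw packet QP bound to a non-Ethernet (non-RoCE) port fails the modify rather than silently succeeding. A more explicit equivalent (sketch):

	if ((attr_mask & IB_QP_PORT) && ibqp->qp_type == IB_QPT_RAW_PACKET &&
	    rdma_port_get_link_layer(&dev->ib_dev, attr->port_num) !=
	    IB_LINK_LAYER_ETHERNET) {
		err = -EINVAL;	/* raw packet QPs need an Ethernet port */
		goto out;
	}
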
@@ -1424,6 +1444,9 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 
        if (is_eth) {
                u8 *smac;
+               u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
+
+               mlx->sched_prio = cpu_to_be16(pcp);
 
                memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
                /* FIXME: cache smac value? */
@@ -1434,10 +1457,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
                if (!is_vlan) {
                        sqp->ud_header.eth.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
                } else {
-                       u16 pcp;
-
                        sqp->ud_header.vlan.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
-                       pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
                        sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
                }
        } else {
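
The pcp computation is hoisted out of the is_vlan branch because the 802.1p priority is now also programmed into mlx->sched_prio for untagged frames. The three service-level bits move from the top of sl_tclass_flowlabel (bits 31:29) into the PCP field of the VLAN TCI (bits 15:13); a sketch of the layout, with vlan assumed to hold the 12-bit VID as in the surrounding code:

	/* 802.1Q TCI: | PCP 15:13 | DEI 12 | VID 11:0 | */
	u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
	u16 tci = vlan | pcp;	/* becomes sqp->ud_header.vlan.tag */
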
@@ -1460,16 +1480,16 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
        header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);
 
        if (0) {
-               printk(KERN_ERR "built UD header of size %d:\n", header_size);
+               pr_err("built UD header of size %d:\n", header_size);
                for (i = 0; i < header_size / 4; ++i) {
                        if (i % 8 == 0)
-                               printk("  [%02x] ", i * 4);
-                       printk(" %08x",
-                              be32_to_cpu(((__be32 *) sqp->header_buf)[i]));
+                               pr_err("  [%02x] ", i * 4);
+                       pr_cont(" %08x",
+                               be32_to_cpu(((__be32 *) sqp->header_buf)[i]));
                        if ((i + 1) % 8 == 0)
-                               printk("\n");
+                               pr_cont("\n");
                }
-               printk("\n");
+               pr_err("\n");
        }
 
        /*