/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <linux/mlx4/cmd.h>
#include <linux/module.h>
#include <linux/cache.h>

#include "fw.h"
#include "icm.h"
enum {
	MLX4_COMMAND_INTERFACE_MIN_REV		= 2,
	MLX4_COMMAND_INTERFACE_MAX_REV		= 3,
	MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS	= 3,
};
extern void __buggy_use_of_MLX4_GET(void);
extern void __buggy_use_of_MLX4_PUT(void);

static bool enable_qos;
module_param(enable_qos, bool, 0444);
MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)");
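/* Command mailboxes hold their fields in big-endian byte order.  The two
 * accessor macros below pick the 8/16/32/64-bit byte swap by dispatching on
 * sizeof() the C-side variable; any other width leaves a call to an
 * undefined function in the object file, so a bad field size becomes a link
 * error instead of silent corruption.  Typical use, as in the query helpers
 * later in this file:
 *
 *	u8 field;
 *	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
 */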
#define MLX4_GET(dest, source, offset)				      \
	do {							      \
		void *__p = (char *) (source) + (offset);	      \
		switch (sizeof(dest)) {				      \
		case 1: (dest) = *(u8 *) __p;	    break;	      \
		case 2: (dest) = be16_to_cpup(__p); break;	      \
		case 4: (dest) = be32_to_cpup(__p); break;	      \
		case 8: (dest) = be64_to_cpup(__p); break;	      \
		default: __buggy_use_of_MLX4_GET();		      \
		}						      \
	} while (0)
#define MLX4_PUT(dest, source, offset)				      \
	do {							      \
		void *__d = ((char *) (dest) + (offset));	      \
		switch (sizeof(source)) {			      \
		case 1: *(u8 *) __d = (source);		       break; \
		case 2: *(__be16 *) __d = cpu_to_be16(source); break; \
		case 4: *(__be32 *) __d = cpu_to_be32(source); break; \
		case 8: *(__be64 *) __d = cpu_to_be64(source); break; \
		default: __buggy_use_of_MLX4_PUT();		      \
		}						      \
	} while (0)
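/* Pretty-printers for the two QUERY_DEV_CAP flag words: any set bit whose
 * index has a name in the tables below is logged via mlx4_dbg(); unnamed
 * bits (the gaps in the designated initializers) are skipped.
 */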
static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
{
	static const char *fname[] = {
		[ 0] = "RC transport",
		[ 1] = "UC transport",
		[ 2] = "UD transport",
		[ 3] = "XRC transport",
		[ 4] = "reliable multicast",
		[ 5] = "FCoIB support",
		[ 7] = "IPoIB checksum offload",
		[ 8] = "P_Key violation counter",
		[ 9] = "Q_Key violation counter",
		[12] = "Dual Port Different Protocol (DPDP) support",
		[15] = "Big LSO headers",
		[18] = "Atomic ops support",
		[19] = "Raw multicast support",
		[20] = "Address vector port checking support",
		[21] = "UD multicast support",
		[24] = "Demand paging support",
		[25] = "Router support",
		[30] = "IBoE support",
		[32] = "Unicast loopback support",
		[34] = "FCS header control",
		[38] = "Wake On LAN support",
		[40] = "UDP RSS support",
		[41] = "Unicast VEP steering support",
		[42] = "Multicast VEP steering support",
		[48] = "Counters support",
		[53] = "Port ETS Scheduler support",
		[55] = "Port link type sensing support",
		[59] = "Port management change event support",
		[61] = "64 byte EQE support",
		[62] = "64 byte CQE support",
	};
	int i;

	mlx4_dbg(dev, "DEV_CAP flags:\n");
	for (i = 0; i < ARRAY_SIZE(fname); ++i)
		if (fname[i] && (flags & (1LL << i)))
			mlx4_dbg(dev, "    %s\n", fname[i]);
}
static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
{
	static const char * const fname[] = {
		[1] = "RSS Toeplitz Hash Function support",
		[2] = "RSS XOR Hash Function support",
		[3] = "Device managed flow steering support",
		[4] = "Automatic MAC reassignment support",
		[5] = "Time stamping support",
		[6] = "VST (control vlan insertion/stripping) support",
		[7] = "FSM (MAC anti-spoofing) support",
		[8] = "Dynamic QP updates support",
		[9] = "Device managed flow steering IPoIB support",
		[10] = "TCP/IP offloads/flow-steering for VXLAN support",
		[11] = "MAD DEMUX (Secure-Host) support",
		[12] = "Large cache line (>64B) CQE stride support",
		[13] = "Large cache line (>64B) EQE stride support",
		[14] = "Ethernet protocol control support",
		[15] = "Ethernet Backplane autoneg support",
		[16] = "CONFIG DEV support",
		[17] = "Asymmetric EQs support",
		[18] = "More than 80 VFs support",
		[19] = "Performance optimized for limited rule configuration flow steering support",
		[20] = "Recoverable error events support"
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(fname); ++i)
		if (fname[i] && (flags & (1LL << i)))
			mlx4_dbg(dev, "    %s\n", fname[i]);
}
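/* MOD_STAT_CFG: only the log page-size field and its modify-mask bit are
 * filled into the 0x100-byte inbox here; the rest of the mailbox stays as
 * allocated (zeroed).
 */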
int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *inbox;
	int err;

#define MOD_STAT_CFG_IN_SIZE		0x100

#define MOD_STAT_CFG_PG_SZ_M_OFFSET	0x002
#define MOD_STAT_CFG_PG_SZ_OFFSET	0x003

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	inbox = mailbox->buf;

	MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET);
	MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);

	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
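/* QUERY_FUNC: read back which PCI bus/device/function sits behind @slave,
 * plus that function's EQ limits and reserved UAR count.
 */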
int mlx4_QUERY_FUNC(struct mlx4_dev *dev, struct mlx4_func *func, int slave)
{
	struct mlx4_cmd_mailbox *mailbox;
	u8 *outbox;
	u8 in_modifier;
	u8 field;
	u16 field16;
	int err;

#define QUERY_FUNC_BUS_OFFSET			0x00
#define QUERY_FUNC_DEVICE_OFFSET		0x01
#define QUERY_FUNC_FUNCTION_OFFSET		0x01
#define QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET	0x03
#define QUERY_FUNC_RSVD_EQS_OFFSET		0x04
#define QUERY_FUNC_MAX_EQ_OFFSET		0x06
#define QUERY_FUNC_RSVD_UARS_OFFSET		0x0b

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	in_modifier = slave;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, 0,
			   MLX4_CMD_QUERY_FUNC,
			   MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(field, outbox, QUERY_FUNC_BUS_OFFSET);
	func->bus = field & 0xf;
	MLX4_GET(field, outbox, QUERY_FUNC_DEVICE_OFFSET);
	func->device = field & 0xf1;
	MLX4_GET(field, outbox, QUERY_FUNC_FUNCTION_OFFSET);
	func->function = field & 0x7;
	MLX4_GET(field, outbox, QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET);
	func->physical_function = field & 0xf;
	MLX4_GET(field16, outbox, QUERY_FUNC_RSVD_EQS_OFFSET);
	func->rsvd_eqs = field16 & 0xffff;
	MLX4_GET(field16, outbox, QUERY_FUNC_MAX_EQ_OFFSET);
	func->max_eq = field16 & 0xffff;
	MLX4_GET(field, outbox, QUERY_FUNC_RSVD_UARS_OFFSET);
	func->rsvd_uars = field & 0x0f;

	mlx4_dbg(dev, "Bus: %d, Device: %d, Function: %d, Physical function: %d, Max EQs: %d, Reserved EQs: %d, Reserved UARs: %d\n",
		 func->bus, func->device, func->function, func->physical_function,
		 func->max_eq, func->rsvd_eqs, func->rsvd_uars);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
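/* PF-side handler for a slave's QUERY_FUNC_CAP.  op_modifier == 1 builds the
 * per-port block (proxy/tunnel special QP numbers, physical port id);
 * op_modifier == 0 builds the general block (interface flags and the
 * per-resource quotas taken from the PF resource tracker).
 */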
int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u8	field, port;
	u32	size, proxy_qp, qkey;
	int	err = 0;
	struct mlx4_func func;

#define QUERY_FUNC_CAP_FLAGS_OFFSET		0x0
#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET		0x1
#define QUERY_FUNC_CAP_PF_BHVR_OFFSET		0x4
#define QUERY_FUNC_CAP_FMR_OFFSET		0x8
#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP	0x10
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP	0x14
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP	0x18
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP	0x20
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP	0x24
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP	0x28
#define QUERY_FUNC_CAP_MAX_EQ_OFFSET		0x2c
#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET	0x30
#define QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET	0x48

#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET		0x50
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET		0x54
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET		0x58
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET		0x60
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET		0x64
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET		0x68

#define QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET	0x6c

#define QUERY_FUNC_CAP_FMR_FLAG			0x80
#define QUERY_FUNC_CAP_FLAG_RDMA		0x40
#define QUERY_FUNC_CAP_FLAG_ETH			0x80
#define QUERY_FUNC_CAP_FLAG_QUOTAS		0x10
#define QUERY_FUNC_CAP_FLAG_RESD_LKEY		0x08
#define QUERY_FUNC_CAP_FLAG_VALID_MAILBOX	0x04

#define QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG	(1UL << 31)
#define QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG	(1UL << 30)

/* when opcode modifier = 1 */
#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET		0x3
#define QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET	0x4
#define QUERY_FUNC_CAP_FLAGS0_OFFSET		0x8
#define QUERY_FUNC_CAP_FLAGS1_OFFSET		0xc

#define QUERY_FUNC_CAP_QP0_TUNNEL		0x10
#define QUERY_FUNC_CAP_QP0_PROXY		0x14
#define QUERY_FUNC_CAP_QP1_TUNNEL		0x18
#define QUERY_FUNC_CAP_QP1_PROXY		0x1c
#define QUERY_FUNC_CAP_PHYS_PORT_ID		0x28

#define QUERY_FUNC_CAP_FLAGS1_FORCE_MAC		0x40
#define QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN	0x80
#define QUERY_FUNC_CAP_FLAGS1_NIC_INFO		0x10
#define QUERY_FUNC_CAP_VF_ENABLE_QP0		0x08

#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID	0x80
#define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS	(1 << 31)

	if (vhcr->op_modifier == 1) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, slave);
		int converted_port = mlx4_slave_convert_port(
				dev, slave, vhcr->in_modifier);

		if (converted_port < 0)
			return -EINVAL;

		vhcr->in_modifier = converted_port;
		/* phys-port = logical-port */
		field = vhcr->in_modifier -
			find_first_bit(actv_ports.ports, dev->caps.num_ports);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);

		port = vhcr->in_modifier;
		proxy_qp = dev->phys_caps.base_proxy_sqpn + 8 * slave + port - 1;

		/* Set nic_info bit to mark new fields support */
		field = QUERY_FUNC_CAP_FLAGS1_NIC_INFO;

		if (mlx4_vf_smi_enabled(dev, slave, port) &&
		    !mlx4_get_parav_qkey(dev, proxy_qp, &qkey)) {
			field |= QUERY_FUNC_CAP_VF_ENABLE_QP0;
			MLX4_PUT(outbox->buf, qkey,
				 QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET);
		}
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);

		/* size is now the QP number */
		size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + port - 1;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL);

		size += 2;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_TUNNEL);

		MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP0_PROXY);
		proxy_qp += 2;
		MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP1_PROXY);

		MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
			 QUERY_FUNC_CAP_PHYS_PORT_ID);

	} else if (vhcr->op_modifier == 0) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, slave);
		/* enable rdma and ethernet interfaces, new quota locations,
		 * and reserved lkey
		 */
		field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
			 QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX |
			 QUERY_FUNC_CAP_FLAG_RESD_LKEY);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);

		field = min(
			bitmap_weight(actv_ports.ports, dev->caps.num_ports),
			dev->caps.num_ports);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);

		size = dev->caps.function_caps; /* set PF behaviours */
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);

		field = 0; /* protected FMR support not available as yet */
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
		size = dev->caps.num_qps;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
		size = dev->caps.num_srqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
		size = dev->caps.num_cqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);

		if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) ||
		    mlx4_QUERY_FUNC(dev, &func, slave)) {
			size = vhcr->in_modifier &
				QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ?
				dev->caps.num_eqs :
				rounddown_pow_of_two(dev->caps.num_eqs);
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
			size = dev->caps.reserved_eqs;
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
		} else {
			size = vhcr->in_modifier &
				QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ?
				func.max_eq :
				rounddown_pow_of_two(func.max_eq);
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
			size = func.rsvd_eqs;
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
		}

		size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
		size = dev->caps.num_mpts;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
		size = dev->caps.num_mtts;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);

		size = dev->caps.num_mgms + dev->caps.num_amgms;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);

		size = QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG |
			QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET);

		size = dev->caps.reserved_lkey + ((slave << 8) & 0xFF00);
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);
	} else
		err = -EINVAL;

	return err;
}
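/* Slave-side counterpart of the wrapper above: issue QUERY_FUNC_CAP (as a
 * wrapped command, so the PF handler answers it) and unpack the mailbox into
 * a struct mlx4_func_cap.  @gen_or_port selects the general query (0) or a
 * 1-based logical port query.
 */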
int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
			struct mlx4_func_cap *func_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field, op_modifier;
	u32 size, qkey;
	int err = 0, quotas = 0;
	u32 in_modifier;

	op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */
	in_modifier = op_modifier ? gen_or_port :
		QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, op_modifier,
			   MLX4_CMD_QUERY_FUNC_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	outbox = mailbox->buf;

	if (!op_modifier) {
		MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
		if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) {
			mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
		func_cap->flags = field;
		quotas = !!(func_cap->flags & QUERY_FUNC_CAP_FLAG_QUOTAS);

		MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
		func_cap->num_ports = field;

		MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
		func_cap->pf_context_behaviour = size;

		if (quotas) {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
			func_cap->qp_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
			func_cap->srq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
			func_cap->cq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
			func_cap->mpt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
			func_cap->mtt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
			func_cap->mcg_quota = size & 0xFFFFFF;
		} else {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
			func_cap->qp_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
			func_cap->srq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
			func_cap->cq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
			func_cap->mpt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
			func_cap->mtt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
			func_cap->mcg_quota = size & 0xFFFFFF;
		}
		MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
		func_cap->max_eq = size & 0xFFFFFF;

		MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
		func_cap->reserved_eq = size & 0xFFFFFF;

		if (func_cap->flags & QUERY_FUNC_CAP_FLAG_RESD_LKEY) {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);
			func_cap->reserved_lkey = size;
		} else {
			func_cap->reserved_lkey = 0;
		}

		func_cap->extra_flags = 0;

		/* Mailbox data from 0x6c and onward should only be treated if
		 * QUERY_FUNC_CAP_FLAG_VALID_MAILBOX is set in func_cap->flags
		 */
		if (func_cap->flags & QUERY_FUNC_CAP_FLAG_VALID_MAILBOX) {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET);
			if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG)
				func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_BF_RES_QP;
			if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG)
				func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_A0_RES_QP;
		}

		goto out;
	}

	/* logical port query */
	if (gen_or_port > dev->caps.num_ports) {
		err = -EINVAL;
		goto out;
	}

	MLX4_GET(func_cap->flags1, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET);
	if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) {
		if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN) {
			mlx4_err(dev, "VLAN is enforced on this port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}

		if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_MAC) {
			mlx4_err(dev, "Force mac is enabled on this port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
	} else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
		MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
		if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) {
			mlx4_err(dev, "phy_wqe_gid is enforced on this ib port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
	}

	MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
	func_cap->physical_port = field;
	if (func_cap->physical_port != gen_or_port) {
		err = -ENOSYS;
		goto out;
	}

	if (func_cap->flags1 & QUERY_FUNC_CAP_VF_ENABLE_QP0) {
		MLX4_GET(qkey, outbox, QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET);
		func_cap->qp0_qkey = qkey;
	} else {
		func_cap->qp0_qkey = 0;
	}

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_TUNNEL);
	func_cap->qp0_tunnel_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_PROXY);
	func_cap->qp0_proxy_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_TUNNEL);
	func_cap->qp1_tunnel_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY);
	func_cap->qp1_proxy_qpn = size & 0xFFFFFF;

	if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_NIC_INFO)
		MLX4_GET(func_cap->phys_port_id, outbox,
			 QUERY_FUNC_CAP_PHYS_PORT_ID);

	/* All other resources are allocated by the master, but we still report
	 * 'num' and 'reserved' capabilities as follows:
	 * - num remains the maximum resource index
	 * - 'num - reserved' is the total available objects of a resource, but
	 *   resource indices may be less than 'reserved'
	 * TODO: set per-resource quotas */

out:
	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}
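/* QUERY_DEV_CAP: parse the 0x100-byte capability dump.  Most limits are
 * reported by firmware as log2 values, hence the recurring
 * "1 << (field & mask)" decode below.
 */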
int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field;
	u32 field32, flags, ext_flags;
	u16 size;
	u16 stat_rate;
	int err;
	int i;

#define QUERY_DEV_CAP_OUT_SIZE			0x100
#define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET		0x10
#define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET		0x11
#define QUERY_DEV_CAP_RSVD_QP_OFFSET		0x12
#define QUERY_DEV_CAP_MAX_QP_OFFSET		0x13
#define QUERY_DEV_CAP_RSVD_SRQ_OFFSET		0x14
#define QUERY_DEV_CAP_MAX_SRQ_OFFSET		0x15
#define QUERY_DEV_CAP_RSVD_EEC_OFFSET		0x16
#define QUERY_DEV_CAP_MAX_EEC_OFFSET		0x17
#define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET		0x19
#define QUERY_DEV_CAP_RSVD_CQ_OFFSET		0x1a
#define QUERY_DEV_CAP_MAX_CQ_OFFSET		0x1b
#define QUERY_DEV_CAP_MAX_MPT_OFFSET		0x1d
#define QUERY_DEV_CAP_RSVD_EQ_OFFSET		0x1e
#define QUERY_DEV_CAP_MAX_EQ_OFFSET		0x1f
#define QUERY_DEV_CAP_RSVD_MTT_OFFSET		0x20
#define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET		0x21
#define QUERY_DEV_CAP_RSVD_MRW_OFFSET		0x22
#define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET	0x23
#define QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET		0x26
#define QUERY_DEV_CAP_MAX_AV_OFFSET		0x27
#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET		0x29
#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET		0x2b
#define QUERY_DEV_CAP_MAX_GSO_OFFSET		0x2d
#define QUERY_DEV_CAP_RSS_OFFSET		0x2e
#define QUERY_DEV_CAP_MAX_RDMA_OFFSET		0x2f
#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET		0x33
#define QUERY_DEV_CAP_ACK_DELAY_OFFSET		0x35
#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET		0x36
#define QUERY_DEV_CAP_VL_PORT_OFFSET		0x37
#define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET		0x38
#define QUERY_DEV_CAP_MAX_GID_OFFSET		0x3b
#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET	0x3c
#define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET	0x3e
#define QUERY_DEV_CAP_MAX_PKEY_OFFSET		0x3f
#define QUERY_DEV_CAP_EXT_FLAGS_OFFSET		0x40
#define QUERY_DEV_CAP_FLAGS_OFFSET		0x44
#define QUERY_DEV_CAP_RSVD_UAR_OFFSET		0x48
#define QUERY_DEV_CAP_UAR_SZ_OFFSET		0x49
#define QUERY_DEV_CAP_PAGE_SZ_OFFSET		0x4b
#define QUERY_DEV_CAP_BF_OFFSET			0x4c
#define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET	0x4d
#define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET	0x4e
#define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET	0x4f
#define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET		0x51
#define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET	0x52
#define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET		0x55
#define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET	0x56
#define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET		0x61
#define QUERY_DEV_CAP_RSVD_MCG_OFFSET		0x62
#define QUERY_DEV_CAP_MAX_MCG_OFFSET		0x63
#define QUERY_DEV_CAP_RSVD_PD_OFFSET		0x64
#define QUERY_DEV_CAP_MAX_PD_OFFSET		0x65
#define QUERY_DEV_CAP_RSVD_XRC_OFFSET		0x66
#define QUERY_DEV_CAP_MAX_XRC_OFFSET		0x67
#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET	0x68
#define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET	0x70
#define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET	0x74
#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET	0x76
#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET	0x77
#define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE	0x7a
#define QUERY_DEV_CAP_ETH_PROT_CTRL_OFFSET	0x7a
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET	0x80
#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET	0x82
#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET	0x84
#define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET	0x86
#define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET	0x88
#define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET	0x8a
#define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET	0x8c
#define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET	0x8e
#define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET	0x90
#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET	0x92
#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET		0x94
#define QUERY_DEV_CAP_CONFIG_DEV_OFFSET		0x94
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET		0x98
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET		0xa0
#define QUERY_DEV_CAP_ETH_BACKPL_OFFSET		0x9c
#define QUERY_DEV_CAP_FW_REASSIGN_MAC		0x9d
#define QUERY_DEV_CAP_VXLAN			0x9e
#define QUERY_DEV_CAP_MAD_DEMUX_OFFSET		0xb0
#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET	0xa8
#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET	0xac

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
	dev_cap->reserved_qps = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
	dev_cap->max_qps = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET);
	dev_cap->reserved_srqs = 1 << (field >> 4);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET);
	dev_cap->max_srqs = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET);
	dev_cap->max_cq_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET);
	dev_cap->reserved_cqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET);
	dev_cap->max_cqs = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
	dev_cap->max_mpts = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
	dev_cap->reserved_eqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
	dev_cap->max_eqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
	dev_cap->reserved_mtts = 1 << (field >> 4);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET);
	dev_cap->max_mrw_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET);
	dev_cap->reserved_mrws = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET);
	dev_cap->max_mtt_seg = 1 << (field & 0x3f);
	MLX4_GET(size, outbox, QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET);
	dev_cap->num_sys_eqs = size & 0xfff;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
	dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
	dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET);
	field &= 0x1f;
	if (!field)
		dev_cap->max_gso_sz = 0;
	else
		dev_cap->max_gso_sz = 1 << field;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSS_OFFSET);
	if (field & 0x20)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_XOR;
	if (field & 0x10)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_TOP;
	field &= 0xf;
	if (field) {
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS;
		dev_cap->max_rss_tbl_sz = 1 << field;
	} else
		dev_cap->max_rss_tbl_sz = 0;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
	dev_cap->max_rdma_global = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
	dev_cap->local_ca_ack_delay = field & 0x1f;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
	dev_cap->num_ports = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
	dev_cap->max_msg_sz = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
	dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
	dev_cap->fs_max_num_qp_per_entry = field;
	MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
	dev_cap->stat_rate_support = stat_rate;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_TS;
	MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
	dev_cap->flags = flags | (u64)ext_flags << 32;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
	dev_cap->reserved_uars = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
	dev_cap->uar_size = 1 << ((field & 0x3f) + 20);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET);
	dev_cap->min_page_sz = 1 << field;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET);
	if (field & 0x80) {
		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
		dev_cap->bf_reg_size = 1 << (field & 0x1f);
		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
		if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
			field = 3;
		dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
	} else {
		dev_cap->bf_reg_size = 0;
	}

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET);
	dev_cap->max_sq_sg = field;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET);
	dev_cap->max_sq_desc_sz = size;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET);
	dev_cap->max_qp_per_mcg = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET);
	dev_cap->reserved_mgms = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET);
	dev_cap->max_mcgs = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET);
	dev_cap->reserved_pds = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
	dev_cap->max_pds = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_XRC_OFFSET);
	dev_cap->reserved_xrcds = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_XRC_OFFSET);
	dev_cap->max_xrcds = 1 << (field & 0x1f);

	MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET);
	dev_cap->rdmarc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET);
	dev_cap->qpc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET);
	dev_cap->aux_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET);
	dev_cap->altc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET);
	dev_cap->eqc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET);
	dev_cap->cqc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET);
	dev_cap->srq_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET);
	dev_cap->cmpt_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET);
	dev_cap->mtt_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET);
	dev_cap->dmpt_entry_sz = size;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET);
	dev_cap->max_srq_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET);
	dev_cap->max_qp_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET);
	dev_cap->resize_srq = field & 1;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET);
	dev_cap->max_rq_sg = field;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
	dev_cap->max_rq_desc_sz = size;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
	if (field & (1 << 5))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL;
	if (field & (1 << 6))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
	if (field & (1 << 7))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
	MLX4_GET(dev_cap->bmme_flags, outbox,
		 QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
	if (field & 0x20)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV;
	MLX4_GET(dev_cap->reserved_lkey, outbox,
		 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
	MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
	if (field32 & (1 << 0))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP;
	if (field32 & (1 << 7))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
	if (field & (1 << 6))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN);
	if (field & (1 << 3))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS;
	MLX4_GET(dev_cap->max_icm_sz, outbox,
		 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
	if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
		MLX4_GET(dev_cap->max_counters, outbox,
			 QUERY_DEV_CAP_MAX_COUNTERS_OFFSET);

	MLX4_GET(field32, outbox,
		 QUERY_DEV_CAP_MAD_DEMUX_OFFSET);
	if (field32 & (1 << 0))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_MAD_DEMUX;

	MLX4_GET(dev_cap->dmfs_high_rate_qpn_base, outbox,
		 QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET);
	dev_cap->dmfs_high_rate_qpn_base &= MGM_QPN_MASK;
	MLX4_GET(dev_cap->dmfs_high_rate_qpn_range, outbox,
		 QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET);
	dev_cap->dmfs_high_rate_qpn_range &= MGM_QPN_MASK;

	MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
	if (field32 & (1 << 16))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP;
	if (field32 & (1 << 26))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL;
	if (field32 & (1 << 20))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM;
	if (field32 & (1 << 21))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_80_VFS;

	for (i = 1; i <= dev_cap->num_ports; i++) {
		err = mlx4_QUERY_PORT(dev, i, dev_cap->port_cap + i);
		if (err)
			goto out;
	}

	/*
	 * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
	 * we can't use any EQs whose doorbell falls on that page,
	 * even if the EQ itself isn't reserved.
	 */
	if (dev_cap->num_sys_eqs == 0)
		dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
					    dev_cap->reserved_eqs);
	else
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SYS_EQS;

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
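/* Debug dump of everything mlx4_QUERY_DEV_CAP parsed. */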
void mlx4_dev_cap_dump(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	if (dev_cap->bf_reg_size > 0)
		mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
			 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
	else
		mlx4_dbg(dev, "BlueFlame not available\n");

	mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
		 dev_cap->bmme_flags, dev_cap->reserved_lkey);
	mlx4_dbg(dev, "Max ICM size %lld MB\n",
		 (unsigned long long) dev_cap->max_icm_sz >> 20);
	mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
		 dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz);
	mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
		 dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz);
	mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
		 dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz);
	mlx4_dbg(dev, "Num sys EQs: %d, max EQs: %d, reserved EQs: %d, entry size: %d\n",
		 dev_cap->num_sys_eqs, dev_cap->max_eqs, dev_cap->reserved_eqs,
		 dev_cap->eqc_entry_sz);
	mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
		 dev_cap->reserved_mrws, dev_cap->reserved_mtts);
	mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
		 dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars);
	mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
		 dev_cap->max_qp_per_mcg, dev_cap->reserved_mgms);
	mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
		 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
	mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
		 dev_cap->local_ca_ack_delay, 128 << dev_cap->port_cap[1].ib_mtu,
		 dev_cap->port_cap[1].max_port_width);
	mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
		 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
	mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
		 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
	mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
	mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters);
	mlx4_dbg(dev, "Max RSS Table size: %d\n", dev_cap->max_rss_tbl_sz);
	mlx4_dbg(dev, "DMFS high rate steer QPn base: %d\n",
		 dev_cap->dmfs_high_rate_qpn_base);
	mlx4_dbg(dev, "DMFS high rate steer QPn range: %d\n",
		 dev_cap->dmfs_high_rate_qpn_range);
	dump_dev_cap_flags(dev, dev_cap->flags);
	dump_dev_cap_flags2(dev, dev_cap->flags2);
}
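/* QUERY_PORT, with a fallback for firmware that predates command interface
 * revision 3 (MLX4_FLAG_OLD_PORT_CMDS, set in mlx4_QUERY_FW below): on such
 * firmware the per-port limits are still read from the QUERY_DEV_CAP dump
 * rather than from the dedicated port command.
 */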
int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field;
	u32 field32;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
				   MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_NATIVE);

		if (err)
			goto out;

		MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
		port_cap->max_vl = field >> 4;
		MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
		port_cap->ib_mtu = field >> 4;
		port_cap->max_port_width = field & 0xf;
		MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
		port_cap->max_gids = 1 << (field & 0xf);
		MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET);
		port_cap->max_pkeys = 1 << (field & 0xf);
	} else {
#define QUERY_PORT_SUPPORTED_TYPE_OFFSET	0x00
#define QUERY_PORT_MTU_OFFSET			0x01
#define QUERY_PORT_ETH_MTU_OFFSET		0x02
#define QUERY_PORT_WIDTH_OFFSET		0x06
#define QUERY_PORT_MAX_GID_PKEY_OFFSET		0x07
#define QUERY_PORT_MAX_MACVLAN_OFFSET		0x0a
#define QUERY_PORT_MAX_VL_OFFSET		0x0b
#define QUERY_PORT_MAC_OFFSET			0x10
#define QUERY_PORT_TRANS_VENDOR_OFFSET		0x18
#define QUERY_PORT_WAVELENGTH_OFFSET		0x1c
#define QUERY_PORT_TRANS_CODE_OFFSET		0x20

		err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0, MLX4_CMD_QUERY_PORT,
				   MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
		if (err)
			goto out;

		MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
		port_cap->supported_port_types = field & 3;
		port_cap->suggested_type = (field >> 3) & 1;
		port_cap->default_sense = (field >> 4) & 1;
		port_cap->dmfs_optimized_state = (field >> 5) & 1;
		MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
		port_cap->ib_mtu = field & 0xf;
		MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
		port_cap->max_port_width = field & 0xf;
		MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
		port_cap->max_gids = 1 << (field >> 4);
		port_cap->max_pkeys = 1 << (field & 0xf);
		MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
		port_cap->max_vl = field & 0xf;
		MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
		port_cap->log_max_macs = field & 0xf;
		port_cap->log_max_vlans = field >> 4;
		MLX4_GET(port_cap->eth_mtu, outbox, QUERY_PORT_ETH_MTU_OFFSET);
		MLX4_GET(port_cap->def_mac, outbox, QUERY_PORT_MAC_OFFSET);
		MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET);
		port_cap->trans_type = field32 >> 24;
		port_cap->vendor_oui = field32 & 0xffffff;
		MLX4_GET(port_cap->wavelength, outbox, QUERY_PORT_WAVELENGTH_OFFSET);
		MLX4_GET(port_cap->trans_code, outbox, QUERY_PORT_TRANS_CODE_OFFSET);
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
#define DEV_CAP_EXT_2_FLAG_VLAN_CONTROL (1 << 26)
#define DEV_CAP_EXT_2_FLAG_80_VFS	(1 << 21)
#define DEV_CAP_EXT_2_FLAG_FSM		(1 << 20)
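/* PF-side QUERY_DEV_CAP handler for slaves: run the real query, then edit
 * the result so a guest never sees capabilities the PF keeps to itself
 * (timestamping, VXLAN offloads, BlueFlame, MW type 2, flow steering knobs,
 * host-side virtualization flags) and rewrite the WoL and port-count fields
 * in terms of the slave's own port numbering.
 */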
int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	u64	flags;
	int	err = 0;
	u8	field;
	u32	bmme_flags, field32;
	int	real_port;
	int	slave_port;
	int	first_port;
	struct mlx4_active_ports actv_ports;

	err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		return err;

	/* add port mng change event capability and disable mw type 1
	 * unconditionally to slaves
	 */
	MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV;
	flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW;
	actv_ports = mlx4_get_active_ports(dev, slave);
	first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
	for (slave_port = 0, real_port = first_port;
	     real_port < first_port +
	     bitmap_weight(actv_ports.ports, dev->caps.num_ports);
	     ++real_port, ++slave_port) {
		if (flags & (MLX4_DEV_CAP_FLAG_WOL_PORT1 << real_port))
			flags |= MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port;
		else
			flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
	}
	for (; slave_port < dev->caps.num_ports; ++slave_port)
		flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
	MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);

	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VL_PORT_OFFSET);
	field &= ~0x0F;
	field |= bitmap_weight(actv_ports.ports, dev->caps.num_ports) & 0x0F;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VL_PORT_OFFSET);

	/* For guests, disable timestamp */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
	field &= 0x7f;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);

	/* For guests, disable vxlan tunneling */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN);
	field &= 0xf7;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN);

	/* For guests, report Blueflame disabled */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
	field &= 0x7f;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);

	/* For guests, disable mw type 2 */
	MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
	bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
	MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);

	/* turn off device-managed steering capability if not enabled */
	if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
		MLX4_GET(field, outbox->buf,
			 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
		field &= 0x7f;
		MLX4_PUT(outbox->buf, field,
			 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
	}

	/* turn off ipoib managed steering for guests */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
	field &= ~0x80;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);

	/* turn off host side virt features (VST, FSM, etc) for guests */
	MLX4_GET(field32, outbox->buf, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
	field32 &= ~(DEV_CAP_EXT_2_FLAG_VLAN_CONTROL | DEV_CAP_EXT_2_FLAG_80_VFS |
		     DEV_CAP_EXT_2_FLAG_FSM);
	MLX4_PUT(outbox->buf, field32, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);

	return 0;
}
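/* PF-side QUERY_PORT handler: check that the guest really issued the general
 * port query, translate the slave's port number, then patch the response
 * with the VF's administered MAC, admin link state and table sizes.
 */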
int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 def_mac;
	u8 port_type;
	u16 short_field;
	int err;
	int admin_link_state;
	int port = mlx4_slave_convert_port(dev, slave,
					   vhcr->in_modifier & 0xFF);

#define MLX4_VF_PORT_NO_LINK_SENSE_MASK	0xE0
#define MLX4_PORT_LINK_UP_MASK		0x80
#define QUERY_PORT_CUR_MAX_PKEY_OFFSET	0x0c
#define QUERY_PORT_CUR_MAX_GID_OFFSET	0x0e

	if (port < 0)
		return -EINVAL;

	/* Protect against untrusted guests: enforce that this is the
	 * QUERY_PORT general query.
	 */
	if (vhcr->op_modifier || vhcr->in_modifier & ~0xFF)
		return -EINVAL;

	vhcr->in_modifier = port;

	err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_NATIVE);

	if (!err && dev->caps.function != slave) {
		def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
		MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);

		/* get port type - currently only eth is enabled */
		MLX4_GET(port_type, outbox->buf,
			 QUERY_PORT_SUPPORTED_TYPE_OFFSET);

		/* No link sensing allowed */
		port_type &= MLX4_VF_PORT_NO_LINK_SENSE_MASK;
		/* set port type to currently operating port type */
		port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3);

		admin_link_state = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.link_state;
		if (IFLA_VF_LINK_STATE_ENABLE == admin_link_state)
			port_type |= MLX4_PORT_LINK_UP_MASK;
		else if (IFLA_VF_LINK_STATE_DISABLE == admin_link_state)
			port_type &= ~MLX4_PORT_LINK_UP_MASK;

		MLX4_PUT(outbox->buf, port_type,
			 QUERY_PORT_SUPPORTED_TYPE_OFFSET);

		if (dev->caps.port_type[vhcr->in_modifier] == MLX4_PORT_TYPE_ETH)
			short_field = mlx4_get_slave_num_gids(dev, slave, port);
		else
			short_field = 1; /* slave max gids */
		MLX4_PUT(outbox->buf, short_field,
			 QUERY_PORT_CUR_MAX_GID_OFFSET);

		short_field = dev->caps.pkey_table_len[vhcr->in_modifier];
		MLX4_PUT(outbox->buf, short_field,
			 QUERY_PORT_CUR_MAX_PKEY_OFFSET);
	}

	return err;
}
int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port,
				    int *gid_tbl_len, int *pkey_tbl_len)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_NATIVE);
	if (err)
		goto out;

	outbox = mailbox->buf;

	MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_GID_OFFSET);
	*gid_tbl_len = field;

	MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_PKEY_OFFSET);
	*pkey_tbl_len = field;

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_get_slave_pkey_gid_tbl_len);
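/* mlx4_map_cmd hands firmware a scatter list of ICM chunks.  Each mailbox
 * entry is two big-endian 64-bit words - the virtual address, then the
 * physical address with the log2 page size folded into its low bits - so one
 * mailbox batches MLX4_MAILBOX_SIZE / 16 = 256 translations per command.
 * "ts" accumulates KB (1 << (lg - 10)) and "tc" counts chunks for the final
 * debug summary.
 */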
int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_icm_iter iter;
	__be64 *pages;
	int lg;
	int nent = 0;
	int i;
	int err = 0;
	int ts = 0, tc = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	pages = mailbox->buf;

	for (mlx4_icm_first(icm, &iter);
	     !mlx4_icm_last(&iter);
	     mlx4_icm_next(&iter)) {
		/*
		 * We have to pass pages that are aligned to their
		 * size, so find the least significant 1 in the
		 * address or size and use that as our log2 size.
		 */
		lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
		if (lg < MLX4_ICM_PAGE_SHIFT) {
			mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx)\n",
				  MLX4_ICM_PAGE_SIZE,
				  (unsigned long long) mlx4_icm_addr(&iter),
				  mlx4_icm_size(&iter));
			err = -EINVAL;
			goto out;
		}

		for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) {
			if (virt != -1) {
				pages[nent * 2] = cpu_to_be64(virt);
				virt += 1 << lg;
			}

			pages[nent * 2 + 1] =
				cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) |
					    (lg - MLX4_ICM_PAGE_SHIFT));
			ts += 1 << (lg - 10);
			++tc;

			if (++nent == MLX4_MAILBOX_SIZE / 16) {
				err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
						MLX4_CMD_TIME_CLASS_B,
						MLX4_CMD_NATIVE);
				if (err)
					goto out;
				nent = 0;
			}
		}
	}

	if (nent)
		err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
			       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	switch (op) {
	case MLX4_CMD_MAP_FA:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW\n", tc, ts);
		break;
	case MLX4_CMD_MAP_ICM_AUX:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux\n", tc, ts);
		break;
	case MLX4_CMD_MAP_ICM:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM\n",
			 tc, ts, (unsigned long long) virt - (ts << 10));
		break;
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
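/* Thin wrappers over the mapping helper and the bare firmware commands:
 * MAP_FA pins the firmware area itself (no virtual address, hence virt ==
 * -1), UNMAP_FA releases it, and RUN_FW starts the loaded firmware image.
 */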
int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1);
}

int mlx4_UNMAP_FA(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

int mlx4_RUN_FW(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
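/* QUERY_FW pulls the firmware version, command interface revision, and the
 * BAR locations of the catastrophic error buffer, clear-interrupt register,
 * communication channel and internal clock.  Note the swap below: firmware
 * reports the subminor version in a more significant half-word than the
 * minor version.
 */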
int mlx4_QUERY_FW(struct mlx4_dev *dev)
{
	struct mlx4_fw  *fw  = &mlx4_priv(dev)->fw;
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u64 fw_ver;
	u16 cmd_if_rev;
	u8 lg;
	int err = 0;

#define QUERY_FW_OUT_SIZE             0x100
#define QUERY_FW_VER_OFFSET            0x00
#define QUERY_FW_PPF_ID		       0x09
#define QUERY_FW_CMD_IF_REV_OFFSET     0x0a
#define QUERY_FW_MAX_CMD_OFFSET        0x0f
#define QUERY_FW_ERR_START_OFFSET      0x30
#define QUERY_FW_ERR_SIZE_OFFSET       0x38
#define QUERY_FW_ERR_BAR_OFFSET        0x3c

#define QUERY_FW_SIZE_OFFSET           0x00
#define QUERY_FW_CLR_INT_BASE_OFFSET   0x20
#define QUERY_FW_CLR_INT_BAR_OFFSET    0x28

#define QUERY_FW_COMM_BASE_OFFSET      0x40
#define QUERY_FW_COMM_BAR_OFFSET       0x48

#define QUERY_FW_CLOCK_OFFSET	       0x50
#define QUERY_FW_CLOCK_BAR	       0x58

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET);
	/*
	 * FW subminor version is at more significant bits than minor
	 * version, so swap here.
	 */
	dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) |
		((fw_ver & 0xffff0000ull) >> 16) |
		((fw_ver & 0x0000ffffull) << 16);

	MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
	dev->caps.function = lg;

	if (mlx4_is_slave(dev))
		goto out;

	MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
	if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
	    cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
		mlx4_err(dev, "Installed FW has unsupported command interface revision %d\n",
			 cmd_if_rev);
		mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
			 (int) (dev->caps.fw_ver >> 32),
			 (int) (dev->caps.fw_ver >> 16) & 0xffff,
			 (int) dev->caps.fw_ver & 0xffff);
		mlx4_err(dev, "This driver version supports only revisions %d to %d\n",
			 MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
		err = -ENODEV;
		goto out;
	}

	if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS)
		dev->flags |= MLX4_FLAG_OLD_PORT_CMDS;

	MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
	cmd->max_cmds = 1 << lg;

	mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n",
		 (int) (dev->caps.fw_ver >> 32),
		 (int) (dev->caps.fw_ver >> 16) & 0xffff,
		 (int) dev->caps.fw_ver & 0xffff,
		 cmd_if_rev, cmd->max_cmds);

	MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET);
	MLX4_GET(fw->catas_size, outbox, QUERY_FW_ERR_SIZE_OFFSET);
	MLX4_GET(fw->catas_bar, outbox, QUERY_FW_ERR_BAR_OFFSET);
	fw->catas_bar = (fw->catas_bar >> 6) * 2;

	mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n",
		 (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar);

	MLX4_GET(fw->fw_pages, outbox, QUERY_FW_SIZE_OFFSET);
	MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
	MLX4_GET(fw->clr_int_bar, outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
	fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;

	MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET);
	MLX4_GET(fw->comm_bar, outbox, QUERY_FW_COMM_BAR_OFFSET);
	fw->comm_bar = (fw->comm_bar >> 6) * 2;
	mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n",
		 fw->comm_bar, fw->comm_base);
	mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);

	MLX4_GET(fw->clock_offset, outbox, QUERY_FW_CLOCK_OFFSET);
	MLX4_GET(fw->clock_bar, outbox, QUERY_FW_CLOCK_BAR);
	fw->clock_bar = (fw->clock_bar >> 6) * 2;
	mlx4_dbg(dev, "Internal clock bar:%d offset:0x%llx\n",
		 fw->clock_bar, fw->clock_offset);

	/*
	 * Round up number of system pages needed in case
	 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
	 */
	fw->fw_pages =
		ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
		(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);

	mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n",
		 (unsigned long long) fw->clr_int_base, fw->clr_int_bar);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	u8 *outbuf;
	int err;

	outbuf = outbox->buf;
	err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		return err;

	/* for slaves, set pci PPF ID to invalid and zero out everything
	 * else except FW version */
	outbuf[0] = outbuf[1] = 0;
	memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8);
	outbuf[QUERY_FW_PPF_ID] = MLX4_INVALID_SLAVE_ID;

	return 0;
}
static void get_board_id(void *vsd, char *board_id)
{
	int i;

#define VSD_OFFSET_SIG1		0x00
#define VSD_OFFSET_SIG2		0xde
#define VSD_OFFSET_MLX_BOARD_ID	0xd0
#define VSD_OFFSET_TS_BOARD_ID	0x20

#define VSD_SIGNATURE_TOPSPIN	0x5ad

	memset(board_id, 0, MLX4_BOARD_ID_LEN);

	if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
	    be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
		strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
	} else {
		/*
		 * The board ID is a string but the firmware byte
		 * swaps each 4-byte word before passing it back to
		 * us.  Therefore we need to swab it before printing.
		 */
		for (i = 0; i < 4; ++i)
			((u32 *) board_id)[i] =
				swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
	}
}
int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	int err;

#define QUERY_ADAPTER_OUT_SIZE             0x100
#define QUERY_ADAPTER_INTA_PIN_OFFSET      0x10
#define QUERY_ADAPTER_VSD_OFFSET           0x20

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);

	get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
		     adapter->board_id);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
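/* INIT_HCA takes a 0x200-byte parameter block describing where each ICM
 * context table lives (QPC, SRQC, CQC, EQC, RDMARC, MCG, MPT, MTT) plus
 * global switches - endianness, QoS, counters and the CQE/EQE format
 * negotiated from the capabilities read earlier.
 */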
1565 int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
1567 struct mlx4_cmd_mailbox *mailbox;
1570 static const u8 a0_dmfs_hw_steering[] = {
1571 [MLX4_STEERING_DMFS_A0_DEFAULT] = 0,
1572 [MLX4_STEERING_DMFS_A0_DYNAMIC] = 1,
1573 [MLX4_STEERING_DMFS_A0_STATIC] = 2,
1574 [MLX4_STEERING_DMFS_A0_DISABLE] = 3
1577 #define INIT_HCA_IN_SIZE 0x200
1578 #define INIT_HCA_VERSION_OFFSET 0x000
1579 #define INIT_HCA_VERSION 2
1580 #define INIT_HCA_VXLAN_OFFSET 0x0c
1581 #define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e
1582 #define INIT_HCA_FLAGS_OFFSET 0x014
1583 #define INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET 0x018
1584 #define INIT_HCA_QPC_OFFSET 0x020
1585 #define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10)
1586 #define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17)
1587 #define INIT_HCA_SRQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x28)
1588 #define INIT_HCA_LOG_SRQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x2f)
1589 #define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30)
1590 #define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37)
1591 #define INIT_HCA_EQE_CQE_OFFSETS (INIT_HCA_QPC_OFFSET + 0x38)
1592 #define INIT_HCA_EQE_CQE_STRIDE_OFFSET (INIT_HCA_QPC_OFFSET + 0x3b)
1593 #define INIT_HCA_ALTC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40)
1594 #define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50)
1595 #define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60)
1596 #define INIT_HCA_LOG_EQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x67)
1597 #define INIT_HCA_NUM_SYS_EQS_OFFSET (INIT_HCA_QPC_OFFSET + 0x6a)
1598 #define INIT_HCA_RDMARC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x70)
1599 #define INIT_HCA_LOG_RD_OFFSET (INIT_HCA_QPC_OFFSET + 0x77)
1600 #define INIT_HCA_MCAST_OFFSET 0x0c0
1601 #define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00)
1602 #define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
1603 #define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16)
1604 #define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18)
1605 #define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
1606 #define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN 0x6
1607 #define INIT_HCA_FS_PARAM_OFFSET 0x1d0
1608 #define INIT_HCA_FS_BASE_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x00)
1609 #define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x12)
1610 #define INIT_HCA_FS_A0_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x18)
1611 #define INIT_HCA_FS_LOG_TABLE_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x1b)
1612 #define INIT_HCA_FS_ETH_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x21)
1613 #define INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x22)
1614 #define INIT_HCA_FS_IB_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x25)
1615 #define INIT_HCA_FS_IB_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x26)
1616 #define INIT_HCA_TPT_OFFSET 0x0f0
1617 #define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00)
1618 #define INIT_HCA_TPT_MW_OFFSET (INIT_HCA_TPT_OFFSET + 0x08)
1619 #define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b)
1620 #define INIT_HCA_MTT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x10)
1621 #define INIT_HCA_CMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x18)
1622 #define INIT_HCA_UAR_OFFSET 0x120
1623 #define INIT_HCA_LOG_UAR_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0a)
1624 #define INIT_HCA_UAR_PAGE_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0b)
1626 mailbox = mlx4_alloc_cmd_mailbox(dev);
1627 if (IS_ERR(mailbox))
1628 return PTR_ERR(mailbox);
1629 inbox = mailbox->buf;
1631 *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;
1633 *((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
1634 (ilog2(cache_line_size()) - 4) << 5;
#if defined(__LITTLE_ENDIAN)
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
#elif defined(__BIG_ENDIAN)
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1);
#else
#error Host endianness not defined
#endif
	/* Check port for UD address vector: */
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1);

	/* Enable IPoIB checksumming if we can: */
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);

	/* Enable QoS support if module parameter set */
	if (enable_qos)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);

	/* enable counters */
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4);

	/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) {
		*(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29);
		dev->caps.eqe_size   = 64;
		dev->caps.eqe_factor = 1;
	} else {
		dev->caps.eqe_size   = 32;
		dev->caps.eqe_factor = 0;
	}

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) {
		*(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30);
		dev->caps.cqe_size   = 64;
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
	} else {
		dev->caps.cqe_size   = 32;
	}

	/* CX3 is capable of extending CQEs/EQEs to strides larger than 64B */
	if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) &&
	    (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE)) {
		dev->caps.eqe_size = cache_line_size();
		dev->caps.cqe_size = cache_line_size();
		dev->caps.eqe_factor = 0;
		MLX4_PUT(inbox, (u8)((ilog2(dev->caps.eqe_size) - 5) << 4 |
				      (ilog2(dev->caps.eqe_size) - 5)),
			 INIT_HCA_EQE_CQE_STRIDE_OFFSET);

		/* User still needs to know it must support CQEs > 32B */
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
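		/* Worked example (illustrative, not from the original
		 * source): a 128-byte cache line gives ilog2(128) - 5 = 2,
		 * so both nibbles of the stride byte hold 2 and the value
		 * written is 0x22 (EQE stride in the high nibble, CQE
		 * stride in the low nibble).
		 */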
	}

	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
		*(inbox + INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET / 4) |= cpu_to_be32(1 << 31);

	/* QPC/EEC/CQC/EQC/RDMARC attributes */

	MLX4_PUT(inbox, param->qpc_base,      INIT_HCA_QPC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_qps,   INIT_HCA_LOG_QP_OFFSET);
	MLX4_PUT(inbox, param->srqc_base,     INIT_HCA_SRQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_srqs,  INIT_HCA_LOG_SRQ_OFFSET);
	MLX4_PUT(inbox, param->cqc_base,      INIT_HCA_CQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_cqs,   INIT_HCA_LOG_CQ_OFFSET);
	MLX4_PUT(inbox, param->altc_base,     INIT_HCA_ALTC_BASE_OFFSET);
	MLX4_PUT(inbox, param->auxc_base,     INIT_HCA_AUXC_BASE_OFFSET);
	MLX4_PUT(inbox, param->eqc_base,      INIT_HCA_EQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_eqs,   INIT_HCA_LOG_EQ_OFFSET);
	MLX4_PUT(inbox, param->num_sys_eqs,   INIT_HCA_NUM_SYS_EQS_OFFSET);
	MLX4_PUT(inbox, param->rdmarc_base,   INIT_HCA_RDMARC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);

	/* steering attributes */
	if (dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |=
			cpu_to_be32(1 <<
				    INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN);

		MLX4_PUT(inbox, param->mc_base, INIT_HCA_FS_BASE_OFFSET);
		MLX4_PUT(inbox, param->log_mc_entry_sz,
			 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
		MLX4_PUT(inbox, param->log_mc_table_sz,
			 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
		/* Enable Ethernet flow steering
		 * with UDP unicast and TCP unicast
		 */
		if (dev->caps.dmfs_high_steer_mode !=
		    MLX4_STEERING_DMFS_A0_STATIC)
			MLX4_PUT(inbox,
				 (u8)(MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
				 INIT_HCA_FS_ETH_BITS_OFFSET);
		MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
			 INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET);
		/* Enable IPoIB flow steering
		 * with UDP unicast and TCP unicast
		 */
		MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
			 INIT_HCA_FS_IB_BITS_OFFSET);
		MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
			 INIT_HCA_FS_IB_NUM_ADDRS_OFFSET);

		if (dev->caps.dmfs_high_steer_mode !=
		    MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
			MLX4_PUT(inbox,
				 ((u8)(a0_dmfs_hw_steering[dev->caps.dmfs_high_steer_mode]
				       << 6)),
				 INIT_HCA_FS_A0_OFFSET);
	} else {
		MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
		MLX4_PUT(inbox, param->log_mc_entry_sz,
			 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
		MLX4_PUT(inbox, param->log_mc_hash_sz,
			 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
		MLX4_PUT(inbox, param->log_mc_table_sz,
			 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
		if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0)
			MLX4_PUT(inbox, (u8) (1 << 3),
				 INIT_HCA_UC_STEERING_OFFSET);
	}

	/* TPT attributes */

	MLX4_PUT(inbox, param->dmpt_base,  INIT_HCA_DMPT_BASE_OFFSET);
	MLX4_PUT(inbox, param->mw_enabled, INIT_HCA_TPT_MW_OFFSET);
	MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
	MLX4_PUT(inbox, param->mtt_base,   INIT_HCA_MTT_BASE_OFFSET);
	MLX4_PUT(inbox, param->cmpt_base,  INIT_HCA_CMPT_BASE_OFFSET);

	/* UAR attributes */

	MLX4_PUT(inbox, param->uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET);
	MLX4_PUT(inbox, param->log_uar_sz,  INIT_HCA_LOG_UAR_SZ_OFFSET);

	/* set parser VXLAN attributes */
	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) {
		u8 parser_params = 0;
		MLX4_PUT(inbox, parser_params, INIT_HCA_VXLAN_OFFSET);
	}

	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA,
		       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);

	if (err)
		mlx4_err(dev, "INIT_HCA returns %d\n", err);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
int mlx4_QUERY_HCA(struct mlx4_dev *dev,
		   struct mlx4_init_hca_param *param)
{
	struct mlx4_cmd_mailbox *mailbox;
	__be32 *outbox;
	u32 dword_field;
	int err;
	u8 byte_field;
	static const u8 a0_dmfs_query_hw_steering[] =  {
		[0] = MLX4_STEERING_DMFS_A0_DEFAULT,
		[1] = MLX4_STEERING_DMFS_A0_DYNAMIC,
		[2] = MLX4_STEERING_DMFS_A0_STATIC,
		[3] = MLX4_STEERING_DMFS_A0_DISABLE
	};

#define QUERY_HCA_GLOBAL_CAPS_OFFSET	0x04
#define QUERY_HCA_CORE_CLOCK_OFFSET	0x0c

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
			   MLX4_CMD_QUERY_HCA,
			   MLX4_CMD_TIME_CLASS_B,
			   !mlx4_is_slave(dev));
	if (err)
		goto out;

	MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET);
	MLX4_GET(param->hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET);

	/* QPC/EEC/CQC/EQC/RDMARC attributes */

	MLX4_GET(param->qpc_base,      outbox, INIT_HCA_QPC_BASE_OFFSET);
	MLX4_GET(param->log_num_qps,   outbox, INIT_HCA_LOG_QP_OFFSET);
	MLX4_GET(param->srqc_base,     outbox, INIT_HCA_SRQC_BASE_OFFSET);
	MLX4_GET(param->log_num_srqs,  outbox, INIT_HCA_LOG_SRQ_OFFSET);
	MLX4_GET(param->cqc_base,      outbox, INIT_HCA_CQC_BASE_OFFSET);
	MLX4_GET(param->log_num_cqs,   outbox, INIT_HCA_LOG_CQ_OFFSET);
	MLX4_GET(param->altc_base,     outbox, INIT_HCA_ALTC_BASE_OFFSET);
	MLX4_GET(param->auxc_base,     outbox, INIT_HCA_AUXC_BASE_OFFSET);
	MLX4_GET(param->eqc_base,      outbox, INIT_HCA_EQC_BASE_OFFSET);
	MLX4_GET(param->log_num_eqs,   outbox, INIT_HCA_LOG_EQ_OFFSET);
	MLX4_GET(param->num_sys_eqs,   outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
	MLX4_GET(param->rdmarc_base,   outbox, INIT_HCA_RDMARC_BASE_OFFSET);
	MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);

	MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
	if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
		param->steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
	} else {
		MLX4_GET(byte_field, outbox, INIT_HCA_UC_STEERING_OFFSET);
		if (byte_field & 0x8)
			param->steering_mode = MLX4_STEERING_MODE_B0;
		else
			param->steering_mode = MLX4_STEERING_MODE_A0;
	}

	/* steering attributes */
	if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
		MLX4_GET(param->log_mc_entry_sz, outbox,
			 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
		MLX4_GET(param->log_mc_table_sz, outbox,
			 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
		MLX4_GET(byte_field, outbox,
			 INIT_HCA_FS_A0_OFFSET);
		param->dmfs_high_steer_mode =
			a0_dmfs_query_hw_steering[(byte_field >> 6) & 3];
	} else {
		MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
		MLX4_GET(param->log_mc_entry_sz, outbox,
			 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
		MLX4_GET(param->log_mc_hash_sz, outbox,
			 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
		MLX4_GET(param->log_mc_table_sz, outbox,
			 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
	}

	/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
	MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_OFFSETS);
	if (byte_field & 0x20) /* 64-bytes eqe enabled */
		param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED;
	if (byte_field & 0x40) /* 64-bytes cqe enabled */
		param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED;

	/* CX3 is capable of extending CQEs/EQEs to strides larger than 64B */
	MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_STRIDE_OFFSET);
	if (byte_field) {
		param->dev_cap_enabled |= MLX4_DEV_CAP_EQE_STRIDE_ENABLED;
		param->dev_cap_enabled |= MLX4_DEV_CAP_CQE_STRIDE_ENABLED;
		param->cqe_size = 1 << ((byte_field &
					 MLX4_CQE_SIZE_MASK_STRIDE) + 5);
		param->eqe_size = 1 << (((byte_field &
					  MLX4_EQE_SIZE_MASK_STRIDE) >> 4) + 5);
	}

	/* TPT attributes */

	MLX4_GET(param->dmpt_base,  outbox, INIT_HCA_DMPT_BASE_OFFSET);
	MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET);
	MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
	MLX4_GET(param->mtt_base,   outbox, INIT_HCA_MTT_BASE_OFFSET);
	MLX4_GET(param->cmpt_base,  outbox, INIT_HCA_CMPT_BASE_OFFSET);

	/* UAR attributes */

	MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
	MLX4_GET(param->log_uar_sz,  outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}
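
/* Usage sketch (hypothetical helper, not part of the original driver):
 * a slave typically calls mlx4_QUERY_HCA to learn how the master
 * parameterized INIT_HCA, e.g. to discover the steering mode in use.
 */
static inline bool mlx4_example_uses_dmfs(struct mlx4_dev *dev)
{
	struct mlx4_init_hca_param param;

	memset(&param, 0, sizeof(param));
	if (mlx4_QUERY_HCA(dev, &param))
		return false;	/* query failed; assume no DMFS */
	return param.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;
}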
/* for IB-type ports only in SRIOV mode. Checks that both proxy QP0
 * and real QP0 are active, so that the paravirtualized QP0 is ready
 * to operate */
static int check_qp0_state(struct mlx4_dev *dev, int function, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	/* irrelevant if not infiniband */
	if (priv->mfunc.master.qp0_state[port].proxy_qp0_active &&
	    priv->mfunc.master.qp0_state[port].qp0_active)
		return 1;
	return 0;
}
int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
	int err;

	if (port < 0)
		return -EINVAL;

	if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
		return 0;

	if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
		/* Enable port only if it was previously disabled */
		if (!priv->mfunc.master.init_port_ref[port]) {
			err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
			if (err)
				return err;
		}
		priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
	} else {
		if (slave == mlx4_master_func_num(dev)) {
			if (check_qp0_state(dev, slave, port) &&
			    !priv->mfunc.master.qp0_state[port].port_active) {
				err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
					       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
				if (err)
					return err;
				priv->mfunc.master.qp0_state[port].port_active = 1;
				priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
			}
		} else
			priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
	}
	++priv->mfunc.master.init_port_ref[port];
	return 0;
}
int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *inbox;
	int err;
	u32 flags;
	u16 field;

	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
#define INIT_PORT_IN_SIZE          256
#define INIT_PORT_FLAGS_OFFSET     0x00
#define INIT_PORT_FLAG_SIG         (1 << 18)
#define INIT_PORT_FLAG_NG          (1 << 17)
#define INIT_PORT_FLAG_G0          (1 << 16)
#define INIT_PORT_VL_SHIFT         4
#define INIT_PORT_PORT_WIDTH_SHIFT 8
#define INIT_PORT_MTU_OFFSET       0x04
#define INIT_PORT_MAX_GID_OFFSET   0x06
#define INIT_PORT_MAX_PKEY_OFFSET  0x0a
#define INIT_PORT_GUID0_OFFSET     0x10
#define INIT_PORT_NODE_GUID_OFFSET 0x18
#define INIT_PORT_SI_GUID_OFFSET   0x20

		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);
		inbox = mailbox->buf;

		flags = 0;
		flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
		flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
		MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET);

		field = 128 << dev->caps.ib_mtu_cap[port];
		MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
		field = dev->caps.gid_table_len[port];
		MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
		field = dev->caps.pkey_table_len[port];
		MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);

		err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

		mlx4_free_cmd_mailbox(dev, mailbox);
	} else
		err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
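
/* Usage sketch (hypothetical helper, not part of the original driver):
 * bringing up every port after INIT_HCA succeeds. mlx4_INIT_PORT and
 * mlx4_CLOSE_PORT are exported precisely for callers like this.
 */
static inline int mlx4_example_init_all_ports(struct mlx4_dev *dev)
{
	int port, err;

	for (port = 1; port <= dev->caps.num_ports; port++) {
		err = mlx4_INIT_PORT(dev, port);
		if (err) {
			/* roll back the ports already opened */
			while (--port >= 1)
				mlx4_CLOSE_PORT(dev, port);
			return err;
		}
	}
	return 0;
}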
int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
	int err;

	if (port < 0)
		return -EINVAL;

	if (!(priv->mfunc.master.slave_state[slave].init_port_mask &
	    (1 << port)))
		return 0;

	if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
		if (priv->mfunc.master.init_port_ref[port] == 1) {
			err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
			if (err)
				return err;
		}
		priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
	} else {
		/* infiniband port */
		if (slave == mlx4_master_func_num(dev)) {
			if (!priv->mfunc.master.qp0_state[port].qp0_active &&
			    priv->mfunc.master.qp0_state[port].port_active) {
				err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
					       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
				if (err)
					return err;
				priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
				priv->mfunc.master.qp0_state[port].port_active = 0;
			}
		} else
			priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
	}
	--priv->mfunc.master.init_port_ref[port];
	return 0;
}
int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
{
	return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
{
	return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA,
			MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
}
struct mlx4_config_dev {
	__be32	update_flags;
	__be32	rsvd1[3];
	__be16	vxlan_udp_dport;
	__be16	rsvd2;
	__be32	rsvd3[27];
	__be16	rsvd4;
	u8	rsvd5;
	u8	rx_checksum_val;
};

#define MLX4_VXLAN_UDP_DPORT (1 << 0)
static int mlx4_CONFIG_DEV_set(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
{
	int err;
	struct mlx4_cmd_mailbox *mailbox;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memcpy(mailbox->buf, config_dev, sizeof(*config_dev));

	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_CONFIG_DEV,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
static int mlx4_CONFIG_DEV_get(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
{
	int err;
	struct mlx4_cmd_mailbox *mailbox;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 1, MLX4_CMD_CONFIG_DEV,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	if (!err)
		memcpy(config_dev, mailbox->buf, sizeof(*config_dev));

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
/* Conversion between the HW values and the actual functionality:
 * the HW value is the array index, and the functionality is the
 * set of flags stored at that index.
 */
static const u8 config_dev_csum_flags[] = {
	[0] =	0,
	[1] =	MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP,
	[2] =	MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP	|
		MLX4_RX_CSUM_MODE_L4,
	[3] =	MLX4_RX_CSUM_MODE_L4			|
		MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP	|
		MLX4_RX_CSUM_MODE_MULTI_VLAN
};
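
/* Worked example (illustrative, not from the original source): a
 * rx_checksum_val of 0x32 decodes as mode 2 for port 1 (low nibble:
 * NON_TCP_UDP | L4) and mode 3 for port 2 (high nibble: the full
 * L4 | IP_OK | MULTI_VLAN combination).
 */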
int mlx4_config_dev_retrieval(struct mlx4_dev *dev,
			      struct mlx4_config_dev_params *params)
{
	struct mlx4_config_dev config_dev;
	int err;
	u8 csum_mask;

#define CONFIG_DEV_RX_CSUM_MODE_MASK			0x7
#define CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET	0
#define CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET	4

	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CONFIG_DEV))
		return -ENOTSUPP;

	err = mlx4_CONFIG_DEV_get(dev, &config_dev);
	if (err)
		return err;

	csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET) &
			CONFIG_DEV_RX_CSUM_MODE_MASK;

	if (csum_mask >= sizeof(config_dev_csum_flags)/sizeof(config_dev_csum_flags[0]))
		return -EINVAL;
	params->rx_csum_flags_port_1 = config_dev_csum_flags[csum_mask];

	csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET) &
			CONFIG_DEV_RX_CSUM_MODE_MASK;

	if (csum_mask >= sizeof(config_dev_csum_flags)/sizeof(config_dev_csum_flags[0]))
		return -EINVAL;
	params->rx_csum_flags_port_2 = config_dev_csum_flags[csum_mask];

	params->vxlan_udp_dport = be16_to_cpu(config_dev.vxlan_udp_dport);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_config_dev_retrieval);
int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port)
{
	struct mlx4_config_dev config_dev;

	memset(&config_dev, 0, sizeof(config_dev));
	config_dev.update_flags    = cpu_to_be32(MLX4_VXLAN_UDP_DPORT);
	config_dev.vxlan_udp_dport = udp_port;

	return mlx4_CONFIG_DEV_set(dev, &config_dev);
}
EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port);
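
/* Usage sketch (hypothetical, not part of the original driver): steer
 * the parser at the IANA-assigned VXLAN port. The port travels in
 * big-endian form, hence the htons().
 */
static inline int mlx4_example_set_default_vxlan(struct mlx4_dev *dev)
{
	return mlx4_config_vxlan_port(dev, htons(4789));
}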

int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
{
	int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
			       MLX4_CMD_SET_ICM_SIZE,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (ret)
		return ret;

	/*
	 * Round up number of system pages needed in case
	 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
	 */
	*aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
		(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);

	return 0;
}
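
/* Worked example (illustrative, not from the original source): with
 * 4KB ICM pages and a 64KB system PAGE_SIZE, a firmware answer of
 * 5 ICM pages becomes ALIGN(5, 16) >> 4 = 16 >> 4 = 1 system page.
 * When both page sizes are 4KB the expression is a no-op.
 */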
int mlx4_NOP(struct mlx4_dev *dev)
{
	/* Input modifier of 0x1f means "finish as soon as possible." */
	return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_NATIVE);
}
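
/* Usage sketch (hypothetical, not part of the original driver): NOP is
 * handy as a liveness probe, since a healthy command interface should
 * complete it almost immediately.
 */
static inline bool mlx4_example_fw_alive(struct mlx4_dev *dev)
{
	return mlx4_NOP(dev) == 0;
}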
int mlx4_get_phys_port_id(struct mlx4_dev *dev)
{
	u8 port;
	u32 *outbox;
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_mod;
	u32 guid_hi, guid_lo;
	int err, ret = 0;
#define MOD_STAT_CFG_PORT_OFFSET 8
#define MOD_STAT_CFG_GUID_H	 0x14
#define MOD_STAT_CFG_GUID_L	 0x1c

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	for (port = 1; port <= dev->caps.num_ports; port++) {
		in_mod = port << MOD_STAT_CFG_PORT_OFFSET;
		err = mlx4_cmd_box(dev, 0, mailbox->dma, in_mod, 0x2,
				   MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_NATIVE);
		if (err) {
			mlx4_err(dev, "Failed to get port %d uplink GUID\n",
				 port);
			ret = err;
		} else {
			MLX4_GET(guid_hi, outbox, MOD_STAT_CFG_GUID_H);
			MLX4_GET(guid_lo, outbox, MOD_STAT_CFG_GUID_L);
			dev->caps.phys_port_id[port] = (u64)guid_lo |
						       (u64)guid_hi << 32;
		}
	}
	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}
#define MLX4_WOL_SETUP_MODE (5 << 28)
int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
{
	u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;

	return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
			    MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
			    MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_read);

int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
{
	u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;

	return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_write);
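
/* Usage sketch (hypothetical, not part of the original driver): WoL is
 * updated read-modify-write so unrelated bits survive. The bit name
 * below is an assumption for illustration only; real callers such as
 * the Ethernet driver define their own flag constants.
 */
static inline int mlx4_example_enable_wol(struct mlx4_dev *dev, int port)
{
	u64 config;
	int err;
#define MLX4_EXAMPLE_WOL_ENABLE_BIT (1ULL << 62)	/* hypothetical bit */

	err = mlx4_wol_read(dev, &config, port);
	if (err)
		return err;
	config |= MLX4_EXAMPLE_WOL_ENABLE_BIT;
	return mlx4_wol_write(dev, config, port);
}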
enum {
	ADD_TO_MCG = 0x26,
};

void mlx4_opreq_action(struct work_struct *work)
{
	struct mlx4_priv *priv = container_of(work, struct mlx4_priv,
					      opreq_task);
	struct mlx4_dev *dev = &priv->dev;
	int num_tasks = atomic_read(&priv->opreq_count);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 *outbox;
	u32 modifier;
	u16 token, type;
	int i, err;
	u32 num_qps;
	struct mlx4_qp qp;
	u8 rem_mcg, prot;

#define GET_OP_REQ_MODIFIER_OFFSET	0x08
#define GET_OP_REQ_TOKEN_OFFSET		0x14
#define GET_OP_REQ_TYPE_OFFSET		0x1a
#define GET_OP_REQ_DATA_OFFSET		0x20

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n");
		return;
	}
	outbox = mailbox->buf;

	while (num_tasks) {
		err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
				   MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_NATIVE);
		if (err) {
			mlx4_err(dev, "Failed to retrieve required operation: %d\n",
				 err);
			goto out;
		}
		MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
		MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
		MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET);
		type &= 0xfff;

		switch (type) {
		case ADD_TO_MCG:
			if (dev->caps.steering_mode ==
			    MLX4_STEERING_MODE_DEVICE_MANAGED) {
				mlx4_warn(dev, "ADD MCG operation is not supported in DEVICE_MANAGED steering mode\n");
				err = EPERM;
				break;
			}
			mgm = (struct mlx4_mgm *)((u8 *)(outbox) +
						  GET_OP_REQ_DATA_OFFSET);
			num_qps = be32_to_cpu(mgm->members_count) &
				  MGM_QPN_MASK;
			rem_mcg = ((u8 *)(&mgm->members_count))[0] & 1;
			prot = ((u8 *)(&mgm->members_count))[0] >> 6;

			for (i = 0; i < num_qps; i++) {
				qp.qpn = be32_to_cpu(mgm->qp[i]);
				if (rem_mcg)
					err = mlx4_multicast_detach(dev, &qp,
								    mgm->gid,
								    prot, 0);
				else
					err = mlx4_multicast_attach(dev, &qp,
								    mgm->gid,
								    mgm->gid[5],
								    0, prot,
								    NULL);
				if (err)
					break;
			}
			break;
		default:
			mlx4_warn(dev, "Bad type for required operation\n");
			err = EINVAL;
			break;
		}
		err = mlx4_cmd(dev, 0, ((u32) err |
					(__force u32)cpu_to_be32(token) << 16),
			       1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
			       MLX4_CMD_NATIVE);
		if (err) {
			mlx4_err(dev, "Failed to acknowledge required request: %d\n",
				 err);
			goto out;
		}
		memset(outbox, 0, 0xffc);
		num_tasks = atomic_dec_return(&priv->opreq_count);
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
}
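
/* Context sketch (an assumption about how the driver is wired together,
 * not code from this file): the EQ handler is expected to bump
 * opreq_count and queue opreq_task when firmware raises an "operation
 * required" event, roughly:
 *
 *	atomic_inc(&priv->opreq_count);
 *	queue_work(mlx4_wq, &priv->opreq_task);
 *
 * which is why this handler loops until opreq_count drains to zero.
 */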
static int mlx4_check_smp_firewall_active(struct mlx4_dev *dev,
					  struct mlx4_cmd_mailbox *mailbox)
{
#define MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET		0x10
#define MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET		0x20
#define MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET		0x40
#define MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET	0x70

	u32 set_attr_mask, getresp_attr_mask;
	u32 trap_attr_mask, traprepress_attr_mask;

	MLX4_GET(set_attr_mask, mailbox->buf,
		 MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET);
	mlx4_dbg(dev, "SMP firewall set_attribute_mask = 0x%x\n",
		 set_attr_mask);

	MLX4_GET(getresp_attr_mask, mailbox->buf,
		 MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET);
	mlx4_dbg(dev, "SMP firewall getresp_attribute_mask = 0x%x\n",
		 getresp_attr_mask);

	MLX4_GET(trap_attr_mask, mailbox->buf,
		 MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET);
	mlx4_dbg(dev, "SMP firewall trap_attribute_mask = 0x%x\n",
		 trap_attr_mask);

	MLX4_GET(traprepress_attr_mask, mailbox->buf,
		 MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET);
	mlx4_dbg(dev, "SMP firewall traprepress_attribute_mask = 0x%x\n",
		 traprepress_attr_mask);

	if (set_attr_mask && getresp_attr_mask && trap_attr_mask &&
	    traprepress_attr_mask)
		return 1;

	return 0;
}
int mlx4_config_mad_demux(struct mlx4_dev *dev)
{
	struct mlx4_cmd_mailbox *mailbox;
	int secure_host_active;
	int err;

	/* Check if mad_demux is supported */
	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_MAD_DEMUX))
		return 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		mlx4_warn(dev, "Failed to allocate mailbox for cmd MAD_DEMUX");
		return -ENOMEM;
	}

	/* Query mad_demux to find out which MADs are handled by internal sma */
	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0x01 /* subn mgmt class */,
			   MLX4_CMD_MAD_DEMUX_QUERY_RESTR, MLX4_CMD_MAD_DEMUX,
			   MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	if (err) {
		mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: query restrictions failed (%d)\n",
			  err);
		goto out;
	}

	secure_host_active = mlx4_check_smp_firewall_active(dev, mailbox);

	/* Config mad_demux to handle all MADs returned by the query above */
	err = mlx4_cmd(dev, mailbox->dma, 0x01 /* subn mgmt class */,
		       MLX4_CMD_MAD_DEMUX_CONFIG, MLX4_CMD_MAD_DEMUX,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	if (err) {
		mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: configure failed (%d)\n", err);
		goto out;
	}

	if (secure_host_active)
		mlx4_warn(dev, "HCA operating in secure-host mode. SMP firewall activated.\n");
out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
/* Access Reg commands */
enum mlx4_access_reg_masks {
	MLX4_ACCESS_REG_STATUS_MASK = 0x7f,
	MLX4_ACCESS_REG_METHOD_MASK = 0x7f,
	MLX4_ACCESS_REG_LEN_MASK = 0x7ff
};

struct mlx4_access_reg {
	__be16 constant1;
	u8 status;
	u8 resrvd1;
	__be16 reg_id;
	u8 method;
	u8 constant2;
	__be32 resrvd2[2];
	__be16 len_const;
	__be16 resrvd3;
#define MLX4_ACCESS_REG_HEADER_SIZE (20)
	u8 reg_data[MLX4_MAILBOX_SIZE-MLX4_ACCESS_REG_HEADER_SIZE];
} __attribute__((__packed__));
/**
 * mlx4_ACCESS_REG - Generic access reg command.
 * @dev: mlx4_dev.
 * @reg_id: register ID to access.
 * @method: Access method Read/Write.
 * @reg_len: register length to Read/Write in bytes.
 * @reg_data: reg_data pointer to Read/Write From/To.
 *
 * Access ConnectX registers FW command.
 * Returns 0 on success and copies outbox mlx4_access_reg data
 * field into reg_data or a negative error code.
 */
static int mlx4_ACCESS_REG(struct mlx4_dev *dev, u16 reg_id,
			   enum mlx4_access_reg_method method,
			   u16 reg_len, void *reg_data)
{
	struct mlx4_cmd_mailbox *inbox, *outbox;
	struct mlx4_access_reg *inbuf, *outbuf;
	int err;

	inbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(inbox))
		return PTR_ERR(inbox);

	outbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(outbox)) {
		mlx4_free_cmd_mailbox(dev, inbox);
		return PTR_ERR(outbox);
	}

	inbuf = inbox->buf;
	outbuf = outbox->buf;

	inbuf->constant1 = cpu_to_be16(0x1<<11 | 0x4);
	inbuf->constant2 = 0x1;
	inbuf->reg_id = cpu_to_be16(reg_id);
	inbuf->method = method & MLX4_ACCESS_REG_METHOD_MASK;

	reg_len = min(reg_len, (u16)(sizeof(inbuf->reg_data)));
	inbuf->len_const =
		cpu_to_be16(((reg_len/4 + 1) & MLX4_ACCESS_REG_LEN_MASK) |
			    ((0x3) << 12));

	memcpy(inbuf->reg_data, reg_data, reg_len);
	err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, 0, 0,
			   MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	if (outbuf->status & MLX4_ACCESS_REG_STATUS_MASK) {
		err = outbuf->status & MLX4_ACCESS_REG_STATUS_MASK;
		mlx4_err(dev,
			 "MLX4_CMD_ACCESS_REG(%x) returned REG status (%x)\n",
			 reg_id, err);
		goto out;
	}

	memcpy(reg_data, outbuf->reg_data, reg_len);
out:
	mlx4_free_cmd_mailbox(dev, inbox);
	mlx4_free_cmd_mailbox(dev, outbox);
	return err;
}
/* ConnectX registers IDs */
enum {
	MLX4_REG_ID_PTYS = 0x5004,
};
/**
 * mlx4_ACCESS_PTYS_REG - Access PTYs (Port Type and Speed)
 * register
 * @dev: mlx4_dev.
 * @method: Access method Read/Write.
 * @ptys_reg: PTYS register data pointer.
 *
 * Access ConnectX PTYS register, to Read/Write Port Type/Speed
 * configuration.
 * Returns 0 on success or a negative error code.
 */
int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev,
			 enum mlx4_access_reg_method method,
			 struct mlx4_ptys_reg *ptys_reg)
{
	return mlx4_ACCESS_REG(dev, MLX4_REG_ID_PTYS,
			       method, sizeof(*ptys_reg), ptys_reg);
}
EXPORT_SYMBOL_GPL(mlx4_ACCESS_PTYS_REG);
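
/* Usage sketch (hypothetical helper, not part of the original driver):
 * querying the PTYS register for one port. MLX4_ACCESS_REG_QUERY is
 * assumed to be the read method of enum mlx4_access_reg_method.
 */
static inline int mlx4_example_query_ptys(struct mlx4_dev *dev, u8 port,
					  struct mlx4_ptys_reg *ptys_reg)
{
	memset(ptys_reg, 0, sizeof(*ptys_reg));
	ptys_reg->local_port = port;
	return mlx4_ACCESS_PTYS_REG(dev, MLX4_ACCESS_REG_QUERY, ptys_reg);
}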
int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_access_reg *inbuf = inbox->buf;
	u8 method = inbuf->method & MLX4_ACCESS_REG_METHOD_MASK;
	u16 reg_id = be16_to_cpu(inbuf->reg_id);

	if (slave != mlx4_master_func_num(dev) &&
	    method == MLX4_ACCESS_REG_WRITE)
		return -EPERM;

	if (reg_id == MLX4_REG_ID_PTYS) {
		struct mlx4_ptys_reg *ptys_reg =
			(struct mlx4_ptys_reg *)inbuf->reg_data;

		ptys_reg->local_port =
			mlx4_slave_convert_port(dev, slave,
						ptys_reg->local_port);
	}

	return mlx4_cmd_box(dev, inbox->dma, outbox->dma, vhcr->in_modifier,
			    0, MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
			    MLX4_CMD_WRAPPED);
}