2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 #include <linux/etherdevice.h>
36 #include <linux/mlx4/cmd.h>
37 #include <linux/module.h>
38 #include <linux/cache.h>
44 MLX4_COMMAND_INTERFACE_MIN_REV = 2,
45 MLX4_COMMAND_INTERFACE_MAX_REV = 3,
46 MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS = 3,
49 extern void __buggy_use_of_MLX4_GET(void);
50 extern void __buggy_use_of_MLX4_PUT(void);
52 static bool enable_qos = true;
53 module_param(enable_qos, bool, 0444);
54 MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: on)");
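/* Command mailboxes are laid out as big-endian structures at fixed byte
 * offsets. MLX4_GET() reads a 1/2/4/8-byte field from a mailbox into a
 * CPU-endian variable and MLX4_PUT() writes one back; any other operand
 * size fails to link via the __buggy_use_of_MLX4_* externs above.
 * Typical use, as seen throughout this file:
 *	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
 *	MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET);
 */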
56 #define MLX4_GET(dest, source, offset) \
57 do { \
58 void *__p = (char *) (source) + (offset); \
59 switch (sizeof (dest)) { \
60 case 1: (dest) = *(u8 *) __p; break; \
61 case 2: (dest) = be16_to_cpup(__p); break; \
62 case 4: (dest) = be32_to_cpup(__p); break; \
63 case 8: (dest) = be64_to_cpup(__p); break; \
64 default: __buggy_use_of_MLX4_GET(); \
65 } \
66 } while (0)
68 #define MLX4_PUT(dest, source, offset) \
69 do { \
70 void *__d = ((char *) (dest) + (offset)); \
71 switch (sizeof(source)) { \
72 case 1: *(u8 *) __d = (source); break; \
73 case 2: *(__be16 *) __d = cpu_to_be16(source); break; \
74 case 4: *(__be32 *) __d = cpu_to_be32(source); break; \
75 case 8: *(__be64 *) __d = cpu_to_be64(source); break; \
76 default: __buggy_use_of_MLX4_PUT(); \
77 } \
78 } while (0)
80 static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
82 static const char *fname[] = {
83 [ 0] = "RC transport",
84 [ 1] = "UC transport",
85 [ 2] = "UD transport",
86 [ 3] = "XRC transport",
88 [ 7] = "IPoIB checksum offload",
89 [ 8] = "P_Key violation counter",
90 [ 9] = "Q_Key violation counter",
91 [12] = "Dual Port Different Protocol (DPDP) support",
92 [15] = "Big LSO headers",
95 [18] = "Atomic ops support",
96 [19] = "Raw multicast support",
97 [20] = "Address vector port checking support",
98 [21] = "UD multicast support",
99 [30] = "IBoE support",
100 [32] = "Unicast loopback support",
101 [34] = "FCS header control",
102 [37] = "Wake On LAN (port1) support",
103 [38] = "Wake On LAN (port2) support",
104 [40] = "UDP RSS support",
105 [41] = "Unicast VEP steering support",
106 [42] = "Multicast VEP steering support",
107 [48] = "Counters support",
108 [52] = "RSS IP fragments support",
109 [53] = "Port ETS Scheduler support",
110 [55] = "Port link type sensing support",
111 [59] = "Port management change event support",
112 [61] = "64 byte EQE support",
113 [62] = "64 byte CQE support",
117 mlx4_dbg(dev, "DEV_CAP flags:\n");
118 for (i = 0; i < ARRAY_SIZE(fname); ++i)
119 if (fname[i] && (flags & (1LL << i)))
120 mlx4_dbg(dev, " %s\n", fname[i]);
123 static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
125 static const char * const fname[] = {
127 [1] = "RSS Toeplitz Hash Function support",
128 [2] = "RSS XOR Hash Function support",
129 [3] = "Device managed flow steering support",
130 [4] = "Automatic MAC reassignment support",
131 [5] = "Time stamping support",
132 [6] = "VST (control vlan insertion/stripping) support",
133 [7] = "FSM (MAC anti-spoofing) support",
134 [8] = "Dynamic QP updates support",
135 [9] = "Device managed flow steering IPoIB support",
136 [10] = "TCP/IP offloads/flow-steering for VXLAN support",
137 [11] = "MAD DEMUX (Secure-Host) support",
138 [12] = "Large cache line (>64B) CQE stride support",
139 [13] = "Large cache line (>64B) EQE stride support",
140 [14] = "Ethernet protocol control support",
141 [15] = "Ethernet Backplane autoneg support",
142 [16] = "CONFIG DEV support",
143 [17] = "Asymmetric EQs support",
144 [18] = "More than 80 VFs support",
145 [19] = "Performance optimized for limited rule configuration flow steering support",
146 [20] = "Recoverable error events support",
147 [21] = "Port Remap support",
148 [22] = "QCN support",
149 [23] = "QP rate limiting support",
150 [24] = "Ethernet Flow control statistics support",
151 [25] = "Granular QoS per VF support",
152 [26] = "Port ETS Scheduler support",
156 for (i = 0; i < ARRAY_SIZE(fname); ++i)
157 if (fname[i] && (flags & (1LL << i)))
158 mlx4_dbg(dev, " %s\n", fname[i]);
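/* MOD_STAT_CFG: write the log page size (cfg->log_pg_sz) and its modify
 * bit (cfg->log_pg_sz_m) into the input mailbox and let firmware apply
 * them, so the driver can override the statically configured page size
 * (typically done once, before INIT_HCA).
 */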
161 int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
163 struct mlx4_cmd_mailbox *mailbox;
167 #define MOD_STAT_CFG_IN_SIZE 0x100
169 #define MOD_STAT_CFG_PG_SZ_M_OFFSET 0x002
170 #define MOD_STAT_CFG_PG_SZ_OFFSET 0x003
172 mailbox = mlx4_alloc_cmd_mailbox(dev);
173 if (IS_ERR(mailbox))
174 return PTR_ERR(mailbox);
175 inbox = mailbox->buf;
177 MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET);
178 MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);
180 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
181 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
183 mlx4_free_cmd_mailbox(dev, mailbox);
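/* QUERY_FUNC: read per-function information from firmware - the PCI
 * bus/device/function of the queried function (slave) plus its maximum
 * and reserved EQs and reserved UARs - and decode it into struct mlx4_func.
 */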
187 int mlx4_QUERY_FUNC(struct mlx4_dev *dev, struct mlx4_func *func, int slave)
189 struct mlx4_cmd_mailbox *mailbox;
196 #define QUERY_FUNC_BUS_OFFSET 0x00
197 #define QUERY_FUNC_DEVICE_OFFSET 0x01
198 #define QUERY_FUNC_FUNCTION_OFFSET 0x01
199 #define QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET 0x03
200 #define QUERY_FUNC_RSVD_EQS_OFFSET 0x04
201 #define QUERY_FUNC_MAX_EQ_OFFSET 0x06
202 #define QUERY_FUNC_RSVD_UARS_OFFSET 0x0b
204 mailbox = mlx4_alloc_cmd_mailbox(dev);
205 if (IS_ERR(mailbox))
206 return PTR_ERR(mailbox);
207 outbox = mailbox->buf;
211 err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, 0,
213 MLX4_CMD_TIME_CLASS_A,
218 MLX4_GET(field, outbox, QUERY_FUNC_BUS_OFFSET);
219 func->bus = field & 0xf;
220 MLX4_GET(field, outbox, QUERY_FUNC_DEVICE_OFFSET);
221 func->device = field & 0xf1;
222 MLX4_GET(field, outbox, QUERY_FUNC_FUNCTION_OFFSET);
223 func->function = field & 0x7;
224 MLX4_GET(field, outbox, QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET);
225 func->physical_function = field & 0xf;
226 MLX4_GET(field16, outbox, QUERY_FUNC_RSVD_EQS_OFFSET);
227 func->rsvd_eqs = field16 & 0xffff;
228 MLX4_GET(field16, outbox, QUERY_FUNC_MAX_EQ_OFFSET);
229 func->max_eq = field16 & 0xffff;
230 MLX4_GET(field, outbox, QUERY_FUNC_RSVD_UARS_OFFSET);
231 func->rsvd_uars = field & 0x0f;
233 mlx4_dbg(dev, "Bus: %d, Device: %d, Function: %d, Physical function: %d, Max EQs: %d, Reserved EQs: %d, Reserved UARs: %d\n",
234 func->bus, func->device, func->function, func->physical_function,
235 func->max_eq, func->rsvd_eqs, func->rsvd_uars);
238 mlx4_free_cmd_mailbox(dev, mailbox);
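/* PF-side handler for QUERY_FUNC_CAP issued by a VF. With op_modifier == 1
 * it fills in per-port data for the VF (proxy/tunnel special QP numbers,
 * the privileged QP0 qkey when SMI is enabled, and the physical port id);
 * with op_modifier == 0 it reports per-function flags and the quotas taken
 * from the PF resource tracker.
 */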
242 int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
243 struct mlx4_vhcr *vhcr,
244 struct mlx4_cmd_mailbox *inbox,
245 struct mlx4_cmd_mailbox *outbox,
246 struct mlx4_cmd_info *cmd)
248 struct mlx4_priv *priv = mlx4_priv(dev);
250 u32 size, proxy_qp, qkey;
252 struct mlx4_func func;
254 #define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0
255 #define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1
256 #define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4
257 #define QUERY_FUNC_CAP_FMR_OFFSET 0x8
258 #define QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP 0x10
259 #define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP 0x14
260 #define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP 0x18
261 #define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP 0x20
262 #define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP 0x24
263 #define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP 0x28
264 #define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c
265 #define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0x30
266 #define QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET 0x48
268 #define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x50
269 #define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x54
270 #define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x58
271 #define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET 0x60
272 #define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x64
273 #define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x68
275 #define QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET 0x6c
277 #define QUERY_FUNC_CAP_FMR_FLAG 0x80
278 #define QUERY_FUNC_CAP_FLAG_RDMA 0x40
279 #define QUERY_FUNC_CAP_FLAG_ETH 0x80
280 #define QUERY_FUNC_CAP_FLAG_QUOTAS 0x10
281 #define QUERY_FUNC_CAP_FLAG_RESD_LKEY 0x08
282 #define QUERY_FUNC_CAP_FLAG_VALID_MAILBOX 0x04
284 #define QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG (1UL << 31)
285 #define QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG (1UL << 30)
287 /* when opcode modifier = 1 */
288 #define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3
289 #define QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET 0x4
290 #define QUERY_FUNC_CAP_FLAGS0_OFFSET 0x8
291 #define QUERY_FUNC_CAP_FLAGS1_OFFSET 0xc
293 #define QUERY_FUNC_CAP_QP0_TUNNEL 0x10
294 #define QUERY_FUNC_CAP_QP0_PROXY 0x14
295 #define QUERY_FUNC_CAP_QP1_TUNNEL 0x18
296 #define QUERY_FUNC_CAP_QP1_PROXY 0x1c
297 #define QUERY_FUNC_CAP_PHYS_PORT_ID 0x28
299 #define QUERY_FUNC_CAP_FLAGS1_FORCE_MAC 0x40
300 #define QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN 0x80
301 #define QUERY_FUNC_CAP_FLAGS1_NIC_INFO 0x10
302 #define QUERY_FUNC_CAP_VF_ENABLE_QP0 0x08
304 #define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80
305 #define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS (1 << 31)
307 if (vhcr->op_modifier == 1) {
308 struct mlx4_active_ports actv_ports =
309 mlx4_get_active_ports(dev, slave);
310 int converted_port = mlx4_slave_convert_port(
311 dev, slave, vhcr->in_modifier);
313 if (converted_port < 0)
314 return -EINVAL;
316 vhcr->in_modifier = converted_port;
317 /* phys-port = logical-port */
318 field = vhcr->in_modifier -
319 find_first_bit(actv_ports.ports, dev->caps.num_ports);
320 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
322 port = vhcr->in_modifier;
323 proxy_qp = dev->phys_caps.base_proxy_sqpn + 8 * slave + port - 1;
325 /* Set nic_info bit to mark support for the new fields */
326 field = QUERY_FUNC_CAP_FLAGS1_NIC_INFO;
328 if (mlx4_vf_smi_enabled(dev, slave, port) &&
329 !mlx4_get_parav_qkey(dev, proxy_qp, &qkey)) {
330 field |= QUERY_FUNC_CAP_VF_ENABLE_QP0;
331 MLX4_PUT(outbox->buf, qkey,
332 QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET);
334 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);
336 /* size is now the QP number */
337 size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + port - 1;
338 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL);
341 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_TUNNEL);
343 MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP0_PROXY);
345 MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP1_PROXY);
347 MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
348 QUERY_FUNC_CAP_PHYS_PORT_ID);
350 } else if (vhcr->op_modifier == 0) {
351 struct mlx4_active_ports actv_ports =
352 mlx4_get_active_ports(dev, slave);
353 /* enable rdma and ethernet interfaces, new quota locations,
354 * and reserved lkey
355 */
356 field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
357 QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX |
358 QUERY_FUNC_CAP_FLAG_RESD_LKEY);
359 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
362 bitmap_weight(actv_ports.ports, dev->caps.num_ports),
363 dev->caps.num_ports);
364 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
366 size = dev->caps.function_caps; /* set PF behaviours */
367 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
369 field = 0; /* protected FMR support not available as yet */
370 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET);
372 size = priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[slave];
373 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
374 size = dev->caps.num_qps;
375 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
377 size = priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[slave];
378 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
379 size = dev->caps.num_srqs;
380 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
382 size = priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[slave];
383 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
384 size = dev->caps.num_cqs;
385 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
387 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) ||
388 mlx4_QUERY_FUNC(dev, &func, slave)) {
389 size = vhcr->in_modifier &
390 QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ?
391 dev->caps.num_eqs :
392 rounddown_pow_of_two(dev->caps.num_eqs);
393 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
394 size = dev->caps.reserved_eqs;
395 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
397 size = vhcr->in_modifier &
398 QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ?
399 func.max_eq :
400 rounddown_pow_of_two(func.max_eq);
401 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
402 size = func.rsvd_eqs;
403 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
406 size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave];
407 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
408 size = dev->caps.num_mpts;
409 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
411 size = priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[slave];
412 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
413 size = dev->caps.num_mtts;
414 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
416 size = dev->caps.num_mgms + dev->caps.num_amgms;
417 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
418 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
420 size = QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG |
421 QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG;
422 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET);
424 size = dev->caps.reserved_lkey + ((slave << 8) & 0xFF00);
425 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);
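/* Guest-side QUERY_FUNC_CAP: issue the (wrapped) command and decode the
 * mailbox prepared by mlx4_QUERY_FUNC_CAP_wrapper() above into struct
 * mlx4_func_cap. gen_or_port == 0 requests the general query; a non-zero
 * value requests the per-port query for that port.
 */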
432 int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
433 struct mlx4_func_cap *func_cap)
435 struct mlx4_cmd_mailbox *mailbox;
437 u8 field, op_modifier;
439 int err = 0, quotas = 0;
442 op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */
443 in_modifier = op_modifier ? gen_or_port :
444 QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS;
446 mailbox = mlx4_alloc_cmd_mailbox(dev);
447 if (IS_ERR(mailbox))
448 return PTR_ERR(mailbox);
450 err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, op_modifier,
451 MLX4_CMD_QUERY_FUNC_CAP,
452 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
456 outbox = mailbox->buf;
459 MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
460 if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) {
461 mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n");
462 err = -EPROTONOSUPPORT;
465 func_cap->flags = field;
466 quotas = !!(func_cap->flags & QUERY_FUNC_CAP_FLAG_QUOTAS);
468 MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
469 func_cap->num_ports = field;
471 MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
472 func_cap->pf_context_behaviour = size;
475 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
476 func_cap->qp_quota = size & 0xFFFFFF;
478 MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
479 func_cap->srq_quota = size & 0xFFFFFF;
481 MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
482 func_cap->cq_quota = size & 0xFFFFFF;
484 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
485 func_cap->mpt_quota = size & 0xFFFFFF;
487 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
488 func_cap->mtt_quota = size & 0xFFFFFF;
490 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
491 func_cap->mcg_quota = size & 0xFFFFFF;
494 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
495 func_cap->qp_quota = size & 0xFFFFFF;
497 MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
498 func_cap->srq_quota = size & 0xFFFFFF;
500 MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
501 func_cap->cq_quota = size & 0xFFFFFF;
503 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
504 func_cap->mpt_quota = size & 0xFFFFFF;
506 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
507 func_cap->mtt_quota = size & 0xFFFFFF;
509 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
510 func_cap->mcg_quota = size & 0xFFFFFF;
512 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
513 func_cap->max_eq = size & 0xFFFFFF;
515 MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
516 func_cap->reserved_eq = size & 0xFFFFFF;
518 if (func_cap->flags & QUERY_FUNC_CAP_FLAG_RESD_LKEY) {
519 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);
520 func_cap->reserved_lkey = size;
522 func_cap->reserved_lkey = 0;
525 func_cap->extra_flags = 0;
527 /* Mailbox data from 0x6c and onward should only be treated if
528 * QUERY_FUNC_CAP_FLAG_VALID_MAILBOX is set in func_cap->flags
529 */
530 if (func_cap->flags & QUERY_FUNC_CAP_FLAG_VALID_MAILBOX) {
531 MLX4_GET(size, outbox, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET);
532 if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG)
533 func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_BF_RES_QP;
534 if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG)
535 func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_A0_RES_QP;
541 /* logical port query */
542 if (gen_or_port > dev->caps.num_ports) {
543 err = -EINVAL;
544 goto out;
545 }
547 MLX4_GET(func_cap->flags1, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET);
548 if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) {
549 if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN) {
550 mlx4_err(dev, "VLAN is enforced on this port\n");
551 err = -EPROTONOSUPPORT;
555 if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_MAC) {
556 mlx4_err(dev, "Force mac is enabled on this port\n");
557 err = -EPROTONOSUPPORT;
560 } else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
561 MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
562 if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) {
563 mlx4_err(dev, "phy_wqe_gid is enforced on this ib port\n");
564 err = -EPROTONOSUPPORT;
569 MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
570 func_cap->physical_port = field;
571 if (func_cap->physical_port != gen_or_port) {
572 err = -EINVAL;
573 goto out;
574 }
576 if (func_cap->flags1 & QUERY_FUNC_CAP_VF_ENABLE_QP0) {
577 MLX4_GET(qkey, outbox, QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET);
578 func_cap->qp0_qkey = qkey;
580 func_cap->qp0_qkey = 0;
583 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_TUNNEL);
584 func_cap->qp0_tunnel_qpn = size & 0xFFFFFF;
586 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_PROXY);
587 func_cap->qp0_proxy_qpn = size & 0xFFFFFF;
589 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_TUNNEL);
590 func_cap->qp1_tunnel_qpn = size & 0xFFFFFF;
592 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY);
593 func_cap->qp1_proxy_qpn = size & 0xFFFFFF;
595 if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_NIC_INFO)
596 MLX4_GET(func_cap->phys_port_id, outbox,
597 QUERY_FUNC_CAP_PHYS_PORT_ID);
599 /* All other resources are allocated by the master, but we still report
600 * 'num' and 'reserved' capabilities as follows:
601 * - num remains the maximum resource index
602 * - 'num - reserved' is the total available objects of a resource, but
603 * resource indices may be less than 'reserved'
604 * TODO: set per-resource quotas */
607 mlx4_free_cmd_mailbox(dev, mailbox);
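/* QUERY_DEV_CAP: fetch the full device capability block (0x100 bytes) and
 * decode it field by field into struct mlx4_dev_cap, turning individual
 * capability bits into MLX4_DEV_CAP_FLAG2_* flags where appropriate.
 */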
612 int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
614 struct mlx4_cmd_mailbox *mailbox;
617 u32 field32, flags, ext_flags;
623 #define QUERY_DEV_CAP_OUT_SIZE 0x100
624 #define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET 0x10
625 #define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET 0x11
626 #define QUERY_DEV_CAP_RSVD_QP_OFFSET 0x12
627 #define QUERY_DEV_CAP_MAX_QP_OFFSET 0x13
628 #define QUERY_DEV_CAP_RSVD_SRQ_OFFSET 0x14
629 #define QUERY_DEV_CAP_MAX_SRQ_OFFSET 0x15
630 #define QUERY_DEV_CAP_RSVD_EEC_OFFSET 0x16
631 #define QUERY_DEV_CAP_MAX_EEC_OFFSET 0x17
632 #define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET 0x19
633 #define QUERY_DEV_CAP_RSVD_CQ_OFFSET 0x1a
634 #define QUERY_DEV_CAP_MAX_CQ_OFFSET 0x1b
635 #define QUERY_DEV_CAP_MAX_MPT_OFFSET 0x1d
636 #define QUERY_DEV_CAP_RSVD_EQ_OFFSET 0x1e
637 #define QUERY_DEV_CAP_MAX_EQ_OFFSET 0x1f
638 #define QUERY_DEV_CAP_RSVD_MTT_OFFSET 0x20
639 #define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET 0x21
640 #define QUERY_DEV_CAP_RSVD_MRW_OFFSET 0x22
641 #define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET 0x23
642 #define QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET 0x26
643 #define QUERY_DEV_CAP_MAX_AV_OFFSET 0x27
644 #define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET 0x29
645 #define QUERY_DEV_CAP_MAX_RES_QP_OFFSET 0x2b
646 #define QUERY_DEV_CAP_MAX_GSO_OFFSET 0x2d
647 #define QUERY_DEV_CAP_RSS_OFFSET 0x2e
648 #define QUERY_DEV_CAP_MAX_RDMA_OFFSET 0x2f
649 #define QUERY_DEV_CAP_RSZ_SRQ_OFFSET 0x33
650 #define QUERY_DEV_CAP_ACK_DELAY_OFFSET 0x35
651 #define QUERY_DEV_CAP_MTU_WIDTH_OFFSET 0x36
652 #define QUERY_DEV_CAP_VL_PORT_OFFSET 0x37
653 #define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET 0x38
654 #define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b
655 #define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c
656 #define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET 0x3e
657 #define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f
658 #define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40
659 #define QUERY_DEV_CAP_FLAGS_OFFSET 0x44
660 #define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48
661 #define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49
662 #define QUERY_DEV_CAP_PAGE_SZ_OFFSET 0x4b
663 #define QUERY_DEV_CAP_BF_OFFSET 0x4c
664 #define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET 0x4d
665 #define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET 0x4e
666 #define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET 0x4f
667 #define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET 0x51
668 #define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET 0x52
669 #define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET 0x55
670 #define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET 0x56
671 #define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET 0x61
672 #define QUERY_DEV_CAP_RSVD_MCG_OFFSET 0x62
673 #define QUERY_DEV_CAP_MAX_MCG_OFFSET 0x63
674 #define QUERY_DEV_CAP_RSVD_PD_OFFSET 0x64
675 #define QUERY_DEV_CAP_MAX_PD_OFFSET 0x65
676 #define QUERY_DEV_CAP_RSVD_XRC_OFFSET 0x66
677 #define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67
678 #define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68
679 #define QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET 0x70
680 #define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET 0x70
681 #define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET 0x74
682 #define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76
683 #define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77
684 #define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE 0x7a
685 #define QUERY_DEV_CAP_ECN_QCN_VER_OFFSET 0x7b
686 #define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80
687 #define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82
688 #define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84
689 #define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET 0x86
690 #define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET 0x88
691 #define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET 0x8a
692 #define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET 0x8c
693 #define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET 0x8e
694 #define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET 0x90
695 #define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92
696 #define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94
697 #define QUERY_DEV_CAP_CONFIG_DEV_OFFSET 0x94
698 #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98
699 #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0
700 #define QUERY_DEV_CAP_ETH_BACKPL_OFFSET 0x9c
701 #define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d
702 #define QUERY_DEV_CAP_VXLAN 0x9e
703 #define QUERY_DEV_CAP_MAD_DEMUX_OFFSET 0xb0
704 #define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET 0xa8
705 #define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET 0xac
706 #define QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET 0xcc
707 #define QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET 0xd0
708 #define QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET 0xd2
712 mailbox = mlx4_alloc_cmd_mailbox(dev);
713 if (IS_ERR(mailbox))
714 return PTR_ERR(mailbox);
715 outbox = mailbox->buf;
717 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
718 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
722 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
723 dev_cap->reserved_qps = 1 << (field & 0xf);
724 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
725 dev_cap->max_qps = 1 << (field & 0x1f);
726 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET);
727 dev_cap->reserved_srqs = 1 << (field >> 4);
728 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET);
729 dev_cap->max_srqs = 1 << (field & 0x1f);
730 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET);
731 dev_cap->max_cq_sz = 1 << field;
732 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET);
733 dev_cap->reserved_cqs = 1 << (field & 0xf);
734 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET);
735 dev_cap->max_cqs = 1 << (field & 0x1f);
736 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
737 dev_cap->max_mpts = 1 << (field & 0x3f);
738 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
739 dev_cap->reserved_eqs = 1 << (field & 0xf);
740 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
741 dev_cap->max_eqs = 1 << (field & 0xf);
742 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
743 dev_cap->reserved_mtts = 1 << (field >> 4);
744 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET);
745 dev_cap->max_mrw_sz = 1 << field;
746 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET);
747 dev_cap->reserved_mrws = 1 << (field & 0xf);
748 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET);
749 dev_cap->max_mtt_seg = 1 << (field & 0x3f);
750 MLX4_GET(size, outbox, QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET);
751 dev_cap->num_sys_eqs = size & 0xfff;
752 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
753 dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
754 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
755 dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
756 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET);
759 dev_cap->max_gso_sz = 0;
761 dev_cap->max_gso_sz = 1 << field;
763 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSS_OFFSET);
765 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_XOR;
767 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_TOP;
770 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS;
771 dev_cap->max_rss_tbl_sz = 1 << field;
773 dev_cap->max_rss_tbl_sz = 0;
774 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
775 dev_cap->max_rdma_global = 1 << (field & 0x3f);
776 MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
777 dev_cap->local_ca_ack_delay = field & 0x1f;
778 MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
779 dev_cap->num_ports = field & 0xf;
780 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
781 MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET);
783 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN;
784 dev_cap->max_msg_sz = 1 << (field & 0x1f);
785 MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
787 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
788 dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
789 MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
791 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB;
792 MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
793 dev_cap->fs_max_num_qp_per_entry = field;
794 MLX4_GET(field, outbox, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
796 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QCN;
797 MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
798 dev_cap->stat_rate_support = stat_rate;
799 MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
801 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_TS;
802 MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
803 MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
804 dev_cap->flags = flags | (u64)ext_flags << 32;
805 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
806 dev_cap->reserved_uars = field >> 4;
807 MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
808 dev_cap->uar_size = 1 << ((field & 0x3f) + 20);
809 MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET);
810 dev_cap->min_page_sz = 1 << field;
812 MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET);
814 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
815 dev_cap->bf_reg_size = 1 << (field & 0x1f);
816 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
817 if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
819 dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
821 dev_cap->bf_reg_size = 0;
824 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET);
825 dev_cap->max_sq_sg = field;
826 MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET);
827 dev_cap->max_sq_desc_sz = size;
829 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET);
830 dev_cap->max_qp_per_mcg = 1 << field;
831 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET);
832 dev_cap->reserved_mgms = field & 0xf;
833 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET);
834 dev_cap->max_mcgs = 1 << field;
835 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET);
836 dev_cap->reserved_pds = field >> 4;
837 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
838 dev_cap->max_pds = 1 << (field & 0x3f);
839 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_XRC_OFFSET);
840 dev_cap->reserved_xrcds = field >> 4;
841 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_XRC_OFFSET);
842 dev_cap->max_xrcds = 1 << (field & 0x1f);
844 MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET);
845 dev_cap->rdmarc_entry_sz = size;
846 MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET);
847 dev_cap->qpc_entry_sz = size;
848 MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET);
849 dev_cap->aux_entry_sz = size;
850 MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET);
851 dev_cap->altc_entry_sz = size;
852 MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET);
853 dev_cap->eqc_entry_sz = size;
854 MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET);
855 dev_cap->cqc_entry_sz = size;
856 MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET);
857 dev_cap->srq_entry_sz = size;
858 MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET);
859 dev_cap->cmpt_entry_sz = size;
860 MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET);
861 dev_cap->mtt_entry_sz = size;
862 MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET);
863 dev_cap->dmpt_entry_sz = size;
865 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET);
866 dev_cap->max_srq_sz = 1 << field;
867 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET);
868 dev_cap->max_qp_sz = 1 << field;
869 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET);
870 dev_cap->resize_srq = field & 1;
871 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET);
872 dev_cap->max_rq_sg = field;
873 MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
874 dev_cap->max_rq_desc_sz = size;
875 MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
876 if (field & (1 << 4))
877 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QOS_VPP;
878 if (field & (1 << 5))
879 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL;
880 if (field & (1 << 6))
881 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
882 if (field & (1 << 7))
883 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
884 MLX4_GET(dev_cap->bmme_flags, outbox,
885 QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
886 if (dev_cap->bmme_flags & MLX4_FLAG_PORT_REMAP)
887 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_REMAP;
888 MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
890 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV;
891 MLX4_GET(dev_cap->reserved_lkey, outbox,
892 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
893 MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
894 if (field32 & (1 << 0))
895 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP;
896 if (field32 & (1 << 7))
897 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT;
898 MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
900 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
901 MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN);
903 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS;
904 if (field & (1 << 5))
905 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG;
906 MLX4_GET(dev_cap->max_icm_sz, outbox,
907 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
908 if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
909 MLX4_GET(dev_cap->max_counters, outbox,
910 QUERY_DEV_CAP_MAX_COUNTERS_OFFSET);
912 MLX4_GET(field32, outbox,
913 QUERY_DEV_CAP_MAD_DEMUX_OFFSET);
914 if (field32 & (1 << 0))
915 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_MAD_DEMUX;
917 MLX4_GET(dev_cap->dmfs_high_rate_qpn_base, outbox,
918 QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET);
919 dev_cap->dmfs_high_rate_qpn_base &= MGM_QPN_MASK;
920 MLX4_GET(dev_cap->dmfs_high_rate_qpn_range, outbox,
921 QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET);
922 dev_cap->dmfs_high_rate_qpn_range &= MGM_QPN_MASK;
924 MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET);
925 dev_cap->rl_caps.num_rates = size;
926 if (dev_cap->rl_caps.num_rates) {
927 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT;
928 MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET);
929 dev_cap->rl_caps.max_val = size & 0xfff;
930 dev_cap->rl_caps.max_unit = size >> 14;
931 MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET);
932 dev_cap->rl_caps.min_val = size & 0xfff;
933 dev_cap->rl_caps.min_unit = size >> 14;
936 MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
937 if (field32 & (1 << 16))
938 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP;
939 if (field32 & (1 << 26))
940 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL;
941 if (field32 & (1 << 20))
942 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM;
943 if (field32 & (1 << 21))
944 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_80_VFS;
946 for (i = 1; i <= dev_cap->num_ports; i++) {
947 err = mlx4_QUERY_PORT(dev, i, dev_cap->port_cap + i);
952 /*
953 * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
954 * we can't use any EQs whose doorbell falls on that page,
955 * even if the EQ itself isn't reserved.
956 */
957 if (dev_cap->num_sys_eqs == 0)
958 dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
959 dev_cap->reserved_eqs);
961 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SYS_EQS;
964 mlx4_free_cmd_mailbox(dev, mailbox);
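/* Pretty-print the capabilities gathered by mlx4_QUERY_DEV_CAP() at debug
 * level, including both capability flag words.
 */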
968 void mlx4_dev_cap_dump(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
970 if (dev_cap->bf_reg_size > 0)
971 mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
972 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
974 mlx4_dbg(dev, "BlueFlame not available\n");
976 mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
977 dev_cap->bmme_flags, dev_cap->reserved_lkey);
978 mlx4_dbg(dev, "Max ICM size %lld MB\n",
979 (unsigned long long) dev_cap->max_icm_sz >> 20);
980 mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
981 dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz);
982 mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
983 dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz);
984 mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
985 dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz);
986 mlx4_dbg(dev, "Num sys EQs: %d, max EQs: %d, reserved EQs: %d, entry size: %d\n",
987 dev_cap->num_sys_eqs, dev_cap->max_eqs, dev_cap->reserved_eqs,
988 dev_cap->eqc_entry_sz);
989 mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
990 dev_cap->reserved_mrws, dev_cap->reserved_mtts);
991 mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
992 dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars);
993 mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
994 dev_cap->max_pds, dev_cap->reserved_mgms);
995 mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
996 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
997 mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
998 dev_cap->local_ca_ack_delay, 128 << dev_cap->port_cap[1].ib_mtu,
999 dev_cap->port_cap[1].max_port_width);
1000 mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
1001 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
1002 mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
1003 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
1004 mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
1005 mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters);
1006 mlx4_dbg(dev, "Max RSS Table size: %d\n", dev_cap->max_rss_tbl_sz);
1007 mlx4_dbg(dev, "DMFS high rate steer QPn base: %d\n",
1008 dev_cap->dmfs_high_rate_qpn_base);
1009 mlx4_dbg(dev, "DMFS high rate steer QPn range: %d\n",
1010 dev_cap->dmfs_high_rate_qpn_range);
1012 if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT) {
1013 struct mlx4_rate_limit_caps *rl_caps = &dev_cap->rl_caps;
1015 mlx4_dbg(dev, "QP Rate-Limit: #rates %d, unit/val max %d/%d, min %d/%d\n",
1016 rl_caps->num_rates, rl_caps->max_unit, rl_caps->max_val,
1017 rl_caps->min_unit, rl_caps->min_val);
1020 dump_dev_cap_flags(dev, dev_cap->flags);
1021 dump_dev_cap_flags2(dev, dev_cap->flags2);
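/* QUERY_PORT: read per-port capabilities (MTU, port width, GID/PKEY table
 * sizes, MAC/VLAN table sizes, transceiver info). On firmware that predates
 * the new port commands (MLX4_FLAG_OLD_PORT_CMDS) the same data is taken
 * from the QUERY_DEV_CAP output instead.
 */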
1024 int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap)
1026 struct mlx4_cmd_mailbox *mailbox;
1032 mailbox = mlx4_alloc_cmd_mailbox(dev);
1033 if (IS_ERR(mailbox))
1034 return PTR_ERR(mailbox);
1035 outbox = mailbox->buf;
1037 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
1038 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
1039 MLX4_CMD_TIME_CLASS_A,
1045 MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
1046 port_cap->max_vl = field >> 4;
1047 MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
1048 port_cap->ib_mtu = field >> 4;
1049 port_cap->max_port_width = field & 0xf;
1050 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
1051 port_cap->max_gids = 1 << (field & 0xf);
1052 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET);
1053 port_cap->max_pkeys = 1 << (field & 0xf);
1055 #define QUERY_PORT_SUPPORTED_TYPE_OFFSET 0x00
1056 #define QUERY_PORT_MTU_OFFSET 0x01
1057 #define QUERY_PORT_ETH_MTU_OFFSET 0x02
1058 #define QUERY_PORT_WIDTH_OFFSET 0x06
1059 #define QUERY_PORT_MAX_GID_PKEY_OFFSET 0x07
1060 #define QUERY_PORT_MAX_MACVLAN_OFFSET 0x0a
1061 #define QUERY_PORT_MAX_VL_OFFSET 0x0b
1062 #define QUERY_PORT_MAC_OFFSET 0x10
1063 #define QUERY_PORT_TRANS_VENDOR_OFFSET 0x18
1064 #define QUERY_PORT_WAVELENGTH_OFFSET 0x1c
1065 #define QUERY_PORT_TRANS_CODE_OFFSET 0x20
1067 err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0, MLX4_CMD_QUERY_PORT,
1068 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
1072 MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
1073 port_cap->supported_port_types = field & 3;
1074 port_cap->suggested_type = (field >> 3) & 1;
1075 port_cap->default_sense = (field >> 4) & 1;
1076 port_cap->dmfs_optimized_state = (field >> 5) & 1;
1077 MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
1078 port_cap->ib_mtu = field & 0xf;
1079 MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
1080 port_cap->max_port_width = field & 0xf;
1081 MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
1082 port_cap->max_gids = 1 << (field >> 4);
1083 port_cap->max_pkeys = 1 << (field & 0xf);
1084 MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
1085 port_cap->max_vl = field & 0xf;
1086 MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
1087 port_cap->log_max_macs = field & 0xf;
1088 port_cap->log_max_vlans = field >> 4;
1089 MLX4_GET(port_cap->eth_mtu, outbox, QUERY_PORT_ETH_MTU_OFFSET);
1090 MLX4_GET(port_cap->def_mac, outbox, QUERY_PORT_MAC_OFFSET);
1091 MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET);
1092 port_cap->trans_type = field32 >> 24;
1093 port_cap->vendor_oui = field32 & 0xffffff;
1094 MLX4_GET(port_cap->wavelength, outbox, QUERY_PORT_WAVELENGTH_OFFSET);
1095 MLX4_GET(port_cap->trans_code, outbox, QUERY_PORT_TRANS_CODE_OFFSET);
1099 mlx4_free_cmd_mailbox(dev, mailbox);
1103 #define DEV_CAP_EXT_2_FLAG_PFC_COUNTERS (1 << 28)
1104 #define DEV_CAP_EXT_2_FLAG_VLAN_CONTROL (1 << 26)
1105 #define DEV_CAP_EXT_2_FLAG_80_VFS (1 << 21)
1106 #define DEV_CAP_EXT_2_FLAG_FSM (1 << 20)
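/* PF-side wrapper for QUERY_DEV_CAP from a VF: rerun the native command and
 * then mask the output so guests do not see capabilities the PF does not
 * expose to them (timestamping, VXLAN offloads, BlueFlame, MW type 2, port
 * remap, flow steering knobs, QoS and rate-limit features), and report only
 * the ports actually assigned to the VF.
 */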
1108 int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
1109 struct mlx4_vhcr *vhcr,
1110 struct mlx4_cmd_mailbox *inbox,
1111 struct mlx4_cmd_mailbox *outbox,
1112 struct mlx4_cmd_info *cmd)
1118 u32 bmme_flags, field32;
1122 struct mlx4_active_ports actv_ports;
1124 err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
1125 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1129 /* add port mng change event capability and disable mw type 1
1130 * unconditionally to slaves
1131 */
1132 MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
1133 flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV;
1134 flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW;
1135 actv_ports = mlx4_get_active_ports(dev, slave);
1136 first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
1137 for (slave_port = 0, real_port = first_port;
1138 real_port < first_port +
1139 bitmap_weight(actv_ports.ports, dev->caps.num_ports);
1140 ++real_port, ++slave_port) {
1141 if (flags & (MLX4_DEV_CAP_FLAG_WOL_PORT1 << real_port))
1142 flags |= MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port;
1144 flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
1146 for (; slave_port < dev->caps.num_ports; ++slave_port)
1147 flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
1149 /* Not exposing RSS IP fragments to guests */
1150 flags &= ~MLX4_DEV_CAP_FLAG_RSS_IP_FRAG;
1151 MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
1153 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VL_PORT_OFFSET);
1155 field |= bitmap_weight(actv_ports.ports, dev->caps.num_ports) & 0x0F;
1156 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VL_PORT_OFFSET);
1158 /* For guests, disable timestamp */
1159 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
1161 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
1163 /* For guests, disable vxlan tunneling and QoS support */
1164 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN);
1166 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN);
1168 /* For guests, report Blueflame disabled */
1169 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
1171 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);
1173 /* For guests, disable mw type 2 and port remap*/
1174 MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
1175 bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
1176 bmme_flags &= ~MLX4_FLAG_PORT_REMAP;
1177 MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
1179 /* turn off device-managed steering capability if not enabled */
1180 if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
1181 MLX4_GET(field, outbox->buf,
1182 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
1184 MLX4_PUT(outbox->buf, field,
1185 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
1188 /* turn off ipoib managed steering for guests */
1189 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
1191 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
1193 /* turn off host side virt features (VST, FSM, etc) for guests */
1194 MLX4_GET(field32, outbox->buf, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
1195 field32 &= ~(DEV_CAP_EXT_2_FLAG_VLAN_CONTROL | DEV_CAP_EXT_2_FLAG_80_VFS |
1196 DEV_CAP_EXT_2_FLAG_FSM | DEV_CAP_EXT_2_FLAG_PFC_COUNTERS);
1197 MLX4_PUT(outbox->buf, field32, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
1199 /* turn off QCN for guests */
1200 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
1202 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
1204 /* turn off QP max-rate limiting for guests */
1206 MLX4_PUT(outbox->buf, field16, QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET);
1208 /* turn off QoS per VF support for guests */
1209 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
1211 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
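/* PF-side wrapper for QUERY_PORT from a VF: translate the VF port number,
 * run the query, then rewrite the answer - the VF's administratively
 * assigned MAC, the currently operating port type with link sensing masked
 * off, the admin link state, and the GID/PKEY table lengths this VF is
 * allowed to see.
 */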
1216 int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
1217 struct mlx4_vhcr *vhcr,
1218 struct mlx4_cmd_mailbox *inbox,
1219 struct mlx4_cmd_mailbox *outbox,
1220 struct mlx4_cmd_info *cmd)
1222 struct mlx4_priv *priv = mlx4_priv(dev);
1227 int admin_link_state;
1228 int port = mlx4_slave_convert_port(dev, slave,
1229 vhcr->in_modifier & 0xFF);
1231 #define MLX4_VF_PORT_NO_LINK_SENSE_MASK 0xE0
1232 #define MLX4_PORT_LINK_UP_MASK 0x80
1233 #define QUERY_PORT_CUR_MAX_PKEY_OFFSET 0x0c
1234 #define QUERY_PORT_CUR_MAX_GID_OFFSET 0x0e
1239 /* Protect against untrusted guests: enforce that this is the
1240 * QUERY_PORT general query.
1241 */
1242 if (vhcr->op_modifier || vhcr->in_modifier & ~0xFF)
1243 return -EINVAL;
1245 vhcr->in_modifier = port;
1247 err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
1248 MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
1251 if (!err && dev->caps.function != slave) {
1252 def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
1253 MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);
1255 /* get port type - currently only eth is enabled */
1256 MLX4_GET(port_type, outbox->buf,
1257 QUERY_PORT_SUPPORTED_TYPE_OFFSET);
1259 /* No link sensing allowed */
1260 port_type &= MLX4_VF_PORT_NO_LINK_SENSE_MASK;
1261 /* set port type to currently operating port type */
1262 port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3);
1264 admin_link_state = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.link_state;
1265 if (IFLA_VF_LINK_STATE_ENABLE == admin_link_state)
1266 port_type |= MLX4_PORT_LINK_UP_MASK;
1267 else if (IFLA_VF_LINK_STATE_DISABLE == admin_link_state)
1268 port_type &= ~MLX4_PORT_LINK_UP_MASK;
1270 MLX4_PUT(outbox->buf, port_type,
1271 QUERY_PORT_SUPPORTED_TYPE_OFFSET);
1273 if (dev->caps.port_type[vhcr->in_modifier] == MLX4_PORT_TYPE_ETH)
1274 short_field = mlx4_get_slave_num_gids(dev, slave, port);
1276 short_field = 1; /* slave max gids */
1277 MLX4_PUT(outbox->buf, short_field,
1278 QUERY_PORT_CUR_MAX_GID_OFFSET);
1280 short_field = dev->caps.pkey_table_len[vhcr->in_modifier];
1281 MLX4_PUT(outbox->buf, short_field,
1282 QUERY_PORT_CUR_MAX_PKEY_OFFSET);
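/* Exported helper: query the GID and PKEY table lengths currently exposed
 * for @port. On a VF these are the values the PF reports for it via
 * mlx4_QUERY_PORT_wrapper() above.
 */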
1288 int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port,
1289 int *gid_tbl_len, int *pkey_tbl_len)
1291 struct mlx4_cmd_mailbox *mailbox;
1296 mailbox = mlx4_alloc_cmd_mailbox(dev);
1297 if (IS_ERR(mailbox))
1298 return PTR_ERR(mailbox);
1300 err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0,
1301 MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
1306 outbox = mailbox->buf;
1308 MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_GID_OFFSET);
1309 *gid_tbl_len = field;
1311 MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_PKEY_OFFSET);
1312 *pkey_tbl_len = field;
1315 mlx4_free_cmd_mailbox(dev, mailbox);
1318 EXPORT_SYMBOL(mlx4_get_slave_pkey_gid_tbl_len);
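/* mlx4_map_cmd() maps firmware/ICM memory to the device: it walks the ICM
 * chunk list and builds entries of two big-endian 64-bit words each - the
 * ICM virtual address (skipped when virt == -1, as for MAP_FA) and the
 * physical address with the page size (as a power of two above
 * MLX4_ICM_PAGE_SHIFT) encoded in its low bits. Pages must be naturally
 * aligned to their size. A mailbox holds MLX4_MAILBOX_SIZE / 16 such
 * entries, so the MAP_FA / MAP_ICM / MAP_ICM_AUX command is issued as many
 * times as needed.
 */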
1320 int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
1322 struct mlx4_cmd_mailbox *mailbox;
1323 struct mlx4_icm_iter iter;
1331 mailbox = mlx4_alloc_cmd_mailbox(dev);
1332 if (IS_ERR(mailbox))
1333 return PTR_ERR(mailbox);
1334 pages = mailbox->buf;
1336 for (mlx4_icm_first(icm, &iter);
1337 !mlx4_icm_last(&iter);
1338 mlx4_icm_next(&iter)) {
1339 /*
1340 * We have to pass pages that are aligned to their
1341 * size, so find the least significant 1 in the
1342 * address or size and use that as our log2 size.
1343 */
1344 lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
1345 if (lg < MLX4_ICM_PAGE_SHIFT) {
1346 mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx)\n",
1348 (unsigned long long) mlx4_icm_addr(&iter),
1349 mlx4_icm_size(&iter));
1354 for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) {
1356 pages[nent * 2] = cpu_to_be64(virt);
1360 pages[nent * 2 + 1] =
1361 cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) |
1362 (lg - MLX4_ICM_PAGE_SHIFT));
1363 ts += 1 << (lg - 10);
1366 if (++nent == MLX4_MAILBOX_SIZE / 16) {
1367 err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
1368 MLX4_CMD_TIME_CLASS_B,
1378 err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
1379 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
1384 case MLX4_CMD_MAP_FA:
1385 mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW\n", tc, ts);
1387 case MLX4_CMD_MAP_ICM_AUX:
1388 mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux\n", tc, ts);
1390 case MLX4_CMD_MAP_ICM:
1391 mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM\n",
1392 tc, ts, (unsigned long long) virt - (ts << 10));
1397 mlx4_free_cmd_mailbox(dev, mailbox);
1401 int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
1403 return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1);
1406 int mlx4_UNMAP_FA(struct mlx4_dev *dev)
1408 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA,
1409 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
1413 int mlx4_RUN_FW(struct mlx4_dev *dev)
1415 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW,
1416 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
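/* QUERY_FW: read the firmware version (fixing up the minor/subminor word
 * order), validate the command interface revision against the range this
 * driver supports, and record the BAR/offset of the catastrophic error
 * buffer, clear-interrupt register, communication channel and internal
 * clock, plus the number of firmware pages to be mapped with MAP_FA.
 */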
1419 int mlx4_QUERY_FW(struct mlx4_dev *dev)
1421 struct mlx4_fw *fw = &mlx4_priv(dev)->fw;
1422 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
1423 struct mlx4_cmd_mailbox *mailbox;
1430 #define QUERY_FW_OUT_SIZE 0x100
1431 #define QUERY_FW_VER_OFFSET 0x00
1432 #define QUERY_FW_PPF_ID 0x09
1433 #define QUERY_FW_CMD_IF_REV_OFFSET 0x0a
1434 #define QUERY_FW_MAX_CMD_OFFSET 0x0f
1435 #define QUERY_FW_ERR_START_OFFSET 0x30
1436 #define QUERY_FW_ERR_SIZE_OFFSET 0x38
1437 #define QUERY_FW_ERR_BAR_OFFSET 0x3c
1439 #define QUERY_FW_SIZE_OFFSET 0x00
1440 #define QUERY_FW_CLR_INT_BASE_OFFSET 0x20
1441 #define QUERY_FW_CLR_INT_BAR_OFFSET 0x28
1443 #define QUERY_FW_COMM_BASE_OFFSET 0x40
1444 #define QUERY_FW_COMM_BAR_OFFSET 0x48
1446 #define QUERY_FW_CLOCK_OFFSET 0x50
1447 #define QUERY_FW_CLOCK_BAR 0x58
1449 mailbox = mlx4_alloc_cmd_mailbox(dev);
1450 if (IS_ERR(mailbox))
1451 return PTR_ERR(mailbox);
1452 outbox = mailbox->buf;
1454 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
1455 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1459 MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET);
1460 /*
1461 * FW subminor version is at more significant bits than minor
1462 * version, so swap here.
1463 */
1464 dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) |
1465 ((fw_ver & 0xffff0000ull) >> 16) |
1466 ((fw_ver & 0x0000ffffull) << 16);
1468 MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
1469 dev->caps.function = lg;
1471 if (mlx4_is_slave(dev))
1475 MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
1476 if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
1477 cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
1478 mlx4_err(dev, "Installed FW has unsupported command interface revision %d\n",
1480 mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
1481 (int) (dev->caps.fw_ver >> 32),
1482 (int) (dev->caps.fw_ver >> 16) & 0xffff,
1483 (int) dev->caps.fw_ver & 0xffff);
1484 mlx4_err(dev, "This driver version supports only revisions %d to %d\n",
1485 MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
1490 if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS)
1491 dev->flags |= MLX4_FLAG_OLD_PORT_CMDS;
1493 MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
1494 cmd->max_cmds = 1 << lg;
1496 mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n",
1497 (int) (dev->caps.fw_ver >> 32),
1498 (int) (dev->caps.fw_ver >> 16) & 0xffff,
1499 (int) dev->caps.fw_ver & 0xffff,
1500 cmd_if_rev, cmd->max_cmds);
1502 MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET);
1503 MLX4_GET(fw->catas_size, outbox, QUERY_FW_ERR_SIZE_OFFSET);
1504 MLX4_GET(fw->catas_bar, outbox, QUERY_FW_ERR_BAR_OFFSET);
1505 fw->catas_bar = (fw->catas_bar >> 6) * 2;
1507 mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n",
1508 (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar);
1510 MLX4_GET(fw->fw_pages, outbox, QUERY_FW_SIZE_OFFSET);
1511 MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
1512 MLX4_GET(fw->clr_int_bar, outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
1513 fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;
1515 MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET);
1516 MLX4_GET(fw->comm_bar, outbox, QUERY_FW_COMM_BAR_OFFSET);
1517 fw->comm_bar = (fw->comm_bar >> 6) * 2;
1518 mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n",
1519 fw->comm_bar, fw->comm_base);
1520 mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);
1522 MLX4_GET(fw->clock_offset, outbox, QUERY_FW_CLOCK_OFFSET);
1523 MLX4_GET(fw->clock_bar, outbox, QUERY_FW_CLOCK_BAR);
1524 fw->clock_bar = (fw->clock_bar >> 6) * 2;
1525 mlx4_dbg(dev, "Internal clock bar:%d offset:0x%llx\n",
1526 fw->clock_bar, fw->clock_offset);
1528 /*
1529 * Round up number of system pages needed in case
1530 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
1531 */
1532 fw->fw_pages =
1533 ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
1534 (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);
1536 mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n",
1537 (unsigned long long) fw->clr_int_base, fw->clr_int_bar);
1540 mlx4_free_cmd_mailbox(dev, mailbox);
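/* PF-side wrapper for QUERY_FW from a VF: run the native query, then hide
 * everything except the firmware version and mark the PPF ID as invalid,
 * since the rest of the layout (BARs, comm channel) is not meaningful to a
 * guest.
 */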
1544 int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
1545 struct mlx4_vhcr *vhcr,
1546 struct mlx4_cmd_mailbox *inbox,
1547 struct mlx4_cmd_mailbox *outbox,
1548 struct mlx4_cmd_info *cmd)
1553 outbuf = outbox->buf;
1554 err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
1555 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1559 /* for slaves, set pci PPF ID to invalid and zero out everything
1560 * else except FW version */
1561 outbuf[0] = outbuf[1] = 0;
1562 memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8);
1563 outbuf[QUERY_FW_PPF_ID] = MLX4_INVALID_SLAVE_ID;
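/* Extract the board id string from the vendor-specific data (VSD) block:
 * boards with the Topspin signature carry a plain string, while Mellanox
 * boards store it as 32-bit words that firmware has byte-swapped, so it is
 * swabbed back before use.
 */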
1568 static void get_board_id(void *vsd, char *board_id)
1572 #define VSD_OFFSET_SIG1 0x00
1573 #define VSD_OFFSET_SIG2 0xde
1574 #define VSD_OFFSET_MLX_BOARD_ID 0xd0
1575 #define VSD_OFFSET_TS_BOARD_ID 0x20
1577 #define VSD_SIGNATURE_TOPSPIN 0x5ad
1579 memset(board_id, 0, MLX4_BOARD_ID_LEN);
1581 if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
1582 be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
1583 strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
1584 } else {
1585 /*
1586 * The board ID is a string but the firmware byte
1587 * swaps each 4-byte word before passing it back to
1588 * us. Therefore we need to swab it before printing.
1589 */
1590 for (i = 0; i < 4; ++i)
1591 ((u32 *) board_id)[i] =
1592 swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
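/* QUERY_ADAPTER: read the INTA pin and the VSD block, from which the board
 * id is extracted via get_board_id() above.
 */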
1596 int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
1598 struct mlx4_cmd_mailbox *mailbox;
1602 #define QUERY_ADAPTER_OUT_SIZE 0x100
1603 #define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10
1604 #define QUERY_ADAPTER_VSD_OFFSET 0x20
1606 mailbox = mlx4_alloc_cmd_mailbox(dev);
1607 if (IS_ERR(mailbox))
1608 return PTR_ERR(mailbox);
1609 outbox = mailbox->buf;
1611 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
1612 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1616 MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);
1618 get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
1622 mlx4_free_cmd_mailbox(dev, mailbox);
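/* INIT_HCA: build the 0x200-byte initialization block - ICM base addresses
 * and log sizes for the QP/SRQ/CQ/EQ/RDMARC contexts, multicast or
 * device-managed flow steering parameters, the TPT (MPT/MTT) layout and UAR
 * parameters - along with global flags (endianness, IPoIB checksum offload,
 * QoS, counters, 64-byte EQE/CQE) and hand it to firmware to bring the HCA
 * up.
 */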
1626 int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
1628 struct mlx4_cmd_mailbox *mailbox;
1631 static const u8 a0_dmfs_hw_steering[] = {
1632 [MLX4_STEERING_DMFS_A0_DEFAULT] = 0,
1633 [MLX4_STEERING_DMFS_A0_DYNAMIC] = 1,
1634 [MLX4_STEERING_DMFS_A0_STATIC] = 2,
1635 [MLX4_STEERING_DMFS_A0_DISABLE] = 3
1638 #define INIT_HCA_IN_SIZE 0x200
1639 #define INIT_HCA_VERSION_OFFSET 0x000
1640 #define INIT_HCA_VERSION 2
1641 #define INIT_HCA_VXLAN_OFFSET 0x0c
1642 #define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e
1643 #define INIT_HCA_FLAGS_OFFSET 0x014
1644 #define INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET 0x018
1645 #define INIT_HCA_QPC_OFFSET 0x020
1646 #define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10)
1647 #define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17)
1648 #define INIT_HCA_SRQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x28)
1649 #define INIT_HCA_LOG_SRQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x2f)
1650 #define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30)
1651 #define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37)
1652 #define INIT_HCA_EQE_CQE_OFFSETS (INIT_HCA_QPC_OFFSET + 0x38)
1653 #define INIT_HCA_EQE_CQE_STRIDE_OFFSET (INIT_HCA_QPC_OFFSET + 0x3b)
1654 #define INIT_HCA_ALTC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40)
1655 #define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50)
1656 #define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60)
1657 #define INIT_HCA_LOG_EQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x67)
1658 #define INIT_HCA_NUM_SYS_EQS_OFFSET (INIT_HCA_QPC_OFFSET + 0x6a)
1659 #define INIT_HCA_RDMARC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x70)
1660 #define INIT_HCA_LOG_RD_OFFSET (INIT_HCA_QPC_OFFSET + 0x77)
1661 #define INIT_HCA_MCAST_OFFSET 0x0c0
1662 #define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00)
1663 #define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
1664 #define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16)
1665 #define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18)
1666 #define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
1667 #define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN 0x6
1668 #define INIT_HCA_FS_PARAM_OFFSET 0x1d0
1669 #define INIT_HCA_FS_BASE_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x00)
1670 #define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x12)
1671 #define INIT_HCA_FS_A0_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x18)
1672 #define INIT_HCA_FS_LOG_TABLE_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x1b)
1673 #define INIT_HCA_FS_ETH_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x21)
1674 #define INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x22)
1675 #define INIT_HCA_FS_IB_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x25)
1676 #define INIT_HCA_FS_IB_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x26)
1677 #define INIT_HCA_TPT_OFFSET 0x0f0
1678 #define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00)
1679 #define INIT_HCA_TPT_MW_OFFSET (INIT_HCA_TPT_OFFSET + 0x08)
1680 #define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b)
1681 #define INIT_HCA_MTT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x10)
1682 #define INIT_HCA_CMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x18)
1683 #define INIT_HCA_UAR_OFFSET 0x120
1684 #define INIT_HCA_LOG_UAR_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0a)
1685 #define INIT_HCA_UAR_PAGE_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0b)
1687 mailbox = mlx4_alloc_cmd_mailbox(dev);
1688 if (IS_ERR(mailbox))
1689 return PTR_ERR(mailbox);
1690 inbox = mailbox->buf;
1692 *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;
1694 *((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
1695 (ilog2(cache_line_size()) - 4) << 5;
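	/*
	 * Worked example (added for clarity, not in the original source):
	 * with a 64-byte cache line, ilog2(64) = 6, so the byte written is
	 * (6 - 4) << 5 = 0x40, i.e. log2(line size) - 4 encoded in bits 7:5.
	 */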
1697 #if defined(__LITTLE_ENDIAN)
1698 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
1699 #elif defined(__BIG_ENDIAN)
1700 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1);
1702 #error Host endianness not defined
1704 /* Check port for UD address vector: */
1705 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1);
1707 /* Enable IPoIB checksumming if we can: */
1708 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
1709 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);
1711 /* Enable QoS support if the module parameter is set */
1712 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG && enable_qos)
1713 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);
1715 /* Enable counters */
1716 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)
1717 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4);
1719 /* Enable RSS spread to fragmented IP packets when supported */
1720 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_RSS_IP_FRAG)
1721 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 13);
1723 /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
1724 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) {
1725 *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29);
1726 dev->caps.eqe_size = 64;
1727 dev->caps.eqe_factor = 1;
1729 dev->caps.eqe_size = 32;
1730 dev->caps.eqe_factor = 0;
1733 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) {
1734 *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30);
1735 dev->caps.cqe_size = 64;
1736 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
1738 dev->caps.cqe_size = 32;
1741 /* CX3 is capable of extending CQEs/EQEs to strides larger than 64B */
1742 if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) &&
1743 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE)) {
1744 dev->caps.eqe_size = cache_line_size();
1745 dev->caps.cqe_size = cache_line_size();
1746 dev->caps.eqe_factor = 0;
1747 MLX4_PUT(inbox, (u8)((ilog2(dev->caps.eqe_size) - 5) << 4 |
1748 (ilog2(dev->caps.eqe_size) - 5)),
1749 INIT_HCA_EQE_CQE_STRIDE_OFFSET);
1751 /* Userspace still needs to know that CQEs larger than 32B are supported */
1752 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
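		/*
		 * Worked example (added for clarity, not in the original
		 * source): with 128-byte cache lines, ilog2(128) - 5 = 2, so
		 * the stride byte written above is (2 << 4) | 2 = 0x22 (EQE
		 * code in the high nibble, CQE code in the low nibble, as
		 * read back by mlx4_QUERY_HCA() below).
		 */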
1755 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
1756 *(inbox + INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET / 4) |= cpu_to_be32(1 << 31);
1758 /* QPC/EEC/CQC/EQC/RDMARC attributes */
1760 MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
1761 MLX4_PUT(inbox, param->log_num_qps, INIT_HCA_LOG_QP_OFFSET);
1762 MLX4_PUT(inbox, param->srqc_base, INIT_HCA_SRQC_BASE_OFFSET);
1763 MLX4_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET);
1764 MLX4_PUT(inbox, param->cqc_base, INIT_HCA_CQC_BASE_OFFSET);
1765 MLX4_PUT(inbox, param->log_num_cqs, INIT_HCA_LOG_CQ_OFFSET);
1766 MLX4_PUT(inbox, param->altc_base, INIT_HCA_ALTC_BASE_OFFSET);
1767 MLX4_PUT(inbox, param->auxc_base, INIT_HCA_AUXC_BASE_OFFSET);
1768 MLX4_PUT(inbox, param->eqc_base, INIT_HCA_EQC_BASE_OFFSET);
1769 MLX4_PUT(inbox, param->log_num_eqs, INIT_HCA_LOG_EQ_OFFSET);
1770 MLX4_PUT(inbox, param->num_sys_eqs, INIT_HCA_NUM_SYS_EQS_OFFSET);
1771 MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET);
1772 MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);
1774 /* steering attributes */
1775 if (dev->caps.steering_mode ==
1776 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1777 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |=
1779 INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN);
1781 MLX4_PUT(inbox, param->mc_base, INIT_HCA_FS_BASE_OFFSET);
1782 MLX4_PUT(inbox, param->log_mc_entry_sz,
1783 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
1784 MLX4_PUT(inbox, param->log_mc_table_sz,
1785 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
1786 /* Enable Ethernet flow steering
1787 * with UDP unicast and TCP unicast
1789 if (dev->caps.dmfs_high_steer_mode !=
1790 MLX4_STEERING_DMFS_A0_STATIC)
1792 (u8)(MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
1793 INIT_HCA_FS_ETH_BITS_OFFSET);
1794 MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
1795 INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET);
1796 /* Enable IPoIB flow steering
1797 * with UDP unicast and TCP unicast
1799 MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
1800 INIT_HCA_FS_IB_BITS_OFFSET);
1801 MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
1802 INIT_HCA_FS_IB_NUM_ADDRS_OFFSET);
1804 if (dev->caps.dmfs_high_steer_mode !=
1805 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
1807 ((u8)(a0_dmfs_hw_steering[dev->caps.dmfs_high_steer_mode]
1809 INIT_HCA_FS_A0_OFFSET);
1811 MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
1812 MLX4_PUT(inbox, param->log_mc_entry_sz,
1813 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
1814 MLX4_PUT(inbox, param->log_mc_hash_sz,
1815 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
1816 MLX4_PUT(inbox, param->log_mc_table_sz,
1817 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
1818 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0)
1819 MLX4_PUT(inbox, (u8) (1 << 3),
1820 INIT_HCA_UC_STEERING_OFFSET);
1823 /* TPT attributes */
1825 MLX4_PUT(inbox, param->dmpt_base, INIT_HCA_DMPT_BASE_OFFSET);
1826 MLX4_PUT(inbox, param->mw_enabled, INIT_HCA_TPT_MW_OFFSET);
1827 MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
1828 MLX4_PUT(inbox, param->mtt_base, INIT_HCA_MTT_BASE_OFFSET);
1829 MLX4_PUT(inbox, param->cmpt_base, INIT_HCA_CMPT_BASE_OFFSET);
1831 /* UAR attributes */
1833 MLX4_PUT(inbox, param->uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET);
1834 MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET);
1836 /* set parser VXLAN attributes */
1837 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) {
1838 u8 parser_params = 0;
1839 MLX4_PUT(inbox, parser_params, INIT_HCA_VXLAN_OFFSET);
1842 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA,
1843 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
1846 mlx4_err(dev, "INIT_HCA returns %d\n", err);
1848 mlx4_free_cmd_mailbox(dev, mailbox);
1852 int mlx4_QUERY_HCA(struct mlx4_dev *dev,
1853 struct mlx4_init_hca_param *param)
1855 struct mlx4_cmd_mailbox *mailbox;
1860 static const u8 a0_dmfs_query_hw_steering[] = {
1861 [0] = MLX4_STEERING_DMFS_A0_DEFAULT,
1862 [1] = MLX4_STEERING_DMFS_A0_DYNAMIC,
1863 [2] = MLX4_STEERING_DMFS_A0_STATIC,
1864 [3] = MLX4_STEERING_DMFS_A0_DISABLE
1867 #define QUERY_HCA_GLOBAL_CAPS_OFFSET 0x04
1868 #define QUERY_HCA_CORE_CLOCK_OFFSET 0x0c
1870 mailbox = mlx4_alloc_cmd_mailbox(dev);
1871 if (IS_ERR(mailbox))
1872 return PTR_ERR(mailbox);
1873 outbox = mailbox->buf;
1875 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
1877 MLX4_CMD_TIME_CLASS_B,
1878 !mlx4_is_slave(dev));
1882 MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET);
1883 MLX4_GET(param->hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET);
1885 /* QPC/EEC/CQC/EQC/RDMARC attributes */
1887 MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET);
1888 MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET);
1889 MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET);
1890 MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET);
1891 MLX4_GET(param->cqc_base, outbox, INIT_HCA_CQC_BASE_OFFSET);
1892 MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET);
1893 MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET);
1894 MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET);
1895 MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET);
1896 MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET);
1897 MLX4_GET(param->num_sys_eqs, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
1898 MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
1899 MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
1901 MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
1902 if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
1903 param->steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
1905 MLX4_GET(byte_field, outbox, INIT_HCA_UC_STEERING_OFFSET);
1906 if (byte_field & 0x8)
1907 param->steering_mode = MLX4_STEERING_MODE_B0;
1909 param->steering_mode = MLX4_STEERING_MODE_A0;
1912 if (dword_field & (1 << 13))
1913 param->rss_ip_frags = 1;
1915 /* steering attributes */
1916 if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
1917 MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
1918 MLX4_GET(param->log_mc_entry_sz, outbox,
1919 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
1920 MLX4_GET(param->log_mc_table_sz, outbox,
1921 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
1922 MLX4_GET(byte_field, outbox,
1923 INIT_HCA_FS_A0_OFFSET);
1924 param->dmfs_high_steer_mode =
1925 a0_dmfs_query_hw_steering[(byte_field >> 6) & 3];
1927 MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
1928 MLX4_GET(param->log_mc_entry_sz, outbox,
1929 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
1930 MLX4_GET(param->log_mc_hash_sz, outbox,
1931 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
1932 MLX4_GET(param->log_mc_table_sz, outbox,
1933 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
1936 /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
1937 MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_OFFSETS);
1938 if (byte_field & 0x20) /* 64-byte EQEs enabled */
1939 param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED;
1940 if (byte_field & 0x40) /* 64-byte CQEs enabled */
1941 param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED;
1943 /* CX3 is capable of extending CQEs/EQEs to strides larger than 64B */
1944 MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_STRIDE_OFFSET);
1946 param->dev_cap_enabled |= MLX4_DEV_CAP_EQE_STRIDE_ENABLED;
1947 param->dev_cap_enabled |= MLX4_DEV_CAP_CQE_STRIDE_ENABLED;
1948 param->cqe_size = 1 << ((byte_field &
1949 MLX4_CQE_SIZE_MASK_STRIDE) + 5);
1950 param->eqe_size = 1 << (((byte_field &
1951 MLX4_EQE_SIZE_MASK_STRIDE) >> 4) + 5);
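		/*
		 * Example (added for clarity, not in the original source): a
		 * stride byte of 0x22, as encoded by mlx4_INIT_HCA() above,
		 * decodes to cqe_size = 1 << (2 + 5) = 128 and
		 * eqe_size = 1 << (2 + 5) = 128.
		 */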
1954 /* TPT attributes */
1956 MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET);
1957 MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET);
1958 MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
1959 MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET);
1960 MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET);
1962 /* UAR attributes */
1964 MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
1965 MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
1968 mlx4_free_cmd_mailbox(dev, mailbox);
1973 static int mlx4_hca_core_clock_update(struct mlx4_dev *dev)
1975 struct mlx4_cmd_mailbox *mailbox;
1979 mailbox = mlx4_alloc_cmd_mailbox(dev);
1980 if (IS_ERR(mailbox)) {
1981 mlx4_warn(dev, "hca_core_clock mailbox allocation failed\n");
1982 return PTR_ERR(mailbox);
1984 outbox = mailbox->buf;
1986 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
1988 MLX4_CMD_TIME_CLASS_B,
1989 !mlx4_is_slave(dev));
1991 mlx4_warn(dev, "hca_core_clock update failed\n");
1995 MLX4_GET(dev->caps.hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET);
1998 mlx4_free_cmd_mailbox(dev, mailbox);
2003 /* For IB-type ports only, in SR-IOV mode. Checks that both the proxy QP0
2004 * and the real QP0 are active, so that the paravirtualized QP0 is ready
2006 static int check_qp0_state(struct mlx4_dev *dev, int function, int port)
2008 struct mlx4_priv *priv = mlx4_priv(dev);
2009 /* Irrelevant if not InfiniBand */
2010 if (priv->mfunc.master.qp0_state[port].proxy_qp0_active &&
2011 priv->mfunc.master.qp0_state[port].qp0_active)
2016 int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
2017 struct mlx4_vhcr *vhcr,
2018 struct mlx4_cmd_mailbox *inbox,
2019 struct mlx4_cmd_mailbox *outbox,
2020 struct mlx4_cmd_info *cmd)
2022 struct mlx4_priv *priv = mlx4_priv(dev);
2023 int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
2029 if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
2032 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
2033 /* Enable port only if it was previously disabled */
2034 if (!priv->mfunc.master.init_port_ref[port]) {
2035 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
2036 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
2040 priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
2042 if (slave == mlx4_master_func_num(dev)) {
2043 if (check_qp0_state(dev, slave, port) &&
2044 !priv->mfunc.master.qp0_state[port].port_active) {
2045 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
2046 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
2049 priv->mfunc.master.qp0_state[port].port_active = 1;
2050 priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
2053 priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
2055 ++priv->mfunc.master.init_port_ref[port];
2059 int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
2061 struct mlx4_cmd_mailbox *mailbox;
2067 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
2068 #define INIT_PORT_IN_SIZE 256
2069 #define INIT_PORT_FLAGS_OFFSET 0x00
2070 #define INIT_PORT_FLAG_SIG (1 << 18)
2071 #define INIT_PORT_FLAG_NG (1 << 17)
2072 #define INIT_PORT_FLAG_G0 (1 << 16)
2073 #define INIT_PORT_VL_SHIFT 4
2074 #define INIT_PORT_PORT_WIDTH_SHIFT 8
2075 #define INIT_PORT_MTU_OFFSET 0x04
2076 #define INIT_PORT_MAX_GID_OFFSET 0x06
2077 #define INIT_PORT_MAX_PKEY_OFFSET 0x0a
2078 #define INIT_PORT_GUID0_OFFSET 0x10
2079 #define INIT_PORT_NODE_GUID_OFFSET 0x18
2080 #define INIT_PORT_SI_GUID_OFFSET 0x20
2082 mailbox = mlx4_alloc_cmd_mailbox(dev);
2083 if (IS_ERR(mailbox))
2084 return PTR_ERR(mailbox);
2085 inbox = mailbox->buf;
2088 flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
2089 flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
2090 MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET);
2092 field = 128 << dev->caps.ib_mtu_cap[port];
2093 MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
2094 field = dev->caps.gid_table_len[port];
2095 MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
2096 field = dev->caps.pkey_table_len[port];
2097 MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);
2099 err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
2100 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
2102 mlx4_free_cmd_mailbox(dev, mailbox);
2104 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
2105 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
2108 mlx4_hca_core_clock_update(dev);
2112 EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
2114 int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
2115 struct mlx4_vhcr *vhcr,
2116 struct mlx4_cmd_mailbox *inbox,
2117 struct mlx4_cmd_mailbox *outbox,
2118 struct mlx4_cmd_info *cmd)
2120 struct mlx4_priv *priv = mlx4_priv(dev);
2121 int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
2127 if (!(priv->mfunc.master.slave_state[slave].init_port_mask &
2131 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
2132 if (priv->mfunc.master.init_port_ref[port] == 1) {
2133 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
2134 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
2138 priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
2140 /* InfiniBand port */
2141 if (slave == mlx4_master_func_num(dev)) {
2142 if (!priv->mfunc.master.qp0_state[port].qp0_active &&
2143 priv->mfunc.master.qp0_state[port].port_active) {
2144 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
2145 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
2148 priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
2149 priv->mfunc.master.qp0_state[port].port_active = 0;
2152 priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
2154 --priv->mfunc.master.init_port_ref[port];
2158 int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
2160 return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
2161 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
2163 EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
2165 int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
2167 return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA,
2168 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
2171 struct mlx4_config_dev {
2172 __be32 update_flags;
2174 __be16 vxlan_udp_dport;
2184 #define MLX4_VXLAN_UDP_DPORT (1 << 0)
2185 #define MLX4_DISABLE_RX_PORT BIT(18)
2187 static int mlx4_CONFIG_DEV_set(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
2190 struct mlx4_cmd_mailbox *mailbox;
2192 mailbox = mlx4_alloc_cmd_mailbox(dev);
2193 if (IS_ERR(mailbox))
2194 return PTR_ERR(mailbox);
2196 memcpy(mailbox->buf, config_dev, sizeof(*config_dev));
2198 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_CONFIG_DEV,
2199 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
2201 mlx4_free_cmd_mailbox(dev, mailbox);
2205 static int mlx4_CONFIG_DEV_get(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
2208 struct mlx4_cmd_mailbox *mailbox;
2210 mailbox = mlx4_alloc_cmd_mailbox(dev);
2211 if (IS_ERR(mailbox))
2212 return PTR_ERR(mailbox);
2214 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 1, MLX4_CMD_CONFIG_DEV,
2215 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
2218 memcpy(config_dev, mailbox->buf, sizeof(*config_dev));
2220 mlx4_free_cmd_mailbox(dev, mailbox);
2224 /* Conversion between the HW values and the actual functionality.
2225 * The HW value is represented by the array index,
2226 * and the functionality is determined by the flags.
2228 static const u8 config_dev_csum_flags[] = {
2230 [1] = MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP,
2231 [2] = MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP |
2232 MLX4_RX_CSUM_MODE_L4,
2233 [3] = MLX4_RX_CSUM_MODE_L4 |
2234 MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP |
2235 MLX4_RX_CSUM_MODE_MULTI_VLAN
2238 int mlx4_config_dev_retrieval(struct mlx4_dev *dev,
2239 struct mlx4_config_dev_params *params)
2241 struct mlx4_config_dev config_dev = {0};
2245 #define CONFIG_DEV_RX_CSUM_MODE_MASK 0x7
2246 #define CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET 0
2247 #define CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET 4
2249 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CONFIG_DEV))
2252 err = mlx4_CONFIG_DEV_get(dev, &config_dev);
2256 csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET) &
2257 CONFIG_DEV_RX_CSUM_MODE_MASK;
2259 if (csum_mask >= ARRAY_SIZE(config_dev_csum_flags))
2261 params->rx_csum_flags_port_1 = config_dev_csum_flags[csum_mask];
2263 csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET) &
2264 CONFIG_DEV_RX_CSUM_MODE_MASK;
2266 if (csum_mask >= ARRAY_SIZE(config_dev_csum_flags))
2268 params->rx_csum_flags_port_2 = config_dev_csum_flags[csum_mask];
2270 params->vxlan_udp_dport = be16_to_cpu(config_dev.vxlan_udp_dport);
2274 EXPORT_SYMBOL_GPL(mlx4_config_dev_retrieval);
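/*
 * Illustrative caller sketch (added for clarity, not part of the original
 * driver): querying the currently configured VXLAN UDP port and the RX
 * checksum mode flags of port 1. The helper name is hypothetical; only
 * mlx4_config_dev_retrieval() above is real.
 */
static inline void mlx4_example_dump_config_dev(struct mlx4_dev *dev)
{
	struct mlx4_config_dev_params params;

	if (mlx4_config_dev_retrieval(dev, &params))
		return;

	mlx4_dbg(dev, "VXLAN dport %d, port1 RX csum flags 0x%02x\n",
		 params.vxlan_udp_dport, params.rx_csum_flags_port_1);
}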
2276 int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port)
2278 struct mlx4_config_dev config_dev;
2280 memset(&config_dev, 0, sizeof(config_dev));
2281 config_dev.update_flags = cpu_to_be32(MLX4_VXLAN_UDP_DPORT);
2282 config_dev.vxlan_udp_dport = udp_port;
2284 return mlx4_CONFIG_DEV_set(dev, &config_dev);
2286 EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port);
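/*
 * Illustrative caller sketch (added for clarity, not part of the original
 * driver): programming the IANA-assigned VXLAN UDP port (4789). The helper
 * name is hypothetical; the FW expects the port in big-endian byte order.
 */
static inline int mlx4_example_set_default_vxlan_port(struct mlx4_dev *dev)
{
	return mlx4_config_vxlan_port(dev, cpu_to_be16(4789));
}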
2288 #define CONFIG_DISABLE_RX_PORT BIT(15)
2289 int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis)
2291 struct mlx4_config_dev config_dev;
2293 memset(&config_dev, 0, sizeof(config_dev));
2294 config_dev.update_flags = cpu_to_be32(MLX4_DISABLE_RX_PORT);
2296 config_dev.roce_flags =
2297 cpu_to_be32(CONFIG_DISABLE_RX_PORT);
2299 return mlx4_CONFIG_DEV_set(dev, &config_dev);
2302 int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2)
2304 struct mlx4_cmd_mailbox *mailbox;
2311 mailbox = mlx4_alloc_cmd_mailbox(dev);
2312 if (IS_ERR(mailbox))
2316 v2p->v_port1 = cpu_to_be32(port1);
2317 v2p->v_port2 = cpu_to_be32(port2);
2319 err = mlx4_cmd(dev, mailbox->dma, 0,
2320 MLX4_SET_PORT_VIRT2PHY, MLX4_CMD_VIRT_PORT_MAP,
2321 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
2323 mlx4_free_cmd_mailbox(dev, mailbox);
2328 int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
2330 int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
2331 MLX4_CMD_SET_ICM_SIZE,
2332 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
2337 * Round up number of system pages needed in case
2338 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
2340 *aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
2341 (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);
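	/*
	 * Worked example (added for clarity, not in the original source):
	 * assuming 4 KiB ICM pages on a system with 64 KiB pages
	 * (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT = 4), 33 ICM pages round up to
	 * ALIGN(33, 16) = 48, i.e. 48 >> 4 = 3 system pages.
	 */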
2346 int mlx4_NOP(struct mlx4_dev *dev)
2348 /* Input modifier of 0x1f means "finish as soon as possible." */
2349 return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, MLX4_CMD_TIME_CLASS_A,
2353 int mlx4_get_phys_port_id(struct mlx4_dev *dev)
2357 struct mlx4_cmd_mailbox *mailbox;
2359 u32 guid_hi, guid_lo;
2361 #define MOD_STAT_CFG_PORT_OFFSET 8
2362 #define MOD_STAT_CFG_GUID_H 0x14
2363 #define MOD_STAT_CFG_GUID_L 0x1c
2365 mailbox = mlx4_alloc_cmd_mailbox(dev);
2366 if (IS_ERR(mailbox))
2367 return PTR_ERR(mailbox);
2368 outbox = mailbox->buf;
2370 for (port = 1; port <= dev->caps.num_ports; port++) {
2371 in_mod = port << MOD_STAT_CFG_PORT_OFFSET;
2372 err = mlx4_cmd_box(dev, 0, mailbox->dma, in_mod, 0x2,
2373 MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
2376 mlx4_err(dev, "Failed to get port %d uplink GUID\n",
2380 MLX4_GET(guid_hi, outbox, MOD_STAT_CFG_GUID_H);
2381 MLX4_GET(guid_lo, outbox, MOD_STAT_CFG_GUID_L);
2382 dev->caps.phys_port_id[port] = (u64)guid_lo |
2386 mlx4_free_cmd_mailbox(dev, mailbox);
2390 #define MLX4_WOL_SETUP_MODE (5 << 28)
2391 int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
2393 u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
2395 return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
2396 MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
2399 EXPORT_SYMBOL_GPL(mlx4_wol_read);
2401 int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
2403 u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
2405 return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
2406 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
2408 EXPORT_SYMBOL_GPL(mlx4_wol_write);
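/*
 * Illustrative sketch (added for clarity, not part of the original driver):
 * Wake-on-LAN state is managed as a read-modify-write of an opaque 64-bit
 * config word. The helper name and the flag parameter are hypothetical;
 * callers define their own bit meanings for the config word.
 */
static inline int mlx4_example_enable_wol(struct mlx4_dev *dev, int port,
					  u64 wol_flag)
{
	u64 config;
	int err;

	err = mlx4_wol_read(dev, &config, port);
	if (err)
		return err;

	config |= wol_flag;
	return mlx4_wol_write(dev, config, port);
}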
2415 void mlx4_opreq_action(struct work_struct *work)
2417 struct mlx4_priv *priv = container_of(work, struct mlx4_priv,
2419 struct mlx4_dev *dev = &priv->dev;
2420 int num_tasks = atomic_read(&priv->opreq_count);
2421 struct mlx4_cmd_mailbox *mailbox;
2422 struct mlx4_mgm *mgm;
2434 #define GET_OP_REQ_MODIFIER_OFFSET 0x08
2435 #define GET_OP_REQ_TOKEN_OFFSET 0x14
2436 #define GET_OP_REQ_TYPE_OFFSET 0x1a
2437 #define GET_OP_REQ_DATA_OFFSET 0x20
2439 mailbox = mlx4_alloc_cmd_mailbox(dev);
2440 if (IS_ERR(mailbox)) {
2441 mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n");
2444 outbox = mailbox->buf;
2447 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
2448 MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
2451 mlx4_err(dev, "Failed to retrieve required operation: %d\n",
2455 MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
2456 MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
2457 MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET);
2462 if (dev->caps.steering_mode ==
2463 MLX4_STEERING_MODE_DEVICE_MANAGED) {
2464 mlx4_warn(dev, "ADD MCG operation is not supported in DEVICE_MANAGED steering mode\n");
2468 mgm = (struct mlx4_mgm *)((u8 *)(outbox) +
2469 GET_OP_REQ_DATA_OFFSET);
2470 num_qps = be32_to_cpu(mgm->members_count) &
2472 rem_mcg = ((u8 *)(&mgm->members_count))[0] & 1;
2473 prot = ((u8 *)(&mgm->members_count))[0] >> 6;
2475 for (i = 0; i < num_qps; i++) {
2476 qp.qpn = be32_to_cpu(mgm->qp[i]);
2478 err = mlx4_multicast_detach(dev, &qp,
2482 err = mlx4_multicast_attach(dev, &qp,
2492 mlx4_warn(dev, "Bad type for required operation\n");
2496 err = mlx4_cmd(dev, 0, ((u32) err |
2497 (__force u32)cpu_to_be32(token) << 16),
2498 1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
2501 mlx4_err(dev, "Failed to acknowledge required request: %d\n",
2505 memset(outbox, 0, 0xffc);
2506 num_tasks = atomic_dec_return(&priv->opreq_count);
2510 mlx4_free_cmd_mailbox(dev, mailbox);
2513 static int mlx4_check_smp_firewall_active(struct mlx4_dev *dev,
2514 struct mlx4_cmd_mailbox *mailbox)
2516 #define MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET 0x10
2517 #define MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET 0x20
2518 #define MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET 0x40
2519 #define MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET 0x70
2521 u32 set_attr_mask, getresp_attr_mask;
2522 u32 trap_attr_mask, traprepress_attr_mask;
2524 MLX4_GET(set_attr_mask, mailbox->buf,
2525 MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET);
2526 mlx4_dbg(dev, "SMP firewall set_attribute_mask = 0x%x\n",
2529 MLX4_GET(getresp_attr_mask, mailbox->buf,
2530 MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET);
2531 mlx4_dbg(dev, "SMP firewall getresp_attribute_mask = 0x%x\n",
2534 MLX4_GET(trap_attr_mask, mailbox->buf,
2535 MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET);
2536 mlx4_dbg(dev, "SMP firewall trap_attribute_mask = 0x%x\n",
2539 MLX4_GET(traprepress_attr_mask, mailbox->buf,
2540 MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET);
2541 mlx4_dbg(dev, "SMP firewall traprepress_attribute_mask = 0x%x\n",
2542 traprepress_attr_mask);
2544 if (set_attr_mask && getresp_attr_mask && trap_attr_mask &&
2545 traprepress_attr_mask)
2551 int mlx4_config_mad_demux(struct mlx4_dev *dev)
2553 struct mlx4_cmd_mailbox *mailbox;
2554 int secure_host_active;
2557 /* Check if mad_demux is supported */
2558 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_MAD_DEMUX))
2561 mailbox = mlx4_alloc_cmd_mailbox(dev);
2562 if (IS_ERR(mailbox)) {
2563 mlx4_warn(dev, "Failed to allocate mailbox for cmd MAD_DEMUX\n");
2567 /* Query mad_demux to find out which MADs are handled by the internal SMA */
2568 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0x01 /* subn mgmt class */,
2569 MLX4_CMD_MAD_DEMUX_QUERY_RESTR, MLX4_CMD_MAD_DEMUX,
2570 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
2572 mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: query restrictions failed (%d)\n",
2577 secure_host_active = mlx4_check_smp_firewall_active(dev, mailbox);
2579 /* Configure mad_demux to handle all MADs returned by the query above */
2580 err = mlx4_cmd(dev, mailbox->dma, 0x01 /* subn mgmt class */,
2581 MLX4_CMD_MAD_DEMUX_CONFIG, MLX4_CMD_MAD_DEMUX,
2582 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
2584 mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: configure failed (%d)\n", err);
2588 if (secure_host_active)
2589 mlx4_warn(dev, "HCA operating in secure-host mode. SMP firewall activated.\n");
2591 mlx4_free_cmd_mailbox(dev, mailbox);
2595 /* Access Reg commands */
2596 enum mlx4_access_reg_masks {
2597 MLX4_ACCESS_REG_STATUS_MASK = 0x7f,
2598 MLX4_ACCESS_REG_METHOD_MASK = 0x7f,
2599 MLX4_ACCESS_REG_LEN_MASK = 0x7ff
2602 struct mlx4_access_reg {
2612 #define MLX4_ACCESS_REG_HEADER_SIZE (20)
2613 u8 reg_data[MLX4_MAILBOX_SIZE-MLX4_ACCESS_REG_HEADER_SIZE];
2614 } __packed;
2617 * mlx4_ACCESS_REG - Generic access reg command.
2619 * @reg_id: register ID to access.
2620 * @method: access method, Read or Write.
2621 * @reg_len: register length to read/write, in bytes.
2622 * @reg_data: buffer to read from or write to.
2624 * Generic FW command for accessing ConnectX registers.
2625 * Returns 0 on success, after copying the outbox mlx4_access_reg data
2626 * field into reg_data, or a negative error code.
2628 static int mlx4_ACCESS_REG(struct mlx4_dev *dev, u16 reg_id,
2629 enum mlx4_access_reg_method method,
2630 u16 reg_len, void *reg_data)
2632 struct mlx4_cmd_mailbox *inbox, *outbox;
2633 struct mlx4_access_reg *inbuf, *outbuf;
2636 inbox = mlx4_alloc_cmd_mailbox(dev);
2638 return PTR_ERR(inbox);
2640 outbox = mlx4_alloc_cmd_mailbox(dev);
2641 if (IS_ERR(outbox)) {
2642 mlx4_free_cmd_mailbox(dev, inbox);
2643 return PTR_ERR(outbox);
2647 outbuf = outbox->buf;
2649 inbuf->constant1 = cpu_to_be16(0x1<<11 | 0x4);
2650 inbuf->constant2 = 0x1;
2651 inbuf->reg_id = cpu_to_be16(reg_id);
2652 inbuf->method = method & MLX4_ACCESS_REG_METHOD_MASK;
2654 reg_len = min(reg_len, (u16)(sizeof(inbuf->reg_data)));
2656 cpu_to_be16(((reg_len/4 + 1) & MLX4_ACCESS_REG_LEN_MASK) |
2659 memcpy(inbuf->reg_data, reg_data, reg_len);
2660 err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, 0, 0,
2661 MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
2666 if (outbuf->status & MLX4_ACCESS_REG_STATUS_MASK) {
2667 err = outbuf->status & MLX4_ACCESS_REG_STATUS_MASK;
2669 "MLX4_CMD_ACCESS_REG(%x) returned REG status (%x)\n",
2674 memcpy(reg_data, outbuf->reg_data, reg_len);
2676 mlx4_free_cmd_mailbox(dev, inbox);
2677 mlx4_free_cmd_mailbox(dev, outbox);
2681 /* ConnectX registers IDs */
2683 MLX4_REG_ID_PTYS = 0x5004,
2687 * mlx4_ACCESS_PTYS_REG - Access PTYS (Port Type and Speed)
2690 * @method: access method, Read or Write.
2691 * @ptys_reg: PTYS register data pointer.
2693 * Access the ConnectX PTYS register, to read or write the port type/speed.
2695 * Returns 0 on success or a negative error code.
2697 int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev,
2698 enum mlx4_access_reg_method method,
2699 struct mlx4_ptys_reg *ptys_reg)
2701 return mlx4_ACCESS_REG(dev, MLX4_REG_ID_PTYS,
2702 method, sizeof(*ptys_reg), ptys_reg);
2704 EXPORT_SYMBOL_GPL(mlx4_ACCESS_PTYS_REG);
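/*
 * Illustrative caller sketch (added for clarity, not part of the original
 * driver): querying the PTYS register of a given port. The helper name is
 * hypothetical; only the local_port field (also adjusted by the wrapper
 * below) is filled in before the query.
 */
static inline int mlx4_example_query_ptys(struct mlx4_dev *dev, u8 port,
					  struct mlx4_ptys_reg *ptys_reg)
{
	memset(ptys_reg, 0, sizeof(*ptys_reg));
	ptys_reg->local_port = port;

	return mlx4_ACCESS_PTYS_REG(dev, MLX4_ACCESS_REG_QUERY, ptys_reg);
}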
2706 int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave,
2707 struct mlx4_vhcr *vhcr,
2708 struct mlx4_cmd_mailbox *inbox,
2709 struct mlx4_cmd_mailbox *outbox,
2710 struct mlx4_cmd_info *cmd)
2712 struct mlx4_access_reg *inbuf = inbox->buf;
2713 u8 method = inbuf->method & MLX4_ACCESS_REG_METHOD_MASK;
2714 u16 reg_id = be16_to_cpu(inbuf->reg_id);
2716 if (slave != mlx4_master_func_num(dev) &&
2717 method == MLX4_ACCESS_REG_WRITE)
2720 if (reg_id == MLX4_REG_ID_PTYS) {
2721 struct mlx4_ptys_reg *ptys_reg =
2722 (struct mlx4_ptys_reg *)inbuf->reg_data;
2724 ptys_reg->local_port =
2725 mlx4_slave_convert_port(dev, slave,
2726 ptys_reg->local_port);
2729 return mlx4_cmd_box(dev, inbox->dma, outbox->dma, vhcr->in_modifier,
2730 0, MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,