2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/export.h>
38 #include <linux/pci.h>
39 #include <linux/errno.h>
41 #include <linux/mlx4/cmd.h>
42 #include <linux/semaphore.h>
/* Token written to the HCR for polled (non-event) commands. */
49 #define CMD_POLL_TOKEN 0xffff
/* Mailbox (inbox) addresses are 256-byte aligned; low byte is masked off. */
50 #define INBOX_MASK 0xffffffffffffff00ULL
/* Master/slave communication-channel version numbers, combined by
 * mlx4_comm_get_version() as (IF_REV << 8) | VER. */
52 #define CMD_CHAN_VER 1
53 #define CMD_CHAN_IF_REV 1
/* Firmware command status codes returned in the HCR/VHCR status byte.
 * NOTE(review): this extract is missing the enum opener and the
 * CMD_STAT_OK = 0x00 entry that the first comment line refers to —
 * confirm against the complete file. */
56 /* command completed successfully: */
58 /* Internal error (such as a bus error) occurred while processing command: */
59 CMD_STAT_INTERNAL_ERR = 0x01,
60 /* Operation/command not supported or opcode modifier not supported: */
61 CMD_STAT_BAD_OP = 0x02,
62 /* Parameter not supported or parameter out of range: */
63 CMD_STAT_BAD_PARAM = 0x03,
64 /* System not enabled or bad system state: */
65 CMD_STAT_BAD_SYS_STATE = 0x04,
66 /* Attempt to access reserved or unallocated resource: */
67 CMD_STAT_BAD_RESOURCE = 0x05,
68 /* Requested resource is currently executing a command, or is otherwise busy: */
69 CMD_STAT_RESOURCE_BUSY = 0x06,
70 /* Required capability exceeds device limits: */
71 CMD_STAT_EXCEED_LIM = 0x08,
72 /* Resource is not in the appropriate state or ownership: */
73 CMD_STAT_BAD_RES_STATE = 0x09,
74 /* Index out of range: */
75 CMD_STAT_BAD_INDEX = 0x0a,
76 /* FW image corrupted: */
77 CMD_STAT_BAD_NVMEM = 0x0b,
78 /* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
79 CMD_STAT_ICM_ERROR = 0x0c,
80 /* Attempt to modify a QP/EE which is not in the presumed state: */
81 CMD_STAT_BAD_QP_STATE = 0x10,
82 /* Bad segment parameters (Address/Size): */
83 CMD_STAT_BAD_SEG_PARAM = 0x20,
84 /* Memory Region has Memory Windows bound to: */
85 CMD_STAT_REG_BOUND = 0x21,
86 /* HCA local attached memory not present: */
87 CMD_STAT_LAM_NOT_PRE = 0x22,
88 /* Bad management packet (silently discarded): */
89 CMD_STAT_BAD_PKT = 0x30,
90 /* More outstanding CQEs in CQ than new CQ size: */
91 CMD_STAT_BAD_SIZE = 0x40,
92 /* Multi Function device support required: */
93 CMD_STAT_MULTI_FUNC_REQ = 0x50,
/* Byte offsets of the fields within the memory-mapped HCR (Host Command
 * Register), plus related bit positions and the go-bit poll timeout.
 * NOTE(review): entries between the visible lines (e.g. the HCR_GO_BIT,
 * HCR_T_BIT and HCR_E_BIT definitions used by cmd_pending()/mlx4_cmd_post())
 * are missing from this extract — confirm against the complete file. */
97 HCR_IN_PARAM_OFFSET = 0x00,
98 HCR_IN_MODIFIER_OFFSET = 0x08,
99 HCR_OUT_PARAM_OFFSET = 0x0c,
100 HCR_TOKEN_OFFSET = 0x14,
101 HCR_STATUS_OFFSET = 0x18,
103 HCR_OPMOD_SHIFT = 12,
/* How long to poll for the HCR go bit to clear before giving up. */
110 GO_BIT_TIMEOUT_MSECS = 10000
/* Per-command tracking state for event-driven command completion.
 * NOTE(review): only the 'done' completion is visible here; the remaining
 * fields (result, next, fw_status, token, out_param — all referenced by
 * mlx4_cmd_wait()/mlx4_cmd_event() below) are missing from this extract. */
113 struct mlx4_cmd_context {
114 struct completion done;
/* Forward declaration: VHCR processing is needed by mlx4_slave_cmd() before
 * its definition appears later in this file. */
122 static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
123 struct mlx4_vhcr_cmd *in_vhcr);
/* Translate a firmware CMD_STAT_* status byte into a negative errno.
 * Unknown statuses (out of table range, or a non-OK status mapping to an
 * unpopulated 0 slot) are rejected — the fallthrough return of -EIO for
 * that case is in a line missing from this extract. */
125 static int mlx4_status_to_errno(u8 status)
127 static const int trans_table[] = {
128 [CMD_STAT_INTERNAL_ERR] = -EIO,
129 [CMD_STAT_BAD_OP] = -EPERM,
130 [CMD_STAT_BAD_PARAM] = -EINVAL,
131 [CMD_STAT_BAD_SYS_STATE] = -ENXIO,
132 [CMD_STAT_BAD_RESOURCE] = -EBADF,
133 [CMD_STAT_RESOURCE_BUSY] = -EBUSY,
134 [CMD_STAT_EXCEED_LIM] = -ENOMEM,
135 [CMD_STAT_BAD_RES_STATE] = -EBADF,
136 [CMD_STAT_BAD_INDEX] = -EBADF,
137 [CMD_STAT_BAD_NVMEM] = -EFAULT,
138 [CMD_STAT_ICM_ERROR] = -ENFILE,
139 [CMD_STAT_BAD_QP_STATE] = -EINVAL,
140 [CMD_STAT_BAD_SEG_PARAM] = -EFAULT,
141 [CMD_STAT_REG_BOUND] = -EBUSY,
142 [CMD_STAT_LAM_NOT_PRE] = -EAGAIN,
143 [CMD_STAT_BAD_PKT] = -EINVAL,
144 [CMD_STAT_BAD_SIZE] = -ENOMEM,
145 [CMD_STAT_MULTI_FUNC_REQ] = -EACCES,
/* A zero table entry for a non-OK status means "no mapping defined". */
148 if (status >= ARRAY_SIZE(trans_table) ||
149 (status != CMD_STAT_OK && trans_table[status] == 0))
152 return trans_table[status];
/* Return nonzero while a comm-channel command is still outstanding:
 * the toggle bit (bit 31, big-endian on the wire, hence swab32) in
 * slave_read has not yet caught up with our cached comm_toggle. */
155 static int comm_pending(struct mlx4_dev *dev)
157 struct mlx4_priv *priv = mlx4_priv(dev);
158 u32 status = readl(&priv->mfunc.comm->slave_read);
160 return (swab32(status) >> 31) != priv->cmd.comm_toggle;
/* Post a command on the slave->master communication channel.
 * The word layout is: param in bits 0-15, cmd in bits 16-23(+), and the
 * flipped toggle in bit 31; the word is written big-endian to slave_write. */
163 static void mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
165 struct mlx4_priv *priv = mlx4_priv(dev);
/* Flipping the toggle is what marks this as a new request (see comm_pending). */
168 priv->cmd.comm_toggle ^= 1;
169 val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
170 __raw_writel((__force u32) cpu_to_be32(val),
171 &priv->mfunc.comm->slave_write);
/* Issue a comm-channel command and busy-wait (under poll_sem) until the
 * master acknowledges by matching the toggle, or @timeout ms elapse.
 * Returns 0 on success; on timeout of a RESET command it returns
 * MLX4_DELAY_RESET_SLAVE (slave caught mid-FLR), otherwise a timeout error
 * (the error-assignment line is missing from this extract). */
175 static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
176 unsigned long timeout)
178 struct mlx4_priv *priv = mlx4_priv(dev);
181 int ret_from_pending = 0;
183 /* First, verify that the master reports correct status */
184 if (comm_pending(dev)) {
185 mlx4_warn(dev, "Communication channel is not idle."
186 "my toggle is %d (cmd:0x%x)\n",
187 priv->cmd.comm_toggle, cmd);
/* poll_sem serializes all polled comm-channel commands. */
192 down(&priv->cmd.poll_sem);
193 mlx4_comm_cmd_post(dev, cmd, param);
195 end = msecs_to_jiffies(timeout) + jiffies;
196 while (comm_pending(dev) && time_before(jiffies, end))
198 ret_from_pending = comm_pending(dev);
199 if (ret_from_pending) {
200 /* check if the slave is trying to boot in the middle of
201 * FLR process. The only non-zero result in the RESET command
202 * is MLX4_DELAY_RESET_SLAVE*/
203 if ((MLX4_COMM_CMD_RESET == cmd)) {
204 mlx4_warn(dev, "Got slave FLRed from Communication"
205 " channel (ret:0x%x)\n", ret_from_pending);
206 err = MLX4_DELAY_RESET_SLAVE;
208 mlx4_warn(dev, "Communication channel timed out\n");
213 up(&priv->cmd.poll_sem);
/* Event-driven variant of a comm-channel command: grab a command context
 * from the free list, post the command, and sleep on the context's
 * completion until the master's completion event arrives or @timeout ms
 * pass.  Returns the translated firmware status from the context. */
217 static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op,
218 u16 param, unsigned long timeout)
220 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
221 struct mlx4_cmd_context *context;
/* event_sem bounds the number of in-flight event-mode commands. */
224 down(&cmd->event_sem);
226 spin_lock(&cmd->context_lock);
227 BUG_ON(cmd->free_head < 0);
228 context = &cmd->context[cmd->free_head];
/* Bump the token past token_mask so stale completions are detectable. */
229 context->token += cmd->token_mask + 1;
230 cmd->free_head = context->next;
231 spin_unlock(&cmd->context_lock);
233 init_completion(&context->done);
235 mlx4_comm_cmd_post(dev, op, param);
237 if (!wait_for_completion_timeout(&context->done,
238 msecs_to_jiffies(timeout))) {
243 err = context->result;
/* MULTI_FUNC_REQ is expected during probing, so don't log it as an error. */
244 if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
245 mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
246 op, context->fw_status);
/* Return the context to the free list. */
251 spin_lock(&cmd->context_lock);
252 context->next = cmd->free_head;
253 cmd->free_head = context - cmd->context;
254 spin_unlock(&cmd->context_lock);
/* Dispatch a comm-channel command via events if the EQ-backed event mode
 * is enabled, otherwise fall back to polling. */
260 int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
261 unsigned long timeout)
263 if (mlx4_priv(dev)->cmd.use_events)
264 return mlx4_comm_cmd_wait(dev, cmd, param, timeout);
265 return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
/* Return nonzero while the HCR is busy: either the go bit is still set,
 * or the hardware toggle bit has not yet flipped to match ours. */
268 static int cmd_pending(struct mlx4_dev *dev)
270 u32 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
272 return (status & swab32(1 << HCR_GO_BIT)) ||
273 (mlx4_priv(dev)->cmd.toggle ==
274 !!(status & swab32(1 << HCR_T_BIT)));
/* Write a firmware command into the HCR under hcr_mutex: wait for the go
 * bit to clear (up to GO_BIT_TIMEOUT_MSECS), fill the six parameter words
 * big-endian, then write the final doorbell word (go bit, toggle, event
 * flag, opcode modifier and opcode) and flip the cached toggle. */
277 static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
278 u32 in_modifier, u8 op_modifier, u16 op, u16 token,
281 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
282 u32 __iomem *hcr = cmd->hcr;
286 mutex_lock(&cmd->hcr_mutex);
290 end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);
292 while (cmd_pending(dev)) {
293 if (time_after_eq(jiffies, end)) {
294 mlx4_err(dev, "%s:cmd_pending failed\n", __func__);
301 * We use writel (instead of something like memcpy_toio)
302 * because writes of less than 32 bits to the HCR don't work
303 * (and some architectures such as ia64 implement memcpy_toio
304 * in terms of writeb).
306 __raw_writel((__force u32) cpu_to_be32(in_param >> 32), hcr + 0);
307 __raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful), hcr + 1);
308 __raw_writel((__force u32) cpu_to_be32(in_modifier), hcr + 2);
309 __raw_writel((__force u32) cpu_to_be32(out_param >> 32), hcr + 3);
310 __raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
311 __raw_writel((__force u32) cpu_to_be32(token << 16), hcr + 5);
313 /* __raw_writel may not order writes. */
/* The go-bit write must come last; a barrier belongs between the parameter
 * writes and this doorbell (the wmb() line is missing from this extract). */
316 __raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT) |
317 (cmd->toggle << HCR_T_BIT) |
318 (event ? (1 << HCR_E_BIT) : 0) |
319 (op_modifier << HCR_OPMOD_SHIFT) |
323 * Make sure that our HCR writes don't get mixed in with
324 * writes from another CPU starting a FW command.
328 cmd->toggle = cmd->toggle ^ 1;
333 mutex_unlock(&cmd->hcr_mutex);
/* Execute a firmware command on behalf of a slave/VF by filling the
 * virtual HCR (VHCR) and either processing it directly (when we are the
 * master calling on our own behalf) or posting VHCR_POST on the comm
 * channel.  Serialized by slave_sem.  For out_is_imm commands, the
 * immediate result is read back from vhcr->out_param. */
337 static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
338 int out_is_imm, u32 in_modifier, u8 op_modifier,
339 u16 op, unsigned long timeout)
341 struct mlx4_priv *priv = mlx4_priv(dev);
342 struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
345 down(&priv->cmd.slave_sem);
346 vhcr->in_param = cpu_to_be64(in_param);
347 vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
348 vhcr->in_modifier = cpu_to_be32(in_modifier);
/* Opcode modifier lives in the top 4 bits of the 16-bit opcode field. */
349 vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff));
350 vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
/* Bit 6 tells the master whether we want an event-based completion. */
352 vhcr->flags = !!(priv->cmd.use_events) << 6;
353 if (mlx4_is_master(dev)) {
354 ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
359 be64_to_cpu(vhcr->out_param);
361 mlx4_err(dev, "response expected while"
362 "output mailbox is NULL for "
363 "command 0x%x\n", op);
364 vhcr->status = -EINVAL;
370 ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0,
371 MLX4_COMM_TIME + timeout);
376 be64_to_cpu(vhcr->out_param);
378 mlx4_err(dev, "response expected while"
379 "output mailbox is NULL for "
380 "command 0x%x\n", op);
381 vhcr->status = -EINVAL;
386 mlx4_err(dev, "failed execution of VHCR_POST command"
387 "opcode 0x%x\n", op);
389 up(&priv->cmd.slave_sem);
/* Polled firmware command on the real HCR: post the command with
 * CMD_POLL_TOKEN, spin until the go/toggle bits clear or @timeout ms
 * elapse, then read back the immediate out_param (when out_is_imm) and
 * the status byte, translating it to an errno. */
393 static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
394 int out_is_imm, u32 in_modifier, u8 op_modifier,
395 u16 op, unsigned long timeout)
397 struct mlx4_priv *priv = mlx4_priv(dev);
398 void __iomem *hcr = priv->cmd.hcr;
403 down(&priv->cmd.poll_sem);
405 err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
406 in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
410 end = msecs_to_jiffies(timeout) + jiffies;
411 while (cmd_pending(dev) && time_before(jiffies, end))
414 if (cmd_pending(dev)) {
/* Reassemble the 64-bit immediate result from the two big-endian words. */
421 (u64) be32_to_cpu((__force __be32)
422 __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
423 (u64) be32_to_cpu((__force __be32)
424 __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
/* Firmware status is the top byte of the status word. */
425 stat = be32_to_cpu((__force __be32)
426 __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
427 err = mlx4_status_to_errno(stat);
429 mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
433 up(&priv->cmd.poll_sem);
/* Command-completion EQ handler: look up the context by token, record the
 * firmware status/result/out_param, and wake the waiter.  A token mismatch
 * means a previously timed-out command completed late and is ignored. */
437 void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
439 struct mlx4_priv *priv = mlx4_priv(dev);
440 struct mlx4_cmd_context *context =
441 &priv->cmd.context[token & priv->cmd.token_mask];
443 /* previously timed out command completing at long last */
444 if (token != context->token)
447 context->fw_status = status;
448 context->result = mlx4_status_to_errno(status);
449 context->out_param = out_param;
451 complete(&context->done);
/* Event-driven firmware command: allocate a command context, post the
 * command with the event bit set, and sleep until mlx4_cmd_event()
 * completes the context or @timeout ms pass.  On success the immediate
 * out_param (if requested) is copied back to the caller. */
454 static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
455 int out_is_imm, u32 in_modifier, u8 op_modifier,
456 u16 op, unsigned long timeout)
458 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
459 struct mlx4_cmd_context *context;
462 down(&cmd->event_sem);
464 spin_lock(&cmd->context_lock);
465 BUG_ON(cmd->free_head < 0);
466 context = &cmd->context[cmd->free_head];
/* Advance the token beyond token_mask so late completions are detectable. */
467 context->token += cmd->token_mask + 1;
468 cmd->free_head = context->next;
469 spin_unlock(&cmd->context_lock);
471 init_completion(&context->done);
473 mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
474 in_modifier, op_modifier, op, context->token, 1);
476 if (!wait_for_completion_timeout(&context->done,
477 msecs_to_jiffies(timeout))) {
482 err = context->result;
484 mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
485 op, context->fw_status);
490 *out_param = context->out_param;
/* Return the context to the free list. */
493 spin_lock(&cmd->context_lock);
494 context->next = cmd->free_head;
495 cmd->free_head = context - cmd->context;
496 spin_unlock(&cmd->context_lock);
/* Top-level command dispatcher.  Native commands on a non-multifunction
 * device (or on the master when @native is set) go straight to the HCR
 * (event or polled mode); everything else is routed through the slave
 * VHCR path. */
502 int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
503 int out_is_imm, u32 in_modifier, u8 op_modifier,
504 u16 op, unsigned long timeout, int native)
506 if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
507 if (mlx4_priv(dev)->cmd.use_events)
508 return mlx4_cmd_wait(dev, in_param, out_param,
509 out_is_imm, in_modifier,
510 op_modifier, op, timeout);
512 return mlx4_cmd_poll(dev, in_param, out_param,
513 out_is_imm, in_modifier,
514 op_modifier, op, timeout);
516 return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
517 in_modifier, op_modifier, op, timeout);
519 EXPORT_SYMBOL_GPL(__mlx4_cmd);
/* Re-arm the comm channel so slave writes generate command events again. */
522 static int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
524 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
525 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
/* DMA @size bytes between master memory and a slave's memory via the
 * ACCESS_MEM firmware command.  Addresses must be page (4K) aligned, the
 * slave id must fit in 7 bits, and size must be a multiple of 256.
 * @is_read selects slave->master (read) vs master->slave direction: the
 * low bits of each 64-bit parameter carry the function/slave id. */
528 static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
529 int slave, u64 slave_addr,
530 int size, int is_read)
535 if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
536 (slave & ~0x7f) | (size & 0xff)) {
537 mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx "
538 "master_addr:0x%llx slave_id:%d size:%d\n",
539 slave_addr, master_addr, slave, size);
544 in_param = (u64) slave | slave_addr;
545 out_param = (u64) dev->caps.function | master_addr;
547 in_param = (u64) dev->caps.function | master_addr;
548 out_param = (u64) slave | slave_addr;
551 return mlx4_cmd_imm(dev, in_param, &out_param, size, 0,
553 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
/* Generic command wrapper for DMA-style commands: substitute mailbox DMA
 * addresses for the in/out parameters where the command uses mailboxes,
 * optionally encode the slave id in the low byte of in_param, and forward
 * the command natively via __mlx4_cmd(). */
556 int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
557 struct mlx4_vhcr *vhcr,
558 struct mlx4_cmd_mailbox *inbox,
559 struct mlx4_cmd_mailbox *outbox,
560 struct mlx4_cmd_info *cmd)
566 in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
567 out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
568 if (cmd->encode_slave_id) {
/* Clear the low byte so the slave id can be OR'ed in (the OR line is
 * missing from this extract). */
569 in_param &= 0xffffffffffffff00ll;
573 err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm,
574 vhcr->in_modifier, vhcr->op_modifier, vhcr->op,
575 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
578 vhcr->out_param = out_param;
/* Dispatch table for commands a master will accept from slaves.  Each
 * entry names the opcode, mailbox/immediate flags, whether the slave id
 * is encoded into in_param, and the wrapper that validates/executes the
 * command on the slave's behalf (entries with no .wrapper line in this
 * extract presumably use NULL or a default — confirm against the full
 * file, which also contains .has_inbox/.verify fields not visible here). */
583 static struct mlx4_cmd_info cmd_info[] = {
585 .opcode = MLX4_CMD_QUERY_FW,
589 .encode_slave_id = false,
594 .opcode = MLX4_CMD_QUERY_HCA,
598 .encode_slave_id = false,
603 .opcode = MLX4_CMD_QUERY_DEV_CAP,
607 .encode_slave_id = false,
612 .opcode = MLX4_CMD_QUERY_FUNC_CAP,
616 .encode_slave_id = false,
618 .wrapper = mlx4_QUERY_FUNC_CAP_wrapper
621 .opcode = MLX4_CMD_QUERY_ADAPTER,
625 .encode_slave_id = false,
630 .opcode = MLX4_CMD_INIT_PORT,
634 .encode_slave_id = false,
636 .wrapper = mlx4_INIT_PORT_wrapper
639 .opcode = MLX4_CMD_CLOSE_PORT,
643 .encode_slave_id = false,
645 .wrapper = mlx4_CLOSE_PORT_wrapper
648 .opcode = MLX4_CMD_QUERY_PORT,
652 .encode_slave_id = false,
654 .wrapper = mlx4_QUERY_PORT_wrapper
657 .opcode = MLX4_CMD_SET_PORT,
661 .encode_slave_id = false,
663 .wrapper = mlx4_SET_PORT_wrapper
666 .opcode = MLX4_CMD_MAP_EQ,
670 .encode_slave_id = false,
672 .wrapper = mlx4_MAP_EQ_wrapper
675 .opcode = MLX4_CMD_SW2HW_EQ,
679 .encode_slave_id = true,
681 .wrapper = mlx4_SW2HW_EQ_wrapper
684 .opcode = MLX4_CMD_HW_HEALTH_CHECK,
688 .encode_slave_id = false,
693 .opcode = MLX4_CMD_NOP,
697 .encode_slave_id = false,
702 .opcode = MLX4_CMD_ALLOC_RES,
706 .encode_slave_id = false,
708 .wrapper = mlx4_ALLOC_RES_wrapper
711 .opcode = MLX4_CMD_FREE_RES,
715 .encode_slave_id = false,
717 .wrapper = mlx4_FREE_RES_wrapper
720 .opcode = MLX4_CMD_SW2HW_MPT,
724 .encode_slave_id = true,
726 .wrapper = mlx4_SW2HW_MPT_wrapper
729 .opcode = MLX4_CMD_QUERY_MPT,
733 .encode_slave_id = false,
735 .wrapper = mlx4_QUERY_MPT_wrapper
738 .opcode = MLX4_CMD_HW2SW_MPT,
742 .encode_slave_id = false,
744 .wrapper = mlx4_HW2SW_MPT_wrapper
747 .opcode = MLX4_CMD_READ_MTT,
751 .encode_slave_id = false,
756 .opcode = MLX4_CMD_WRITE_MTT,
760 .encode_slave_id = false,
762 .wrapper = mlx4_WRITE_MTT_wrapper
765 .opcode = MLX4_CMD_SYNC_TPT,
769 .encode_slave_id = false,
774 .opcode = MLX4_CMD_HW2SW_EQ,
778 .encode_slave_id = true,
780 .wrapper = mlx4_HW2SW_EQ_wrapper
783 .opcode = MLX4_CMD_QUERY_EQ,
787 .encode_slave_id = true,
789 .wrapper = mlx4_QUERY_EQ_wrapper
792 .opcode = MLX4_CMD_SW2HW_CQ,
796 .encode_slave_id = true,
798 .wrapper = mlx4_SW2HW_CQ_wrapper
801 .opcode = MLX4_CMD_HW2SW_CQ,
805 .encode_slave_id = false,
807 .wrapper = mlx4_HW2SW_CQ_wrapper
810 .opcode = MLX4_CMD_QUERY_CQ,
814 .encode_slave_id = false,
816 .wrapper = mlx4_QUERY_CQ_wrapper
819 .opcode = MLX4_CMD_MODIFY_CQ,
823 .encode_slave_id = false,
825 .wrapper = mlx4_MODIFY_CQ_wrapper
828 .opcode = MLX4_CMD_SW2HW_SRQ,
832 .encode_slave_id = true,
834 .wrapper = mlx4_SW2HW_SRQ_wrapper
837 .opcode = MLX4_CMD_HW2SW_SRQ,
841 .encode_slave_id = false,
843 .wrapper = mlx4_HW2SW_SRQ_wrapper
846 .opcode = MLX4_CMD_QUERY_SRQ,
850 .encode_slave_id = false,
852 .wrapper = mlx4_QUERY_SRQ_wrapper
855 .opcode = MLX4_CMD_ARM_SRQ,
859 .encode_slave_id = false,
861 .wrapper = mlx4_ARM_SRQ_wrapper
864 .opcode = MLX4_CMD_RST2INIT_QP,
868 .encode_slave_id = true,
870 .wrapper = mlx4_RST2INIT_QP_wrapper
873 .opcode = MLX4_CMD_INIT2INIT_QP,
877 .encode_slave_id = false,
879 .wrapper = mlx4_GEN_QP_wrapper
882 .opcode = MLX4_CMD_INIT2RTR_QP,
886 .encode_slave_id = false,
888 .wrapper = mlx4_INIT2RTR_QP_wrapper
891 .opcode = MLX4_CMD_RTR2RTS_QP,
895 .encode_slave_id = false,
897 .wrapper = mlx4_GEN_QP_wrapper
900 .opcode = MLX4_CMD_RTS2RTS_QP,
904 .encode_slave_id = false,
906 .wrapper = mlx4_GEN_QP_wrapper
909 .opcode = MLX4_CMD_SQERR2RTS_QP,
913 .encode_slave_id = false,
915 .wrapper = mlx4_GEN_QP_wrapper
918 .opcode = MLX4_CMD_2ERR_QP,
922 .encode_slave_id = false,
924 .wrapper = mlx4_GEN_QP_wrapper
927 .opcode = MLX4_CMD_RTS2SQD_QP,
931 .encode_slave_id = false,
933 .wrapper = mlx4_GEN_QP_wrapper
936 .opcode = MLX4_CMD_SQD2SQD_QP,
940 .encode_slave_id = false,
942 .wrapper = mlx4_GEN_QP_wrapper
945 .opcode = MLX4_CMD_SQD2RTS_QP,
949 .encode_slave_id = false,
951 .wrapper = mlx4_GEN_QP_wrapper
954 .opcode = MLX4_CMD_2RST_QP,
958 .encode_slave_id = false,
960 .wrapper = mlx4_2RST_QP_wrapper
963 .opcode = MLX4_CMD_QUERY_QP,
967 .encode_slave_id = false,
969 .wrapper = mlx4_GEN_QP_wrapper
972 .opcode = MLX4_CMD_SUSPEND_QP,
976 .encode_slave_id = false,
978 .wrapper = mlx4_GEN_QP_wrapper
981 .opcode = MLX4_CMD_UNSUSPEND_QP,
985 .encode_slave_id = false,
987 .wrapper = mlx4_GEN_QP_wrapper
990 .opcode = MLX4_CMD_QUERY_IF_STAT,
994 .encode_slave_id = false,
996 .wrapper = mlx4_QUERY_IF_STAT_wrapper
998 /* Native multicast commands are not available for guests */
1000 .opcode = MLX4_CMD_QP_ATTACH,
1002 .has_outbox = false,
1003 .out_is_imm = false,
1004 .encode_slave_id = false,
1006 .wrapper = mlx4_QP_ATTACH_wrapper
1009 .opcode = MLX4_CMD_PROMISC,
1011 .has_outbox = false,
1012 .out_is_imm = false,
1013 .encode_slave_id = false,
1015 .wrapper = mlx4_PROMISC_wrapper
1017 /* Ethernet specific commands */
1019 .opcode = MLX4_CMD_SET_VLAN_FLTR,
1021 .has_outbox = false,
1022 .out_is_imm = false,
1023 .encode_slave_id = false,
1025 .wrapper = mlx4_SET_VLAN_FLTR_wrapper
1028 .opcode = MLX4_CMD_SET_MCAST_FLTR,
1030 .has_outbox = false,
1031 .out_is_imm = false,
1032 .encode_slave_id = false,
1034 .wrapper = mlx4_SET_MCAST_FLTR_wrapper
1037 .opcode = MLX4_CMD_DUMP_ETH_STATS,
1040 .out_is_imm = false,
1041 .encode_slave_id = false,
1043 .wrapper = mlx4_DUMP_ETH_STATS_wrapper
1046 .opcode = MLX4_CMD_INFORM_FLR_DONE,
1048 .has_outbox = false,
1049 .out_is_imm = false,
1050 .encode_slave_id = false,
/* Master-side execution of a slave's VHCR command:
 *   1. DMA the slave's VHCR in (or use @in_vhcr when the master issues a
 *      command on its own behalf) and decode it into a SW mlx4_vhcr.
 *   2. Look up the opcode in cmd_info[]; reject unknown commands.
 *   3. DMA in the inbox mailbox if the command has one, run the entry's
 *      verify() check, and allocate the outbox if needed.
 *   4. Execute via the entry's wrapper (or directly via __mlx4_cmd).
 *   5. DMA the outbox and the VHCR result back to the slave, generating a
 *      completion EQE if the slave asked for event mode (e_bit).
 * Mailboxes are freed on all paths via the out label. */
1056 static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
1057 struct mlx4_vhcr_cmd *in_vhcr)
1059 struct mlx4_priv *priv = mlx4_priv(dev);
1060 struct mlx4_cmd_info *cmd = NULL;
1061 struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr;
1062 struct mlx4_vhcr *vhcr;
1063 struct mlx4_cmd_mailbox *inbox = NULL;
1064 struct mlx4_cmd_mailbox *outbox = NULL;
1070 /* Create sw representation of Virtual HCR */
1071 vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL);
1075 /* DMA in the vHCR */
1077 ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
1078 priv->mfunc.master.slave_state[slave].vhcr_dma,
1079 ALIGN(sizeof(struct mlx4_vhcr_cmd),
1080 MLX4_ACCESS_MEM_ALIGN), 1);
1082 mlx4_err(dev, "%s:Failed reading vhcr"
1083 "ret: 0x%x\n", __func__, ret);
1089 /* Fill SW VHCR fields */
1090 vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param);
1091 vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param);
1092 vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier);
1093 vhcr->token = be16_to_cpu(vhcr_cmd->token);
1094 vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff;
1095 vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12);
1096 vhcr->e_bit = vhcr_cmd->flags & (1 << 6);
1098 /* Lookup command */
1099 for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
1100 if (vhcr->op == cmd_info[i].opcode) {
1106 mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n",
1108 vhcr_cmd->status = -EINVAL;
/* Read the inbox contents from the slave's memory. */
1113 if (cmd->has_inbox) {
1114 vhcr->in_param &= INBOX_MASK;
1115 inbox = mlx4_alloc_cmd_mailbox(dev);
1116 if (IS_ERR(inbox)) {
1117 ret = PTR_ERR(inbox);
1122 ret = mlx4_ACCESS_MEM(dev, inbox->dma, slave,
1124 MLX4_MAILBOX_SIZE, 1);
1126 mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
1127 __func__, cmd->opcode);
1132 /* Apply permission and bound checks if applicable */
1133 if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
1134 mlx4_warn(dev, "Command:0x%x from slave: %d failed protection "
1135 "checks for resource_id:%d\n", vhcr->op, slave,
1137 vhcr_cmd->status = -EPERM;
1141 /* Allocate outbox */
1142 if (cmd->has_outbox) {
1143 outbox = mlx4_alloc_cmd_mailbox(dev);
1144 if (IS_ERR(outbox)) {
1145 ret = PTR_ERR(outbox);
1151 /* Execute the command! */
1153 vhcr_cmd->status = cmd->wrapper(dev, slave, vhcr, inbox, outbox,
1155 if (cmd->out_is_imm)
1156 vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
/* No wrapper: forward the command directly with mailbox DMA addresses. */
1158 in_param = cmd->has_inbox ? (u64) inbox->dma :
1160 out_param = cmd->has_outbox ? (u64) outbox->dma :
1162 vhcr_cmd->status = __mlx4_cmd(dev, in_param, &out_param,
1163 cmd->out_is_imm, vhcr->in_modifier,
1164 vhcr->op_modifier, vhcr->op,
1165 MLX4_CMD_TIME_CLASS_A,
1168 if (vhcr_cmd->status) {
1169 mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with"
1170 " error:%d, status %d\n",
1171 vhcr->op, slave, vhcr->errno,
1173 ret = vhcr_cmd->status;
1177 if (cmd->out_is_imm) {
1178 vhcr->out_param = out_param;
1179 vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
1183 /* Write outbox if command completed successfully */
1184 if (cmd->has_outbox && !vhcr->errno) {
1185 ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
1187 MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED);
1189 mlx4_err(dev, "%s:Failed writing outbox\n", __func__);
1195 /* DMA back vhcr result */
1197 ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
1198 priv->mfunc.master.slave_state[slave].vhcr_dma,
1199 ALIGN(sizeof(struct mlx4_vhcr),
1200 MLX4_ACCESS_MEM_ALIGN),
1203 mlx4_err(dev, "%s:Failed writing vhcr result\n",
1205 else if (vhcr->e_bit &&
1206 mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
1207 mlx4_warn(dev, "Failed to generate command completion "
1208 "eqe for slave %d\n", slave);
/* mlx4_free_cmd_mailbox() handles NULL, so both paths are safe. */
1213 mlx4_free_cmd_mailbox(dev, inbox);
1214 mlx4_free_cmd_mailbox(dev, outbox);
/* Master-side handling of a single comm-channel command from @slave.
 * Validates the toggle, walks the VHCR0->VHCR1->VHCR2->VHCR_EN handshake
 * that assembles the slave's 64-bit VHCR DMA address 16 bits at a time,
 * processes VHCR_POST commands, and finally acknowledges the slave by
 * writing the reply (with the new toggle) to its slave_read register.
 * RESET handling cleans up all resources owned by the slave. */
1218 static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
1219 u16 param, u8 toggle)
1221 struct mlx4_priv *priv = mlx4_priv(dev);
1222 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
1224 u32 slave_status = 0;
1225 u8 is_going_down = 0;
/* The slave flipped its toggle when posting; mirror it and verify. */
1227 slave_state[slave].comm_toggle ^= 1;
1228 reply = (u32) slave_state[slave].comm_toggle << 31;
1229 if (toggle != slave_state[slave].comm_toggle) {
1230 mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER"
1231 "STATE COMPROMISIED ***\n", toggle, slave);
1234 if (cmd == MLX4_COMM_CMD_RESET) {
1235 mlx4_warn(dev, "Received reset from slave:%d\n", slave);
1236 slave_state[slave].active = false;
1237 /*check if we are in the middle of FLR process,
1238 if so return "retry" status to the slave*/
1239 if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
1240 slave_status = MLX4_DELAY_RESET_SLAVE;
1241 goto inform_slave_state;
1244 /* write the version in the event field */
1245 reply |= mlx4_comm_get_version();
1249 /*command from slave in the middle of FLR*/
1250 if (cmd != MLX4_COMM_CMD_RESET &&
1251 MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
1252 mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) "
1253 "in the middle of FLR\n", slave, cmd);
/* The VHCR address handshake: each step requires the previous step to
 * have been the slave's last command, enforcing strict ordering. */
1258 case MLX4_COMM_CMD_VHCR0:
1259 if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
1261 slave_state[slave].vhcr_dma = ((u64) param) << 48;
1262 priv->mfunc.master.slave_state[slave].cookie = 0;
1263 mutex_init(&priv->mfunc.master.gen_eqe_mutex[slave]);
1265 case MLX4_COMM_CMD_VHCR1:
1266 if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
1268 slave_state[slave].vhcr_dma |= ((u64) param) << 32;
1270 case MLX4_COMM_CMD_VHCR2:
1271 if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
1273 slave_state[slave].vhcr_dma |= ((u64) param) << 16;
1275 case MLX4_COMM_CMD_VHCR_EN:
1276 if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
1278 slave_state[slave].vhcr_dma |= param;
1279 slave_state[slave].active = true;
1281 case MLX4_COMM_CMD_VHCR_POST:
1282 if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
1283 (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST))
1285 down(&priv->cmd.slave_sem);
1286 if (mlx4_master_process_vhcr(dev, slave, NULL)) {
1287 mlx4_err(dev, "Failed processing vhcr for slave:%d,"
1288 " reseting slave.\n", slave);
1289 up(&priv->cmd.slave_sem);
1292 up(&priv->cmd.slave_sem);
1295 mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
1298 spin_lock(&priv->mfunc.master.slave_state_lock);
1299 if (!slave_state[slave].is_slave_going_down)
1300 slave_state[slave].last_cmd = cmd;
1303 spin_unlock(&priv->mfunc.master.slave_state_lock);
1304 if (is_going_down) {
1305 mlx4_warn(dev, "Slave is going down aborting command(%d)"
1306 " executing from slave:%d\n",
/* Acknowledge the slave: writing the reply toggles slave_read. */
1310 __raw_writel((__force u32) cpu_to_be32(reply),
1311 &priv->mfunc.comm[slave].slave_read);
1317 /* cleanup any slave resources */
1318 mlx4_delete_all_resources_for_slave(dev, slave);
1319 spin_lock(&priv->mfunc.master.slave_state_lock);
1320 if (!slave_state[slave].is_slave_going_down)
1321 slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
1322 spin_unlock(&priv->mfunc.master.slave_state_lock);
1323 /*with slave in the middle of flr, no need to clean resources again.*/
1325 memset(&slave_state[slave].event_eq, 0,
1326 sizeof(struct mlx4_slave_event_eq_info));
1327 __raw_writel((__force u32) cpu_to_be32(reply),
1328 &priv->mfunc.comm[slave].slave_read);
1332 /* master command processing */
/* Workqueue handler for comm-channel events on the master: scan the armed
 * bit vector, and for every slave whose slave_write toggle differs from
 * its slave_read toggle, resync the cached toggle if needed and dispatch
 * the pending command to mlx4_master_do_cmd().  Finally re-arm the
 * channel for further events. */
1333 void mlx4_master_comm_channel(struct work_struct *work)
1335 struct mlx4_mfunc_master_ctx *master =
1337 struct mlx4_mfunc_master_ctx,
1339 struct mlx4_mfunc *mfunc =
1340 container_of(master, struct mlx4_mfunc, master);
1341 struct mlx4_priv *priv =
1342 container_of(mfunc, struct mlx4_priv, mfunc);
1343 struct mlx4_dev *dev = &priv->dev;
1353 bit_vec = master->comm_arm_bit_vector;
1354 for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) {
1355 vec = be32_to_cpu(bit_vec[i]);
1356 for (j = 0; j < 32; j++) {
1357 if (!(vec & (1 << j)))
1360 slave = (i * 32) + j;
1361 comm_cmd = swab32(readl(
1362 &mfunc->comm[slave].slave_write));
1363 slt = swab32(readl(&mfunc->comm[slave].slave_read))
1365 toggle = comm_cmd >> 31;
/* Toggle mismatch between write and read sides = command pending. */
1366 if (toggle != slt) {
1367 if (master->slave_state[slave].comm_toggle
1369 printk(KERN_INFO "slave %d out of sync."
1370 " read toggle %d, state toggle %d. "
1371 "Resynching.\n", slave, slt,
1372 master->slave_state[slave].comm_toggle);
1373 master->slave_state[slave].comm_toggle =
1376 mlx4_master_do_cmd(dev, slave,
1377 comm_cmd >> 16 & 0xff,
1378 comm_cmd & 0xffff, toggle);
1384 if (reported && reported != served)
1385 mlx4_warn(dev, "Got command event with bitmask from %d slaves"
1386 " but %d were served\n",
1389 if (mlx4_ARM_COMM_CHANNEL(dev))
1390 mlx4_warn(dev, "Failed to arm comm channel events\n");
/* Slave-side initialization: wait up to 5s for the master to bring the
 * read toggle in line with the write toggle, then adopt that toggle as
 * our comm_toggle.  If they never converge (e.g. a previous VM left the
 * channel unsynced), force both registers to zero and restart clean. */
1393 static int sync_toggles(struct mlx4_dev *dev)
1395 struct mlx4_priv *priv = mlx4_priv(dev);
1400 wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write)) >> 31;
1401 end = jiffies + msecs_to_jiffies(5000);
1403 while (time_before(jiffies, end)) {
1404 rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)) >> 31;
1405 if (rd_toggle == wr_toggle) {
1406 priv->cmd.comm_toggle = rd_toggle;
1414 * we could reach here if for example the previous VM using this
1415 * function misbehaved and left the channel with unsynced state. We
1416 * should fix this here and give this VM a chance to use a properly
1419 mlx4_warn(dev, "recovering from previously mis-behaved VM\n");
1420 __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
1421 __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
1422 priv->cmd.comm_toggle = 0;
/* Set up the multifunction (SR-IOV) command machinery.
 * Allocates the VHCR DMA page and maps the comm channel.  On the master:
 * allocates per-slave state (VLAN filters, multicast lists), initializes
 * the comm/FLR workqueues and the resource tracker, then arms the comm
 * channel.  On a slave: syncs toggles with the master.  Error paths
 * unwind in reverse order (labels are in lines missing from this
 * extract). */
1429 int mlx4_multi_func_init(struct mlx4_dev *dev)
1430 struct mlx4_priv *priv = mlx4_priv(dev);
1431 struct mlx4_slave_state *s_state;
1433 priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
1434 &priv->mfunc.vhcr_dma,
1436 if (!priv->mfunc.vhcr) {
1437 mlx4_err(dev, "Couldn't allocate vhcr.\n");
1441 if (mlx4_is_master(dev))
1443 ioremap(pci_resource_start(dev->pdev, priv->fw.comm_bar) +
1444 priv->fw.comm_base, MLX4_COMM_PAGESIZE);
1447 ioremap(pci_resource_start(dev->pdev, 2) +
1448 MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
1449 if (!priv->mfunc.comm) {
1450 mlx4_err(dev, "Couldn't map communication vector.\n");
1454 if (mlx4_is_master(dev)) {
1455 priv->mfunc.master.slave_state =
1456 kzalloc(dev->num_slaves *
1457 sizeof(struct mlx4_slave_state), GFP_KERNEL);
1458 if (!priv->mfunc.master.slave_state)
1461 for (i = 0; i < dev->num_slaves; ++i) {
1462 s_state = &priv->mfunc.master.slave_state[i];
1463 s_state->last_cmd = MLX4_COMM_CMD_RESET;
1464 __raw_writel((__force u32) 0,
1465 &priv->mfunc.comm[i].slave_write);
1466 __raw_writel((__force u32) 0,
1467 &priv->mfunc.comm[i].slave_read);
1469 for (port = 1; port <= MLX4_MAX_PORTS; port++) {
1470 s_state->vlan_filter[port] =
1471 kzalloc(sizeof(struct mlx4_vlan_fltr),
1473 if (!s_state->vlan_filter[port]) {
1475 kfree(s_state->vlan_filter[port]);
1478 INIT_LIST_HEAD(&s_state->mcast_filters[port]);
1480 spin_lock_init(&s_state->lock);
1483 memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
1484 priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
1485 INIT_WORK(&priv->mfunc.master.comm_work,
1486 mlx4_master_comm_channel);
1487 INIT_WORK(&priv->mfunc.master.slave_event_work,
1488 mlx4_gen_slave_eqe);
1489 INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
1490 mlx4_master_handle_slave_flr);
1491 spin_lock_init(&priv->mfunc.master.slave_state_lock);
1492 priv->mfunc.master.comm_wq =
1493 create_singlethread_workqueue("mlx4_comm");
1494 if (!priv->mfunc.master.comm_wq)
1497 if (mlx4_init_resource_tracker(dev))
1500 sema_init(&priv->cmd.slave_sem, 1);
1501 err = mlx4_ARM_COMM_CHANNEL(dev);
1503 mlx4_err(dev, " Failed to arm comm channel eq: %x\n",
1509 err = sync_toggles(dev);
1511 mlx4_err(dev, "Couldn't sync toggles\n");
1515 sema_init(&priv->cmd.slave_sem, 1);
/* Error unwind: free in reverse order of acquisition. */
1520 mlx4_free_resource_tracker(dev);
1522 flush_workqueue(priv->mfunc.master.comm_wq);
1523 destroy_workqueue(priv->mfunc.master.comm_wq);
1526 for (port = 1; port <= MLX4_MAX_PORTS; port++)
1527 kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
1529 kfree(priv->mfunc.master.slave_state);
1531 iounmap(priv->mfunc.comm);
1533 dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
1535 priv->mfunc.vhcr_dma);
1536 priv->mfunc.vhcr = NULL;
/* One-time command-interface init: set up locks/semaphores, map the HCR
 * (non-slave only, since slaves use the VHCR instead), and create the
 * PCI pool for command mailboxes.  Starts in polling mode (use_events=0). */
1540 int mlx4_cmd_init(struct mlx4_dev *dev)
1542 struct mlx4_priv *priv = mlx4_priv(dev);
1544 mutex_init(&priv->cmd.hcr_mutex);
1545 sema_init(&priv->cmd.poll_sem, 1);
1546 priv->cmd.use_events = 0;
1547 priv->cmd.toggle = 1;
1549 priv->cmd.hcr = NULL;
1550 priv->mfunc.vhcr = NULL;
1552 if (!mlx4_is_slave(dev)) {
1553 priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
1554 MLX4_HCR_BASE, MLX4_HCR_SIZE);
1555 if (!priv->cmd.hcr) {
1556 mlx4_err(dev, "Couldn't map command register.\n");
1561 priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
1563 MLX4_MAILBOX_SIZE, 0);
1564 if (!priv->cmd.pool)
/* Error path: undo the HCR mapping on pool-creation failure. */
1570 if (!mlx4_is_slave(dev))
1571 iounmap(priv->cmd.hcr);
/* Tear down everything mlx4_multi_func_init() set up: master workqueue,
 * per-slave VLAN filters and state, the comm-channel mapping, and the
 * VHCR DMA page. */
1575 void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
1577 struct mlx4_priv *priv = mlx4_priv(dev);
1580 if (mlx4_is_master(dev)) {
1581 flush_workqueue(priv->mfunc.master.comm_wq);
1582 destroy_workqueue(priv->mfunc.master.comm_wq);
1583 for (i = 0; i < dev->num_slaves; i++) {
1584 for (port = 1; port <= MLX4_MAX_PORTS; port++)
1585 kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
1587 kfree(priv->mfunc.master.slave_state);
1588 iounmap(priv->mfunc.comm);
1589 dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
1591 priv->mfunc.vhcr_dma);
1592 priv->mfunc.vhcr = NULL;
/* Undo mlx4_cmd_init(): destroy the mailbox pool and, on non-slaves,
 * unmap the HCR. */
1596 void mlx4_cmd_cleanup(struct mlx4_dev *dev)
1598 struct mlx4_priv *priv = mlx4_priv(dev);
1600 pci_pool_destroy(priv->cmd.pool);
1602 if (!mlx4_is_slave(dev))
1603 iounmap(priv->cmd.hcr);
1607 * Switch to using events to issue FW commands (can only be called
1608 * after event queue for command events has been initialized).
/* Allocates the command-context free list, initializes event_sem to the
 * maximum command concurrency, computes token_mask as (next power of two
 * >= max_cmds) - 1, takes poll_sem to disable the polling path, and
 * enables event mode. */
1610 int mlx4_cmd_use_events(struct mlx4_dev *dev)
1612 struct mlx4_priv *priv = mlx4_priv(dev);
1616 priv->cmd.context = kmalloc(priv->cmd.max_cmds *
1617 sizeof (struct mlx4_cmd_context),
1619 if (!priv->cmd.context)
1622 for (i = 0; i < priv->cmd.max_cmds; ++i) {
1623 priv->cmd.context[i].token = i;
1624 priv->cmd.context[i].next = i + 1;
/* -1 terminates the free list (checked by BUG_ON in mlx4_cmd_wait). */
1627 priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
1628 priv->cmd.free_head = 0;
1630 sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
1631 spin_lock_init(&priv->cmd.context_lock);
1633 for (priv->cmd.token_mask = 1;
1634 priv->cmd.token_mask < priv->cmd.max_cmds;
1635 priv->cmd.token_mask <<= 1)
1637 --priv->cmd.token_mask;
/* Holding poll_sem blocks any concurrent polled command while in
 * event mode; it is released again in mlx4_cmd_use_polling(). */
1639 down(&priv->cmd.poll_sem);
1640 priv->cmd.use_events = 1;
1646 * Switch back to polling (used when shutting down the device)
/* Drain all outstanding event-mode commands by acquiring every event_sem
 * slot, free the context array, and release poll_sem taken by
 * mlx4_cmd_use_events(). */
1648 void mlx4_cmd_use_polling(struct mlx4_dev *dev)
1650 struct mlx4_priv *priv = mlx4_priv(dev);
1653 priv->cmd.use_events = 0;
1655 for (i = 0; i < priv->cmd.max_cmds; ++i)
1656 down(&priv->cmd.event_sem);
1658 kfree(priv->cmd.context);
1660 up(&priv->cmd.poll_sem);
/* Allocate a command mailbox and its DMA-able buffer from the pci pool.
 * Returns ERR_PTR(-ENOMEM) on failure; callers must check with IS_ERR().
 * The kfree(mailbox) on the buffer-allocation failure path is in a line
 * missing from this extract. */
1663 struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
1665 struct mlx4_cmd_mailbox *mailbox;
1667 mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL);
1669 return ERR_PTR(-ENOMEM);
1671 mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
1673 if (!mailbox->buf) {
1675 return ERR_PTR(-ENOMEM);
1680 EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
/* Release a mailbox allocated by mlx4_alloc_cmd_mailbox(); safe to call
 * with NULL (the guard is in a line missing from this extract). */
1682 void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
1683 struct mlx4_cmd_mailbox *mailbox)
1688 pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
1691 EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
/* Pack the comm-channel interface revision and version into one word:
 * (CMD_CHAN_IF_REV << 8) | CMD_CHAN_VER.  Sent to slaves in the RESET
 * reply by mlx4_master_do_cmd(). */
1693 u32 mlx4_comm_get_version(void)
1695 return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;