/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"
#define MLX4_MAC_VALID		(1ull << 63)

struct mac_res {
        struct list_head list;
        u64 mac;
        int ref_count;
        u8 smac_index;
        u8 port;
};

struct vlan_res {
        struct list_head list;
        u16 vlan;
        int ref_count;
        int vlan_index;
        u8 port;
};

struct res_common {
        struct list_head list;
        struct rb_node node;
        u64 res_id;
        int owner;
        int state;
        int from_state;
        int to_state;
        int removing;
};

enum {
        RES_ANY_BUSY = 1,
};

struct res_gid {
        struct list_head list;
        u8 gid[16];
        enum mlx4_protocol prot;
        enum mlx4_steer_type steer;
        u64 reg_id;
};

enum res_qp_states {
        RES_QP_BUSY = RES_ANY_BUSY,

        /* QP number was allocated */
        RES_QP_RESERVED,

        /* ICM memory for QP context was mapped */
        RES_QP_MAPPED,

        /* QP is in hw ownership */
        RES_QP_HW
};

struct res_qp {
        struct res_common com;
        struct res_mtt *mtt;
        struct res_cq *rcq;
        struct res_cq *scq;
        struct res_srq *srq;
        struct list_head mcg_list;
        spinlock_t mcg_spl;
        int local_qpn;
        atomic_t ref_count;
        u32 qpc_flags;
        /* saved qp params before VST enforcement in order to restore on VGT */
        u8 sched_queue;
        __be32 param3;
        u8 vlan_control;
        u8 fvl_rx;
        u8 pri_path_fl;
        u8 vlan_index;
        u8 feup;
};

enum res_mtt_states {
        RES_MTT_BUSY = RES_ANY_BUSY,
        RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
        switch (state) {
        case RES_MTT_BUSY: return "RES_MTT_BUSY";
        case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
        default: return "Unknown";
        }
}

struct res_mtt {
        struct res_common com;
        int order;
        atomic_t ref_count;
};

enum res_mpt_states {
        RES_MPT_BUSY = RES_ANY_BUSY,
        RES_MPT_RESERVED,
        RES_MPT_MAPPED,
        RES_MPT_HW,
};

struct res_mpt {
        struct res_common com;
        struct res_mtt *mtt;
        int key;
};

enum res_eq_states {
        RES_EQ_BUSY = RES_ANY_BUSY,
        RES_EQ_RESERVED,
        RES_EQ_HW,
};

struct res_eq {
        struct res_common com;
        struct res_mtt *mtt;
};

enum res_cq_states {
        RES_CQ_BUSY = RES_ANY_BUSY,
        RES_CQ_ALLOCATED,
        RES_CQ_HW,
};

struct res_cq {
        struct res_common com;
        struct res_mtt *mtt;
        atomic_t ref_count;
};

enum res_srq_states {
        RES_SRQ_BUSY = RES_ANY_BUSY,
        RES_SRQ_ALLOCATED,
        RES_SRQ_HW,
};

struct res_srq {
        struct res_common com;
        struct res_mtt *mtt;
        struct res_cq *cq;
        atomic_t ref_count;
};

enum res_counter_states {
        RES_COUNTER_BUSY = RES_ANY_BUSY,
        RES_COUNTER_ALLOCATED,
};

struct res_counter {
        struct res_common com;
        int port;
};

enum res_xrcdn_states {
        RES_XRCD_BUSY = RES_ANY_BUSY,
        RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
        struct res_common com;
        int port;
};

enum res_fs_rule_states {
        RES_FS_RULE_BUSY = RES_ANY_BUSY,
        RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
        struct res_common com;
        int qpn;
};
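/*
 * All tracked resources hang off two structures: a per-type rb-tree
 * (keyed by res_id) for global lookup, and a per-slave list so that
 * everything a slave owns can be walked and reclaimed when the slave
 * resets. The helpers below implement the rb-tree side.
 */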
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
        struct rb_node *node = root->rb_node;

        while (node) {
                struct res_common *res = container_of(node, struct res_common,
                                                      node);

                if (res_id < res->res_id)
                        node = node->rb_left;
                else if (res_id > res->res_id)
                        node = node->rb_right;
                else
                        return res;
        }
        return NULL;
}
static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
        struct rb_node **new = &(root->rb_node), *parent = NULL;

        /* Figure out where to put new node */
        while (*new) {
                struct res_common *this = container_of(*new, struct res_common,
                                                       node);

                parent = *new;
                if (res->res_id < this->res_id)
                        new = &((*new)->rb_left);
                else if (res->res_id > this->res_id)
                        new = &((*new)->rb_right);
                else
                        return -EEXIST;
        }

        /* Add new node and rebalance tree. */
        rb_link_node(&res->node, parent, new);
        rb_insert_color(&res->node, root);

        return 0;
}
static const char *resource_str(enum mlx4_resource rt)
{
        switch (rt) {
        case RES_QP: return "RES_QP";
        case RES_CQ: return "RES_CQ";
        case RES_SRQ: return "RES_SRQ";
        case RES_MPT: return "RES_MPT";
        case RES_MTT: return "RES_MTT";
        case RES_MAC: return "RES_MAC";
        case RES_VLAN: return "RES_VLAN";
        case RES_EQ: return "RES_EQ";
        case RES_COUNTER: return "RES_COUNTER";
        case RES_FS_RULE: return "RES_FS_RULE";
        case RES_XRCD: return "RES_XRCD";
        default: return "Unknown resource type !!!";
        }
}
static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
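/*
 * Quota accounting: each slave has a hard quota and a guaranteed
 * minimum per resource type. Grants up to the guarantee come out of
 * the reserved pool; anything beyond it must fit in the shared free
 * pool without eating into other slaves' unclaimed guarantees (the
 * "free - from_free >= reserved" check below). MAC and VLAN counts
 * are kept per port (port > 0), all other types globally.
 */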
static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
                                      enum mlx4_resource res_type, int count,
                                      int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct resource_allocator *res_alloc =
                &priv->mfunc.master.res_tracker.res_alloc[res_type];
        int err = -EINVAL;
        int allocated, free, reserved, guaranteed, from_free;
        int from_rsvd;

        if (slave > dev->persist->num_vfs)
                return -EINVAL;

        spin_lock(&res_alloc->alloc_lock);
        allocated = (port > 0) ?
                res_alloc->allocated[(port - 1) *
                (dev->persist->num_vfs + 1) + slave] :
                res_alloc->allocated[slave];
        free = (port > 0) ? res_alloc->res_port_free[port - 1] :
                res_alloc->res_free;
        reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
                res_alloc->res_reserved;
        guaranteed = res_alloc->guaranteed[slave];

        if (allocated + count > res_alloc->quota[slave]) {
                mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
                          slave, port, resource_str(res_type), count,
                          allocated, res_alloc->quota[slave]);
                goto out;
        }

        if (allocated + count <= guaranteed) {
                err = 0;
                from_rsvd = count;
        } else {
                /* portion may need to be obtained from free area */
                if (guaranteed - allocated > 0)
                        from_free = count - (guaranteed - allocated);
                else
                        from_free = count;

                from_rsvd = count - from_free;

                if (free - from_free >= reserved)
                        err = 0;
                else
                        mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
                                  slave, port, resource_str(res_type), free,
                                  from_free, reserved);
        }

        if (!err) {
                /* grant the request */
                if (port > 0) {
                        res_alloc->allocated[(port - 1) *
                        (dev->persist->num_vfs + 1) + slave] += count;
                        res_alloc->res_port_free[port - 1] -= count;
                        res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
                } else {
                        res_alloc->allocated[slave] += count;
                        res_alloc->res_free -= count;
                        res_alloc->res_reserved -= from_rsvd;
                }
        }

out:
        spin_unlock(&res_alloc->alloc_lock);
        return err;
}
static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
                                         enum mlx4_resource res_type, int count,
                                         int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct resource_allocator *res_alloc =
                &priv->mfunc.master.res_tracker.res_alloc[res_type];
        int allocated, guaranteed, from_rsvd;

        if (slave > dev->persist->num_vfs)
                return;

        spin_lock(&res_alloc->alloc_lock);

        allocated = (port > 0) ?
                res_alloc->allocated[(port - 1) *
                (dev->persist->num_vfs + 1) + slave] :
                res_alloc->allocated[slave];
        guaranteed = res_alloc->guaranteed[slave];

        if (allocated - count >= guaranteed) {
                from_rsvd = 0;
        } else {
                /* portion may need to be returned to reserved area */
                if (allocated - guaranteed > 0)
                        from_rsvd = count - (allocated - guaranteed);
                else
                        from_rsvd = count;
        }

        if (port > 0) {
                res_alloc->allocated[(port - 1) *
                (dev->persist->num_vfs + 1) + slave] -= count;
                res_alloc->res_port_free[port - 1] += count;
                res_alloc->res_port_rsvd[port - 1] += from_rsvd;
        } else {
                res_alloc->allocated[slave] -= count;
                res_alloc->res_free += count;
                res_alloc->res_reserved += from_rsvd;
        }

        spin_unlock(&res_alloc->alloc_lock);
}
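/*
 * Illustrative numbers (not taken from the code): with num_vfs = 4 and
 * num_instances = 1000, each of the 5 functions gets
 * guaranteed = 1000 / (2 * 5) = 100 and quota = 500 + 100 = 600.
 * The guarantees sum to half the pool; the other half is shared
 * first-come, first-served through mlx4_grant_resource() above.
 */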
static inline void initialize_res_quotas(struct mlx4_dev *dev,
                                         struct resource_allocator *res_alloc,
                                         enum mlx4_resource res_type,
                                         int vf, int num_instances)
{
        res_alloc->guaranteed[vf] = num_instances /
                                    (2 * (dev->persist->num_vfs + 1));
        res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
        if (vf == mlx4_master_func_num(dev)) {
                res_alloc->res_free = num_instances;
                if (res_type == RES_MTT) {
                        /* reserved mtts will be taken out of the PF allocation */
                        res_alloc->res_free += dev->caps.reserved_mtts;
                        res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
                        res_alloc->quota[vf] += dev->caps.reserved_mtts;
                }
        }
}
void mlx4_init_quotas(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int pf;

        /* quotas for VFs are initialized in mlx4_slave_cap */
        if (mlx4_is_slave(dev))
                return;

        if (!mlx4_is_mfunc(dev)) {
                dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
                        mlx4_num_reserved_sqps(dev);
                dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
                dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
                dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
                dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
                return;
        }

        pf = mlx4_master_func_num(dev);
        dev->quotas.qp =
                priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
        dev->quotas.cq =
                priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
        dev->quotas.srq =
                priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
        dev->quotas.mtt =
                priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
        dev->quotas.mpt =
                priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}
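/*
 * One-time setup on the master: a per-slave list head and mutex for
 * every resource type, an empty rb-tree per type, and a resource
 * allocator (quota/guaranteed/allocated arrays) sized for
 * num_vfs + 1 functions; MAC and VLAN allocators are additionally
 * split per physical port.
 */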
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i, j;
        int t;

        priv->mfunc.master.res_tracker.slave_list =
                kzalloc(dev->num_slaves * sizeof(struct slave_list),
                        GFP_KERNEL);
        if (!priv->mfunc.master.res_tracker.slave_list)
                return -ENOMEM;

        for (i = 0 ; i < dev->num_slaves; i++) {
                for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
                        INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
                                       slave_list[i].res_list[t]);
                mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
        }

        mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
                 dev->num_slaves);
        for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
                priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                struct resource_allocator *res_alloc =
                        &priv->mfunc.master.res_tracker.res_alloc[i];
                res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) *
                                           sizeof(int), GFP_KERNEL);
                res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) *
                                                sizeof(int), GFP_KERNEL);
                if (i == RES_MAC || i == RES_VLAN)
                        res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
                                                       (dev->persist->num_vfs
                                                       + 1) *
                                                       sizeof(int), GFP_KERNEL);
                else
                        res_alloc->allocated = kzalloc((dev->persist->
                                                        num_vfs + 1) *
                                                       sizeof(int), GFP_KERNEL);

                if (!res_alloc->quota || !res_alloc->guaranteed ||
                    !res_alloc->allocated)
                        goto no_mem_err;

                spin_lock_init(&res_alloc->alloc_lock);
                for (t = 0; t < dev->persist->num_vfs + 1; t++) {
                        struct mlx4_active_ports actv_ports =
                                mlx4_get_active_ports(dev, t);
                        switch (i) {
                        case RES_QP:
                                initialize_res_quotas(dev, res_alloc, RES_QP,
                                                      t, dev->caps.num_qps -
                                                      dev->caps.reserved_qps -
                                                      mlx4_num_reserved_sqps(dev));
                                break;
                        case RES_CQ:
                                initialize_res_quotas(dev, res_alloc, RES_CQ,
                                                      t, dev->caps.num_cqs -
                                                      dev->caps.reserved_cqs);
                                break;
                        case RES_SRQ:
                                initialize_res_quotas(dev, res_alloc, RES_SRQ,
                                                      t, dev->caps.num_srqs -
                                                      dev->caps.reserved_srqs);
                                break;
                        case RES_MPT:
                                initialize_res_quotas(dev, res_alloc, RES_MPT,
                                                      t, dev->caps.num_mpts -
                                                      dev->caps.reserved_mrws);
                                break;
                        case RES_MTT:
                                initialize_res_quotas(dev, res_alloc, RES_MTT,
                                                      t, dev->caps.num_mtts -
                                                      dev->caps.reserved_mtts);
                                break;
                        case RES_MAC:
                                if (t == mlx4_master_func_num(dev)) {
                                        int max_vfs_pport = 0;
                                        /* Calculate the max vfs per port for
                                         * both ports.
                                         */
                                        for (j = 0; j < dev->caps.num_ports;
                                             j++) {
                                                struct mlx4_slaves_pport slaves_pport =
                                                        mlx4_phys_to_slaves_pport(dev, j + 1);
                                                unsigned current_slaves =
                                                        bitmap_weight(slaves_pport.slaves,
                                                                      dev->caps.num_ports) - 1;
                                                if (max_vfs_pport < current_slaves)
                                                        max_vfs_pport =
                                                                current_slaves;
                                        }
                                        res_alloc->quota[t] =
                                                MLX4_MAX_MAC_NUM -
                                                2 * max_vfs_pport;
                                        res_alloc->guaranteed[t] = 2;
                                        for (j = 0; j < MLX4_MAX_PORTS; j++)
                                                res_alloc->res_port_free[j] =
                                                        MLX4_MAX_MAC_NUM;
                                } else {
                                        res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
                                        res_alloc->guaranteed[t] = 2;
                                }
                                break;
                        case RES_VLAN:
                                if (t == mlx4_master_func_num(dev)) {
                                        res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
                                        res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
                                        for (j = 0; j < MLX4_MAX_PORTS; j++)
                                                res_alloc->res_port_free[j] =
                                                        res_alloc->quota[t];
                                } else {
                                        res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
                                        res_alloc->guaranteed[t] = 0;
                                }
                                break;
                        case RES_COUNTER:
                                res_alloc->quota[t] = dev->caps.max_counters;
                                res_alloc->guaranteed[t] = 0;
                                if (t == mlx4_master_func_num(dev))
                                        res_alloc->res_free = res_alloc->quota[t];
                                break;
                        default:
                                break;
                        }
                        if (i == RES_MAC || i == RES_VLAN) {
                                for (j = 0; j < dev->caps.num_ports; j++)
                                        if (test_bit(j, actv_ports.ports))
                                                res_alloc->res_port_rsvd[j] +=
                                                        res_alloc->guaranteed[t];
                        } else {
                                res_alloc->res_reserved += res_alloc->guaranteed[t];
                        }
                }
        }
        spin_lock_init(&priv->mfunc.master.res_tracker.lock);
        return 0;

no_mem_err:
        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
                priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
                priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
                priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
        }
        return -ENOMEM;
}
void mlx4_free_resource_tracker(struct mlx4_dev *dev,
                                enum mlx4_res_tracker_free_type type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;

        if (priv->mfunc.master.res_tracker.slave_list) {
                if (type != RES_TR_FREE_STRUCTS_ONLY) {
                        for (i = 0; i < dev->num_slaves; i++) {
                                if (type == RES_TR_FREE_ALL ||
                                    dev->caps.function != i)
                                        mlx4_delete_all_resources_for_slave(dev, i);
                        }
                        /* free master's vlans */
                        i = dev->caps.function;
                        mlx4_reset_roce_gids(dev, i);
                        mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
                        rem_slave_vlans(dev, i);
                        mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
                }

                if (type != RES_TR_FREE_SLAVES_ONLY) {
                        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
                                priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
                                priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
                                priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
                        }
                        kfree(priv->mfunc.master.res_tracker.slave_list);
                        priv->mfunc.master.res_tracker.slave_list = NULL;
                }
        }
}
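/*
 * The byte offsets used below (64 for the sched_queue field, 35 for
 * the pkey index) index into the SW2HW_QP mailbox, whose payload is
 * the QP context at offset 8; the virtual pkey index a slave writes
 * is replaced with the physical index from the master's
 * virt2phys_pkey table for the port encoded in sched_queue.
 */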
static void update_pkey_index(struct mlx4_dev *dev, int slave,
                              struct mlx4_cmd_mailbox *inbox)
{
        u8 sched = *(u8 *)(inbox->buf + 64);
        u8 orig_index = *(u8 *)(inbox->buf + 35);
        u8 new_index;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int port;

        port = (sched >> 6 & 1) + 1;

        new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
        *(u8 *)(inbox->buf + 35) = new_index;
}
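/*
 * Rewrite the GID index a slave placed in its QP context so that it
 * lands inside the slave's own GID range: UD QPs get the slave's base
 * index outright, while RC/UC/XRC primary and alternate paths are
 * offset by the base and masked back down to 7 bits.
 */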
static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
                       u8 slave)
{
        struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
        enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
        u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
        int port;

        if (MLX4_QP_ST_UD == ts) {
                port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
                if (mlx4_is_eth(dev, port))
                        qp_ctx->pri_path.mgid_index =
                                mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
                else
                        qp_ctx->pri_path.mgid_index = slave | 0x80;

        } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
                if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
                        port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
                        if (mlx4_is_eth(dev, port)) {
                                qp_ctx->pri_path.mgid_index +=
                                        mlx4_get_base_gid_ix(dev, slave, port);
                                qp_ctx->pri_path.mgid_index &= 0x7f;
                        } else {
                                qp_ctx->pri_path.mgid_index = slave & 0x7F;
                        }
                }
                if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
                        port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
                        if (mlx4_is_eth(dev, port)) {
                                qp_ctx->alt_path.mgid_index +=
                                        mlx4_get_base_gid_ix(dev, slave, port);
                                qp_ctx->alt_path.mgid_index &= 0x7f;
                        } else {
                                qp_ctx->alt_path.mgid_index = slave & 0x7F;
                        }
                }
        }
}
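/*
 * VST enforcement: when the PF has pinned a VF to a default vlan
 * (state.default_vlan != MLX4_VGT), the master rewrites the VF's
 * INIT2RTR context to force the vlan index, strip tags and, depending
 * on link state and the configured vlan, block tagged or untagged
 * traffic via pri_path.vlan_control. The original values are saved in
 * struct res_qp (see above) so they can be restored on a switch back
 * to VGT.
 */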
static int update_vport_qp_param(struct mlx4_dev *dev,
                                 struct mlx4_cmd_mailbox *inbox,
                                 u8 slave, u32 qpn)
{
        struct mlx4_qp_context *qpc = inbox->buf + 8;
        struct mlx4_vport_oper_state *vp_oper;
        struct mlx4_priv *priv;
        u32 qp_type;
        int port, err = 0;

        port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
        priv = mlx4_priv(dev);
        vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
        qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;

        if (MLX4_VGT != vp_oper->state.default_vlan) {
                /* the reserved QPs (special, proxy, tunnel)
                 * do not operate over vlans
                 */
                if (mlx4_is_qp_reserved(dev, qpn))
                        return 0;

                /* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
                if (qp_type == MLX4_QP_ST_UD ||
                    (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
                        if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
                                *(__be32 *)inbox->buf =
                                        cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
                                        MLX4_QP_OPTPAR_VLAN_STRIPPING);
                                qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
                        } else {
                                struct mlx4_update_qp_params params = {.flags = 0};

                                err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
                                if (err)
                                        goto out;
                        }
                }

                if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
                    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
                } else if (0 != vp_oper->state.default_vlan) {
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
                } else { /* priority tagged */
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
                }

                qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
                qpc->pri_path.vlan_index = vp_oper->vlan_idx;
                qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
                qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
                qpc->pri_path.sched_queue &= 0xC7;
                qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
        }
        if (vp_oper->state.spoofchk) {
                qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
                qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
        }
out:
        return err;
}
static int mpt_mask(struct mlx4_dev *dev)
{
        return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, u64 res_id,
                      enum mlx4_resource type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
                                  res_id);
}
static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
                   enum mlx4_resource type,
                   void *res)
{
        struct res_common *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (!r) {
                err = -ENONET;
                goto exit;
        }

        if (r->state == RES_ANY_BUSY) {
                err = -EBUSY;
                goto exit;
        }

        if (r->owner != slave) {
                err = -EPERM;
                goto exit;
        }

        r->from_state = r->state;
        r->state = RES_ANY_BUSY;

        if (res)
                *((struct res_common **)res) = r;

exit:
        spin_unlock_irq(mlx4_tlock(dev));
        return err;
}
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
                                    enum mlx4_resource type,
                                    u64 res_id, int *slave)
{
        struct res_common *r;
        int err = -ENOENT;
        int id = res_id;

        if (type == RES_QP)
                id &= 0x7fffff;
        spin_lock(mlx4_tlock(dev));

        r = find_res(dev, id, type);
        if (r) {
                *slave = r->owner;
                err = 0;
        }
        spin_unlock(mlx4_tlock(dev));

        return err;
}
static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
                    enum mlx4_resource type)
{
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (r)
                r->state = r->from_state;
        spin_unlock_irq(mlx4_tlock(dev));
}
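/*
 * Constructors for tracker entries. Each returns the embedded
 * res_common so callers can treat all types uniformly; alloc_tr()
 * below dispatches on the resource type and stamps the owner.
 */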
static struct res_common *alloc_qp_tr(int id)
{
        struct res_qp *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_QP_RESERVED;
        ret->local_qpn = id;
        INIT_LIST_HEAD(&ret->mcg_list);
        spin_lock_init(&ret->mcg_spl);
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
        struct res_mtt *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->order = order;
        ret->com.state = RES_MTT_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
        struct res_mpt *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_MPT_RESERVED;
        ret->key = key;

        return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
        struct res_eq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_EQ_RESERVED;

        return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
        struct res_cq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_CQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
        struct res_srq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_SRQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
        struct res_counter *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_COUNTER_ALLOCATED;

        return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
        struct res_xrcdn *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_XRCD_ALLOCATED;

        return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
        struct res_fs_rule *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_FS_RULE_ALLOCATED;
        ret->qpn = qpn;

        return &ret->com;
}
static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
                                   int extra)
{
        struct res_common *ret;

        switch (type) {
        case RES_QP:
                ret = alloc_qp_tr(id);
                break;
        case RES_MPT:
                ret = alloc_mpt_tr(id, extra);
                break;
        case RES_MTT:
                ret = alloc_mtt_tr(id, extra);
                break;
        case RES_EQ:
                ret = alloc_eq_tr(id);
                break;
        case RES_CQ:
                ret = alloc_cq_tr(id);
                break;
        case RES_SRQ:
                ret = alloc_srq_tr(id);
                break;
        case RES_MAC:
                pr_err("implementation missing\n");
                return NULL;
        case RES_COUNTER:
                ret = alloc_counter_tr(id);
                break;
        case RES_XRCD:
                ret = alloc_xrcdn_tr(id);
                break;
        case RES_FS_RULE:
                ret = alloc_fs_rule_tr(id, extra);
                break;
        default:
                return NULL;
        }
        if (ret)
                ret->owner = slave;

        return ret;
}
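/*
 * add_res_range() registers [base, base + count) with the tracker:
 * entries are allocated outside the lock, then inserted into the
 * rb-tree and the owner's per-type list under mlx4_tlock(), with a
 * full rollback if any id in the range already exists.
 */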
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
{
        int i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct res_common **res_arr;
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct rb_root *root = &tracker->res_tree[type];

        res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
        if (!res_arr)
                return -ENOMEM;

        for (i = 0; i < count; ++i) {
                res_arr[i] = alloc_tr(base + i, type, slave, extra);
                if (!res_arr[i]) {
                        for (--i; i >= 0; --i)
                                kfree(res_arr[i]);

                        kfree(res_arr);
                        return -ENOMEM;
                }
        }

        spin_lock_irq(mlx4_tlock(dev));
        for (i = 0; i < count; ++i) {
                if (find_res(dev, base + i, type)) {
                        err = -EEXIST;
                        goto undo;
                }
                err = res_tracker_insert(root, res_arr[i]);
                if (err)
                        goto undo;
                list_add_tail(&res_arr[i]->list,
                              &tracker->slave_list[slave].res_list[type]);
        }
        spin_unlock_irq(mlx4_tlock(dev));
        kfree(res_arr);

        return 0;

undo:
        for (--i; i >= base; --i)
                rb_erase(&res_arr[i]->node, root);

        spin_unlock_irq(mlx4_tlock(dev));

        for (i = 0; i < count; ++i)
                kfree(res_arr[i]);

        kfree(res_arr);

        return err;
}
static int remove_qp_ok(struct res_qp *res)
{
        if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
            !list_empty(&res->mcg_list)) {
                pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
                       res->com.state, atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_QP_RESERVED) {
                return -EPERM;
        }

        return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
        if (res->com.state == RES_MTT_BUSY ||
            atomic_read(&res->ref_count)) {
                pr_devel("%s-%d: state %s, ref_count %d\n",
                         __func__, __LINE__,
                         mtt_states_str(res->com.state),
                         atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_MTT_ALLOCATED)
                return -EPERM;
        else if (res->order != order)
                return -EINVAL;

        return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
        if (res->com.state == RES_MPT_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_MPT_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
        if (res->com.state == RES_EQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_EQ_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
        if (res->com.state == RES_COUNTER_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_COUNTER_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
        if (res->com.state == RES_XRCD_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_XRCD_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
        if (res->com.state == RES_FS_RULE_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_FS_RULE_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
        if (res->com.state == RES_CQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_CQ_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
        if (res->com.state == RES_SRQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_SRQ_ALLOCATED)
                return -EPERM;

        return 0;
}
static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
        switch (type) {
        case RES_QP:
                return remove_qp_ok((struct res_qp *)res);
        case RES_CQ:
                return remove_cq_ok((struct res_cq *)res);
        case RES_SRQ:
                return remove_srq_ok((struct res_srq *)res);
        case RES_MPT:
                return remove_mpt_ok((struct res_mpt *)res);
        case RES_MTT:
                return remove_mtt_ok((struct res_mtt *)res, extra);
        case RES_MAC:
                return -ENOSYS;
        case RES_EQ:
                return remove_eq_ok((struct res_eq *)res);
        case RES_COUNTER:
                return remove_counter_ok((struct res_counter *)res);
        case RES_XRCD:
                return remove_xrcdn_ok((struct res_xrcdn *)res);
        case RES_FS_RULE:
                return remove_fs_rule_ok((struct res_fs_rule *)res);
        default:
                return -EINVAL;
        }
}
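/*
 * rem_res_range() is two-pass: first verify under the lock that every
 * entry in the range exists, belongs to the slave and is in a
 * removable state (remove_ok() above), then erase and free them all,
 * so a failure midway never leaves the range half-removed.
 */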
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
{
        u64 i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        for (i = base; i < base + count; ++i) {
                r = res_tracker_lookup(&tracker->res_tree[type], i);
                if (!r) {
                        err = -ENOENT;
                        goto out;
                }
                if (r->owner != slave) {
                        err = -EPERM;
                        goto out;
                }
                err = remove_ok(r, type, extra);
                if (err)
                        goto out;
        }

        for (i = base; i < base + count; ++i) {
                r = res_tracker_lookup(&tracker->res_tree[type], i);
                rb_erase(&r->node, &tracker->res_tree[type]);
                list_del(&r->list);
                kfree(r);
        }
        err = 0;

out:
        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}
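/*
 * The *_res_start_move_to() helpers implement a small state machine:
 * they validate the requested transition, park the entry in the BUSY
 * state with from_state/to_state recorded, and the caller later
 * commits with res_end_move() or rolls back with res_abort_move().
 */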
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
                                enum res_qp_states state, struct res_qp **qp,
                                int alloc)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_qp *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_QP_BUSY:
                        mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
                                 __func__, r->com.res_id);
                        err = -EBUSY;
                        break;

                case RES_QP_RESERVED:
                        if (r->com.state == RES_QP_MAPPED && !alloc)
                                break;

                        mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
                        err = -EINVAL;
                        break;

                case RES_QP_MAPPED:
                        if ((r->com.state == RES_QP_RESERVED && alloc) ||
                            r->com.state == RES_QP_HW)
                                break;
                        else {
                                mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
                                         r->com.res_id);
                                err = -EINVAL;
                        }
                        break;

                case RES_QP_HW:
                        if (r->com.state != RES_QP_MAPPED)
                                err = -EINVAL;
                        break;
                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_QP_BUSY;
                        if (qp)
                                *qp = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}
static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                enum res_mpt_states state, struct res_mpt **mpt)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_mpt *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_MPT_BUSY:
                        err = -EINVAL;
                        break;

                case RES_MPT_RESERVED:
                        if (r->com.state != RES_MPT_MAPPED)
                                err = -EINVAL;
                        break;

                case RES_MPT_MAPPED:
                        if (r->com.state != RES_MPT_RESERVED &&
                            r->com.state != RES_MPT_HW)
                                err = -EINVAL;
                        break;

                case RES_MPT_HW:
                        if (r->com.state != RES_MPT_MAPPED)
                                err = -EINVAL;
                        break;
                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_MPT_BUSY;
                        if (mpt)
                                *mpt = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}
static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                enum res_eq_states state, struct res_eq **eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_eq *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_EQ_BUSY:
                        err = -EINVAL;
                        break;

                case RES_EQ_RESERVED:
                        if (r->com.state != RES_EQ_HW)
                                err = -EINVAL;
                        break;

                case RES_EQ_HW:
                        if (r->com.state != RES_EQ_RESERVED)
                                err = -EINVAL;
                        break;
                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_EQ_BUSY;
                        if (eq)
                                *eq = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}
static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
                                enum res_cq_states state, struct res_cq **cq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_cq *r;
        int err;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
        if (!r) {
                err = -ENOENT;
        } else if (r->com.owner != slave) {
                err = -EPERM;
        } else if (state == RES_CQ_ALLOCATED) {
                if (r->com.state != RES_CQ_HW)
                        err = -EINVAL;
                else if (atomic_read(&r->ref_count))
                        err = -EBUSY;
                else
                        err = 0;
        } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
                err = -EINVAL;
        } else {
                err = 0;
        }

        if (!err) {
                r->com.from_state = r->com.state;
                r->com.to_state = state;
                r->com.state = RES_CQ_BUSY;
                if (cq)
                        *cq = r;
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}
static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                 enum res_srq_states state, struct res_srq **srq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_srq *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
        if (!r) {
                err = -ENOENT;
        } else if (r->com.owner != slave) {
                err = -EPERM;
        } else if (state == RES_SRQ_ALLOCATED) {
                if (r->com.state != RES_SRQ_HW)
                        err = -EINVAL;
                else if (atomic_read(&r->ref_count))
                        err = -EBUSY;
        } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
                err = -EINVAL;
        }

        if (!err) {
                r->com.from_state = r->com.state;
                r->com.to_state = state;
                r->com.state = RES_SRQ_BUSY;
                if (srq)
                        *srq = r;
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}
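/*
 * Commit/rollback for the BUSY transitions started above: abort
 * restores from_state, end installs to_state.
 */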
static void res_abort_move(struct mlx4_dev *dev, int slave,
                           enum mlx4_resource type, int id)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[type], id);
        if (r && (r->owner == slave))
                r->state = r->from_state;
        spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
                         enum mlx4_resource type, int id)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[type], id);
        if (r && (r->owner == slave))
                r->state = r->to_state;
        spin_unlock_irq(mlx4_tlock(dev));
}
static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
        return mlx4_is_qp_reserved(dev, qpn) &&
                (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}

static int fw_reserved(struct mlx4_dev *dev, int qpn)
{
        return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
}
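/*
 * Allocation wrappers called from mlx4_ALLOC_RES_wrapper(). The
 * general shape is: charge the quota (mlx4_grant_resource), do the
 * real allocation via the __mlx4_* backend, then register the range
 * with the tracker, unwinding in reverse order on any failure.
 */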
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
{
        int err;
        int count;
        int align;
        int base;
        int qpn;
        u8 flags;

        switch (op) {
        case RES_OP_RESERVE:
                count = get_param_l(&in_param) & 0xffffff;
                /* Turn off all unsupported QP allocation flags that the
                 * slave tries to set.
                 */
                flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
                align = get_param_h(&in_param);
                err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
                if (err)
                        return err;

                err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
                if (err) {
                        mlx4_release_resource(dev, slave, RES_QP, count, 0);
                        return err;
                }

                err = add_res_range(dev, slave, base, count, RES_QP, 0);
                if (err) {
                        mlx4_release_resource(dev, slave, RES_QP, count, 0);
                        __mlx4_qp_release_range(dev, base, count);
                        return err;
                }
                set_param_l(out_param, base);
                break;
        case RES_OP_MAP_ICM:
                qpn = get_param_l(&in_param) & 0x7fffff;
                if (valid_reserved(dev, slave, qpn)) {
                        err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
                        if (err)
                                return err;
                }

                err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
                                           NULL, 1);
                if (err)
                        return err;

                if (!fw_reserved(dev, qpn)) {
                        err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
                        if (err) {
                                res_abort_move(dev, slave, RES_QP, qpn);
                                return err;
                        }
                }

                res_end_move(dev, slave, RES_QP, qpn);
                break;

        default:
                err = -EINVAL;
                break;
        }
        return err;
}
static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
{
        int err = -EINVAL;
        int base;
        int order;

        if (op != RES_OP_RESERVE_AND_MAP)
                return err;

        order = get_param_l(&in_param);

        err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
        if (err)
                return err;

        base = __mlx4_alloc_mtt_range(dev, order);
        if (base == -1) {
                mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
                return -ENOMEM;
        }

        err = add_res_range(dev, slave, base, 1, RES_MTT, order);
        if (err) {
                mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
                __mlx4_free_mtt_range(dev, base, order);
        } else {
                set_param_l(out_param, base);
        }

        return err;
}
static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
{
        int err = -EINVAL;
        int index;
        int id;
        struct res_mpt *mpt;

        switch (op) {
        case RES_OP_RESERVE:
                err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
                if (err)
                        break;

                index = __mlx4_mpt_reserve(dev);
                if (index == -1) {
                        mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
                        break;
                }
                id = index & mpt_mask(dev);

                err = add_res_range(dev, slave, id, 1, RES_MPT, index);
                if (err) {
                        mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
                        __mlx4_mpt_release(dev, index);
                        break;
                }
                set_param_l(out_param, index);
                break;
        case RES_OP_MAP_ICM:
                index = get_param_l(&in_param);
                id = index & mpt_mask(dev);
                err = mr_res_start_move_to(dev, slave, id,
                                           RES_MPT_MAPPED, &mpt);
                if (err)
                        return err;

                err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
                if (err) {
                        res_abort_move(dev, slave, RES_MPT, id);
                        return err;
                }

                res_end_move(dev, slave, RES_MPT, id);
                break;
        }
        return err;
}
static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
{
        int cqn;
        int err;

        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
                if (err)
                        break;

                err = __mlx4_cq_alloc_icm(dev, &cqn);
                if (err) {
                        mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
                        break;
                }

                err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
                if (err) {
                        mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
                        __mlx4_cq_free_icm(dev, cqn);
                        break;
                }

                set_param_l(out_param, cqn);
                break;

        default:
                err = -EINVAL;
        }

        return err;
}
static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
{
        int srqn;
        int err;

        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
                if (err)
                        break;

                err = __mlx4_srq_alloc_icm(dev, &srqn);
                if (err) {
                        mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
                        break;
                }

                err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
                if (err) {
                        mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
                        __mlx4_srq_free_icm(dev, srqn);
                        break;
                }

                set_param_l(out_param, srqn);
                break;

        default:
                err = -EINVAL;
        }

        return err;
}
static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
                                     u8 smac_index, u64 *mac)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *mac_list =
                &tracker->slave_list[slave].res_list[RES_MAC];
        struct mac_res *res, *tmp;

        list_for_each_entry_safe(res, tmp, mac_list, list) {
                if (res->smac_index == smac_index && res->port == (u8) port) {
                        *mac = res->mac;
                        return 0;
                }
        }
        return -ENOENT;
}
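/*
 * MAC and VLAN registrations are reference-counted per (value, port)
 * pair in the slave's list, so repeated registration by the same
 * slave only bumps ref_count and the backend is charged once.
 */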
static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *mac_list =
                &tracker->slave_list[slave].res_list[RES_MAC];
        struct mac_res *res, *tmp;

        list_for_each_entry_safe(res, tmp, mac_list, list) {
                if (res->mac == mac && res->port == (u8) port) {
                        /* mac found. update ref count */
                        ++res->ref_count;
                        return 0;
                }
        }

        if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
                return -EINVAL;
        res = kzalloc(sizeof *res, GFP_KERNEL);
        if (!res) {
                mlx4_release_resource(dev, slave, RES_MAC, 1, port);
                return -ENOMEM;
        }
        res->mac = mac;
        res->port = (u8) port;
        res->smac_index = smac_index;
        res->ref_count = 1;
        list_add_tail(&res->list,
                      &tracker->slave_list[slave].res_list[RES_MAC]);
        return 0;
}
static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
                               int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *mac_list =
                &tracker->slave_list[slave].res_list[RES_MAC];
        struct mac_res *res, *tmp;

        list_for_each_entry_safe(res, tmp, mac_list, list) {
                if (res->mac == mac && res->port == (u8) port) {
                        if (!--res->ref_count) {
                                list_del(&res->list);
                                mlx4_release_resource(dev, slave, RES_MAC, 1, port);
                                kfree(res);
                        }
                        break;
                }
        }
}
static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *mac_list =
                &tracker->slave_list[slave].res_list[RES_MAC];
        struct mac_res *res, *tmp;
        int i;

        list_for_each_entry_safe(res, tmp, mac_list, list) {
                list_del(&res->list);
                /* dereference the mac the num times the slave referenced it */
                for (i = 0; i < res->ref_count; i++)
                        __mlx4_unregister_mac(dev, res->port, res->mac);
                mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
                kfree(res);
        }
}
static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param, int in_port)
{
        int err = -EINVAL;
        int port;
        u64 mac;
        u8 smac_index;

        if (op != RES_OP_RESERVE_AND_MAP)
                return err;

        port = !in_port ? get_param_l(out_param) : in_port;
        port = mlx4_slave_convert_port(
                        dev, slave, port);

        if (port < 0)
                return -EINVAL;
        mac = in_param;

        err = __mlx4_register_mac(dev, port, mac);
        if (err >= 0) {
                smac_index = err;
                set_param_l(out_param, err);
                err = 0;
        }

        if (!err) {
                err = mac_add_to_slave(dev, slave, mac, port, smac_index);
                if (err)
                        __mlx4_unregister_mac(dev, port, mac);
        }
        return err;
}
static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
                             int port, int vlan_index)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *vlan_list =
                &tracker->slave_list[slave].res_list[RES_VLAN];
        struct vlan_res *res, *tmp;

        list_for_each_entry_safe(res, tmp, vlan_list, list) {
                if (res->vlan == vlan && res->port == (u8) port) {
                        /* vlan found. update ref count */
                        ++res->ref_count;
                        return 0;
                }
        }

        if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
                return -EINVAL;
        res = kzalloc(sizeof(*res), GFP_KERNEL);
        if (!res) {
                mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
                return -ENOMEM;
        }
        res->vlan = vlan;
        res->port = (u8) port;
        res->vlan_index = vlan_index;
        res->ref_count = 1;
        list_add_tail(&res->list,
                      &tracker->slave_list[slave].res_list[RES_VLAN]);
        return 0;
}
static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
                                int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *vlan_list =
                &tracker->slave_list[slave].res_list[RES_VLAN];
        struct vlan_res *res, *tmp;

        list_for_each_entry_safe(res, tmp, vlan_list, list) {
                if (res->vlan == vlan && res->port == (u8) port) {
                        if (!--res->ref_count) {
                                list_del(&res->list);
                                mlx4_release_resource(dev, slave, RES_VLAN,
                                                      1, port);
                                kfree(res);
                        }
                        break;
                }
        }
}
static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *vlan_list =
                &tracker->slave_list[slave].res_list[RES_VLAN];
        struct vlan_res *res, *tmp;
        int i;

        list_for_each_entry_safe(res, tmp, vlan_list, list) {
                list_del(&res->list);
                /* dereference the vlan the num times the slave referenced it */
                for (i = 0; i < res->ref_count; i++)
                        __mlx4_unregister_vlan(dev, res->port, res->vlan);
                mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
                kfree(res);
        }
}
static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                          u64 in_param, u64 *out_param, int in_port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
        int err;
        u16 vlan;
        int vlan_index;
        int port;

        port = !in_port ? get_param_l(out_param) : in_port;

        if (!port || op != RES_OP_RESERVE_AND_MAP)
                return -EINVAL;

        port = mlx4_slave_convert_port(
                        dev, slave, port);

        if (port < 0)
                return -EINVAL;
        /* upstream kernels had NOP for reg/unreg vlan. Continue this. */
        if (!in_port && port > 0 && port <= dev->caps.num_ports) {
                slave_state[slave].old_vlan_api = true;
                return 0;
        }

        vlan = (u16) in_param;

        err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
        if (!err) {
                set_param_l(out_param, (u32) vlan_index);
                err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
                if (err)
                        __mlx4_unregister_vlan(dev, port, vlan);
        }
        return err;
}
static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                             u64 in_param, u64 *out_param)
{
        u32 index;
        int err;

        if (op != RES_OP_RESERVE)
                return -EINVAL;

        err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
        if (err)
                return err;

        err = __mlx4_counter_alloc(dev, &index);
        if (err) {
                mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
                return err;
        }

        err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
        if (err) {
                __mlx4_counter_free(dev, index);
                mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
        } else {
                set_param_l(out_param, index);
        }

        return err;
}
static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                           u64 in_param, u64 *out_param)
{
        u32 xrcdn;
        int err;

        if (op != RES_OP_RESERVE)
                return -EINVAL;

        err = __mlx4_xrcd_alloc(dev, &xrcdn);
        if (err)
                return err;

        err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
        if (err)
                __mlx4_xrcd_free(dev, xrcdn);
        else
                set_param_l(out_param, xrcdn);

        return err;
}
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
{
        int err;
        int alop = vhcr->op_modifier;

        switch (vhcr->in_modifier & 0xFF) {
        case RES_QP:
                err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                   vhcr->in_param, &vhcr->out_param);
                break;

        case RES_MTT:
                err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param);
                break;

        case RES_MPT:
                err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param);
                break;

        case RES_CQ:
                err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                   vhcr->in_param, &vhcr->out_param);
                break;

        case RES_SRQ:
                err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param);
                break;

        case RES_MAC:
                err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param,
                                    (vhcr->in_modifier >> 8) & 0xFF);
                break;

        case RES_VLAN:
                err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                     vhcr->in_param, &vhcr->out_param,
                                     (vhcr->in_modifier >> 8) & 0xFF);
                break;

        case RES_COUNTER:
                err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                        vhcr->in_param, &vhcr->out_param);
                break;

        case RES_XRCD:
                err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                      vhcr->in_param, &vhcr->out_param);
                break;

        default:
                err = -EINVAL;
                break;
        }

        return err;
}
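/*
 * The *_free_res() functions below mirror the allocation side:
 * deregister from the tracker first (which also validates ownership
 * and state), then release the backend resource and refund the quota.
 */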
static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                       u64 in_param)
{
        int err;
        int count;
        int base;
        int qpn;

        switch (op) {
        case RES_OP_RESERVE:
                base = get_param_l(&in_param) & 0x7fffff;
                count = get_param_h(&in_param);
                err = rem_res_range(dev, slave, base, count, RES_QP, 0);
                if (err)
                        break;
                mlx4_release_resource(dev, slave, RES_QP, count, 0);
                __mlx4_qp_release_range(dev, base, count);
                break;
        case RES_OP_MAP_ICM:
                qpn = get_param_l(&in_param) & 0x7fffff;
                err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
                                           NULL, 0);
                if (err)
                        return err;

                if (!fw_reserved(dev, qpn))
                        __mlx4_qp_free_icm(dev, qpn);

                res_end_move(dev, slave, RES_QP, qpn);

                if (valid_reserved(dev, slave, qpn))
                        err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
                break;
        default:
                err = -EINVAL;
                break;
        }
        return err;
}
static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
{
        int err = -EINVAL;
        int base;
        int order;

        if (op != RES_OP_RESERVE_AND_MAP)
                return err;

        base = get_param_l(&in_param);
        order = get_param_h(&in_param);
        err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
        if (!err) {
                mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
                __mlx4_free_mtt_range(dev, base, order);
        }
        return err;
}
static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param)
{
        int err = -EINVAL;
        int index;
        int id;
        struct res_mpt *mpt;

        switch (op) {
        case RES_OP_RESERVE:
                index = get_param_l(&in_param);
                id = index & mpt_mask(dev);
                err = get_res(dev, slave, id, RES_MPT, &mpt);
                if (err)
                        break;
                index = mpt->key;
                put_res(dev, slave, id, RES_MPT);

                err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
                if (err)
                        break;
                mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
                __mlx4_mpt_release(dev, index);
                break;
        case RES_OP_MAP_ICM:
                index = get_param_l(&in_param);
                id = index & mpt_mask(dev);
                err = mr_res_start_move_to(dev, slave, id,
                                           RES_MPT_RESERVED, &mpt);
                if (err)
                        return err;

                __mlx4_mpt_free_icm(dev, mpt->key);
                res_end_move(dev, slave, RES_MPT, id);
                break;
        default:
                err = -EINVAL;
                break;
        }
        return err;
}
static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                       u64 in_param, u64 *out_param)
{
        int cqn;
        int err;

        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                cqn = get_param_l(&in_param);
                err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
                if (err)
                        break;

                mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
                __mlx4_cq_free_icm(dev, cqn);
                break;

        default:
                err = -EINVAL;
                break;
        }

        return err;
}
static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
{
        int srqn;
        int err;

        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                srqn = get_param_l(&in_param);
                err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
                if (err)
                        break;

                mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
                __mlx4_srq_free_icm(dev, srqn);
                break;

        default:
                err = -EINVAL;
                break;
        }

        return err;
}
static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param, int in_port)
{
        int port;
        int err = 0;

        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                port = !in_port ? get_param_l(out_param) : in_port;
                port = mlx4_slave_convert_port(
                                dev, slave, port);

                if (port < 0)
                        return -EINVAL;
                mac_del_from_slave(dev, slave, in_param, port);
                __mlx4_unregister_mac(dev, port, in_param);
                break;
        default:
                err = -EINVAL;
                break;
        }

        return err;
}
static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param, int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
        int err = 0;

        port = mlx4_slave_convert_port(
                        dev, slave, port);

        if (port < 0)
                return -EINVAL;
        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                if (slave_state[slave].old_vlan_api)
                        return 0;
                if (!port)
                        return -EINVAL;
                vlan_del_from_slave(dev, slave, in_param, port);
                __mlx4_unregister_vlan(dev, port, in_param);
                break;
        default:
                err = -EINVAL;
                break;
        }

        return err;
}
static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                            u64 in_param, u64 *out_param)
{
        int index;
        int err;

        if (op != RES_OP_RESERVE)
                return -EINVAL;

        index = get_param_l(&in_param);
        err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
        if (err)
                return err;

        __mlx4_counter_free(dev, index);
        mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);

        return err;
}
static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                          u64 in_param, u64 *out_param)
{
        int xrcdn;
        int err;

        if (op != RES_OP_RESERVE)
                return -EINVAL;

        xrcdn = get_param_l(&in_param);
        err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
        if (err)
                return err;

        __mlx4_xrcd_free(dev, xrcdn);

        return err;
}
int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
                          struct mlx4_vhcr *vhcr,
                          struct mlx4_cmd_mailbox *inbox,
                          struct mlx4_cmd_mailbox *outbox,
                          struct mlx4_cmd_info *cmd)
{
        int err = -EINVAL;
        int alop = vhcr->op_modifier;

        switch (vhcr->in_modifier & 0xFF) {
        case RES_QP:
                err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
                                  vhcr->in_param);
                break;

        case RES_MTT:
                err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
                                   vhcr->in_param, &vhcr->out_param);
                break;

        case RES_MPT:
                err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
                                   vhcr->in_param);
                break;

        case RES_CQ:
                err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
                                  vhcr->in_param, &vhcr->out_param);
                break;

        case RES_SRQ:
                err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
                                   vhcr->in_param, &vhcr->out_param);
                break;

        case RES_MAC:
                err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
                                   vhcr->in_param, &vhcr->out_param,
                                   (vhcr->in_modifier >> 8) & 0xFF);
                break;

        case RES_VLAN:
                err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param,
                                    (vhcr->in_modifier >> 8) & 0xFF);
                break;

        case RES_COUNTER:
                err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
                                       vhcr->in_param, &vhcr->out_param);
                break;

        case RES_XRCD:
                err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
                                     vhcr->in_param, &vhcr->out_param);
                break;

        default:
                break;
        }

        return err;
}
/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
        return (be32_to_cpu(mpt->flags) >> 9) & 1;
}

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
{
        return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
}

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
        return be32_to_cpu(mpt->mtt_sz);
}

static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
{
        return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
}

static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
{
        return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
}

static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
{
        return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
}

static int mr_is_region(struct mlx4_mpt_entry *mpt)
{
        return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
}
static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
        return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
{
        return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}
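/*
 * MTT size of a QP in pages: WQ memory is sq_size + rq_size, where the
 * RQ contributes nothing for SRQ/RSS/XRC QPs, plus the 64-byte-granular
 * page_offset, rounded up to a power-of-two number of pages. With
 * illustrative numbers: log_sq_size = 6, log_sq_stride = 2 and
 * page_shift = 12 give an SQ of 1 << (6 + 2 + 4) = 4 KB, i.e. one page.
 */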
static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
        int page_shift = (qpc->log_page_size & 0x3f) + 12;
        int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
        int log_sq_stride = qpc->sq_size_stride & 7;
        int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
        int log_rq_stride = qpc->rq_size_stride & 7;
        int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
        int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
        u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
        int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
        int sq_size;
        int rq_size;
        int total_mem;
        int total_pages;
        int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

        sq_size = 1 << (log_sq_size + log_sq_stride + 4);
        rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
        total_mem = sq_size + rq_size;
        total_pages =
                roundup_pow_of_two((total_mem + (page_offset << 6)) >>
                                   page_shift);

        return total_pages;
}
static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
                           int size, struct res_mtt *mtt)
{
        int res_start = mtt->com.res_id;
        int res_size = (1 << mtt->order);

        if (start < res_start || start + size > res_start + res_size)
                return -EPERM;
        return 0;
}
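/*
 * SW2HW_MPT on behalf of a VF: beyond moving the MPT to HW ownership,
 * the master validates that the entry is a memory region (no memory
 * windows for VFs), that the PD's embedded function bits match the
 * slave, that FMRs don't have bind enabled, and that any referenced
 * MTT range really belongs to the slave.
 */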
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
{
        int err;
        int index = vhcr->in_modifier;
        struct res_mtt *mtt;
        struct res_mpt *mpt;
        int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
        int phys;
        int id;
        u32 pd;
        int pd_slave;

        id = index & mpt_mask(dev);
        err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
        if (err)
                return err;

        /* Disable memory windows for VFs. */
        if (!mr_is_region(inbox->buf)) {
                err = -EPERM;
                goto ex_abort;
        }

        /* Make sure that the PD bits related to the slave id are zeros. */
        pd = mr_get_pd(inbox->buf);
        pd_slave = (pd >> 17) & 0x7f;
        if (pd_slave != 0 && --pd_slave != slave) {
                err = -EPERM;
                goto ex_abort;
        }

        if (mr_is_fmr(inbox->buf)) {
                /* FMR and Bind Enable are forbidden in slave devices. */
                if (mr_is_bind_enabled(inbox->buf)) {
                        err = -EPERM;
                        goto ex_abort;
                }
                /* FMR and Memory Windows are also forbidden. */
                if (!mr_is_region(inbox->buf)) {
                        err = -EPERM;
                        goto ex_abort;
                }
        }

        phys = mr_phys_mpt(inbox->buf);
        if (!phys) {
                err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
                if (err)
                        goto ex_abort;

                err = check_mtt_range(dev, slave, mtt_base,
                                      mr_get_mtt_size(inbox->buf), mtt);
                if (err)
                        goto ex_put;

                mpt->mtt = mtt;
        }

        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
        if (err)
                goto ex_put;

        if (!phys) {
                atomic_inc(&mtt->ref_count);
                put_res(dev, slave, mtt->com.res_id, RES_MTT);
        }

        res_end_move(dev, slave, RES_MPT, id);
        return 0;

ex_put:
        if (!phys)
                put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
        res_abort_move(dev, slave, RES_MPT, id);

        return err;
}
int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
{
        int err;
        int index = vhcr->in_modifier;
        struct res_mpt *mpt;
        int id;

        id = index & mpt_mask(dev);
        err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
        if (err)
                return err;

        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
        if (err)
                goto ex_abort;

        if (mpt->mtt)
                atomic_dec(&mpt->mtt->ref_count);

        res_end_move(dev, slave, RES_MPT, id);
        return 0;

ex_abort:
        res_abort_move(dev, slave, RES_MPT, id);

        return err;
}
int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
{
        int err;
        int index = vhcr->in_modifier;
        struct res_mpt *mpt;
        int id;

        id = index & mpt_mask(dev);
        err = get_res(dev, slave, id, RES_MPT, &mpt);
        if (err)
                return err;

        if (mpt->com.from_state == RES_MPT_MAPPED) {
                /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
                 * that, the VF must read the MPT. But since the MPT entry memory is not
                 * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
                 * entry contents. To guarantee that the MPT cannot be changed, the driver
                 * must perform HW2SW_MPT before this query and return the MPT entry to HW
                 * ownership following the change. The change here allows the VF to
                 * perform QUERY_MPT also when the entry is in SW ownership.
                 */
                struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
                                        &mlx4_priv(dev)->mr_table.dmpt_table,
                                        mpt->key, NULL);

                if (NULL == mpt_entry || NULL == outbox->buf) {
                        err = -EINVAL;
                        goto out;
                }

                memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));

                err = 0;
        } else if (mpt->com.from_state == RES_MPT_HW) {
                err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
        } else {
                err = -EBUSY;
                goto out;
        }

out:
        put_res(dev, slave, id, RES_MPT);
        return err;
}
static int qp_get_rcqn(struct mlx4_qp_context *qpc)
{
        return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
}

static int qp_get_scqn(struct mlx4_qp_context *qpc)
{
        return be32_to_cpu(qpc->cqn_send) & 0xffffff;
}

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
{
        return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}

static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
                                  struct mlx4_qp_context *context)
{
        u32 qpn = vhcr->in_modifier & 0xffffff;
        u32 qkey = 0;

        if (mlx4_get_parav_qkey(dev, qpn, &qkey))
                return;

        /* adjust qkey in qp context */
        context->qkey = cpu_to_be32(qkey);
}
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
                             struct mlx4_vhcr *vhcr,
                             struct mlx4_cmd_mailbox *inbox,
                             struct mlx4_cmd_mailbox *outbox,
                             struct mlx4_cmd_info *cmd)
{
        int err;
        int qpn = vhcr->in_modifier & 0x7fffff;
        struct res_mtt *mtt;
        struct res_qp *qp;
        struct mlx4_qp_context *qpc = inbox->buf + 8;
        int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
        int mtt_size = qp_get_mtt_size(qpc);
        struct res_cq *rcq;
        struct res_cq *scq;
        int rcqn = qp_get_rcqn(qpc);
        int scqn = qp_get_scqn(qpc);
        u32 srqn = qp_get_srqn(qpc) & 0xffffff;
        int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
        struct res_srq *srq;
        int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

        err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
        if (err)
                return err;
        qp->local_qpn = local_qpn;
        qp->sched_queue = 0;
        qp->param3 = 0;
        qp->vlan_control = 0;
        qp->fvl_rx = 0;
        qp->pri_path_fl = 0;
        qp->vlan_index = 0;
        qp->feup = 0;
        qp->qpc_flags = be32_to_cpu(qpc->flags);

        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
        if (err)
                goto ex_abort;

        err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
        if (err)
                goto ex_put_mtt;

        err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
        if (err)
                goto ex_put_mtt;

        if (scqn != rcqn) {
                err = get_res(dev, slave, scqn, RES_CQ, &scq);
                if (err)
                        goto ex_put_rcq;
        } else
                scq = rcq;

        if (use_srq) {
                err = get_res(dev, slave, srqn, RES_SRQ, &srq);
                if (err)
                        goto ex_put_scq;
        }

        adjust_proxy_tun_qkey(dev, vhcr, qpc);
        update_pkey_index(dev, slave, inbox);
        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
        if (err)
                goto ex_put_srq;
        atomic_inc(&mtt->ref_count);
        qp->mtt = mtt;
        atomic_inc(&rcq->ref_count);
        qp->rcq = rcq;
        atomic_inc(&scq->ref_count);
        qp->scq = scq;

        if (scqn != rcqn)
                put_res(dev, slave, scqn, RES_CQ);

        if (use_srq) {
                atomic_inc(&srq->ref_count);
                put_res(dev, slave, srqn, RES_SRQ);
                qp->srq = srq;
        }
        put_res(dev, slave, rcqn, RES_CQ);
        put_res(dev, slave, mtt_base, RES_MTT);
        res_end_move(dev, slave, RES_QP, qpn);

        return 0;

ex_put_srq:
        if (use_srq)
                put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
        if (scqn != rcqn)
                put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
        put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
        put_res(dev, slave, mtt_base, RES_MTT);
ex_abort:
        res_abort_move(dev, slave, RES_QP, qpn);

        return err;
}
static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
{
        return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
}

static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
{
        int log_eq_size = eqc->log_eq_size & 0x1f;
        int page_shift = (eqc->log_page_size & 0x3f) + 12;

        if (log_eq_size + 5 < page_shift)
                return 1;

        return 1 << (log_eq_size + 5 - page_shift);
}

static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
{
        return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}

static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
{
        int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
        int page_shift = (cqc->log_page_size & 0x3f) + 12;

        if (log_cq_size + 5 < page_shift)
                return 1;

        return 1 << (log_cq_size + 5 - page_shift);
}
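/*
 * EQ numbers are per-slave, so tracker ids for EQs are encoded as
 * (slave << 8) | eqn to keep them unique in the shared rb-tree.
 */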
int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
                          struct mlx4_vhcr *vhcr,
                          struct mlx4_cmd_mailbox *inbox,
                          struct mlx4_cmd_mailbox *outbox,
                          struct mlx4_cmd_info *cmd)
{
        int err;
        int eqn = vhcr->in_modifier;
        int res_id = (slave << 8) | eqn;
        struct mlx4_eq_context *eqc = inbox->buf;
        int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
        int mtt_size = eq_get_mtt_size(eqc);
        struct res_eq *eq;
        struct res_mtt *mtt;

        err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
        if (err)
                return err;
        err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
        if (err)
                goto out_add;

        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
        if (err)
                goto out_move;

        err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
        if (err)
                goto out_put;

        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
        if (err)
                goto out_put;

        atomic_inc(&mtt->ref_count);
        eq->mtt = mtt;
        put_res(dev, slave, mtt->com.res_id, RES_MTT);
        res_end_move(dev, slave, RES_EQ, res_id);
        return 0;

out_put:
        put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
        res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
        rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
        return err;
}
2888 int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
2889 struct mlx4_vhcr *vhcr,
2890 struct mlx4_cmd_mailbox *inbox,
2891 struct mlx4_cmd_mailbox *outbox,
2892 struct mlx4_cmd_info *cmd)
2895 u8 get = vhcr->op_modifier;
2900 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2905 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2906 int len, struct res_mtt **res)
2908 struct mlx4_priv *priv = mlx4_priv(dev);
2909 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2910 struct res_mtt *mtt;
2913 spin_lock_irq(mlx4_tlock(dev));
2914 list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
2916 if (!check_mtt_range(dev, slave, start, len, mtt)) {
2918 mtt->com.from_state = mtt->com.state;
2919 mtt->com.state = RES_MTT_BUSY;
2924 spin_unlock_irq(mlx4_tlock(dev));
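/*
 * get_containing_mtt() walks the slave's MTT list under the tracker lock
 * and, once a range containing [start, start + len) is found, marks it
 * busy so it cannot be freed while the caller (mlx4_WRITE_MTT_wrapper)
 * still operates on it; the caller drops the hold with put_res().
 */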
2929 static int verify_qp_parameters(struct mlx4_dev *dev,
2930 struct mlx4_vhcr *vhcr,
2931 struct mlx4_cmd_mailbox *inbox,
2932 enum qp_transition transition, u8 slave)
2936 struct mlx4_qp_context *qp_ctx;
2937 enum mlx4_qp_optpar optpar;
2941 qp_ctx = inbox->buf + 8;
2942 qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2943 optpar = be32_to_cpu(*(__be32 *) inbox->buf);
2945 if (slave != mlx4_master_func_num(dev)) {
2946 qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
2947 /* setting QP rate-limit is disallowed for VFs */
2948 if (qp_ctx->rate_limit_params)
2954 case MLX4_QP_ST_XRC:
2956 switch (transition) {
2957 case QP_TRANS_INIT2RTR:
2958 case QP_TRANS_RTR2RTS:
2959 case QP_TRANS_RTS2RTS:
2960 case QP_TRANS_SQD2SQD:
2961 case QP_TRANS_SQD2RTS:
2962 if (slave != mlx4_master_func_num(dev))
2963 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
2964 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
2965 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
2966 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
2969 if (qp_ctx->pri_path.mgid_index >= num_gids)
2972 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
2973 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
2974 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
2975 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
2978 if (qp_ctx->alt_path.mgid_index >= num_gids)
2987 case MLX4_QP_ST_MLX:
2988 qpn = vhcr->in_modifier & 0x7fffff;
2989 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
2990 if (transition == QP_TRANS_INIT2RTR &&
2991 slave != mlx4_master_func_num(dev) &&
2992 mlx4_is_qp_reserved(dev, qpn) &&
2993 !mlx4_vf_smi_enabled(dev, slave, port)) {
2994 /* only enabled VFs may create MLX proxy QPs */
2995 mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
2996 __func__, slave, port);
3008 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
3009 struct mlx4_vhcr *vhcr,
3010 struct mlx4_cmd_mailbox *inbox,
3011 struct mlx4_cmd_mailbox *outbox,
3012 struct mlx4_cmd_info *cmd)
3014 struct mlx4_mtt mtt;
3015 __be64 *page_list = inbox->buf;
3016 u64 *pg_list = (u64 *)page_list;
3018 struct res_mtt *rmtt = NULL;
3019 int start = be64_to_cpu(page_list[0]);
3020 int npages = vhcr->in_modifier;
3023 err = get_containing_mtt(dev, slave, start, npages, &rmtt);
3027 /* Call the SW implementation of write_mtt:
3028 * - Prepare a dummy mtt struct
3029 * - Translate inbox contents to simple addresses in host endianness */
3030 mtt.offset = 0; /* TBD: the offset is not handled here, but this
3031 path never uses it */
3034 for (i = 0; i < npages; ++i)
3035 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
3037 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
3038 ((u64 *)page_list + 2));
3041 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
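/*
 * Sketch of the WRITE_MTT mailbox as consumed above (field names are
 * illustrative, not taken from the driver):
 *
 *	__be64 start_index;	page_list[0] - first MTT entry to write
 *	__be64 reserved;	page_list[1]
 *	__be64 pages[npages];	page_list[2..] - bit 0 is cleared to leave
 *				a plain address for __mlx4_write_mtt()
 */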
3046 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3047 struct mlx4_vhcr *vhcr,
3048 struct mlx4_cmd_mailbox *inbox,
3049 struct mlx4_cmd_mailbox *outbox,
3050 struct mlx4_cmd_info *cmd)
3052 int eqn = vhcr->in_modifier;
3053 int res_id = eqn | (slave << 8);
3057 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
3061 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
3065 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3069 atomic_dec(&eq->mtt->ref_count);
3070 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3071 res_end_move(dev, slave, RES_EQ, res_id);
3072 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3077 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3079 res_abort_move(dev, slave, RES_EQ, res_id);
3084 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3086 struct mlx4_priv *priv = mlx4_priv(dev);
3087 struct mlx4_slave_event_eq_info *event_eq;
3088 struct mlx4_cmd_mailbox *mailbox;
3089 u32 in_modifier = 0;
3094 if (!priv->mfunc.master.slave_state)
3097 /* make sure the slave index is valid, is not the PF, and is active */
3098 if (slave < 0 || slave > dev->persist->num_vfs ||
3099 slave == dev->caps.function ||
3100 !priv->mfunc.master.slave_state[slave].active)
3103 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
3105 /* Create the event only if the slave is registered */
3106 if (event_eq->eqn < 0)
3109 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3110 res_id = (slave << 8) | event_eq->eqn;
3111 err = get_res(dev, slave, res_id, RES_EQ, &req);
3115 if (req->com.from_state != RES_EQ_HW) {
3120 mailbox = mlx4_alloc_cmd_mailbox(dev);
3121 if (IS_ERR(mailbox)) {
3122 err = PTR_ERR(mailbox);
3126 if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3128 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3131 memcpy(mailbox->buf, (u8 *) eqe, 28);
3133 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
3135 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3136 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3139 put_res(dev, slave, res_id, RES_EQ);
3140 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3141 mlx4_free_cmd_mailbox(dev, mailbox);
3145 put_res(dev, slave, res_id, RES_EQ);
3148 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
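/*
 * Note on the command issued above: in_modifier packs the target slave
 * into bits 0-7 and the slave's EQN into bits 16-23, and only the first
 * 28 bytes of the 32-byte EQE are copied - the trailing word, which
 * carries the ownership bit, is not taken from the caller's event.
 */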
3152 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3153 struct mlx4_vhcr *vhcr,
3154 struct mlx4_cmd_mailbox *inbox,
3155 struct mlx4_cmd_mailbox *outbox,
3156 struct mlx4_cmd_info *cmd)
3158 int eqn = vhcr->in_modifier;
3159 int res_id = eqn | (slave << 8);
3163 err = get_res(dev, slave, res_id, RES_EQ, &eq);
3167 if (eq->com.from_state != RES_EQ_HW) {
3172 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3175 put_res(dev, slave, res_id, RES_EQ);
3179 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3180 struct mlx4_vhcr *vhcr,
3181 struct mlx4_cmd_mailbox *inbox,
3182 struct mlx4_cmd_mailbox *outbox,
3183 struct mlx4_cmd_info *cmd)
3186 int cqn = vhcr->in_modifier;
3187 struct mlx4_cq_context *cqc = inbox->buf;
3188 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3190 struct res_mtt *mtt;
3192 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3195 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3198 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3201 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3204 atomic_inc(&mtt->ref_count);
3206 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3207 res_end_move(dev, slave, RES_CQ, cqn);
3211 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3213 res_abort_move(dev, slave, RES_CQ, cqn);
3217 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3218 struct mlx4_vhcr *vhcr,
3219 struct mlx4_cmd_mailbox *inbox,
3220 struct mlx4_cmd_mailbox *outbox,
3221 struct mlx4_cmd_info *cmd)
3224 int cqn = vhcr->in_modifier;
3227 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3230 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3233 atomic_dec(&cq->mtt->ref_count);
3234 res_end_move(dev, slave, RES_CQ, cqn);
3238 res_abort_move(dev, slave, RES_CQ, cqn);
3242 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3243 struct mlx4_vhcr *vhcr,
3244 struct mlx4_cmd_mailbox *inbox,
3245 struct mlx4_cmd_mailbox *outbox,
3246 struct mlx4_cmd_info *cmd)
3248 int cqn = vhcr->in_modifier;
3252 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3256 if (cq->com.from_state != RES_CQ_HW)
3259 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3261 put_res(dev, slave, cqn, RES_CQ);
3266 static int handle_resize(struct mlx4_dev *dev, int slave,
3267 struct mlx4_vhcr *vhcr,
3268 struct mlx4_cmd_mailbox *inbox,
3269 struct mlx4_cmd_mailbox *outbox,
3270 struct mlx4_cmd_info *cmd,
3274 struct res_mtt *orig_mtt;
3275 struct res_mtt *mtt;
3276 struct mlx4_cq_context *cqc = inbox->buf;
3277 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3279 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3283 if (orig_mtt != cq->mtt) {
3288 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3292 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3295 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3298 atomic_dec(&orig_mtt->ref_count);
3299 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3300 atomic_inc(&mtt->ref_count);
3302 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3306 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3308 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3314 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3315 struct mlx4_vhcr *vhcr,
3316 struct mlx4_cmd_mailbox *inbox,
3317 struct mlx4_cmd_mailbox *outbox,
3318 struct mlx4_cmd_info *cmd)
3320 int cqn = vhcr->in_modifier;
3324 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3328 if (cq->com.from_state != RES_CQ_HW)
3331 if (vhcr->op_modifier == 0) {
3332 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3336 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3338 put_res(dev, slave, cqn, RES_CQ);
3343 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3345 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3346 int log_rq_stride = srqc->logstride & 7;
3347 int page_shift = (srqc->log_page_size & 0x3f) + 12;
3349 if (log_srq_size + log_rq_stride + 4 < page_shift)
3352 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
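/*
 * As with the EQ/CQ helpers, this converts geometry to a page count; the
 * SRQ WQE stride is 2^(log_rq_stride + 4) bytes (16-byte units). Example:
 * log_srq_size = 10 (1024 WQEs) with log_rq_stride = 2 (64-byte WQEs) is
 * 64 KiB, i.e. 1 << (10 + 2 + 4 - 12) = 16 pages of 4 KiB.
 */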
3355 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3356 struct mlx4_vhcr *vhcr,
3357 struct mlx4_cmd_mailbox *inbox,
3358 struct mlx4_cmd_mailbox *outbox,
3359 struct mlx4_cmd_info *cmd)
3362 int srqn = vhcr->in_modifier;
3363 struct res_mtt *mtt;
3364 struct res_srq *srq;
3365 struct mlx4_srq_context *srqc = inbox->buf;
3366 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3368 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3371 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3374 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3377 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3382 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3386 atomic_inc(&mtt->ref_count);
3388 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3389 res_end_move(dev, slave, RES_SRQ, srqn);
3393 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3395 res_abort_move(dev, slave, RES_SRQ, srqn);
3400 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3401 struct mlx4_vhcr *vhcr,
3402 struct mlx4_cmd_mailbox *inbox,
3403 struct mlx4_cmd_mailbox *outbox,
3404 struct mlx4_cmd_info *cmd)
3407 int srqn = vhcr->in_modifier;
3408 struct res_srq *srq;
3410 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3413 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3416 atomic_dec(&srq->mtt->ref_count);
3418 atomic_dec(&srq->cq->ref_count);
3419 res_end_move(dev, slave, RES_SRQ, srqn);
3424 res_abort_move(dev, slave, RES_SRQ, srqn);
3429 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3430 struct mlx4_vhcr *vhcr,
3431 struct mlx4_cmd_mailbox *inbox,
3432 struct mlx4_cmd_mailbox *outbox,
3433 struct mlx4_cmd_info *cmd)
3436 int srqn = vhcr->in_modifier;
3437 struct res_srq *srq;
3439 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3442 if (srq->com.from_state != RES_SRQ_HW) {
3446 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3448 put_res(dev, slave, srqn, RES_SRQ);
3452 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3453 struct mlx4_vhcr *vhcr,
3454 struct mlx4_cmd_mailbox *inbox,
3455 struct mlx4_cmd_mailbox *outbox,
3456 struct mlx4_cmd_info *cmd)
3459 int srqn = vhcr->in_modifier;
3460 struct res_srq *srq;
3462 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3466 if (srq->com.from_state != RES_SRQ_HW) {
3471 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3473 put_res(dev, slave, srqn, RES_SRQ);
3477 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3478 struct mlx4_vhcr *vhcr,
3479 struct mlx4_cmd_mailbox *inbox,
3480 struct mlx4_cmd_mailbox *outbox,
3481 struct mlx4_cmd_info *cmd)
3484 int qpn = vhcr->in_modifier & 0x7fffff;
3487 err = get_res(dev, slave, qpn, RES_QP, &qp);
3490 if (qp->com.from_state != RES_QP_HW) {
3495 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3497 put_res(dev, slave, qpn, RES_QP);
3501 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3502 struct mlx4_vhcr *vhcr,
3503 struct mlx4_cmd_mailbox *inbox,
3504 struct mlx4_cmd_mailbox *outbox,
3505 struct mlx4_cmd_info *cmd)
3507 struct mlx4_qp_context *context = inbox->buf + 8;
3508 adjust_proxy_tun_qkey(dev, vhcr, context);
3509 update_pkey_index(dev, slave, inbox);
3510 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
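/*
 * Bit 6 of pri_path.sched_queue (and of alt_path.sched_queue) carries the
 * zero-based physical port. adjust_qp_sched_queue() below rewrites that
 * bit via mlx4_slave_convert_port() so a VF's "port 1"/"port 2" lands on
 * the physical port actually assigned to that VF.
 */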
3513 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3514 struct mlx4_qp_context *qpc,
3515 struct mlx4_cmd_mailbox *inbox)
3517 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3519 int port = mlx4_slave_convert_port(
3520 dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3525 pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3528 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH ||
3529 mlx4_is_eth(dev, port + 1)) {
3530 qpc->pri_path.sched_queue = pri_sched_queue;
3533 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3534 port = mlx4_slave_convert_port(
3535 dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3539 qpc->alt_path.sched_queue =
3540 (qpc->alt_path.sched_queue & ~(1 << 6)) |
3546 static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3547 struct mlx4_qp_context *qpc,
3548 struct mlx4_cmd_mailbox *inbox)
3552 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3553 u8 sched = *(u8 *)(inbox->buf + 64);
3556 port = (sched >> 6 & 1) + 1;
3557 if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3558 smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3559 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3565 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3566 struct mlx4_vhcr *vhcr,
3567 struct mlx4_cmd_mailbox *inbox,
3568 struct mlx4_cmd_mailbox *outbox,
3569 struct mlx4_cmd_info *cmd)
3572 struct mlx4_qp_context *qpc = inbox->buf + 8;
3573 int qpn = vhcr->in_modifier & 0x7fffff;
3575 u8 orig_sched_queue;
3576 __be32 orig_param3 = qpc->param3;
3577 u8 orig_vlan_control = qpc->pri_path.vlan_control;
3578 u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3579 u8 orig_pri_path_fl = qpc->pri_path.fl;
3580 u8 orig_vlan_index = qpc->pri_path.vlan_index;
3581 u8 orig_feup = qpc->pri_path.feup;
3583 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3586 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
3590 if (roce_verify_mac(dev, slave, qpc, inbox))
3593 update_pkey_index(dev, slave, inbox);
3594 update_gid(dev, inbox, (u8)slave);
3595 adjust_proxy_tun_qkey(dev, vhcr, qpc);
3596 orig_sched_queue = qpc->pri_path.sched_queue;
3597 err = update_vport_qp_param(dev, inbox, slave, qpn);
3601 err = get_res(dev, slave, qpn, RES_QP, &qp);
3604 if (qp->com.from_state != RES_QP_HW) {
3609 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3611 /* If there was no error, save the sched_queue value passed in by the VF.
3612 * This is essentially the QoS value provided by the VF, which is needed
3613 * if we later allow dynamic changes from VST back to VGT
3616 qp->sched_queue = orig_sched_queue;
3617 qp->param3 = orig_param3;
3618 qp->vlan_control = orig_vlan_control;
3619 qp->fvl_rx = orig_fvl_rx;
3620 qp->pri_path_fl = orig_pri_path_fl;
3621 qp->vlan_index = orig_vlan_index;
3622 qp->feup = orig_feup;
3624 put_res(dev, slave, qpn, RES_QP);
3628 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3629 struct mlx4_vhcr *vhcr,
3630 struct mlx4_cmd_mailbox *inbox,
3631 struct mlx4_cmd_mailbox *outbox,
3632 struct mlx4_cmd_info *cmd)
3635 struct mlx4_qp_context *context = inbox->buf + 8;
3637 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3640 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
3644 update_pkey_index(dev, slave, inbox);
3645 update_gid(dev, inbox, (u8)slave);
3646 adjust_proxy_tun_qkey(dev, vhcr, context);
3647 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3650 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3651 struct mlx4_vhcr *vhcr,
3652 struct mlx4_cmd_mailbox *inbox,
3653 struct mlx4_cmd_mailbox *outbox,
3654 struct mlx4_cmd_info *cmd)
3657 struct mlx4_qp_context *context = inbox->buf + 8;
3659 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3662 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
3666 update_pkey_index(dev, slave, inbox);
3667 update_gid(dev, inbox, (u8)slave);
3668 adjust_proxy_tun_qkey(dev, vhcr, context);
3669 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3673 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3674 struct mlx4_vhcr *vhcr,
3675 struct mlx4_cmd_mailbox *inbox,
3676 struct mlx4_cmd_mailbox *outbox,
3677 struct mlx4_cmd_info *cmd)
3679 struct mlx4_qp_context *context = inbox->buf + 8;
3680 int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3683 adjust_proxy_tun_qkey(dev, vhcr, context);
3684 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3687 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3688 struct mlx4_vhcr *vhcr,
3689 struct mlx4_cmd_mailbox *inbox,
3690 struct mlx4_cmd_mailbox *outbox,
3691 struct mlx4_cmd_info *cmd)
3694 struct mlx4_qp_context *context = inbox->buf + 8;
3696 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3699 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
3703 adjust_proxy_tun_qkey(dev, vhcr, context);
3704 update_gid(dev, inbox, (u8)slave);
3705 update_pkey_index(dev, slave, inbox);
3706 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3709 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3710 struct mlx4_vhcr *vhcr,
3711 struct mlx4_cmd_mailbox *inbox,
3712 struct mlx4_cmd_mailbox *outbox,
3713 struct mlx4_cmd_info *cmd)
3716 struct mlx4_qp_context *context = inbox->buf + 8;
3718 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3721 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
3725 adjust_proxy_tun_qkey(dev, vhcr, context);
3726 update_gid(dev, inbox, (u8)slave);
3727 update_pkey_index(dev, slave, inbox);
3728 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3731 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3732 struct mlx4_vhcr *vhcr,
3733 struct mlx4_cmd_mailbox *inbox,
3734 struct mlx4_cmd_mailbox *outbox,
3735 struct mlx4_cmd_info *cmd)
3738 int qpn = vhcr->in_modifier & 0x7fffff;
3741 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3744 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3748 atomic_dec(&qp->mtt->ref_count);
3749 atomic_dec(&qp->rcq->ref_count);
3750 atomic_dec(&qp->scq->ref_count);
3752 atomic_dec(&qp->srq->ref_count);
3753 res_end_move(dev, slave, RES_QP, qpn);
3757 res_abort_move(dev, slave, RES_QP, qpn);
3762 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3763 struct res_qp *rqp, u8 *gid)
3765 struct res_gid *res;
3767 list_for_each_entry(res, &rqp->mcg_list, list) {
3768 if (!memcmp(res->gid, gid, 16))
3774 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3775 u8 *gid, enum mlx4_protocol prot,
3776 enum mlx4_steer_type steer, u64 reg_id)
3778 struct res_gid *res;
3781 res = kzalloc(sizeof(*res), GFP_KERNEL);
3785 spin_lock_irq(&rqp->mcg_spl);
3786 if (find_gid(dev, slave, rqp, gid)) {
3790 memcpy(res->gid, gid, 16);
3793 res->reg_id = reg_id;
3794 list_add_tail(&res->list, &rqp->mcg_list);
3797 spin_unlock_irq(&rqp->mcg_spl);
3802 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3803 u8 *gid, enum mlx4_protocol prot,
3804 enum mlx4_steer_type steer, u64 *reg_id)
3806 struct res_gid *res;
3809 spin_lock_irq(&rqp->mcg_spl);
3810 res = find_gid(dev, slave, rqp, gid);
3811 if (!res || res->prot != prot || res->steer != steer)
3814 *reg_id = res->reg_id;
3815 list_del(&res->list);
3819 spin_unlock_irq(&rqp->mcg_spl);
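/*
 * add_mcg_res()/rem_mcg_res() keep a per-QP list of multicast attachments
 * (GID, protocol, steering type and the reg_id returned by the attach
 * path) under the mcg_spl spinlock, so detach_qp() can later undo any
 * attachments a dying or misbehaving slave left behind.
 */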
3824 static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
3825 u8 gid[16], int block_loopback, enum mlx4_protocol prot,
3826 enum mlx4_steer_type type, u64 *reg_id)
3828 switch (dev->caps.steering_mode) {
3829 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
3830 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3833 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
3834 block_loopback, prot,
3837 case MLX4_STEERING_MODE_B0:
3838 if (prot == MLX4_PROT_ETH) {
3839 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3844 return mlx4_qp_attach_common(dev, qp, gid,
3845 block_loopback, prot, type);
3851 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
3852 u8 gid[16], enum mlx4_protocol prot,
3853 enum mlx4_steer_type type, u64 reg_id)
3855 switch (dev->caps.steering_mode) {
3856 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3857 return mlx4_flow_detach(dev, reg_id);
3858 case MLX4_STEERING_MODE_B0:
3859 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
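/*
 * qp_attach()/qp_detach() hide the steering-mode split: device-managed
 * steering identifies a rule by the 64-bit reg_id returned at attach
 * time, while B0 steering re-derives it from (gid, prot, type) - which
 * is why the attach path stores reg_id in the tracker entry.
 */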
3865 static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
3866 u8 *gid, enum mlx4_protocol prot)
3870 if (prot != MLX4_PROT_ETH)
3873 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
3874 dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
3875 real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
3884 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3885 struct mlx4_vhcr *vhcr,
3886 struct mlx4_cmd_mailbox *inbox,
3887 struct mlx4_cmd_mailbox *outbox,
3888 struct mlx4_cmd_info *cmd)
3890 struct mlx4_qp qp; /* dummy for calling attach/detach */
3891 u8 *gid = inbox->buf;
3892 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
3897 int attach = vhcr->op_modifier;
3898 int block_loopback = vhcr->in_modifier >> 31;
3899 u8 steer_type_mask = 2;
3900 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
3902 qpn = vhcr->in_modifier & 0xffffff;
3903 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3909 err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
3912 pr_err("Failed to attach rule to qp 0x%x\n", qpn);
3915 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
3919 err = mlx4_adjust_port(dev, slave, gid, prot);
3923 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
3927 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
3929 pr_err("Failed to detach rule from qp 0x%x, reg_id = 0x%llx\n",
3932 put_res(dev, slave, qpn, RES_QP);
3936 qp_detach(dev, &qp, gid, prot, type, reg_id);
3938 put_res(dev, slave, qpn, RES_QP);
3943 * MAC validation for flow steering rules.
3944 * A VF may attach rules only with a MAC address that is assigned to it.
3946 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3947 struct list_head *rlist)
3949 struct mac_res *res, *tmp;
3952 /* make sure it isn't a multicast or broadcast MAC */
3953 if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3954 !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3955 list_for_each_entry_safe(res, tmp, rlist, list) {
3956 be_mac = cpu_to_be64(res->mac << 16);
3957 if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
3960 pr_err("MAC %pM doesn't belong to VF %d, steering rule rejected\n",
3961 eth_header->eth.dst_mac, slave);
3968 * If the eth header is missing, append one with a MAC address
3969 * assigned to the VF.
3971 static int add_eth_header(struct mlx4_dev *dev, int slave,
3972 struct mlx4_cmd_mailbox *inbox,
3973 struct list_head *rlist, int header_id)
3975 struct mac_res *res, *tmp;
3977 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3978 struct mlx4_net_trans_rule_hw_eth *eth_header;
3979 struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3980 struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3982 __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3984 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3986 eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3988 /* Make room in the inbox for the eth header */
3989 switch (header_id) {
3990 case MLX4_NET_TRANS_RULE_ID_IPV4:
3992 (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3993 memmove(ip_header, eth_header,
3994 sizeof(*ip_header) + sizeof(*l4_header));
3996 case MLX4_NET_TRANS_RULE_ID_TCP:
3997 case MLX4_NET_TRANS_RULE_ID_UDP:
3998 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
4000 memmove(l4_header, eth_header, sizeof(*l4_header));
4005 list_for_each_entry_safe(res, tmp, rlist, list) {
4006 if (port == res->port) {
4007 be_mac = cpu_to_be64(res->mac << 16);
4012 pr_err("Failed adding eth header to FS rule; can't find a matching MAC for port %d\n",
4017 memset(eth_header, 0, sizeof(*eth_header));
4018 eth_header->size = sizeof(*eth_header) >> 2;
4019 eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
4020 memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
4021 memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
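/*
 * add_eth_header() shifts the existing IPv4/L4 rule segments forward to
 * make room, then builds an ETH segment matching a unicast MAC registered
 * to this VF on the rule's port. The caller then grows vhcr->in_modifier
 * by sizeof(eth segment) >> 2, since rule sizes are counted in 4-byte
 * units (see eth_header->size above).
 */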
4027 #define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
4028 int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
4029 struct mlx4_vhcr *vhcr,
4030 struct mlx4_cmd_mailbox *inbox,
4031 struct mlx4_cmd_mailbox *outbox,
4032 struct mlx4_cmd_info *cmd_info)
4035 u32 qpn = vhcr->in_modifier & 0xffffff;
4039 u64 pri_addr_path_mask;
4040 struct mlx4_update_qp_context *cmd;
4043 cmd = (struct mlx4_update_qp_context *)inbox->buf;
4045 pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
4046 if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
4047 (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
4050 /* Just change the smac for the QP */
4051 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4053 mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
4057 port = (rqp->sched_queue >> 6 & 1) + 1;
4059 if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
4060 smac_index = cmd->qp_context.pri_path.grh_mylmc;
4061 err = mac_find_smac_ix_in_slave(dev, slave, port,
4065 mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
4071 err = mlx4_cmd(dev, inbox->dma,
4072 vhcr->in_modifier, 0,
4073 MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
4076 mlx4_err(dev, "Failed to update qpn 0x%x, command failed\n", qpn);
4081 put_res(dev, slave, qpn, RES_QP);
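/*
 * For VFs, UPDATE_QP is restricted above to the MAC-index field of the
 * primary address path (MLX4_UPD_QP_PATH_MASK_SUPPORTED); any other
 * qp_mask or path-mask bit is rejected, and the new smac index must
 * resolve to a MAC actually registered to the slave on that port.
 */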
4085 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4086 struct mlx4_vhcr *vhcr,
4087 struct mlx4_cmd_mailbox *inbox,
4088 struct mlx4_cmd_mailbox *outbox,
4089 struct mlx4_cmd_info *cmd)
4092 struct mlx4_priv *priv = mlx4_priv(dev);
4093 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4094 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
4098 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4099 struct _rule_hw *rule_header;
4102 if (dev->caps.steering_mode !=
4103 MLX4_STEERING_MODE_DEVICE_MANAGED)
4106 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4107 ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port);
4108 if (ctrl->port <= 0)
4110 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
4111 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4113 pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
4116 rule_header = (struct _rule_hw *)(ctrl + 1);
4117 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4119 switch (header_id) {
4120 case MLX4_NET_TRANS_RULE_ID_ETH:
4121 if (validate_eth_header_mac(slave, rule_header, rlist)) {
4126 case MLX4_NET_TRANS_RULE_ID_IB:
4128 case MLX4_NET_TRANS_RULE_ID_IPV4:
4129 case MLX4_NET_TRANS_RULE_ID_TCP:
4130 case MLX4_NET_TRANS_RULE_ID_UDP:
4131 pr_warn("Can't attach FS rule without an L2 header; adding one\n");
4132 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4136 vhcr->in_modifier +=
4137 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4140 pr_err("Corrupted mailbox\n");
4145 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4146 vhcr->in_modifier, 0,
4147 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4152 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
4154 mlx4_err(dev, "Failed to add flow steering resources\n");
4156 mlx4_cmd(dev, vhcr->out_param, 0, 0,
4157 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4161 atomic_inc(&rqp->ref_count);
4163 put_res(dev, slave, qpn, RES_QP);
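/*
 * Attach-path summary: the rule's port and QPN are validated against the
 * slave's resources, an ETH header is enforced (or synthesized) so a VF
 * can only steer traffic for its own MACs, the command is executed, and
 * the returned handle (vhcr->out_param) is registered as a RES_FS_RULE
 * tied to the QP; if tracker registration fails, the rule is detached.
 */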
4167 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4168 struct mlx4_vhcr *vhcr,
4169 struct mlx4_cmd_mailbox *inbox,
4170 struct mlx4_cmd_mailbox *outbox,
4171 struct mlx4_cmd_info *cmd)
4175 struct res_fs_rule *rrule;
4177 if (dev->caps.steering_mode !=
4178 MLX4_STEERING_MODE_DEVICE_MANAGED)
4181 err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4184 /* Release the rule from busy state before removal */
4185 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4186 err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
4190 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4192 mlx4_err(dev, "Failed to remove flow steering resources\n");
4196 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4197 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4200 atomic_dec(&rqp->ref_count);
4202 put_res(dev, slave, rrule->qpn, RES_QP);
4207 BUSY_MAX_RETRIES = 10
4210 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4211 struct mlx4_vhcr *vhcr,
4212 struct mlx4_cmd_mailbox *inbox,
4213 struct mlx4_cmd_mailbox *outbox,
4214 struct mlx4_cmd_info *cmd)
4217 int index = vhcr->in_modifier & 0xffff;
4219 err = get_res(dev, slave, index, RES_COUNTER, NULL);
4223 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4224 put_res(dev, slave, index, RES_COUNTER);
4228 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4230 struct res_gid *rgid;
4231 struct res_gid *tmp;
4232 struct mlx4_qp qp; /* dummy for calling attach/detach */
4234 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
4235 switch (dev->caps.steering_mode) {
4236 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4237 mlx4_flow_detach(dev, rgid->reg_id);
4239 case MLX4_STEERING_MODE_B0:
4240 qp.qpn = rqp->local_qpn;
4241 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4242 rgid->prot, rgid->steer);
4245 list_del(&rgid->list);
4250 static int _move_all_busy(struct mlx4_dev *dev, int slave,
4251 enum mlx4_resource type, int print)
4253 struct mlx4_priv *priv = mlx4_priv(dev);
4254 struct mlx4_resource_tracker *tracker =
4255 &priv->mfunc.master.res_tracker;
4256 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4257 struct res_common *r;
4258 struct res_common *tmp;
4262 spin_lock_irq(mlx4_tlock(dev));
4263 list_for_each_entry_safe(r, tmp, rlist, list) {
4264 if (r->owner == slave) {
4266 if (r->state == RES_ANY_BUSY) {
4269 "%s id 0x%llx is busy\n",
4274 r->from_state = r->state;
4275 r->state = RES_ANY_BUSY;
4281 spin_unlock_irq(mlx4_tlock(dev));
4286 static int move_all_busy(struct mlx4_dev *dev, int slave,
4287 enum mlx4_resource type)
4289 unsigned long begin;
4294 busy = _move_all_busy(dev, slave, type, 0);
4295 if (time_after(jiffies, begin + 5 * HZ))
4302 busy = _move_all_busy(dev, slave, type, 1);
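/*
 * move_all_busy() retries for up to 5 seconds (5 * HZ jiffies), waiting
 * for in-flight wrappers to release the slave's resources; after the
 * timeout it makes one final pass with printing enabled so the ids of any
 * stuck resources are logged before cleanup proceeds anyway.
 */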
4306 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4308 struct mlx4_priv *priv = mlx4_priv(dev);
4309 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4310 struct list_head *qp_list =
4311 &tracker->slave_list[slave].res_list[RES_QP];
4319 err = move_all_busy(dev, slave, RES_QP);
4321 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4324 spin_lock_irq(mlx4_tlock(dev));
4325 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4326 spin_unlock_irq(mlx4_tlock(dev));
4327 if (qp->com.owner == slave) {
4328 qpn = qp->com.res_id;
4329 detach_qp(dev, slave, qp);
4330 state = qp->com.from_state;
4331 while (state != 0) {
4333 case RES_QP_RESERVED:
4334 spin_lock_irq(mlx4_tlock(dev));
4335 rb_erase(&qp->com.node,
4336 &tracker->res_tree[RES_QP]);
4337 list_del(&qp->com.list);
4338 spin_unlock_irq(mlx4_tlock(dev));
4339 if (!valid_reserved(dev, slave, qpn)) {
4340 __mlx4_qp_release_range(dev, qpn, 1);
4341 mlx4_release_resource(dev, slave,
4348 if (!valid_reserved(dev, slave, qpn))
4349 __mlx4_qp_free_icm(dev, qpn);
4350 state = RES_QP_RESERVED;
4354 err = mlx4_cmd(dev, in_param,
4357 MLX4_CMD_TIME_CLASS_A,
4360 mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4361 slave, qp->local_qpn);
4362 atomic_dec(&qp->rcq->ref_count);
4363 atomic_dec(&qp->scq->ref_count);
4364 atomic_dec(&qp->mtt->ref_count);
4366 atomic_dec(&qp->srq->ref_count);
4367 state = RES_QP_MAPPED;
4374 spin_lock_irq(mlx4_tlock(dev));
4376 spin_unlock_irq(mlx4_tlock(dev));
4379 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4381 struct mlx4_priv *priv = mlx4_priv(dev);
4382 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4383 struct list_head *srq_list =
4384 &tracker->slave_list[slave].res_list[RES_SRQ];
4385 struct res_srq *srq;
4386 struct res_srq *tmp;
4393 err = move_all_busy(dev, slave, RES_SRQ);
4395 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4398 spin_lock_irq(mlx4_tlock(dev));
4399 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4400 spin_unlock_irq(mlx4_tlock(dev));
4401 if (srq->com.owner == slave) {
4402 srqn = srq->com.res_id;
4403 state = srq->com.from_state;
4404 while (state != 0) {
4406 case RES_SRQ_ALLOCATED:
4407 __mlx4_srq_free_icm(dev, srqn);
4408 spin_lock_irq(mlx4_tlock(dev));
4409 rb_erase(&srq->com.node,
4410 &tracker->res_tree[RES_SRQ]);
4411 list_del(&srq->com.list);
4412 spin_unlock_irq(mlx4_tlock(dev));
4413 mlx4_release_resource(dev, slave,
4421 err = mlx4_cmd(dev, in_param, srqn, 1,
4423 MLX4_CMD_TIME_CLASS_A,
4426 mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4429 atomic_dec(&srq->mtt->ref_count);
4431 atomic_dec(&srq->cq->ref_count);
4432 state = RES_SRQ_ALLOCATED;
4440 spin_lock_irq(mlx4_tlock(dev));
4442 spin_unlock_irq(mlx4_tlock(dev));
4445 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4447 struct mlx4_priv *priv = mlx4_priv(dev);
4448 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4449 struct list_head *cq_list =
4450 &tracker->slave_list[slave].res_list[RES_CQ];
4459 err = move_all_busy(dev, slave, RES_CQ);
4461 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
4464 spin_lock_irq(mlx4_tlock(dev));
4465 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4466 spin_unlock_irq(mlx4_tlock(dev));
4467 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4468 cqn = cq->com.res_id;
4469 state = cq->com.from_state;
4470 while (state != 0) {
4472 case RES_CQ_ALLOCATED:
4473 __mlx4_cq_free_icm(dev, cqn);
4474 spin_lock_irq(mlx4_tlock(dev));
4475 rb_erase(&cq->com.node,
4476 &tracker->res_tree[RES_CQ]);
4477 list_del(&cq->com.list);
4478 spin_unlock_irq(mlx4_tlock(dev));
4479 mlx4_release_resource(dev, slave,
4487 err = mlx4_cmd(dev, in_param, cqn, 1,
4489 MLX4_CMD_TIME_CLASS_A,
4492 mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
4494 atomic_dec(&cq->mtt->ref_count);
4495 state = RES_CQ_ALLOCATED;
4503 spin_lock_irq(mlx4_tlock(dev));
4505 spin_unlock_irq(mlx4_tlock(dev));
4508 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4510 struct mlx4_priv *priv = mlx4_priv(dev);
4511 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4512 struct list_head *mpt_list =
4513 &tracker->slave_list[slave].res_list[RES_MPT];
4514 struct res_mpt *mpt;
4515 struct res_mpt *tmp;
4522 err = move_all_busy(dev, slave, RES_MPT);
4524 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
4527 spin_lock_irq(mlx4_tlock(dev));
4528 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4529 spin_unlock_irq(mlx4_tlock(dev));
4530 if (mpt->com.owner == slave) {
4531 mptn = mpt->com.res_id;
4532 state = mpt->com.from_state;
4533 while (state != 0) {
4535 case RES_MPT_RESERVED:
4536 __mlx4_mpt_release(dev, mpt->key);
4537 spin_lock_irq(mlx4_tlock(dev));
4538 rb_erase(&mpt->com.node,
4539 &tracker->res_tree[RES_MPT]);
4540 list_del(&mpt->com.list);
4541 spin_unlock_irq(mlx4_tlock(dev));
4542 mlx4_release_resource(dev, slave,
4548 case RES_MPT_MAPPED:
4549 __mlx4_mpt_free_icm(dev, mpt->key);
4550 state = RES_MPT_RESERVED;
4555 err = mlx4_cmd(dev, in_param, mptn, 0,
4557 MLX4_CMD_TIME_CLASS_A,
4560 mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
4563 atomic_dec(&mpt->mtt->ref_count);
4564 state = RES_MPT_MAPPED;
4571 spin_lock_irq(mlx4_tlock(dev));
4573 spin_unlock_irq(mlx4_tlock(dev));
4576 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4578 struct mlx4_priv *priv = mlx4_priv(dev);
4579 struct mlx4_resource_tracker *tracker =
4580 &priv->mfunc.master.res_tracker;
4581 struct list_head *mtt_list =
4582 &tracker->slave_list[slave].res_list[RES_MTT];
4583 struct res_mtt *mtt;
4584 struct res_mtt *tmp;
4590 err = move_all_busy(dev, slave, RES_MTT);
4592 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
4595 spin_lock_irq(mlx4_tlock(dev));
4596 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4597 spin_unlock_irq(mlx4_tlock(dev));
4598 if (mtt->com.owner == slave) {
4599 base = mtt->com.res_id;
4600 state = mtt->com.from_state;
4601 while (state != 0) {
4603 case RES_MTT_ALLOCATED:
4604 __mlx4_free_mtt_range(dev, base,
4606 spin_lock_irq(mlx4_tlock(dev));
4607 rb_erase(&mtt->com.node,
4608 &tracker->res_tree[RES_MTT]);
4609 list_del(&mtt->com.list);
4610 spin_unlock_irq(mlx4_tlock(dev));
4611 mlx4_release_resource(dev, slave, RES_MTT,
4612 1 << mtt->order, 0);
4622 spin_lock_irq(mlx4_tlock(dev));
4624 spin_unlock_irq(mlx4_tlock(dev));
4627 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
4629 struct mlx4_priv *priv = mlx4_priv(dev);
4630 struct mlx4_resource_tracker *tracker =
4631 &priv->mfunc.master.res_tracker;
4632 struct list_head *fs_rule_list =
4633 &tracker->slave_list[slave].res_list[RES_FS_RULE];
4634 struct res_fs_rule *fs_rule;
4635 struct res_fs_rule *tmp;
4640 err = move_all_busy(dev, slave, RES_FS_RULE);
4642 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
4645 spin_lock_irq(mlx4_tlock(dev));
4646 list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
4647 spin_unlock_irq(mlx4_tlock(dev));
4648 if (fs_rule->com.owner == slave) {
4649 base = fs_rule->com.res_id;
4650 state = fs_rule->com.from_state;
4651 while (state != 0) {
4653 case RES_FS_RULE_ALLOCATED:
4655 err = mlx4_cmd(dev, base, 0, 0,
4656 MLX4_QP_FLOW_STEERING_DETACH,
4657 MLX4_CMD_TIME_CLASS_A,
4660 spin_lock_irq(mlx4_tlock(dev));
4661 rb_erase(&fs_rule->com.node,
4662 &tracker->res_tree[RES_FS_RULE]);
4663 list_del(&fs_rule->com.list);
4664 spin_unlock_irq(mlx4_tlock(dev));
4674 spin_lock_irq(mlx4_tlock(dev));
4676 spin_unlock_irq(mlx4_tlock(dev));
4679 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4681 struct mlx4_priv *priv = mlx4_priv(dev);
4682 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4683 struct list_head *eq_list =
4684 &tracker->slave_list[slave].res_list[RES_EQ];
4692 err = move_all_busy(dev, slave, RES_EQ);
4694 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
4697 spin_lock_irq(mlx4_tlock(dev));
4698 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
4699 spin_unlock_irq(mlx4_tlock(dev));
4700 if (eq->com.owner == slave) {
4701 eqn = eq->com.res_id;
4702 state = eq->com.from_state;
4703 while (state != 0) {
4705 case RES_EQ_RESERVED:
4706 spin_lock_irq(mlx4_tlock(dev));
4707 rb_erase(&eq->com.node,
4708 &tracker->res_tree[RES_EQ]);
4709 list_del(&eq->com.list);
4710 spin_unlock_irq(mlx4_tlock(dev));
4716 err = mlx4_cmd(dev, slave, eqn & 0xff,
4717 1, MLX4_CMD_HW2SW_EQ,
4718 MLX4_CMD_TIME_CLASS_A,
4721 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eq %d to SW ownership\n",
4723 atomic_dec(&eq->mtt->ref_count);
4724 state = RES_EQ_RESERVED;
4732 spin_lock_irq(mlx4_tlock(dev));
4734 spin_unlock_irq(mlx4_tlock(dev));
4737 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
4739 struct mlx4_priv *priv = mlx4_priv(dev);
4740 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4741 struct list_head *counter_list =
4742 &tracker->slave_list[slave].res_list[RES_COUNTER];
4743 struct res_counter *counter;
4744 struct res_counter *tmp;
4748 err = move_all_busy(dev, slave, RES_COUNTER);
4750 mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
4753 spin_lock_irq(mlx4_tlock(dev));
4754 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
4755 if (counter->com.owner == slave) {
4756 index = counter->com.res_id;
4757 rb_erase(&counter->com.node,
4758 &tracker->res_tree[RES_COUNTER]);
4759 list_del(&counter->com.list);
4761 __mlx4_counter_free(dev, index);
4762 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
4765 spin_unlock_irq(mlx4_tlock(dev));
4768 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
4770 struct mlx4_priv *priv = mlx4_priv(dev);
4771 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4772 struct list_head *xrcdn_list =
4773 &tracker->slave_list[slave].res_list[RES_XRCD];
4774 struct res_xrcdn *xrcd;
4775 struct res_xrcdn *tmp;
4779 err = move_all_busy(dev, slave, RES_XRCD);
4781 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
4784 spin_lock_irq(mlx4_tlock(dev));
4785 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
4786 if (xrcd->com.owner == slave) {
4787 xrcdn = xrcd->com.res_id;
4788 rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
4789 list_del(&xrcd->com.list);
4791 __mlx4_xrcd_free(dev, xrcdn);
4794 spin_unlock_irq(mlx4_tlock(dev));
4797 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
4799 struct mlx4_priv *priv = mlx4_priv(dev);
4800 mlx4_reset_roce_gids(dev, slave);
4801 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4802 rem_slave_vlans(dev, slave);
4803 rem_slave_macs(dev, slave);
4804 rem_slave_fs_rule(dev, slave);
4805 rem_slave_qps(dev, slave);
4806 rem_slave_srqs(dev, slave);
4807 rem_slave_cqs(dev, slave);
4808 rem_slave_mrs(dev, slave);
4809 rem_slave_eqs(dev, slave);
4810 rem_slave_mtts(dev, slave);
4811 rem_slave_counters(dev, slave);
4812 rem_slave_xrcdns(dev, slave);
4813 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
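/*
 * The teardown order above releases referencing objects before the
 * objects they reference: steering rules and multicast attachments before
 * the QPs that own them, QPs before the CQs/SRQs they point at, and MTT
 * ranges only after every QP, CQ, SRQ, MR and EQ that could hold a
 * reference has been reclaimed.
 */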
4816 void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
4818 struct mlx4_vf_immed_vlan_work *work =
4819 container_of(_work, struct mlx4_vf_immed_vlan_work, work);
4820 struct mlx4_cmd_mailbox *mailbox;
4821 struct mlx4_update_qp_context *upd_context;
4822 struct mlx4_dev *dev = &work->priv->dev;
4823 struct mlx4_resource_tracker *tracker =
4824 &work->priv->mfunc.master.res_tracker;
4825 struct list_head *qp_list =
4826 &tracker->slave_list[work->slave].res_list[RES_QP];
4829 u64 qp_path_mask_vlan_ctrl =
4830 ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
4831 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
4832 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
4833 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
4834 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
4835 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
4837 u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
4838 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
4839 (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
4840 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
4841 (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
4842 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
4843 (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
4846 int port, errors = 0;
4849 if (mlx4_is_slave(dev)) {
4850 mlx4_warn(dev, "Trying to update a QP on slave %d\n",
4855 mailbox = mlx4_alloc_cmd_mailbox(dev);
4856 if (IS_ERR(mailbox))
4858 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
4859 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4860 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
4861 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
4862 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4863 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
4864 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4865 else if (!work->vlan_id)
4866 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4867 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4869 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4870 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4871 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
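/*
 * Three policies are selected above: link-disable blocks all tagged and
 * untagged traffic in both directions; vlan_id 0 blocks tagged frames on
 * both transmit and receive; a real VST vlan blocks tagged transmit plus
 * untagged and priority-tagged receive, with the forced vlan_index set
 * below supplying the tag the guest's traffic will carry.
 */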
4873 upd_context = mailbox->buf;
4874 upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
4876 spin_lock_irq(mlx4_tlock(dev));
4877 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4878 spin_unlock_irq(mlx4_tlock(dev));
4879 if (qp->com.owner == work->slave) {
4880 if (qp->com.from_state != RES_QP_HW ||
4881 !qp->sched_queue || /* no INIT2RTR transition yet */
4882 mlx4_is_qp_reserved(dev, qp->local_qpn) ||
4883 qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
4884 spin_lock_irq(mlx4_tlock(dev));
4887 port = (qp->sched_queue >> 6 & 1) + 1;
4888 if (port != work->port) {
4889 spin_lock_irq(mlx4_tlock(dev));
4892 if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
4893 upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
4895 upd_context->primary_addr_path_mask =
4896 cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
4897 if (work->vlan_id == MLX4_VGT) {
4898 upd_context->qp_context.param3 = qp->param3;
4899 upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
4900 upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
4901 upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
4902 upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
4903 upd_context->qp_context.pri_path.feup = qp->feup;
4904 upd_context->qp_context.pri_path.sched_queue =
4907 upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
4908 upd_context->qp_context.pri_path.vlan_control = vlan_control;
4909 upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
4910 upd_context->qp_context.pri_path.fvl_rx =
4911 qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
4912 upd_context->qp_context.pri_path.fl =
4913 qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
4914 upd_context->qp_context.pri_path.feup =
4915 qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
4916 upd_context->qp_context.pri_path.sched_queue =
4917 qp->sched_queue & 0xC7;
4918 upd_context->qp_context.pri_path.sched_queue |=
4919 ((work->qos & 0x7) << 3);
4922 err = mlx4_cmd(dev, mailbox->dma,
4923 qp->local_qpn & 0xffffff,
4924 0, MLX4_CMD_UPDATE_QP,
4925 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
4927 mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
4928 work->slave, port, qp->local_qpn, err);
4932 spin_lock_irq(mlx4_tlock(dev));
4934 spin_unlock_irq(mlx4_tlock(dev));
4935 mlx4_free_cmd_mailbox(dev, mailbox);
4938 mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
4939 errors, work->slave, work->port);
4941 /* Unregister the previous vlan_id if needed, provided there were
4942 * no errors while updating the QPs
4944 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
4945 NO_INDX != work->orig_vlan_ix)
4946 __mlx4_unregister_vlan(&work->priv->dev, work->port,
4947 work->orig_vlan_id);