/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
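/*
 * Resource tracker for the mlx4 SR-IOV PF ("master"): every HW resource
 * (QP, CQ, SRQ, MR/MPT, MTT, EQ, MAC, counter, XRCD, flow-steering rule)
 * that a VF ("slave") allocates through the command channel is recorded
 * here, so ownership can be enforced on every wrapped FW command and the
 * resources can be reclaimed when the slave goes away.
 */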
#define MLX4_MAC_VALID		(1ull << 63)

	struct list_head	list;

	struct list_head	list;

	struct list_head	list;

	enum mlx4_protocol	prot;
	enum mlx4_steer_type	steer;

	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */

	/* ICM memory for QP context was mapped */

	/* QP is in hw ownership */

	struct res_common	com;

	struct list_head	mcg_list;

enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,

static inline const char *mtt_states_str(enum res_mtt_states state)
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";

	struct res_common	com;

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,

	struct res_common	com;

	RES_EQ_BUSY = RES_ANY_BUSY,

	struct res_common	com;

	RES_CQ_BUSY = RES_ANY_BUSY,

	struct res_common	com;

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,

	struct res_common	com;

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,

	struct res_common	com;

enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,

	struct res_common	com;

enum res_fs_rule_states {
	RES_FS_RULE_BUSY = RES_ANY_BUSY,
	RES_FS_RULE_ALLOCATED,

	struct res_common	com;
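/*
 * Tracked resources live in per-type red-black trees keyed by resource id,
 * giving O(log n) lookup; each resource is additionally linked into its
 * owning slave's per-type list for bulk cleanup.
 */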
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
	struct rb_node *node = root->rb_node;

		struct res_common *res = container_of(node, struct res_common,

		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put the new node */
		struct res_common *this = container_of(*new, struct res_common,

		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id > this->res_id)
			new = &((*new)->rb_right);

	/* Add new node and rebalance tree. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);

static const char *ResourceType(enum mlx4_resource rt)
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
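/*
 * Called on the PF at multi-function init: one slave_list entry (with its
 * per-type resource lists and a mutex) per slave, plus the shared rb-trees
 * protected by the tracker spinlock.
 */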
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
	if (!priv->mfunc.master.res_tracker.slave_list)

	for (i = 0; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

	spin_lock_init(&priv->mfunc.master.res_tracker.lock);

void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type)
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (priv->mfunc.master.res_tracker.slave_list) {
		if (type != RES_TR_FREE_STRUCTS_ONLY)
			for (i = 0; i < dev->num_slaves; i++)
				if (type == RES_TR_FREE_ALL ||
				    dev->caps.function != i)
					mlx4_delete_all_resources_for_slave(dev, i);

		if (type != RES_TR_FREE_SLAVES_ONLY) {
			kfree(priv->mfunc.master.res_tracker.slave_list);
			priv->mfunc.master.res_tracker.slave_list = NULL;
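/*
 * QP contexts sent by a slave carry virtual pkey indices; rewrite byte 35
 * of the mailbox with the physical index taken from the master's
 * virt2phys_pkey table (the port is derived from the sched_queue byte at
 * offset 64).
 */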
static void update_pkey_index(struct mlx4_dev *dev, int slave,
			      struct mlx4_cmd_mailbox *inbox)
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 orig_index = *(u8 *)(inbox->buf + 35);
	struct mlx4_priv *priv = mlx4_priv(dev);

	port = (sched >> 6 & 1) + 1;

	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
	*(u8 *)(inbox->buf + 35) = new_index;
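/*
 * GID paravirtualization: rewrite the mgid_index in the QP context so a
 * slave can only use its own GID entry (UD QPs get 0x80 | slave; RC/UC
 * address paths get slave & 0x7F).
 */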
static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
	struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
	enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
	u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;

	if (MLX4_QP_ST_UD == ts)
		qp_ctx->pri_path.mgid_index = 0x80 | slave;

	if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
			qp_ctx->pri_path.mgid_index = slave & 0x7F;
		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
			qp_ctx->alt_path.mgid_index = slave & 0x7F;
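/*
 * VST enforcement: when the administrator has pinned the VF to a VLAN
 * (default_vlan != MLX4_VGT), force the VLAN index, QoS and spoof-check
 * settings into every QP context the slave submits, so the VF cannot
 * bypass its port policy.
 */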
static int update_vport_qp_param(struct mlx4_dev *dev,
				 struct mlx4_cmd_mailbox *inbox,
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_priv *priv;

	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
	priv = mlx4_priv(dev);
	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];

	if (MLX4_VGT != vp_oper->state.default_vlan) {
		qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
		if (MLX4_QP_ST_RC == qp_type ||
		    (MLX4_QP_ST_UD == qp_type &&
		     !mlx4_is_qp_reserved(dev, qpn)))

		/* The reserved QPs (special, proxy, tunnel)
		 * do not operate over VLANs
		 */
		if (mlx4_is_qp_reserved(dev, qpn))

		/* Force VLAN stripping by clearing the VSD bit */
		qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
		if (0 != vp_oper->state.default_vlan) {
			qpc->pri_path.vlan_control =
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
		} else { /* priority tagged */
			qpc->pri_path.vlan_control =
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;

		qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
		qpc->pri_path.vlan_index = vp_oper->vlan_idx;
		qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
		qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
		qpc->pri_path.sched_queue &= 0xC7;
		qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;

	if (vp_oper->state.spoofchk) {
		qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
		qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;

static int mpt_mask(struct mlx4_dev *dev)
	return dev->caps.num_mpts - 1;

static void *find_res(struct mlx4_dev *dev, u64 res_id,
		      enum mlx4_resource type)
	struct mlx4_priv *priv = mlx4_priv(dev);

	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
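/*
 * get_res()/put_res() implement a simple busy-lock on a tracked resource:
 * get_res() verifies ownership, saves the current state and marks the
 * entry RES_ANY_BUSY under the tracker spinlock; put_res() restores the
 * saved state. A busy entry cannot be grabbed or removed concurrently.
 */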
static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
		   enum mlx4_resource type,
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);

	if (r->state == RES_ANY_BUSY) {

	if (r->owner != slave) {

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;

		*((struct res_common **)res) = r;

	spin_unlock_irq(mlx4_tlock(dev));

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    u64 res_id, int *slave)
	struct res_common *r;

	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);

	spin_unlock(mlx4_tlock(dev));

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type)
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));

static struct res_common *alloc_qp_tr(int id)
	ret = kzalloc(sizeof *ret, GFP_KERNEL);

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);
	atomic_set(&ret->ref_count, 0);

static struct res_common *alloc_mtt_tr(int id, int order)
	ret = kzalloc(sizeof *ret, GFP_KERNEL);

	ret->com.res_id = id;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

static struct res_common *alloc_mpt_tr(int id, int key)
	ret = kzalloc(sizeof *ret, GFP_KERNEL);

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;

static struct res_common *alloc_eq_tr(int id)
	ret = kzalloc(sizeof *ret, GFP_KERNEL);

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

static struct res_common *alloc_cq_tr(int id)
	ret = kzalloc(sizeof *ret, GFP_KERNEL);

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

static struct res_common *alloc_srq_tr(int id)
	ret = kzalloc(sizeof *ret, GFP_KERNEL);

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

static struct res_common *alloc_counter_tr(int id)
	struct res_counter *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;

static struct res_common *alloc_xrcdn_tr(int id)
	struct res_xrcdn *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);

	ret->com.res_id = id;
	ret->com.state = RES_XRCD_ALLOCATED;

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
	struct res_fs_rule *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);

	ret->com.res_id = id;
	ret->com.state = RES_FS_RULE_ALLOCATED;

static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
	struct res_common *ret;

		ret = alloc_qp_tr(id);
		ret = alloc_mpt_tr(id, extra);
		ret = alloc_mtt_tr(id, extra);
		ret = alloc_eq_tr(id);
		ret = alloc_cq_tr(id);
		ret = alloc_srq_tr(id);
		printk(KERN_ERR "implementation missing\n");
		ret = alloc_counter_tr(id);
		ret = alloc_xrcdn_tr(id);
		ret = alloc_fs_rule_tr(id, extra);
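/*
 * Record [base, base + count) of the given type as owned by @slave:
 * allocate tracker entries, insert them into the global rb-tree (failing
 * with -EEXIST on overlap) and link them into the slave's list, all under
 * the tracker lock.
 */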
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[type];

	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
			for (--i; i >= 0; --i)

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {

		err = res_tracker_insert(root, res_arr[i]);

		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);

	spin_unlock_irq(mlx4_tlock(dev));

	/* Unwind only the entries inserted so far; i indexes res_arr */
	for (--i; i >= 0; --i)
		rb_erase(&res_arr[i]->node, root);

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)

static int remove_qp_ok(struct res_qp *res)
	if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
	    !list_empty(&res->mcg_list)) {
		pr_err("resource tracker: failed to remove QP, state %d, ref_count %d\n",
		       res->com.state, atomic_read(&res->ref_count));
	} else if (res->com.state != RES_QP_RESERVED) {

static int remove_mtt_ok(struct res_mtt *res, int order)
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
		       mtt_states_str(res->com.state),
		       atomic_read(&res->ref_count));
	} else if (res->com.state != RES_MTT_ALLOCATED)
	else if (res->order != order)

static int remove_mpt_ok(struct res_mpt *res)
	if (res->com.state == RES_MPT_BUSY)
	else if (res->com.state != RES_MPT_RESERVED)

static int remove_eq_ok(struct res_eq *res)
	if (res->com.state == RES_EQ_BUSY)
	else if (res->com.state != RES_EQ_RESERVED)

static int remove_counter_ok(struct res_counter *res)
	if (res->com.state == RES_COUNTER_BUSY)
	else if (res->com.state != RES_COUNTER_ALLOCATED)

static int remove_xrcdn_ok(struct res_xrcdn *res)
	if (res->com.state == RES_XRCD_BUSY)
	else if (res->com.state != RES_XRCD_ALLOCATED)

static int remove_fs_rule_ok(struct res_fs_rule *res)
	if (res->com.state == RES_FS_RULE_BUSY)
	else if (res->com.state != RES_FS_RULE_ALLOCATED)

static int remove_cq_ok(struct res_cq *res)
	if (res->com.state == RES_CQ_BUSY)
	else if (res->com.state != RES_CQ_ALLOCATED)

static int remove_srq_ok(struct res_srq *res)
	if (res->com.state == RES_SRQ_BUSY)
	else if (res->com.state != RES_SRQ_ALLOCATED)

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
		return remove_qp_ok((struct res_qp *)res);
		return remove_cq_ok((struct res_cq *)res);
		return remove_srq_ok((struct res_srq *)res);
		return remove_mpt_ok((struct res_mpt *)res);
		return remove_mtt_ok((struct res_mtt *)res, extra);
		return remove_eq_ok((struct res_eq *)res);
		return remove_counter_ok((struct res_counter *)res);
		return remove_xrcdn_ok((struct res_xrcdn *)res);
		return remove_fs_rule_ok((struct res_fs_rule *)res);

static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);

		if (r->owner != slave) {

		err = remove_ok(r, type, extra);

	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		rb_erase(&r->node, &tracker->res_tree[type]);

	spin_unlock_irq(mlx4_tlock(dev));
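/*
 * State transitions follow a two-phase protocol: *_res_start_move_to()
 * validates the transition, records from_state/to_state and parks the
 * resource in the BUSY state; the caller then issues the FW command and
 * finishes with res_end_move() on success or res_abort_move() on failure.
 */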
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
	else if (r->com.owner != slave)

			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
				 __func__, r->com.res_id);

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);

			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",

			if (r->com.state != RES_QP_MAPPED)

		r->com.from_state = r->com.state;
		r->com.to_state = state;
		r->com.state = RES_QP_BUSY;

	spin_unlock_irq(mlx4_tlock(dev));

static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
	else if (r->com.owner != slave)

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)

			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)

			if (r->com.state != RES_MPT_MAPPED)

		r->com.from_state = r->com.state;
		r->com.to_state = state;
		r->com.state = RES_MPT_BUSY;

	spin_unlock_irq(mlx4_tlock(dev));

static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
	else if (r->com.owner != slave)

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)

			if (r->com.state != RES_EQ_RESERVED)

		r->com.from_state = r->com.state;
		r->com.to_state = state;
		r->com.state = RES_EQ_BUSY;

	spin_unlock_irq(mlx4_tlock(dev));

static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
	else if (r->com.owner != slave)

		case RES_CQ_ALLOCATED:
			if (r->com.state != RES_CQ_HW)
			else if (atomic_read(&r->ref_count))

			if (r->com.state != RES_CQ_ALLOCATED)

		r->com.from_state = r->com.state;
		r->com.to_state = state;
		r->com.state = RES_CQ_BUSY;

	spin_unlock_irq(mlx4_tlock(dev));

static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_srq_states state, struct res_srq **srq)
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
	else if (r->com.owner != slave)

		case RES_SRQ_ALLOCATED:
			if (r->com.state != RES_SRQ_HW)
			else if (atomic_read(&r->ref_count))

			if (r->com.state != RES_SRQ_ALLOCATED)

		r->com.from_state = r->com.state;
		r->com.to_state = state;
		r->com.state = RES_SRQ_BUSY;

	spin_unlock_irq(mlx4_tlock(dev));

static void res_abort_move(struct mlx4_dev *dev, int slave,
			   enum mlx4_resource type, int id)
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));

static void res_end_move(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type, int id)
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->to_state;
	spin_unlock_irq(mlx4_tlock(dev));
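/*
 * Reserved QP ranges: valid_reserved() covers the special/proxy/tunnel
 * QPs a guest may legitimately touch; fw_reserved() covers the QPs owned
 * by firmware, whose ICM must not be allocated or freed on a slave's
 * behalf.
 */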
static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
	return mlx4_is_qp_reserved(dev, qpn) &&
		(mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));

static int fw_reserved(struct mlx4_dev *dev, int qpn)
	return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];

static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
	case RES_OP_RESERVE:
		count = get_param_l(&in_param);
		align = get_param_h(&in_param);
		err = __mlx4_qp_reserve_range(dev, count, align, &base);

		err = add_res_range(dev, slave, base, count, RES_QP, 0);
			__mlx4_qp_release_range(dev, base, count);

		set_param_l(out_param, base);

	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		if (valid_reserved(dev, slave, qpn)) {
			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);

		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,

		if (!fw_reserved(dev, qpn)) {
			err = __mlx4_qp_alloc_icm(dev, qpn);
				res_abort_move(dev, slave, RES_QP, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
	if (op != RES_OP_RESERVE_AND_MAP)

	order = get_param_l(&in_param);
	base = __mlx4_alloc_mtt_range(dev, order);

	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
		__mlx4_free_mtt_range(dev, base, order);

		set_param_l(out_param, base);

static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
	struct res_mpt *mpt;

	case RES_OP_RESERVE:
		index = __mlx4_mpt_reserve(dev);

		id = index & mpt_mask(dev);

		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
			__mlx4_mpt_release(dev, index);

		set_param_l(out_param, index);

	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);

		err = __mlx4_mpt_alloc_icm(dev, mpt->key);
			res_abort_move(dev, slave, RES_MPT, id);

		res_end_move(dev, slave, RES_MPT, id);

static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_cq_alloc_icm(dev, &cqn);

		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
			__mlx4_cq_free_icm(dev, cqn);

		set_param_l(out_param, cqn);

static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_srq_alloc_icm(dev, &srqn);

		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
			__mlx4_srq_free_icm(dev, srqn);

		set_param_l(out_param, srqn);
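/*
 * MACs are not tracked in the rb-trees; they live only on the per-slave
 * list so rem_slave_macs() can unregister them when the slave goes away.
 */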
static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct mac_res *res;

	res = kzalloc(sizeof *res, GFP_KERNEL);

	res->port = (u8) port;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_MAC]);

static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			list_del(&res->list);

static void rem_slave_macs(struct mlx4_dev *dev, int slave)
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		list_del(&res->list);
		__mlx4_unregister_mac(dev, res->port, res->mac);

static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
	if (op != RES_OP_RESERVE_AND_MAP)

	port = get_param_l(out_param);

	err = __mlx4_register_mac(dev, port, mac);
		set_param_l(out_param, err);

	err = mac_add_to_slave(dev, slave, mac, port);
		__mlx4_unregister_mac(dev, port, mac);

static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)

static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param)
	if (op != RES_OP_RESERVE)

	err = __mlx4_counter_alloc(dev, &index);

	err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
		__mlx4_counter_free(dev, index);

		set_param_l(out_param, index);

static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			   u64 in_param, u64 *out_param)
	if (op != RES_OP_RESERVE)

	err = __mlx4_xrcd_alloc(dev, &xrcdn);

	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
		__mlx4_xrcd_free(dev, xrcdn);

		set_param_l(out_param, xrcdn);
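/*
 * ALLOC_RES/FREE_RES commands from a slave are trapped here on the
 * master: the in_modifier selects the resource type and the op_modifier
 * the operation (reserve, map ICM, or both); each helper performs the
 * real allocation and records ownership before returning the id through
 * out_param.
 */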
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);

		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);

		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);

		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);

		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);

		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);

		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);

		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
					vhcr->in_param, &vhcr->out_param);

		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
				      vhcr->in_param, &vhcr->out_param);

static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
	case RES_OP_RESERVE:
		base = get_param_l(&in_param) & 0x7fffff;
		count = get_param_h(&in_param);
		err = rem_res_range(dev, slave, base, count, RES_QP, 0);

		__mlx4_qp_release_range(dev, base, count);

	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,

		if (!fw_reserved(dev, qpn))
			__mlx4_qp_free_icm(dev, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

		if (valid_reserved(dev, slave, qpn))
			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);

static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
	if (op != RES_OP_RESERVE_AND_MAP)

	base = get_param_l(&in_param);
	order = get_param_h(&in_param);
	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
		__mlx4_free_mtt_range(dev, base, order);

static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
	struct res_mpt *mpt;

	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = get_res(dev, slave, id, RES_MPT, &mpt);

		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);

		__mlx4_mpt_release(dev, index);

	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);

		__mlx4_mpt_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);

static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param, u64 *out_param)
	case RES_OP_RESERVE_AND_MAP:
		cqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);

		__mlx4_cq_free_icm(dev, cqn);

static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
	case RES_OP_RESERVE_AND_MAP:
		srqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);

		__mlx4_srq_free_icm(dev, srqn);

static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
	case RES_OP_RESERVE_AND_MAP:
		port = get_param_l(out_param);
		mac_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_mac(dev, port, in_param);

static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)

static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			    u64 in_param, u64 *out_param)
	if (op != RES_OP_RESERVE)

	index = get_param_l(&in_param);
	err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);

	__mlx4_counter_free(dev, index);

static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
	if (op != RES_OP_RESERVE)

	xrcdn = get_param_l(&in_param);
	err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);

	__mlx4_xrcd_free(dev, xrcdn);

int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,

		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);

		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,

		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param, &vhcr->out_param);

		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);

		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);

		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);

		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
				       vhcr->in_param, &vhcr->out_param);

		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);

/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
	return (be32_to_cpu(mpt->flags) >> 9) & 1;

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
	return be32_to_cpu(mpt->mtt_sz);

static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
	return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;

static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
	return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;

static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;

static int mr_is_region(struct mlx4_mpt_entry *mpt)
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;

static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
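/*
 * qp_get_mtt_size() recomputes the number of MTT entries backing a QP
 * directly from its context fields. Worked example (illustrative values):
 * log_sq_size = 6 and log_sq_stride = 2 give sq_size = 1 << (6 + 2 + 4)
 * = 4 KB; an equally sized RQ (no SRQ/RSS/XRC) brings total_mem to 8 KB,
 * and with page_shift = 12 and page_offset = 0 that rounds up to 2 MTTs.
 */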
static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
	int page_shift = (qpc->log_page_size & 0x3f) + 12;
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_stride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
		roundup_pow_of_two((total_mem + (page_offset << 6)) >>

static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
			   int size, struct res_mtt *mtt)
	int res_start = mtt->com.res_id;
	int res_size = (1 << mtt->order);

	if (start < res_start || start + size > res_start + res_size)
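/*
 * SW2HW_MPT from a slave: before passing the command to FW, verify that
 * the MPT is a memory region (memory windows are disabled for VFs), that
 * the PD's embedded function bits match the slave, that FMRs do not
 * request bind support, and that any referenced MTT range is owned by
 * the slave.
 */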
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt;
	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);

	/* Disable memory windows for VFs. */
	if (!mr_is_region(inbox->buf)) {

	/* Make sure that the PD bits related to the slave id are zeros. */
	pd = mr_get_pd(inbox->buf);
	pd_slave = (pd >> 17) & 0x7f;
	if (pd_slave != 0 && pd_slave != slave) {

	if (mr_is_fmr(inbox->buf)) {
		/* FMR and Bind Enable are forbidden in slave devices. */
		if (mr_is_bind_enabled(inbox->buf)) {

		/* FMR and Memory Windows are also forbidden. */
		if (!mr_is_region(inbox->buf)) {

	phys = mr_phys_mpt(inbox->buf);

		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);

		err = check_mtt_range(dev, slave, mtt_base,
				      mr_get_mtt_size(inbox->buf), mtt);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

		atomic_inc(&mtt->ref_count);
		put_res(dev, slave, mtt->com.res_id, RES_MTT);

	res_end_move(dev, slave, RES_MPT, id);

		put_res(dev, slave, mtt->com.res_id, RES_MTT);

	res_abort_move(dev, slave, RES_MPT, id);

int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

		atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);

	res_abort_move(dev, slave, RES_MPT, id);

int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;

	id = index & mpt_mask(dev);
	err = get_res(dev, slave, id, RES_MPT, &mpt);

	if (mpt->com.from_state != RES_MPT_HW) {

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	put_res(dev, slave, id, RES_MPT);

static int qp_get_rcqn(struct mlx4_qp_context *qpc)
	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;

static int qp_get_scqn(struct mlx4_qp_context *qpc)
	return be32_to_cpu(qpc->cqn_send) & 0xffffff;

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
	return be32_to_cpu(qpc->srqn) & 0x1ffffff;

static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
				  struct mlx4_qp_context *context)
	u32 qpn = vhcr->in_modifier & 0xffffff;

	if (mlx4_get_parav_qkey(dev, qpn, &qkey))

	/* adjust qkey in qp context */
	context->qkey = cpu_to_be32(qkey);

int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_mtt *mtt;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
	int mtt_size = qp_get_mtt_size(qpc);
	int rcqn = qp_get_rcqn(qpc);
	int scqn = qp_get_scqn(qpc);
	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
	struct res_srq *srq;
	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);

	qp->local_qpn = local_qpn;
	qp->sched_queue = 0;
	qp->qpc_flags = be32_to_cpu(qpc->flags);

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);

	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);

		err = get_res(dev, slave, scqn, RES_CQ, &scq);

		err = get_res(dev, slave, srqn, RES_SRQ, &srq);

	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	update_pkey_index(dev, slave, inbox);
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	atomic_inc(&mtt->ref_count);

	atomic_inc(&rcq->ref_count);

	atomic_inc(&scq->ref_count);

		put_res(dev, slave, scqn, RES_CQ);

		atomic_inc(&srq->ref_count);
		put_res(dev, slave, srqn, RES_SRQ);

	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);

		put_res(dev, slave, srqn, RES_SRQ);

		put_res(dev, slave, scqn, RES_CQ);

	put_res(dev, slave, rcqn, RES_CQ);

	put_res(dev, slave, mtt_base, RES_MTT);

	res_abort_move(dev, slave, RES_QP, qpn);

static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;

static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
	int log_eq_size = eqc->log_eq_size & 0x1f;
	int page_shift = (eqc->log_page_size & 0x3f) + 12;

	if (log_eq_size + 5 < page_shift)

	return 1 << (log_eq_size + 5 - page_shift);

static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;

static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
	int page_shift = (cqc->log_page_size & 0x3f) + 12;

	if (log_cq_size + 5 < page_shift)

	return 1 << (log_cq_size + 5 - page_shift);

int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
	int eqn = vhcr->in_modifier;
	int res_id = (slave << 8) | eqn;
	struct mlx4_eq_context *eqc = inbox->buf;
	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
	int mtt_size = eq_get_mtt_size(eqc);
	struct res_mtt *mtt;

	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);

	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	atomic_inc(&mtt->ref_count);
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);

	put_res(dev, slave, mtt->com.res_id, RES_MTT);

	res_abort_move(dev, slave, RES_EQ, res_id);

	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);

static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
			      int len, struct res_mtt **res)
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mtt *mtt;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
		if (!check_mtt_range(dev, slave, start, len, mtt)) {
			mtt->com.from_state = mtt->com.state;
			mtt->com.state = RES_MTT_BUSY;

	spin_unlock_irq(mlx4_tlock(dev));
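/*
 * Slaves other than the master function own only GID index 0, so any
 * modify-QP that sets a primary or alternate address path must leave
 * mgid_index at zero.
 */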
static int verify_qp_parameters(struct mlx4_dev *dev,
				struct mlx4_cmd_mailbox *inbox,
				enum qp_transition transition, u8 slave)
	struct mlx4_qp_context *qp_ctx;
	enum mlx4_qp_optpar optpar;

	qp_ctx = inbox->buf + 8;
	qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	optpar = be32_to_cpu(*(__be32 *) inbox->buf);

		switch (transition) {
		case QP_TRANS_INIT2RTR:
		case QP_TRANS_RTR2RTS:
		case QP_TRANS_RTS2RTS:
		case QP_TRANS_SQD2SQD:
		case QP_TRANS_SQD2RTS:
			if (slave != mlx4_master_func_num(dev))
				/* slaves have only gid index 0 */
				if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
					if (qp_ctx->pri_path.mgid_index)
				if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
					if (qp_ctx->alt_path.mgid_index)
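/*
 * WRITE_MTT is emulated in software on the master: the page list arrives
 * big-endian with a present bit in each entry, is checked against an MTT
 * range the slave owns, and is written via __mlx4_write_mtt() after
 * conversion to host-endian addresses.
 */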
int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
	struct mlx4_mtt mtt;
	__be64 *page_list = inbox->buf;
	u64 *pg_list = (u64 *)page_list;
	struct res_mtt *rmtt = NULL;
	int start = be64_to_cpu(page_list[0]);
	int npages = vhcr->in_modifier;

	err = get_containing_mtt(dev, slave, start, npages, &rmtt);

	/* Call the SW implementation of write_mtt:
	 * - Prepare a dummy mtt struct
	 * - Translate inbox contents to simple addresses in host endianness */
	mtt.offset = 0;	/* TBD this is broken but I don't handle it since
			   we don't really use it */

	for (i = 0; i < npages; ++i)
		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);

	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
			       ((u64 *)page_list + 2));

		put_res(dev, slave, rmtt->com.res_id, RES_MTT);

int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);

	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);

	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	atomic_dec(&eq->mtt->ref_count);
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);

	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);

	res_abort_move(dev, slave, RES_EQ, res_id);
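/*
 * Forward an event to a slave: the master builds the EQE in a mailbox and
 * asks FW (GEN_EQE) to post it on the event queue the slave registered
 * for this event type; slaves that never registered one are skipped.
 */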
int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq;
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_modifier = 0;

	if (!priv->mfunc.master.slave_state)

	event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];

	/* Create the event only if the slave is registered */
	if (event_eq->eqn < 0)

	mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	res_id = (slave << 8) | event_eq->eqn;
	err = get_res(dev, slave, res_id, RES_EQ, &req);

	if (req->com.from_state != RES_EQ_HW) {

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);

	if (eqe->type == MLX4_EVENT_TYPE_CMD) {
		eqe->event.cmd.token = cpu_to_be16(event_eq->token);

	memcpy(mailbox->buf, (u8 *) eqe, 28);

	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);

	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,

	put_res(dev, slave, res_id, RES_EQ);
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	mlx4_free_cmd_mailbox(dev, mailbox);

	put_res(dev, slave, res_id, RES_EQ);

	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);

int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);

	err = get_res(dev, slave, res_id, RES_EQ, &eq);

	if (eq->com.from_state != RES_EQ_HW) {

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	put_res(dev, slave, res_id, RES_EQ);

int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
	int cqn = vhcr->in_modifier;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
	struct res_mtt *mtt;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);

	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	atomic_inc(&mtt->ref_count);
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_CQ, cqn);

	put_res(dev, slave, mtt->com.res_id, RES_MTT);

	res_abort_move(dev, slave, RES_CQ, cqn);

int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
	int cqn = vhcr->in_modifier;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	atomic_dec(&cq->mtt->ref_count);
	res_end_move(dev, slave, RES_CQ, cqn);

	res_abort_move(dev, slave, RES_CQ, cqn);

int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
	int cqn = vhcr->in_modifier;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);

	if (cq->com.from_state != RES_CQ_HW)

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	put_res(dev, slave, cqn, RES_CQ);
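/*
 * MODIFY_CQ with op_modifier 0 resizes the CQ: validate the new MTT
 * range, then move the tracker reference from the old MTT to the new one
 * once FW accepts the command.
 */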
static int handle_resize(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd,
	struct res_mtt *orig_mtt;
	struct res_mtt *mtt;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;

	err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);

	if (orig_mtt != cq->mtt) {

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);

	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	atomic_dec(&orig_mtt->ref_count);
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
	atomic_inc(&mtt->ref_count);
	put_res(dev, slave, mtt->com.res_id, RES_MTT);

	put_res(dev, slave, mtt->com.res_id, RES_MTT);

	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);

int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
	int cqn = vhcr->in_modifier;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);

	if (cq->com.from_state != RES_CQ_HW)

	if (vhcr->op_modifier == 0) {
		err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	put_res(dev, slave, cqn, RES_CQ);

static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
	int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
	int log_rq_stride = srqc->logstride & 7;
	int page_shift = (srqc->log_page_size & 0x3f) + 12;

	if (log_srq_size + log_rq_stride + 4 < page_shift)

	return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);

int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
	int srqn = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_srq *srq;
	struct mlx4_srq_context *srqc = inbox->buf;
	int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;

	if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);

	err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	atomic_inc(&mtt->ref_count);
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_SRQ, srqn);

	put_res(dev, slave, mtt->com.res_id, RES_MTT);

	res_abort_move(dev, slave, RES_SRQ, srqn);

int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	atomic_dec(&srq->mtt->ref_count);

		atomic_dec(&srq->cq->ref_count);
	res_end_move(dev, slave, RES_SRQ, srqn);

	res_abort_move(dev, slave, RES_SRQ, srqn);

int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);

	if (srq->com.from_state != RES_SRQ_HW) {

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	put_res(dev, slave, srqn, RES_SRQ);

int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);

	if (srq->com.from_state != RES_SRQ_HW) {

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	put_res(dev, slave, srqn, RES_SRQ);

int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd)
	int qpn = vhcr->in_modifier & 0x7fffff;

	err = get_res(dev, slave, qpn, RES_QP, &qp);

	if (qp->com.from_state != RES_QP_HW) {

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	put_res(dev, slave, qpn, RES_QP);

int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd)
	struct mlx4_qp_context *context = inbox->buf + 8;
	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int qpn = vhcr->in_modifier & 0x7fffff;
	u8 orig_sched_queue;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	orig_sched_queue = qpc->pri_path.sched_queue;
	err = update_vport_qp_param(dev, inbox, slave, qpn);

	err = get_res(dev, slave, qpn, RES_QP, &qp);

	if (qp->com.from_state != RES_QP_HW) {

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	/* If no error, save the sched_queue value passed in by the VF. This
	 * is essentially the QoS value provided by the VF, and will be
	 * useful if we allow dynamic changes from VST back to VGT
	 */
		qp->sched_queue = orig_sched_queue;

	put_res(dev, slave, qpn, RES_QP);

int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd)
	struct mlx4_qp_context *context = inbox->buf + 8;
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_gid(dev, inbox, (u8)slave);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_gid(dev, inbox, (u8)slave);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
	int qpn = vhcr->in_modifier & 0x7fffff;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	atomic_dec(&qp->mtt->ref_count);
	atomic_dec(&qp->rcq->ref_count);
	atomic_dec(&qp->scq->ref_count);

		atomic_dec(&qp->srq->ref_count);
	res_end_move(dev, slave, RES_QP, qpn);

	res_abort_move(dev, slave, RES_QP, qpn);

static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
				struct res_qp *rqp, u8 *gid)
	struct res_gid *res;

	list_for_each_entry(res, &rqp->mcg_list, list) {
		if (!memcmp(res->gid, gid, 16))

static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer, u64 reg_id)
	struct res_gid *res;

	res = kzalloc(sizeof *res, GFP_KERNEL);

	spin_lock_irq(&rqp->mcg_spl);
	if (find_gid(dev, slave, rqp, gid)) {
		memcpy(res->gid, gid, 16);
		res->reg_id = reg_id;
		list_add_tail(&res->list, &rqp->mcg_list);

	spin_unlock_irq(&rqp->mcg_spl);

static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer, u64 *reg_id)
	struct res_gid *res;

	spin_lock_irq(&rqp->mcg_spl);
	res = find_gid(dev, slave, rqp, gid);
	if (!res || res->prot != prot || res->steer != steer)
		*reg_id = res->reg_id;
		list_del(&res->list);
	spin_unlock_irq(&rqp->mcg_spl);
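/*
 * Multicast attach/detach dispatch on the active steering mode:
 * device-managed flow steering goes through the flow-rule API (yielding
 * a reg_id used later for detach), while B0 steering uses the common
 * QP attach/detach path.
 */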
3069 static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3070 int block_loopback, enum mlx4_protocol prot,
3071 enum mlx4_steer_type type, u64 *reg_id)
3073 switch (dev->caps.steering_mode) {
3074 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3075 return mlx4_trans_to_dmfs_attach(dev, qp, gid, gid[5],
3076 block_loopback, prot,
3078 case MLX4_STEERING_MODE_B0:
3079 return mlx4_qp_attach_common(dev, qp, gid,
3080 block_loopback, prot, type);
3086 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3087 enum mlx4_protocol prot, enum mlx4_steer_type type,
3090 switch (dev->caps.steering_mode) {
3091 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3092 return mlx4_flow_detach(dev, reg_id);
3093 case MLX4_STEERING_MODE_B0:
3094 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
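/*
 * MCG attach/detach requests arrive with their fields packed into the
 * VHCR, as decoded below: op_modifier is non-zero for attach,
 * in_modifier carries block_loopback in bit 31, the protocol in bits
 * 30:28 and the QPN in bits 23:0, and the steering type is taken from
 * bit 1 of byte 7 of the GID at the start of the inbox.
 */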
3100 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3101 struct mlx4_vhcr *vhcr,
3102 struct mlx4_cmd_mailbox *inbox,
3103 struct mlx4_cmd_mailbox *outbox,
3104 struct mlx4_cmd_info *cmd)
3106 struct mlx4_qp qp; /* dummy for calling attach/detach */
3107 u8 *gid = inbox->buf;
3108 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
3113 int attach = vhcr->op_modifier;
3114 int block_loopback = vhcr->in_modifier >> 31;
3115 u8 steer_type_mask = 2;
3116 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
3118 qpn = vhcr->in_modifier & 0xffffff;
3119 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3125 err = qp_attach(dev, &qp, gid, block_loopback, prot,
3128 pr_err("Fail to attach rule to qp 0x%x\n", qpn);
3131 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
3135 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
3139 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
3141 pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",
3144 put_res(dev, slave, qpn, RES_QP);
3148 qp_detach(dev, &qp, gid, prot, type, reg_id);
3150 put_res(dev, slave, qpn, RES_QP);
3155 * MAC validation for Flow Steering rules.
3156 * VF can attach rules only with a mac address which is assigned to it.
3158 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3159 struct list_head *rlist)
3161 struct mac_res *res, *tmp;
3164 /* make sure it isn't a multicast or broadcast MAC */
3165 if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3166 !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3167 list_for_each_entry_safe(res, tmp, rlist, list) {
3168 be_mac = cpu_to_be64(res->mac << 16);
3169 if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
3172 pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
3173 eth_header->eth.dst_mac, slave);
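/*
 * The tracked MAC is kept as a host-order u64. Shifting it left by 16
 * and converting to big-endian lines the six address bytes up at the
 * start of the buffer, so the first ETH_ALEN bytes compare directly
 * against the rule's dst_mac. A sketch of the layout, with an
 * illustrative value:
 *
 *   u64 mac = 0x001122334455ULL;
 *   __be64 be_mac = cpu_to_be64(mac << 16);
 *   memory now holds 00 11 22 33 44 55 00 00, so
 *   memcmp(&be_mac, dst_mac, ETH_ALEN) compares just the MAC
 */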
3180 * In case of missing eth header, append eth header with a MAC address
3181 * assigned to the VF.
3183 static int add_eth_header(struct mlx4_dev *dev, int slave,
3184 struct mlx4_cmd_mailbox *inbox,
3185 struct list_head *rlist, int header_id)
3187 struct mac_res *res, *tmp;
3189 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3190 struct mlx4_net_trans_rule_hw_eth *eth_header;
3191 struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3192 struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3194 __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3196 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3198 eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3200 /* Make room in the inbox for the eth header */
3201 switch (header_id) {
3202 case MLX4_NET_TRANS_RULE_ID_IPV4:
3204 (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3205 memmove(ip_header, eth_header,
3206 sizeof(*ip_header) + sizeof(*l4_header));
3208 case MLX4_NET_TRANS_RULE_ID_TCP:
3209 case MLX4_NET_TRANS_RULE_ID_UDP:
3210 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
3212 memmove(l4_header, eth_header, sizeof(*l4_header));
3217 list_for_each_entry_safe(res, tmp, rlist, list) {
3218 if (port == res->port) {
3219 be_mac = cpu_to_be64(res->mac << 16);
3224 pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d .\n",
3229 memset(eth_header, 0, sizeof(*eth_header));
3230 eth_header->size = sizeof(*eth_header) >> 2;
3231 eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
3232 memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
3233 memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
3239 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3240 struct mlx4_vhcr *vhcr,
3241 struct mlx4_cmd_mailbox *inbox,
3242 struct mlx4_cmd_mailbox *outbox,
3243 struct mlx4_cmd_info *cmd)
3246 struct mlx4_priv *priv = mlx4_priv(dev);
3247 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3248 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
3252 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3253 struct _rule_hw *rule_header;
3256 if (dev->caps.steering_mode !=
3257 MLX4_STEERING_MODE_DEVICE_MANAGED)
3260 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3261 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
3262 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3264 pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
3267 rule_header = (struct _rule_hw *)(ctrl + 1);
3268 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
3270 switch (header_id) {
3271 case MLX4_NET_TRANS_RULE_ID_ETH:
3272 if (validate_eth_header_mac(slave, rule_header, rlist)) {
3277 case MLX4_NET_TRANS_RULE_ID_IB:
3279 case MLX4_NET_TRANS_RULE_ID_IPV4:
3280 case MLX4_NET_TRANS_RULE_ID_TCP:
3281 case MLX4_NET_TRANS_RULE_ID_UDP:
3282 pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
3283 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
3287 vhcr->in_modifier +=
3288 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
3291 pr_err("Corrupted mailbox.\n");
3296 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
3297 vhcr->in_modifier, 0,
3298 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
3303 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
3305 mlx4_err(dev, "Failed to add flow steering resources\n");
3307 mlx4_cmd(dev, vhcr->out_param, 0, 0,
3308 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3312 atomic_inc(&rqp->ref_count);
3314 put_res(dev, slave, qpn, RES_QP);
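/*
 * Note the unwind order above: the rule is first installed in firmware
 * via mlx4_cmd_imm(), then recorded in the resource tracker as a
 * RES_FS_RULE owned by the slave, with the QPN stashed so that detach
 * can locate the QP again. If the tracker allocation fails, the
 * freshly attached rule is detached again so firmware and tracker stay
 * consistent; the QP's ref_count is bumped only on full success.
 */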
3318 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
3319 struct mlx4_vhcr *vhcr,
3320 struct mlx4_cmd_mailbox *inbox,
3321 struct mlx4_cmd_mailbox *outbox,
3322 struct mlx4_cmd_info *cmd)
3326 struct res_fs_rule *rrule;
3328 if (dev->caps.steering_mode !=
3329 MLX4_STEERING_MODE_DEVICE_MANAGED)
3332 err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
3335 /* Release the rule from busy state before removal */
3336 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
3337 err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
3341 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
3343 mlx4_err(dev, "Failed to remove flow steering resources\n");
3347 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
3348 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3351 atomic_dec(&rqp->ref_count);
3353 put_res(dev, slave, rrule->qpn, RES_QP);
3358 BUSY_MAX_RETRIES = 10
3361 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
3362 struct mlx4_vhcr *vhcr,
3363 struct mlx4_cmd_mailbox *inbox,
3364 struct mlx4_cmd_mailbox *outbox,
3365 struct mlx4_cmd_info *cmd)
3368 int index = vhcr->in_modifier & 0xffff;
3370 err = get_res(dev, slave, index, RES_COUNTER, NULL);
3374 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3375 put_res(dev, slave, index, RES_COUNTER);
3379 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
3381 struct res_gid *rgid;
3382 struct res_gid *tmp;
3383 struct mlx4_qp qp; /* dummy for calling attach/detach */
3385 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
3386 switch (dev->caps.steering_mode) {
3387 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3388 mlx4_flow_detach(dev, rgid->reg_id);
3390 case MLX4_STEERING_MODE_B0:
3391 qp.qpn = rqp->local_qpn;
3392 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
3393 rgid->prot, rgid->steer);
3396 list_del(&rgid->list);
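/*
 * Before the rem_slave_*() sweepers may free anything, every resource
 * still owned by the slave must be moved to the BUSY state so that no
 * in-flight command wrapper can claim it concurrently.
 * _move_all_busy() makes a single pass under the tracker lock;
 * move_all_busy() keeps retrying it for a bounded time (about five
 * seconds) before giving up and logging the stragglers.
 */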
3401 static int _move_all_busy(struct mlx4_dev *dev, int slave,
3402 enum mlx4_resource type, int print)
3404 struct mlx4_priv *priv = mlx4_priv(dev);
3405 struct mlx4_resource_tracker *tracker =
3406 &priv->mfunc.master.res_tracker;
3407 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
3408 struct res_common *r;
3409 struct res_common *tmp;
3413 spin_lock_irq(mlx4_tlock(dev));
3414 list_for_each_entry_safe(r, tmp, rlist, list) {
3415 if (r->owner == slave) {
3417 if (r->state == RES_ANY_BUSY) {
3420 "%s id 0x%llx is busy\n",
3425 r->from_state = r->state;
3426 r->state = RES_ANY_BUSY;
3432 spin_unlock_irq(mlx4_tlock(dev));
3437 static int move_all_busy(struct mlx4_dev *dev, int slave,
3438 enum mlx4_resource type)
3440 unsigned long begin;
3445 busy = _move_all_busy(dev, slave, type, 0);
3446 if (time_after(jiffies, begin + 5 * HZ))
3453 busy = _move_all_busy(dev, slave, type, 1);
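/*
 * Each rem_slave_*() sweeper below walks the slave's per-type list,
 * dropping the tracker lock around firmware calls, and unwinds every
 * resource one step of its state machine per pass of the inner
 * "while (state != 0)" loop. For a QP this runs RES_QP_HW -> 2RST
 * firmware command -> RES_QP_MAPPED -> free the ICM backing ->
 * RES_QP_RESERVED -> erase the entry from the tracker rb-tree and
 * list, at which point the loop exits.
 */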
3457 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
3459 struct mlx4_priv *priv = mlx4_priv(dev);
3460 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3461 struct list_head *qp_list =
3462 &tracker->slave_list[slave].res_list[RES_QP];
3470 err = move_all_busy(dev, slave, RES_QP);
3472 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy "
3473 "for slave %d\n", slave);
3475 spin_lock_irq(mlx4_tlock(dev));
3476 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
3477 spin_unlock_irq(mlx4_tlock(dev));
3478 if (qp->com.owner == slave) {
3479 qpn = qp->com.res_id;
3480 detach_qp(dev, slave, qp);
3481 state = qp->com.from_state;
3482 while (state != 0) {
3484 case RES_QP_RESERVED:
3485 spin_lock_irq(mlx4_tlock(dev));
3486 rb_erase(&qp->com.node,
3487 &tracker->res_tree[RES_QP]);
3488 list_del(&qp->com.list);
3489 spin_unlock_irq(mlx4_tlock(dev));
3494 if (!valid_reserved(dev, slave, qpn))
3495 __mlx4_qp_free_icm(dev, qpn);
3496 state = RES_QP_RESERVED;
3500 err = mlx4_cmd(dev, in_param,
3503 MLX4_CMD_TIME_CLASS_A,
3506 mlx4_dbg(dev, "rem_slave_qps: failed"
3507 " to move slave %d qpn %d to"
3510 atomic_dec(&qp->rcq->ref_count);
3511 atomic_dec(&qp->scq->ref_count);
3512 atomic_dec(&qp->mtt->ref_count);
3514 atomic_dec(&qp->srq->ref_count);
3515 state = RES_QP_MAPPED;
3522 spin_lock_irq(mlx4_tlock(dev));
3524 spin_unlock_irq(mlx4_tlock(dev));
3527 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
3529 struct mlx4_priv *priv = mlx4_priv(dev);
3530 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3531 struct list_head *srq_list =
3532 &tracker->slave_list[slave].res_list[RES_SRQ];
3533 struct res_srq *srq;
3534 struct res_srq *tmp;
3541 err = move_all_busy(dev, slave, RES_SRQ);
3543 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
3544 "busy for slave %d\n", slave);
3546 spin_lock_irq(mlx4_tlock(dev));
3547 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
3548 spin_unlock_irq(mlx4_tlock(dev));
3549 if (srq->com.owner == slave) {
3550 srqn = srq->com.res_id;
3551 state = srq->com.from_state;
3552 while (state != 0) {
3554 case RES_SRQ_ALLOCATED:
3555 __mlx4_srq_free_icm(dev, srqn);
3556 spin_lock_irq(mlx4_tlock(dev));
3557 rb_erase(&srq->com.node,
3558 &tracker->res_tree[RES_SRQ]);
3559 list_del(&srq->com.list);
3560 spin_unlock_irq(mlx4_tlock(dev));
3567 err = mlx4_cmd(dev, in_param, srqn, 1,
3569 MLX4_CMD_TIME_CLASS_A,
3572 mlx4_dbg(dev, "rem_slave_srqs: failed"
3573 " to move slave %d srq %d to"
3577 atomic_dec(&srq->mtt->ref_count);
3579 atomic_dec(&srq->cq->ref_count);
3580 state = RES_SRQ_ALLOCATED;
3588 spin_lock_irq(mlx4_tlock(dev));
3590 spin_unlock_irq(mlx4_tlock(dev));
3593 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
3595 struct mlx4_priv *priv = mlx4_priv(dev);
3596 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3597 struct list_head *cq_list =
3598 &tracker->slave_list[slave].res_list[RES_CQ];
3607 err = move_all_busy(dev, slave, RES_CQ);
3609 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
3610 "busy for slave %d\n", slave);
3612 spin_lock_irq(mlx4_tlock(dev));
3613 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
3614 spin_unlock_irq(mlx4_tlock(dev));
3615 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
3616 cqn = cq->com.res_id;
3617 state = cq->com.from_state;
3618 while (state != 0) {
3620 case RES_CQ_ALLOCATED:
3621 __mlx4_cq_free_icm(dev, cqn);
3622 spin_lock_irq(mlx4_tlock(dev));
3623 rb_erase(&cq->com.node,
3624 &tracker->res_tree[RES_CQ]);
3625 list_del(&cq->com.list);
3626 spin_unlock_irq(mlx4_tlock(dev));
3633 err = mlx4_cmd(dev, in_param, cqn, 1,
3635 MLX4_CMD_TIME_CLASS_A,
3638 mlx4_dbg(dev, "rem_slave_cqs: failed"
3639 " to move slave %d cq %d to"
3642 atomic_dec(&cq->mtt->ref_count);
3643 state = RES_CQ_ALLOCATED;
3651 spin_lock_irq(mlx4_tlock(dev));
3653 spin_unlock_irq(mlx4_tlock(dev));
3656 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
3658 struct mlx4_priv *priv = mlx4_priv(dev);
3659 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3660 struct list_head *mpt_list =
3661 &tracker->slave_list[slave].res_list[RES_MPT];
3662 struct res_mpt *mpt;
3663 struct res_mpt *tmp;
3670 err = move_all_busy(dev, slave, RES_MPT);
3672 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
3673 "busy for slave %d\n", slave);
3675 spin_lock_irq(mlx4_tlock(dev));
3676 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
3677 spin_unlock_irq(mlx4_tlock(dev));
3678 if (mpt->com.owner == slave) {
3679 mptn = mpt->com.res_id;
3680 state = mpt->com.from_state;
3681 while (state != 0) {
3683 case RES_MPT_RESERVED:
3684 __mlx4_mpt_release(dev, mpt->key);
3685 spin_lock_irq(mlx4_tlock(dev));
3686 rb_erase(&mpt->com.node,
3687 &tracker->res_tree[RES_MPT]);
3688 list_del(&mpt->com.list);
3689 spin_unlock_irq(mlx4_tlock(dev));
3694 case RES_MPT_MAPPED:
3695 __mlx4_mpt_free_icm(dev, mpt->key);
3696 state = RES_MPT_RESERVED;
3701 err = mlx4_cmd(dev, in_param, mptn, 0,
3703 MLX4_CMD_TIME_CLASS_A,
3706 mlx4_dbg(dev, "rem_slave_mrs: failed"
3707 " to move slave %d mpt %d to"
3711 atomic_dec(&mpt->mtt->ref_count);
3712 state = RES_MPT_MAPPED;
3719 spin_lock_irq(mlx4_tlock(dev));
3721 spin_unlock_irq(mlx4_tlock(dev));
3724 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
3726 struct mlx4_priv *priv = mlx4_priv(dev);
3727 struct mlx4_resource_tracker *tracker =
3728 &priv->mfunc.master.res_tracker;
3729 struct list_head *mtt_list =
3730 &tracker->slave_list[slave].res_list[RES_MTT];
3731 struct res_mtt *mtt;
3732 struct res_mtt *tmp;
3738 err = move_all_busy(dev, slave, RES_MTT);
3740 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
3741 "busy for slave %d\n", slave);
3743 spin_lock_irq(mlx4_tlock(dev));
3744 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
3745 spin_unlock_irq(mlx4_tlock(dev));
3746 if (mtt->com.owner == slave) {
3747 base = mtt->com.res_id;
3748 state = mtt->com.from_state;
3749 while (state != 0) {
3751 case RES_MTT_ALLOCATED:
3752 __mlx4_free_mtt_range(dev, base,
3754 spin_lock_irq(mlx4_tlock(dev));
3755 rb_erase(&mtt->com.node,
3756 &tracker->res_tree[RES_MTT]);
3757 list_del(&mtt->com.list);
3758 spin_unlock_irq(mlx4_tlock(dev));
3768 spin_lock_irq(mlx4_tlock(dev));
3770 spin_unlock_irq(mlx4_tlock(dev));
3773 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
3775 struct mlx4_priv *priv = mlx4_priv(dev);
3776 struct mlx4_resource_tracker *tracker =
3777 &priv->mfunc.master.res_tracker;
3778 struct list_head *fs_rule_list =
3779 &tracker->slave_list[slave].res_list[RES_FS_RULE];
3780 struct res_fs_rule *fs_rule;
3781 struct res_fs_rule *tmp;
3786 err = move_all_busy(dev, slave, RES_FS_RULE);
3788 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
3791 spin_lock_irq(mlx4_tlock(dev));
3792 list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
3793 spin_unlock_irq(mlx4_tlock(dev));
3794 if (fs_rule->com.owner == slave) {
3795 base = fs_rule->com.res_id;
3796 state = fs_rule->com.from_state;
3797 while (state != 0) {
3799 case RES_FS_RULE_ALLOCATED:
3801 err = mlx4_cmd(dev, base, 0, 0,
3802 MLX4_QP_FLOW_STEERING_DETACH,
3803 MLX4_CMD_TIME_CLASS_A,
3806 spin_lock_irq(mlx4_tlock(dev));
3807 rb_erase(&fs_rule->com.node,
3808 &tracker->res_tree[RES_FS_RULE]);
3809 list_del(&fs_rule->com.list);
3810 spin_unlock_irq(mlx4_tlock(dev));
3820 spin_lock_irq(mlx4_tlock(dev));
3822 spin_unlock_irq(mlx4_tlock(dev));
3825 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
3827 struct mlx4_priv *priv = mlx4_priv(dev);
3828 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3829 struct list_head *eq_list =
3830 &tracker->slave_list[slave].res_list[RES_EQ];
3837 struct mlx4_cmd_mailbox *mailbox;
3839 err = move_all_busy(dev, slave, RES_EQ);
3841 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
3842 "busy for slave %d\n", slave);
3844 spin_lock_irq(mlx4_tlock(dev));
3845 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
3846 spin_unlock_irq(mlx4_tlock(dev));
3847 if (eq->com.owner == slave) {
3848 eqn = eq->com.res_id;
3849 state = eq->com.from_state;
3850 while (state != 0) {
3852 case RES_EQ_RESERVED:
3853 spin_lock_irq(mlx4_tlock(dev));
3854 rb_erase(&eq->com.node,
3855 &tracker->res_tree[RES_EQ]);
3856 list_del(&eq->com.list);
3857 spin_unlock_irq(mlx4_tlock(dev));
3863 mailbox = mlx4_alloc_cmd_mailbox(dev);
3864 if (IS_ERR(mailbox)) {
3868 err = mlx4_cmd_box(dev, slave, 0,
3871 MLX4_CMD_TIME_CLASS_A,
3874 mlx4_dbg(dev, "rem_slave_eqs: failed"
3875 " to move slave %d eqs %d to"
3876 " SW ownership\n", slave, eqn);
3877 mlx4_free_cmd_mailbox(dev, mailbox);
3878 atomic_dec(&eq->mtt->ref_count);
3879 state = RES_EQ_RESERVED;
3887 spin_lock_irq(mlx4_tlock(dev));
3889 spin_unlock_irq(mlx4_tlock(dev));
3892 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
3894 struct mlx4_priv *priv = mlx4_priv(dev);
3895 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3896 struct list_head *counter_list =
3897 &tracker->slave_list[slave].res_list[RES_COUNTER];
3898 struct res_counter *counter;
3899 struct res_counter *tmp;
3903 err = move_all_busy(dev, slave, RES_COUNTER);
3905 mlx4_warn(dev, "rem_slave_counters: Could not move all counters to "
3906 "busy for slave %d\n", slave);
3908 spin_lock_irq(mlx4_tlock(dev));
3909 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
3910 if (counter->com.owner == slave) {
3911 index = counter->com.res_id;
3912 rb_erase(&counter->com.node,
3913 &tracker->res_tree[RES_COUNTER]);
3914 list_del(&counter->com.list);
3916 __mlx4_counter_free(dev, index);
3919 spin_unlock_irq(mlx4_tlock(dev));
3922 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
3924 struct mlx4_priv *priv = mlx4_priv(dev);
3925 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3926 struct list_head *xrcdn_list =
3927 &tracker->slave_list[slave].res_list[RES_XRCD];
3928 struct res_xrcdn *xrcd;
3929 struct res_xrcdn *tmp;
3933 err = move_all_busy(dev, slave, RES_XRCD);
3935 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to "
3936 "busy for slave %d\n", slave);
3938 spin_lock_irq(mlx4_tlock(dev));
3939 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
3940 if (xrcd->com.owner == slave) {
3941 xrcdn = xrcd->com.res_id;
3942 rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
3943 list_del(&xrcd->com.list);
3945 __mlx4_xrcd_free(dev, xrcdn);
3948 spin_unlock_irq(mlx4_tlock(dev));
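/*
 * mlx4_delete_all_resources_for_slave() releases a dead slave's
 * resources in dependency order: MACs and flow-steering rules first,
 * then QPs (whose teardown drops the references they hold on CQs,
 * SRQs and MTTs), then SRQs, CQs, MRs (MPTs) and EQs, and only then
 * the MTTs everything else was built on; counters and XRC domains are
 * independent and freed last.
 */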
3951 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
3953 struct mlx4_priv *priv = mlx4_priv(dev);
3955 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3957 rem_slave_macs(dev, slave);
3958 rem_slave_fs_rule(dev, slave);
3959 rem_slave_qps(dev, slave);
3960 rem_slave_srqs(dev, slave);
3961 rem_slave_cqs(dev, slave);
3962 rem_slave_mrs(dev, slave);
3963 rem_slave_eqs(dev, slave);
3964 rem_slave_mtts(dev, slave);
3965 rem_slave_counters(dev, slave);
3966 rem_slave_xrcdns(dev, slave);
3967 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
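/*
 * Worker that applies an immediate VLAN/QoS change to a VF: it walks
 * every QP the slave owns that is in hardware ownership on the
 * affected port and issues UPDATE_QP on it, masking in the new
 * vlan_index, vlan_control and sched_queue (priority) values set up
 * below. RSS QPs, reserved (proxy/tunnel) QPs and QPs that never went
 * through INIT2RTR are skipped.
 */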
3970 void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
3972 struct mlx4_vf_immed_vlan_work *work =
3973 container_of(_work, struct mlx4_vf_immed_vlan_work, work);
3974 struct mlx4_cmd_mailbox *mailbox;
3975 struct mlx4_update_qp_context *upd_context;
3976 struct mlx4_dev *dev = &work->priv->dev;
3977 struct mlx4_resource_tracker *tracker =
3978 &work->priv->mfunc.master.res_tracker;
3979 struct list_head *qp_list =
3980 &tracker->slave_list[work->slave].res_list[RES_QP];
3983 u64 qp_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
3984 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
3985 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
3986 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
3987 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
3988 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED) |
3989 (1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
3990 (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
3993 int port, errors = 0;
3996 if (mlx4_is_slave(dev)) {
3997 mlx4_warn(dev, "Trying to update QP on slave %d\n",
4002 mailbox = mlx4_alloc_cmd_mailbox(dev);
4003 if (IS_ERR(mailbox))
4007 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4008 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4010 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4011 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4012 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
4014 upd_context = mailbox->buf;
4015 upd_context->primary_addr_path_mask = cpu_to_be64(qp_mask);
4016 upd_context->qp_context.pri_path.vlan_control = vlan_control;
4017 upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
4019 spin_lock_irq(mlx4_tlock(dev));
4020 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4021 spin_unlock_irq(mlx4_tlock(dev));
4022 if (qp->com.owner == work->slave) {
4023 if (qp->com.from_state != RES_QP_HW ||
4024 !qp->sched_queue || /* no INIT2RTR trans yet */
4025 mlx4_is_qp_reserved(dev, qp->local_qpn) ||
4026 qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
4027 spin_lock_irq(mlx4_tlock(dev));
4030 port = (qp->sched_queue >> 6 & 1) + 1;
4031 if (port != work->port) {
4032 spin_lock_irq(mlx4_tlock(dev));
4035 upd_context->qp_context.pri_path.sched_queue =
4036 qp->sched_queue & 0xC7;
4037 upd_context->qp_context.pri_path.sched_queue |=
4038 ((work->qos & 0x7) << 3);
4040 err = mlx4_cmd(dev, mailbox->dma,
4041 qp->local_qpn & 0xffffff,
4042 0, MLX4_CMD_UPDATE_QP,
4043 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
4045 mlx4_info(dev, "UPDATE_QP failed for slave %d, "
4046 "port %d, qpn %d (%d)\n",
4047 work->slave, port, qp->local_qpn,
4052 spin_lock_irq(mlx4_tlock(dev));
4054 spin_unlock_irq(mlx4_tlock(dev));
4055 mlx4_free_cmd_mailbox(dev, mailbox);
4058 mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
4059 errors, work->slave, work->port);
4061 /* Unregister the previous vlan_id, if needed, provided there were
4062 * no errors while updating the QPs
4064 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
4065 NO_INDX != work->orig_vlan_ix)
4066 __mlx4_unregister_vlan(&work->priv->dev, work->port,
4067 work->orig_vlan_ix);