/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#define MLX4_MAC_VALID		(1ull << 63)
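/*
 * Resource tracker: the PF (master) keeps per-slave bookkeeping for every
 * HW resource it allocates on a slave's behalf (QPs, CQs, SRQs, MPTs,
 * MTTs, EQs, counters, XRC domains, MACs, flow-steering rules), so that
 * wrapped FW commands can be validated against each resource's owner and
 * state, and so everything can be reclaimed when a slave goes away (see
 * mlx4_delete_all_resources_for_slave() and the rem_slave_*() helpers).
 */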
        struct list_head list;

        struct list_head list;

        struct list_head list;

        enum mlx4_protocol prot;
        enum mlx4_steer_type steer;

        RES_QP_BUSY = RES_ANY_BUSY,

        /* QP number was allocated */

        /* ICM memory for QP context was mapped */

        /* QP is in hw ownership */

        struct res_common com;

        struct list_head mcg_list;

enum res_mtt_states {
        RES_MTT_BUSY = RES_ANY_BUSY,

static inline const char *mtt_states_str(enum res_mtt_states state)
        case RES_MTT_BUSY: return "RES_MTT_BUSY";
        case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
        default: return "Unknown";

        struct res_common com;

enum res_mpt_states {
        RES_MPT_BUSY = RES_ANY_BUSY,

        struct res_common com;

        RES_EQ_BUSY = RES_ANY_BUSY,

        struct res_common com;

        RES_CQ_BUSY = RES_ANY_BUSY,

        struct res_common com;

enum res_srq_states {
        RES_SRQ_BUSY = RES_ANY_BUSY,

        struct res_common com;

enum res_counter_states {
        RES_COUNTER_BUSY = RES_ANY_BUSY,
        RES_COUNTER_ALLOCATED,

        struct res_common com;

enum res_xrcdn_states {
        RES_XRCD_BUSY = RES_ANY_BUSY,

        struct res_common com;

enum res_fs_rule_states {
        RES_FS_RULE_BUSY = RES_ANY_BUSY,
        RES_FS_RULE_ALLOCATED,

        struct res_common com;

static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
        struct rb_node *node = root->rb_node;

                struct res_common *res = container_of(node, struct res_common,
                                                      node);

                if (res_id < res->res_id)
                        node = node->rb_left;
                else if (res_id > res->res_id)
                        node = node->rb_right;

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
        struct rb_node **new = &(root->rb_node), *parent = NULL;

        /* Figure out where to put new node */
                struct res_common *this = container_of(*new, struct res_common,
                                                       node);

                if (res->res_id < this->res_id)
                        new = &((*new)->rb_left);
                else if (res->res_id > this->res_id)
                        new = &((*new)->rb_right);

        /* Add new node and rebalance tree. */
        rb_link_node(&res->node, parent, new);
        rb_insert_color(&res->node, root);
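/*
 * Sketch (not code from this file) of how the tree helpers are used:
 * every lookup and insert runs under the tracker spinlock,
 * mlx4_tlock(dev), exactly as the callers below do:
 *
 *	spin_lock_irq(mlx4_tlock(dev));
 *	if (!res_tracker_lookup(root, res->res_id))
 *		err = res_tracker_insert(root, res);
 *	spin_unlock_irq(mlx4_tlock(dev));
 */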
static const char *ResourceType(enum mlx4_resource rt)
        case RES_QP: return "RES_QP";
        case RES_CQ: return "RES_CQ";
        case RES_SRQ: return "RES_SRQ";
        case RES_MPT: return "RES_MPT";
        case RES_MTT: return "RES_MTT";
        case RES_MAC: return "RES_MAC";
        case RES_EQ: return "RES_EQ";
        case RES_COUNTER: return "RES_COUNTER";
        case RES_FS_RULE: return "RES_FS_RULE";
        case RES_XRCD: return "RES_XRCD";
        default: return "Unknown resource type !!!";

int mlx4_init_resource_tracker(struct mlx4_dev *dev)
        struct mlx4_priv *priv = mlx4_priv(dev);

        priv->mfunc.master.res_tracker.slave_list =
                kzalloc(dev->num_slaves * sizeof(struct slave_list),
                        GFP_KERNEL);
        if (!priv->mfunc.master.res_tracker.slave_list)

        for (i = 0; i < dev->num_slaves; i++) {
                for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
                        INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
                                       slave_list[i].res_list[t]);
                mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);

        mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
                 dev->num_slaves);
        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
                priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

        spin_lock_init(&priv->mfunc.master.res_tracker.lock);

void mlx4_free_resource_tracker(struct mlx4_dev *dev,
                                enum mlx4_res_tracker_free_type type)
        struct mlx4_priv *priv = mlx4_priv(dev);

        if (priv->mfunc.master.res_tracker.slave_list) {
                if (type != RES_TR_FREE_STRUCTS_ONLY)
                        for (i = 0; i < dev->num_slaves; i++)
                                if (type == RES_TR_FREE_ALL ||
                                    dev->caps.function != i)
                                        mlx4_delete_all_resources_for_slave(dev, i);

                if (type != RES_TR_FREE_SLAVES_ONLY) {
                        kfree(priv->mfunc.master.res_tracker.slave_list);
                        priv->mfunc.master.res_tracker.slave_list = NULL;

static void update_pkey_index(struct mlx4_dev *dev, int slave,
                              struct mlx4_cmd_mailbox *inbox)
        u8 sched = *(u8 *)(inbox->buf + 64);
        u8 orig_index = *(u8 *)(inbox->buf + 35);

        struct mlx4_priv *priv = mlx4_priv(dev);

        port = (sched >> 6 & 1) + 1;

        new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
        *(u8 *)(inbox->buf + 35) = new_index;

        mlx4_dbg(dev, "port = %d, orig pkey index = %d, new pkey index = %d\n",
                 port, orig_index, new_index);
static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
                       u8 slave)
        struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
        enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
        u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;

        if (MLX4_QP_ST_UD == ts)
                qp_ctx->pri_path.mgid_index = 0x80 | slave;

        if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
                if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
                        qp_ctx->pri_path.mgid_index = slave & 0x7F;
                if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
                        qp_ctx->alt_path.mgid_index = slave & 0x7F;

        mlx4_dbg(dev, "slave %d, new gid index: 0x%x\n",
                 slave, qp_ctx->pri_path.mgid_index);
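/*
 * GID-index virtualization: a slave's UD (tunnel) QP is forced onto the
 * proxy GID entry 0x80 | slave, while RC/UC QPs may only use the
 * per-slave entry in the shared GID table (the slave number), regardless
 * of what the guest wrote into the address path.
 */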
static int mpt_mask(struct mlx4_dev *dev)
        return dev->caps.num_mpts - 1;

static void *find_res(struct mlx4_dev *dev, int res_id,
                      enum mlx4_resource type)
        struct mlx4_priv *priv = mlx4_priv(dev);

        return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
                                  res_id);

static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
                   enum mlx4_resource type,
                   void *res)
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);

        if (r->state == RES_ANY_BUSY) {

        if (r->owner != slave) {

        r->from_state = r->state;
        r->state = RES_ANY_BUSY;

                *((struct res_common **)res) = r;

        spin_unlock_irq(mlx4_tlock(dev));

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
                                    enum mlx4_resource type,
                                    u64 res_id, int *slave)
        struct res_common *r;

        spin_lock(mlx4_tlock(dev));

        r = find_res(dev, id, type);

        spin_unlock(mlx4_tlock(dev));

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
                    enum mlx4_resource type)
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        r->state = r->from_state;
        spin_unlock_irq(mlx4_tlock(dev));
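/*
 * Sketch of the get_res()/put_res() pairing: get_res() parks the entry
 * in RES_ANY_BUSY (saving the previous state in from_state) so that
 * concurrent wrappers back off, and put_res() restores it:
 *
 *	err = get_res(dev, slave, id, RES_MPT, &mpt);
 *	if (err)
 *		return err;
 *	... use mpt while it is pinned busy ...
 *	put_res(dev, slave, id, RES_MPT);
 */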
static struct res_common *alloc_qp_tr(int id)
        ret = kzalloc(sizeof *ret, GFP_KERNEL);

        ret->com.res_id = id;
        ret->com.state = RES_QP_RESERVED;
        INIT_LIST_HEAD(&ret->mcg_list);
        spin_lock_init(&ret->mcg_spl);

static struct res_common *alloc_mtt_tr(int id, int order)
        ret = kzalloc(sizeof *ret, GFP_KERNEL);

        ret->com.res_id = id;
        ret->com.state = RES_MTT_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

static struct res_common *alloc_mpt_tr(int id, int key)
        ret = kzalloc(sizeof *ret, GFP_KERNEL);

        ret->com.res_id = id;
        ret->com.state = RES_MPT_RESERVED;

static struct res_common *alloc_eq_tr(int id)
        ret = kzalloc(sizeof *ret, GFP_KERNEL);

        ret->com.res_id = id;
        ret->com.state = RES_EQ_RESERVED;

static struct res_common *alloc_cq_tr(int id)
        ret = kzalloc(sizeof *ret, GFP_KERNEL);

        ret->com.res_id = id;
        ret->com.state = RES_CQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

static struct res_common *alloc_srq_tr(int id)
        ret = kzalloc(sizeof *ret, GFP_KERNEL);

        ret->com.res_id = id;
        ret->com.state = RES_SRQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

static struct res_common *alloc_counter_tr(int id)
        struct res_counter *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);

        ret->com.res_id = id;
        ret->com.state = RES_COUNTER_ALLOCATED;

static struct res_common *alloc_xrcdn_tr(int id)
        struct res_xrcdn *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);

        ret->com.res_id = id;
        ret->com.state = RES_XRCD_ALLOCATED;

static struct res_common *alloc_fs_rule_tr(u64 id)
        struct res_fs_rule *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);

        ret->com.res_id = id;
        ret->com.state = RES_FS_RULE_ALLOCATED;

static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
                                   int extra)
        struct res_common *ret;

                ret = alloc_qp_tr(id);
                ret = alloc_mpt_tr(id, extra);
                ret = alloc_mtt_tr(id, extra);
                ret = alloc_eq_tr(id);
                ret = alloc_cq_tr(id);
                ret = alloc_srq_tr(id);
                printk(KERN_ERR "implementation missing\n");
                ret = alloc_counter_tr(id);
                ret = alloc_xrcdn_tr(id);
                ret = alloc_fs_rule_tr(id);

static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct res_common **res_arr;
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct rb_root *root = &tracker->res_tree[type];

        res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);

        for (i = 0; i < count; ++i) {
                res_arr[i] = alloc_tr(base + i, type, slave, extra);
                        for (--i; i >= 0; --i)
        spin_lock_irq(mlx4_tlock(dev));
        for (i = 0; i < count; ++i) {
                if (find_res(dev, base + i, type)) {
                err = res_tracker_insert(root, res_arr[i]);
                list_add_tail(&res_arr[i]->list,
                              &tracker->slave_list[slave].res_list[type]);
        spin_unlock_irq(mlx4_tlock(dev));

undo:
        /* i indexes res_arr[], not resource ids, so unwind down to 0 */
        for (--i; i >= 0; --i)
                rb_erase(&res_arr[i]->node, root);

        spin_unlock_irq(mlx4_tlock(dev));

        for (i = 0; i < count; ++i)
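/*
 * Typical bookkeeping pattern around a real allocation (a sketch of the
 * RES_OP_RESERVE path in qp_alloc_res() below):
 *
 *	err = __mlx4_qp_reserve_range(dev, count, align, &base);
 *	if (err)
 *		return err;
 *	err = add_res_range(dev, slave, base, count, RES_QP, 0);
 *	if (err)
 *		__mlx4_qp_release_range(dev, base, count);
 */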
static int remove_qp_ok(struct res_qp *res)
        if (res->com.state == RES_QP_BUSY)
        else if (res->com.state != RES_QP_RESERVED)
static int remove_mtt_ok(struct res_mtt *res, int order)
        if (res->com.state == RES_MTT_BUSY ||
            atomic_read(&res->ref_count)) {
                printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
                       __func__, __LINE__,
                       mtt_states_str(res->com.state),
                       atomic_read(&res->ref_count));
        } else if (res->com.state != RES_MTT_ALLOCATED)
        else if (res->order != order)

static int remove_mpt_ok(struct res_mpt *res)
        if (res->com.state == RES_MPT_BUSY)
        else if (res->com.state != RES_MPT_RESERVED)
static int remove_eq_ok(struct res_eq *res)
        if (res->com.state == RES_EQ_BUSY)
        else if (res->com.state != RES_EQ_RESERVED)
static int remove_counter_ok(struct res_counter *res)
        if (res->com.state == RES_COUNTER_BUSY)
        else if (res->com.state != RES_COUNTER_ALLOCATED)

static int remove_xrcdn_ok(struct res_xrcdn *res)
        if (res->com.state == RES_XRCD_BUSY)
        else if (res->com.state != RES_XRCD_ALLOCATED)

static int remove_fs_rule_ok(struct res_fs_rule *res)
        if (res->com.state == RES_FS_RULE_BUSY)
        else if (res->com.state != RES_FS_RULE_ALLOCATED)

static int remove_cq_ok(struct res_cq *res)
        if (res->com.state == RES_CQ_BUSY)
        else if (res->com.state != RES_CQ_ALLOCATED)

static int remove_srq_ok(struct res_srq *res)
        if (res->com.state == RES_SRQ_BUSY)
        else if (res->com.state != RES_SRQ_ALLOCATED)

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
                return remove_qp_ok((struct res_qp *)res);
                return remove_cq_ok((struct res_cq *)res);
                return remove_srq_ok((struct res_srq *)res);
                return remove_mpt_ok((struct res_mpt *)res);
                return remove_mtt_ok((struct res_mtt *)res, extra);
                return remove_eq_ok((struct res_eq *)res);
                return remove_counter_ok((struct res_counter *)res);
                return remove_xrcdn_ok((struct res_xrcdn *)res);
                return remove_fs_rule_ok((struct res_fs_rule *)res);

static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        for (i = base; i < base + count; ++i) {
                r = res_tracker_lookup(&tracker->res_tree[type], i);
                if (r->owner != slave) {
                err = remove_ok(r, type, extra);

        for (i = base; i < base + count; ++i) {
                r = res_tracker_lookup(&tracker->res_tree[type], i);
                rb_erase(&r->node, &tracker->res_tree[type]);

        spin_unlock_irq(mlx4_tlock(dev));

static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
                                enum res_qp_states state, struct res_qp **qp,
                                int alloc)
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
        else if (r->com.owner != slave)

                        mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
                                 __func__, r->com.res_id);

                case RES_QP_RESERVED:
                        if (r->com.state == RES_QP_MAPPED && !alloc)

                        mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);

                        if ((r->com.state == RES_QP_RESERVED && alloc) ||
                            r->com.state == RES_QP_HW)

                        mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
                                 r->com.res_id);

                        if (r->com.state != RES_QP_MAPPED)

                r->com.from_state = r->com.state;
                r->com.to_state = state;
                r->com.state = RES_QP_BUSY;

        spin_unlock_irq(mlx4_tlock(dev));
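/*
 * State-move protocol (sketch): *_res_start_move_to() validates the
 * transition and parks the entry in its *_BUSY state, the wrapped FW
 * command then runs, and the caller finishes with res_end_move() on
 * success or res_abort_move() on failure:
 *
 *	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
 *	if (err)
 *		return err;
 *	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
 *	if (err)
 *		res_abort_move(dev, slave, RES_QP, qpn);
 *	else
 *		res_end_move(dev, slave, RES_QP, qpn);
 */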
static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                enum res_mpt_states state, struct res_mpt **mpt)
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
        else if (r->com.owner != slave)

                case RES_MPT_RESERVED:
                        if (r->com.state != RES_MPT_MAPPED)

                        if (r->com.state != RES_MPT_RESERVED &&
                            r->com.state != RES_MPT_HW)

                        if (r->com.state != RES_MPT_MAPPED)

                r->com.from_state = r->com.state;
                r->com.to_state = state;
                r->com.state = RES_MPT_BUSY;

        spin_unlock_irq(mlx4_tlock(dev));

static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                enum res_eq_states state, struct res_eq **eq)
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
        else if (r->com.owner != slave)

                case RES_EQ_RESERVED:
                        if (r->com.state != RES_EQ_HW)

                        if (r->com.state != RES_EQ_RESERVED)

                r->com.from_state = r->com.state;
                r->com.to_state = state;
                r->com.state = RES_EQ_BUSY;

        spin_unlock_irq(mlx4_tlock(dev));

static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
                                enum res_cq_states state, struct res_cq **cq)
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
        else if (r->com.owner != slave)

                case RES_CQ_ALLOCATED:
                        if (r->com.state != RES_CQ_HW)
                        else if (atomic_read(&r->ref_count))

                        if (r->com.state != RES_CQ_ALLOCATED)

                r->com.from_state = r->com.state;
                r->com.to_state = state;
                r->com.state = RES_CQ_BUSY;

        spin_unlock_irq(mlx4_tlock(dev));
static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                 enum res_srq_states state, struct res_srq **srq)
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
        else if (r->com.owner != slave)

                case RES_SRQ_ALLOCATED:
                        if (r->com.state != RES_SRQ_HW)
                        else if (atomic_read(&r->ref_count))

                        if (r->com.state != RES_SRQ_ALLOCATED)

                r->com.from_state = r->com.state;
                r->com.to_state = state;
                r->com.state = RES_SRQ_BUSY;

        spin_unlock_irq(mlx4_tlock(dev));

static void res_abort_move(struct mlx4_dev *dev, int slave,
                           enum mlx4_resource type, int id)
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[type], id);
        if (r && (r->owner == slave))
                r->state = r->from_state;
        spin_unlock_irq(mlx4_tlock(dev));

static void res_end_move(struct mlx4_dev *dev, int slave,
                         enum mlx4_resource type, int id)
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[type], id);
        if (r && (r->owner == slave))
                r->state = r->to_state;
        spin_unlock_irq(mlx4_tlock(dev));
static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
        return mlx4_is_qp_reserved(dev, qpn) &&
               (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));

static int fw_reserved(struct mlx4_dev *dev, int qpn)
        return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
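/*
 * QPNs below the FW-reserved count are owned by firmware, so the
 * wrappers never map or free ICM for them on a slave's behalf;
 * valid_reserved() additionally lets a slave claim one of the
 * driver-reserved (proxy/tunnel) QPNs that belongs to it.
 */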
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
        case RES_OP_RESERVE:
                count = get_param_l(&in_param);
                align = get_param_h(&in_param);
                err = __mlx4_qp_reserve_range(dev, count, align, &base);

                err = add_res_range(dev, slave, base, count, RES_QP, 0);
                        __mlx4_qp_release_range(dev, base, count);

                set_param_l(out_param, base);

        case RES_OP_MAP_ICM:
                qpn = get_param_l(&in_param) & 0x7fffff;
                if (valid_reserved(dev, slave, qpn)) {
                        err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);

                err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
                                           &qp, 0);

                if (!fw_reserved(dev, qpn)) {
                        err = __mlx4_qp_alloc_icm(dev, qpn);
                                res_abort_move(dev, slave, RES_QP, qpn);

                res_end_move(dev, slave, RES_QP, qpn);

static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
        if (op != RES_OP_RESERVE_AND_MAP)

        order = get_param_l(&in_param);
        base = __mlx4_alloc_mtt_range(dev, order);

        err = add_res_range(dev, slave, base, 1, RES_MTT, order);
                __mlx4_free_mtt_range(dev, base, order);

                set_param_l(out_param, base);

static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
        struct res_mpt *mpt;

        case RES_OP_RESERVE:
                index = __mlx4_mr_reserve(dev);

                id = index & mpt_mask(dev);

                err = add_res_range(dev, slave, id, 1, RES_MPT, index);
                        __mlx4_mr_release(dev, index);

                set_param_l(out_param, index);

        case RES_OP_MAP_ICM:
                index = get_param_l(&in_param);
                id = index & mpt_mask(dev);
                err = mr_res_start_move_to(dev, slave, id,
                                           RES_MPT_MAPPED, &mpt);

                err = __mlx4_mr_alloc_icm(dev, mpt->key);
                        res_abort_move(dev, slave, RES_MPT, id);

                res_end_move(dev, slave, RES_MPT, id);

static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
        case RES_OP_RESERVE_AND_MAP:
                err = __mlx4_cq_alloc_icm(dev, &cqn);

                err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
                        __mlx4_cq_free_icm(dev, cqn);

                set_param_l(out_param, cqn);

static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
        case RES_OP_RESERVE_AND_MAP:
                err = __mlx4_srq_alloc_icm(dev, &srqn);

                err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
                        __mlx4_srq_free_icm(dev, srqn);

                set_param_l(out_param, srqn);

static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct mac_res *res;

        res = kzalloc(sizeof *res, GFP_KERNEL);

        res->port = (u8) port;
        list_add_tail(&res->list,
                      &tracker->slave_list[slave].res_list[RES_MAC]);

static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
                               int port)
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *mac_list =
                &tracker->slave_list[slave].res_list[RES_MAC];
        struct mac_res *res, *tmp;

        list_for_each_entry_safe(res, tmp, mac_list, list) {
                if (res->mac == mac && res->port == (u8) port) {
                        list_del(&res->list);

static void rem_slave_macs(struct mlx4_dev *dev, int slave)
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *mac_list =
                &tracker->slave_list[slave].res_list[RES_MAC];
        struct mac_res *res, *tmp;

        list_for_each_entry_safe(res, tmp, mac_list, list) {
                list_del(&res->list);
                __mlx4_unregister_mac(dev, res->port, res->mac);

static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
        if (op != RES_OP_RESERVE_AND_MAP)

        port = get_param_l(out_param);

        err = __mlx4_register_mac(dev, port, mac);
                set_param_l(out_param, err);

        err = mac_add_to_slave(dev, slave, mac, port);
                __mlx4_unregister_mac(dev, port, mac);

static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                          u64 in_param, u64 *out_param)

static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                             u64 in_param, u64 *out_param)
        if (op != RES_OP_RESERVE)

        err = __mlx4_counter_alloc(dev, &index);

        err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
                __mlx4_counter_free(dev, index);

                set_param_l(out_param, index);

static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                           u64 in_param, u64 *out_param)
        if (op != RES_OP_RESERVE)

        err = __mlx4_xrcd_alloc(dev, &xrcdn);

        err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
                __mlx4_xrcd_free(dev, xrcdn);

                set_param_l(out_param, xrcdn);

int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
        int alop = vhcr->op_modifier;

        switch (vhcr->in_modifier) {
                err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                   vhcr->in_param, &vhcr->out_param);

                err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param);

                err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param);

                err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                   vhcr->in_param, &vhcr->out_param);

                err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param);

                err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param);

                err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                     vhcr->in_param, &vhcr->out_param);

                err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                        vhcr->in_param, &vhcr->out_param);

                err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                      vhcr->in_param, &vhcr->out_param);

static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                       u64 in_param, u64 *out_param)
        case RES_OP_RESERVE:
                base = get_param_l(&in_param) & 0x7fffff;
                count = get_param_h(&in_param);
                err = rem_res_range(dev, slave, base, count, RES_QP, 0);

                __mlx4_qp_release_range(dev, base, count);

        case RES_OP_MAP_ICM:
                qpn = get_param_l(&in_param) & 0x7fffff;
                err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
                                           &qp, 0);

                if (!fw_reserved(dev, qpn))
                        __mlx4_qp_free_icm(dev, qpn);

                res_end_move(dev, slave, RES_QP, qpn);

                if (valid_reserved(dev, slave, qpn))
                        err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);

static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
        if (op != RES_OP_RESERVE_AND_MAP)

        base = get_param_l(&in_param);
        order = get_param_h(&in_param);
        err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
                __mlx4_free_mtt_range(dev, base, order);

static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
        struct res_mpt *mpt;

        case RES_OP_RESERVE:
                index = get_param_l(&in_param);
                id = index & mpt_mask(dev);
                err = get_res(dev, slave, id, RES_MPT, &mpt);

                put_res(dev, slave, id, RES_MPT);

                err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);

                __mlx4_mr_release(dev, index);

        case RES_OP_MAP_ICM:
                index = get_param_l(&in_param);
                id = index & mpt_mask(dev);
                err = mr_res_start_move_to(dev, slave, id,
                                           RES_MPT_RESERVED, &mpt);

                __mlx4_mr_free_icm(dev, mpt->key);
                res_end_move(dev, slave, RES_MPT, id);

static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                       u64 in_param, u64 *out_param)
        case RES_OP_RESERVE_AND_MAP:
                cqn = get_param_l(&in_param);
                err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);

                __mlx4_cq_free_icm(dev, cqn);

static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
        case RES_OP_RESERVE_AND_MAP:
                srqn = get_param_l(&in_param);
                err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);

                __mlx4_srq_free_icm(dev, srqn);

static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
        case RES_OP_RESERVE_AND_MAP:
                port = get_param_l(out_param);
                mac_del_from_slave(dev, slave, in_param, port);
                __mlx4_unregister_mac(dev, port, in_param);

static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)

static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                            u64 in_param, u64 *out_param)
        if (op != RES_OP_RESERVE)

        index = get_param_l(&in_param);
        err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);

        __mlx4_counter_free(dev, index);

static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                          u64 in_param, u64 *out_param)
        if (op != RES_OP_RESERVE)

        xrcdn = get_param_l(&in_param);
        err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);

        __mlx4_xrcd_free(dev, xrcdn);

int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
                          struct mlx4_vhcr *vhcr,
                          struct mlx4_cmd_mailbox *inbox,
                          struct mlx4_cmd_mailbox *outbox,
                          struct mlx4_cmd_info *cmd)
        int alop = vhcr->op_modifier;

        switch (vhcr->in_modifier) {
                err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
                                  vhcr->in_param, &vhcr->out_param);

                err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
                                   vhcr->in_param, &vhcr->out_param);

                err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
                                   vhcr->in_param, &vhcr->out_param);

                err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
                                  vhcr->in_param, &vhcr->out_param);

                err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
                                   vhcr->in_param, &vhcr->out_param);

                err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
                                   vhcr->in_param, &vhcr->out_param);

                err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param);

                err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
                                       vhcr->in_param, &vhcr->out_param);

                err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
                                     vhcr->in_param, &vhcr->out_param);

/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
        return (be32_to_cpu(mpt->flags) >> 9) & 1;

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
        return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
        return be32_to_cpu(mpt->mtt_sz);

static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
        return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
        return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
        int page_shift = (qpc->log_page_size & 0x3f) + 12;
        int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
        int log_sq_stride = qpc->sq_size_stride & 7;
        int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
        int log_rq_stride = qpc->rq_size_stride & 7;
        int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
        int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
        int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
        int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

        sq_size = 1 << (log_sq_size + log_sq_stride + 4);
        rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
        total_mem = sq_size + rq_size;
                roundup_pow_of_two((total_mem + (page_offset << 6)) >>
                                   page_shift);
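/*
 * Worked example: with 4 KB pages (page_shift = 12), log_sq_size = 6
 * (64 WQEs) and log_sq_stride = 2 (64-byte strides) give sq_size =
 * 1 << (6 + 2 + 4) = 4 KB; an equal RQ brings total_mem to 8 KB, i.e.
 * two pages, so the QP needs two MTT entries.
 */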
static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
                           int size, struct res_mtt *mtt)
        int res_start = mtt->com.res_id;
        int res_size = (1 << mtt->order);

        if (start < res_start || start + size > res_start + res_size)
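/*
 * An MTT reservation of order n spans entries res_id .. res_id + 2^n - 1,
 * so a request for [start, start + size) passes only if it lies wholly
 * inside the slave's reservation; e.g. res_id = 256, order = 3 covers
 * entries 256..263, so start = 260, size = 4 is accepted while
 * start = 260, size = 5 is rejected.
 */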
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
        int index = vhcr->in_modifier;
        struct res_mtt *mtt;
        struct res_mpt *mpt;
        int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;

        id = index & mpt_mask(dev);
        err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);

        phys = mr_phys_mpt(inbox->buf);

                err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);

                err = check_mtt_range(dev, slave, mtt_base,
                                      mr_get_mtt_size(inbox->buf), mtt);

        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

                atomic_inc(&mtt->ref_count);
                put_res(dev, slave, mtt->com.res_id, RES_MTT);

        res_end_move(dev, slave, RES_MPT, id);

                put_res(dev, slave, mtt->com.res_id, RES_MTT);
        res_abort_move(dev, slave, RES_MPT, id);

int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
        int index = vhcr->in_modifier;
        struct res_mpt *mpt;

        id = index & mpt_mask(dev);
        err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);

        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

                atomic_dec(&mpt->mtt->ref_count);

        res_end_move(dev, slave, RES_MPT, id);

        res_abort_move(dev, slave, RES_MPT, id);

int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
        int index = vhcr->in_modifier;
        struct res_mpt *mpt;

        id = index & mpt_mask(dev);
        err = get_res(dev, slave, id, RES_MPT, &mpt);

        if (mpt->com.from_state != RES_MPT_HW) {

        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

        put_res(dev, slave, id, RES_MPT);

static int qp_get_rcqn(struct mlx4_qp_context *qpc)
        return be32_to_cpu(qpc->cqn_recv) & 0xffffff;

static int qp_get_scqn(struct mlx4_qp_context *qpc)
        return be32_to_cpu(qpc->cqn_send) & 0xffffff;

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
        return be32_to_cpu(qpc->srqn) & 0x1ffffff;

static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
                                  struct mlx4_qp_context *context)
        u32 qpn = vhcr->in_modifier & 0xffffff;

        if (mlx4_get_parav_qkey(dev, qpn, &qkey))

        /* adjust qkey in qp context */
        context->qkey = cpu_to_be32(qkey);

int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
                             struct mlx4_vhcr *vhcr,
                             struct mlx4_cmd_mailbox *inbox,
                             struct mlx4_cmd_mailbox *outbox,
                             struct mlx4_cmd_info *cmd)
        int qpn = vhcr->in_modifier & 0x7fffff;
        struct res_mtt *mtt;
        struct mlx4_qp_context *qpc = inbox->buf + 8;
        int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
        int mtt_size = qp_get_mtt_size(qpc);
        int rcqn = qp_get_rcqn(qpc);
        int scqn = qp_get_scqn(qpc);
        u32 srqn = qp_get_srqn(qpc) & 0xffffff;
        int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
        struct res_srq *srq;
        int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

        err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);

        qp->local_qpn = local_qpn;

        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);

        err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);

        err = get_res(dev, slave, rcqn, RES_CQ, &rcq);

                err = get_res(dev, slave, scqn, RES_CQ, &scq);

                err = get_res(dev, slave, srqn, RES_SRQ, &srq);

        adjust_proxy_tun_qkey(dev, vhcr, qpc);
        update_pkey_index(dev, slave, inbox);
        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

        atomic_inc(&mtt->ref_count);

        atomic_inc(&rcq->ref_count);

                atomic_inc(&scq->ref_count);

                put_res(dev, slave, scqn, RES_CQ);

                atomic_inc(&srq->ref_count);
                put_res(dev, slave, srqn, RES_SRQ);

        put_res(dev, slave, rcqn, RES_CQ);
        put_res(dev, slave, mtt_base, RES_MTT);
        res_end_move(dev, slave, RES_QP, qpn);

                put_res(dev, slave, srqn, RES_SRQ);

                put_res(dev, slave, scqn, RES_CQ);

        put_res(dev, slave, rcqn, RES_CQ);

        put_res(dev, slave, mtt_base, RES_MTT);

        res_abort_move(dev, slave, RES_QP, qpn);

static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
        return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;

static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
        int log_eq_size = eqc->log_eq_size & 0x1f;
        int page_shift = (eqc->log_page_size & 0x3f) + 12;

        if (log_eq_size + 5 < page_shift)

        return 1 << (log_eq_size + 5 - page_shift);

static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
        return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;

static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
        int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
        int page_shift = (cqc->log_page_size & 0x3f) + 12;

        if (log_cq_size + 5 < page_shift)

        return 1 << (log_cq_size + 5 - page_shift);
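/*
 * EQEs and CQEs are 32 bytes each, hence the "+ 5": log2(#entries) + 5
 * is the log of the queue size in bytes, and subtracting page_shift
 * converts that to the number of pages (= MTT entries) backing the
 * queue, with a minimum of one page.
 */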
int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
                          struct mlx4_vhcr *vhcr,
                          struct mlx4_cmd_mailbox *inbox,
                          struct mlx4_cmd_mailbox *outbox,
                          struct mlx4_cmd_info *cmd)
        int eqn = vhcr->in_modifier;
        int res_id = (slave << 8) | eqn;
        struct mlx4_eq_context *eqc = inbox->buf;
        int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
        int mtt_size = eq_get_mtt_size(eqc);
        struct res_mtt *mtt;

        err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);

        err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);

        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);

        err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);

        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

        atomic_inc(&mtt->ref_count);

        put_res(dev, slave, mtt->com.res_id, RES_MTT);
        res_end_move(dev, slave, RES_EQ, res_id);

        put_res(dev, slave, mtt->com.res_id, RES_MTT);

        res_abort_move(dev, slave, RES_EQ, res_id);

        rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);

static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
                              int len, struct res_mtt **res)
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_mtt *mtt;

        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
                            list) {
                if (!check_mtt_range(dev, slave, start, len, mtt)) {
                        mtt->com.from_state = mtt->com.state;
                        mtt->com.state = RES_MTT_BUSY;

        spin_unlock_irq(mlx4_tlock(dev));

static int verify_qp_parameters(struct mlx4_dev *dev,
                                struct mlx4_cmd_mailbox *inbox,
                                enum qp_transition transition, u8 slave)
        struct mlx4_qp_context *qp_ctx;
        enum mlx4_qp_optpar optpar;

        qp_ctx = inbox->buf + 8;
        qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
        optpar = be32_to_cpu(*(__be32 *) inbox->buf);

        switch (transition) {
        case QP_TRANS_INIT2RTR:
        case QP_TRANS_RTR2RTS:
        case QP_TRANS_RTS2RTS:
        case QP_TRANS_SQD2SQD:
        case QP_TRANS_SQD2RTS:
                if (slave != mlx4_master_func_num(dev))
                        /* slaves have only gid index 0 */
                        if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
                                if (qp_ctx->pri_path.mgid_index)
                        if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
                                if (qp_ctx->alt_path.mgid_index)

int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
        struct mlx4_mtt mtt;
        __be64 *page_list = inbox->buf;
        u64 *pg_list = (u64 *)page_list;
        struct res_mtt *rmtt = NULL;
        int start = be64_to_cpu(page_list[0]);
        int npages = vhcr->in_modifier;

        err = get_containing_mtt(dev, slave, start, npages, &rmtt);
        /* Call the SW implementation of write_mtt:
         * - Prepare a dummy mtt struct
         * - Translate inbox contents to simple addresses in host endianness */
        mtt.offset = 0;	/* TBD: offset handling is broken, but it is
                         * unused here, so we don't deal with it */
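        /* Bit 0 of each inbox entry is the HW "present" flag; clear it
         * below so that __mlx4_write_mtt() sees plain addresses (it sets
         * the flag itself when it writes the entries). */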
        for (i = 0; i < npages; ++i)
                pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);

        err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
                               ((u64 *)page_list + 2));

                put_res(dev, slave, rmtt->com.res_id, RES_MTT);

int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
                          struct mlx4_vhcr *vhcr,
                          struct mlx4_cmd_mailbox *inbox,
                          struct mlx4_cmd_mailbox *outbox,
                          struct mlx4_cmd_info *cmd)
        int eqn = vhcr->in_modifier;
        int res_id = eqn | (slave << 8);

        err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);

        err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);

        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

        atomic_dec(&eq->mtt->ref_count);
        put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
        res_end_move(dev, slave, RES_EQ, res_id);
        rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);

        put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);

        res_abort_move(dev, slave, RES_EQ, res_id);

int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_slave_event_eq_info *event_eq;
        struct mlx4_cmd_mailbox *mailbox;
        u32 in_modifier = 0;

        if (!priv->mfunc.master.slave_state)

        event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];

        /* Create the event only if the slave is registered */
        if (event_eq->eqn < 0)

        mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
        res_id = (slave << 8) | event_eq->eqn;
        err = get_res(dev, slave, res_id, RES_EQ, &req);

        if (req->com.from_state != RES_EQ_HW) {

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox)) {
                err = PTR_ERR(mailbox);

        if (eqe->type == MLX4_EVENT_TYPE_CMD) {
                eqe->event.cmd.token = cpu_to_be16(event_eq->token);

        memcpy(mailbox->buf, (u8 *) eqe, 28);

        in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);

        err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
                       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
                       MLX4_CMD_NATIVE);

        put_res(dev, slave, res_id, RES_EQ);
        mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
        mlx4_free_cmd_mailbox(dev, mailbox);

        put_res(dev, slave, res_id, RES_EQ);

        mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);

int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
                          struct mlx4_vhcr *vhcr,
                          struct mlx4_cmd_mailbox *inbox,
                          struct mlx4_cmd_mailbox *outbox,
                          struct mlx4_cmd_info *cmd)
        int eqn = vhcr->in_modifier;
        int res_id = eqn | (slave << 8);

        err = get_res(dev, slave, res_id, RES_EQ, &eq);

        if (eq->com.from_state != RES_EQ_HW) {

        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

        put_res(dev, slave, res_id, RES_EQ);

int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
                          struct mlx4_vhcr *vhcr,
                          struct mlx4_cmd_mailbox *inbox,
                          struct mlx4_cmd_mailbox *outbox,
                          struct mlx4_cmd_info *cmd)
        int cqn = vhcr->in_modifier;
        struct mlx4_cq_context *cqc = inbox->buf;
        int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
        struct res_mtt *mtt;

        err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);

        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);

        err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);

        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

        atomic_inc(&mtt->ref_count);

        put_res(dev, slave, mtt->com.res_id, RES_MTT);
        res_end_move(dev, slave, RES_CQ, cqn);

        put_res(dev, slave, mtt->com.res_id, RES_MTT);

        res_abort_move(dev, slave, RES_CQ, cqn);

int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
                          struct mlx4_vhcr *vhcr,
                          struct mlx4_cmd_mailbox *inbox,
                          struct mlx4_cmd_mailbox *outbox,
                          struct mlx4_cmd_info *cmd)
        int cqn = vhcr->in_modifier;

        err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);

        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

        atomic_dec(&cq->mtt->ref_count);
        res_end_move(dev, slave, RES_CQ, cqn);

        res_abort_move(dev, slave, RES_CQ, cqn);
int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
                          struct mlx4_vhcr *vhcr,
                          struct mlx4_cmd_mailbox *inbox,
                          struct mlx4_cmd_mailbox *outbox,
                          struct mlx4_cmd_info *cmd)
        int cqn = vhcr->in_modifier;

        err = get_res(dev, slave, cqn, RES_CQ, &cq);

        if (cq->com.from_state != RES_CQ_HW)

        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

        put_res(dev, slave, cqn, RES_CQ);

static int handle_resize(struct mlx4_dev *dev, int slave,
                         struct mlx4_vhcr *vhcr,
                         struct mlx4_cmd_mailbox *inbox,
                         struct mlx4_cmd_mailbox *outbox,
                         struct mlx4_cmd_info *cmd,
                         struct res_cq *cq)
        struct res_mtt *orig_mtt;
        struct res_mtt *mtt;
        struct mlx4_cq_context *cqc = inbox->buf;
        int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;

        err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);

        if (orig_mtt != cq->mtt) {

        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);

        err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);

        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

        atomic_dec(&orig_mtt->ref_count);
        put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
        atomic_inc(&mtt->ref_count);

        put_res(dev, slave, mtt->com.res_id, RES_MTT);

        put_res(dev, slave, mtt->com.res_id, RES_MTT);

        put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);

int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
        int cqn = vhcr->in_modifier;

        err = get_res(dev, slave, cqn, RES_CQ, &cq);

        if (cq->com.from_state != RES_CQ_HW)

        if (vhcr->op_modifier == 0) {
                err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);

        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

        put_res(dev, slave, cqn, RES_CQ);

static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
        int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
        int log_rq_stride = srqc->logstride & 7;
        int page_shift = (srqc->log_page_size & 0x3f) + 12;

        if (log_srq_size + log_rq_stride + 4 < page_shift)

        return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);

int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
        int srqn = vhcr->in_modifier;
        struct res_mtt *mtt;
        struct res_srq *srq;
        struct mlx4_srq_context *srqc = inbox->buf;
        int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;

        if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))

        err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);

        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);

        err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
                              mtt);

        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

        atomic_inc(&mtt->ref_count);

        put_res(dev, slave, mtt->com.res_id, RES_MTT);
        res_end_move(dev, slave, RES_SRQ, srqn);

        put_res(dev, slave, mtt->com.res_id, RES_MTT);

        res_abort_move(dev, slave, RES_SRQ, srqn);

int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
        int srqn = vhcr->in_modifier;
        struct res_srq *srq;

        err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);

        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

        atomic_dec(&srq->mtt->ref_count);

                atomic_dec(&srq->cq->ref_count);
        res_end_move(dev, slave, RES_SRQ, srqn);

        res_abort_move(dev, slave, RES_SRQ, srqn);

int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
        int srqn = vhcr->in_modifier;
        struct res_srq *srq;

        err = get_res(dev, slave, srqn, RES_SRQ, &srq);

        if (srq->com.from_state != RES_SRQ_HW) {

        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

        put_res(dev, slave, srqn, RES_SRQ);

int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
                         struct mlx4_vhcr *vhcr,
                         struct mlx4_cmd_mailbox *inbox,
                         struct mlx4_cmd_mailbox *outbox,
                         struct mlx4_cmd_info *cmd)
        int srqn = vhcr->in_modifier;
        struct res_srq *srq;

        err = get_res(dev, slave, srqn, RES_SRQ, &srq);

        if (srq->com.from_state != RES_SRQ_HW) {

        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

        put_res(dev, slave, srqn, RES_SRQ);
int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
                        struct mlx4_vhcr *vhcr,
                        struct mlx4_cmd_mailbox *inbox,
                        struct mlx4_cmd_mailbox *outbox,
                        struct mlx4_cmd_info *cmd)
        int qpn = vhcr->in_modifier & 0x7fffff;

        err = get_res(dev, slave, qpn, RES_QP, &qp);

        if (qp->com.from_state != RES_QP_HW) {

        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

        put_res(dev, slave, qpn, RES_QP);

int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
                              struct mlx4_vhcr *vhcr,
                              struct mlx4_cmd_mailbox *inbox,
                              struct mlx4_cmd_mailbox *outbox,
                              struct mlx4_cmd_info *cmd)
        struct mlx4_qp_context *context = inbox->buf + 8;
        adjust_proxy_tun_qkey(dev, vhcr, context);
        update_pkey_index(dev, slave, inbox);
        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
                             struct mlx4_vhcr *vhcr,
                             struct mlx4_cmd_mailbox *inbox,
                             struct mlx4_cmd_mailbox *outbox,
                             struct mlx4_cmd_info *cmd)
        struct mlx4_qp_context *qpc = inbox->buf + 8;

        err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);

        update_pkey_index(dev, slave, inbox);
        update_gid(dev, inbox, (u8)slave);
        adjust_proxy_tun_qkey(dev, vhcr, qpc);

        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
                            struct mlx4_vhcr *vhcr,
                            struct mlx4_cmd_mailbox *inbox,
                            struct mlx4_cmd_mailbox *outbox,
                            struct mlx4_cmd_info *cmd)
        struct mlx4_qp_context *context = inbox->buf + 8;

        err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);

        update_pkey_index(dev, slave, inbox);
        update_gid(dev, inbox, (u8)slave);
        adjust_proxy_tun_qkey(dev, vhcr, context);
        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
                            struct mlx4_vhcr *vhcr,
                            struct mlx4_cmd_mailbox *inbox,
                            struct mlx4_cmd_mailbox *outbox,
                            struct mlx4_cmd_info *cmd)
        struct mlx4_qp_context *context = inbox->buf + 8;

        err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);

        update_pkey_index(dev, slave, inbox);
        update_gid(dev, inbox, (u8)slave);
        adjust_proxy_tun_qkey(dev, vhcr, context);
        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
                              struct mlx4_vhcr *vhcr,
                              struct mlx4_cmd_mailbox *inbox,
                              struct mlx4_cmd_mailbox *outbox,
                              struct mlx4_cmd_info *cmd)
        struct mlx4_qp_context *context = inbox->buf + 8;
        adjust_proxy_tun_qkey(dev, vhcr, context);
        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
                            struct mlx4_vhcr *vhcr,
                            struct mlx4_cmd_mailbox *inbox,
                            struct mlx4_cmd_mailbox *outbox,
                            struct mlx4_cmd_info *cmd)
        struct mlx4_qp_context *context = inbox->buf + 8;

        err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);

        adjust_proxy_tun_qkey(dev, vhcr, context);
        update_gid(dev, inbox, (u8)slave);
        update_pkey_index(dev, slave, inbox);
        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
                            struct mlx4_vhcr *vhcr,
                            struct mlx4_cmd_mailbox *inbox,
                            struct mlx4_cmd_mailbox *outbox,
                            struct mlx4_cmd_info *cmd)
        struct mlx4_qp_context *context = inbox->buf + 8;

        err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);

        adjust_proxy_tun_qkey(dev, vhcr, context);
        update_gid(dev, inbox, (u8)slave);
        update_pkey_index(dev, slave, inbox);
        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
                         struct mlx4_vhcr *vhcr,
                         struct mlx4_cmd_mailbox *inbox,
                         struct mlx4_cmd_mailbox *outbox,
                         struct mlx4_cmd_info *cmd)
        int qpn = vhcr->in_modifier & 0x7fffff;

        err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);

        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

        atomic_dec(&qp->mtt->ref_count);
        atomic_dec(&qp->rcq->ref_count);
                atomic_dec(&qp->scq->ref_count);
                atomic_dec(&qp->srq->ref_count);
        res_end_move(dev, slave, RES_QP, qpn);

        res_abort_move(dev, slave, RES_QP, qpn);
static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
                                struct res_qp *rqp, u8 *gid)
        struct res_gid *res;

        list_for_each_entry(res, &rqp->mcg_list, list) {
                if (!memcmp(res->gid, gid, 16))

static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
                       u8 *gid, enum mlx4_protocol prot,
                       enum mlx4_steer_type steer)
        struct res_gid *res;

        res = kzalloc(sizeof *res, GFP_KERNEL);

        spin_lock_irq(&rqp->mcg_spl);
        if (find_gid(dev, slave, rqp, gid)) {
                memcpy(res->gid, gid, 16);
                list_add_tail(&res->list, &rqp->mcg_list);

        spin_unlock_irq(&rqp->mcg_spl);

static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
                       u8 *gid, enum mlx4_protocol prot,
                       enum mlx4_steer_type steer)
        struct res_gid *res;

        spin_lock_irq(&rqp->mcg_spl);
        res = find_gid(dev, slave, rqp, gid);
        if (!res || res->prot != prot || res->steer != steer)
                list_del(&res->list);

        spin_unlock_irq(&rqp->mcg_spl);

int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
        struct mlx4_qp qp; /* dummy for calling attach/detach */
        u8 *gid = inbox->buf;
        enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
        int attach = vhcr->op_modifier;
        int block_loopback = vhcr->in_modifier >> 31;
        u8 steer_type_mask = 2;
        enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;

        qpn = vhcr->in_modifier & 0xffffff;
        err = get_res(dev, slave, qpn, RES_QP, &rqp);

                err = add_mcg_res(dev, slave, rqp, gid, prot, type);

                err = mlx4_qp_attach_common(dev, &qp, gid,
                                            block_loopback, prot, type);

                err = rem_mcg_res(dev, slave, rqp, gid, prot, type);

                err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);

        put_res(dev, slave, qpn, RES_QP);

        /* ignore error return below, already in error */
        (void) rem_mcg_res(dev, slave, rqp, gid, prot, type);

        put_res(dev, slave, qpn, RES_QP);
2985 * MAC validation for Flow Steering rules.
2986 * VF can attach rules only with a mac address which is assigned to it.
2988 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
2989 struct list_head *rlist)
2991 struct mac_res *res, *tmp;
2994 /* make sure it isn't multicast or broadcast mac*/
2995 if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
2996 !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
2997 list_for_each_entry_safe(res, tmp, rlist, list) {
2998 be_mac = cpu_to_be64(res->mac << 16);
2999 if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
3002 pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
3003 eth_header->eth.dst_mac, slave);
/*
 * In case of a missing eth header, insert an eth header with a MAC
 * address assigned to the VF.
 */
static int add_eth_header(struct mlx4_dev *dev, int slave,
			  struct mlx4_cmd_mailbox *inbox,
			  struct list_head *rlist, int header_id)
{
	struct mac_res *res, *tmp;
	u8 port;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	struct mlx4_net_trans_rule_hw_eth *eth_header;
	struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
	struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
	__be64 be_mac = 0;
	__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);

	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
	port = be32_to_cpu(ctrl->vf_vep_port) & 0xff;
	eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);

	/* Clear a space in the inbox for the eth header */
	switch (header_id) {
	case MLX4_NET_TRANS_RULE_ID_IPV4:
		ip_header =
			(struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
		memmove(ip_header, eth_header,
			sizeof(*ip_header) + sizeof(*l4_header));
		break;
	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
			    (eth_header + 1);
		memmove(l4_header, eth_header, sizeof(*l4_header));
		break;
	default:
		return -EINVAL;
	}
	list_for_each_entry_safe(res, tmp, rlist, list) {
		if (port == res->port) {
			be_mac = cpu_to_be64(res->mac << 16);
			break;
		}
	}
	if (!be_mac) {
		pr_err("Failed adding eth header to FS rule, can't find matching MAC for port %d\n",
		       port);
		return -EINVAL;
	}

	memset(eth_header, 0, sizeof(*eth_header));
	eth_header->size = sizeof(*eth_header) >> 2;
	eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
	memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
	memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);

	return 0;
}
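/*
 * Wrapper for flow-steering rule attach requests coming from a slave.
 * Only valid when the device is in device-managed steering mode.  The
 * rule's first header segment is inspected: an L2 segment must carry a
 * MAC that belongs to the requesting VF, and a rule that starts at
 * L3/L4 gets an L2 header inserted on the VF's behalf before the
 * command is forwarded to firmware.  On success, the resulting rule ID
 * (returned in out_param) is registered in the resource tracker.
 */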
int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
	int err;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	struct _rule_hw *rule_header;
	int header_id;

	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return -EOPNOTSUPP;

	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
	rule_header = (struct _rule_hw *)(ctrl + 1);
	header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));

	switch (header_id) {
	case MLX4_NET_TRANS_RULE_ID_ETH:
		if (validate_eth_header_mac(slave, rule_header, rlist))
			return -EINVAL;
		break;
	case MLX4_NET_TRANS_RULE_ID_IPV4:
	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
		if (add_eth_header(dev, slave, inbox, rlist, header_id))
			return -EINVAL;
		vhcr->in_modifier +=
			sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
		break;
	default:
		pr_err("Corrupted mailbox\n");
		return -EINVAL;
	}

	err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
			   vhcr->in_modifier, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (err)
		return err;

	err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, 0);
	if (err) {
		mlx4_err(dev, "Failed to add flow steering resources\n");
		/* detach rule */
		mlx4_cmd(dev, vhcr->out_param, 0, 0,
			 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
			 MLX4_CMD_NATIVE);
	}
	return err;
}
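/*
 * Wrapper for flow-steering rule detach requests from a slave: release
 * the rule from the resource tracker first, then forward the detach
 * command to firmware.
 */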
int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd)
{
	int err;

	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return -EOPNOTSUPP;

	err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
	if (err) {
		mlx4_err(dev, "Failed to remove flow steering resources\n");
		return err;
	}

	err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
	return err;
}
enum {
	BUSY_MAX_RETRIES = 10
};
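/*
 * QUERY_IF_STAT reads a counter; take a reference on the counter index
 * in the resource tracker for the duration of the DMA command so the
 * counter can't be freed underneath the query.
 */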
int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier & 0xffff;

	err = get_res(dev, slave, index, RES_COUNTER, NULL);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	put_res(dev, slave, index, RES_COUNTER);
	return err;
}
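/*
 * Detach every multicast group still attached to a slave's QP, using
 * the GID list mirrored in the QP's mcg_list.  This runs during slave
 * cleanup, where detach errors can only be ignored.
 */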
static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
{
	struct res_gid *rgid;
	struct res_gid *tmp;
	struct mlx4_qp qp; /* dummy for calling attach/detach */

	list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
		qp.qpn = rqp->local_qpn;
		(void) mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
					     rgid->steer);
		list_del(&rgid->list);
		kfree(rgid);
	}
}
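/*
 * Mark every resource of the given type owned by the slave as busy
 * (RES_ANY_BUSY) so no other flow can grab it during cleanup.  A
 * resource that is already busy belongs to a command still in flight;
 * it is counted and retried by the caller.  Returns the number of
 * resources that could not be claimed.
 */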
static int _move_all_busy(struct mlx4_dev *dev, int slave,
			  enum mlx4_resource type, int print)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
	struct res_common *r;
	struct res_common *tmp;
	int busy;

	busy = 0;
	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(r, tmp, rlist, list) {
		if (r->owner == slave) {
			if (!r->removing) {
				if (r->state == RES_ANY_BUSY) {
					if (print)
						mlx4_dbg(dev,
							 "%s id 0x%llx is busy\n",
							 ResourceType(type),
							 r->res_id);
					++busy;
				} else {
					r->from_state = r->state;
					r->state = RES_ANY_BUSY;
					r->removing = 1;
				}
			}
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return busy;
}
static int move_all_busy(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type)
{
	unsigned long begin;
	int busy;

	begin = jiffies;
	do {
		busy = _move_all_busy(dev, slave, type, 0);
		if (time_after(jiffies, begin + 5 * HZ))
			break;
		if (busy)
			cond_resched();
	} while (busy);

	if (busy)
		busy = _move_all_busy(dev, slave, type, 1);

	return busy;
}
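/*
 * The rem_slave_* functions below all follow the same pattern: force
 * the slave's resources of one type to busy, then walk the per-slave
 * list and unwind each resource from its last known state (from_state)
 * down to free, releasing HW ownership, ICM and the tracker entry in
 * turn.  The tracker lock is dropped while a single entry is being
 * destroyed and retaken to advance the list walk.
 */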
static void rem_slave_qps(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	int state;
	u64 in_param;
	int qpn;
	int err;

	err = move_all_busy(dev, slave, RES_QP);
	if (err)
		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == slave) {
			qpn = qp->com.res_id;
			detach_qp(dev, slave, qp);
			state = qp->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_QP_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&qp->com.node,
						 &tracker->res_tree[RES_QP]);
					list_del(&qp->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(qp);
					state = 0;
					break;
				case RES_QP_MAPPED:
					if (!valid_reserved(dev, slave, qpn))
						__mlx4_qp_free_icm(dev, qpn);
					state = RES_QP_RESERVED;
					break;
				case RES_QP_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param,
						       qp->local_qpn, 2,
						       MLX4_CMD_2RST_QP,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
							 slave, qp->local_qpn);
					atomic_dec(&qp->rcq->ref_count);
					atomic_dec(&qp->scq->ref_count);
					atomic_dec(&qp->mtt->ref_count);
					if (qp->srq)
						atomic_dec(&qp->srq->ref_count);
					state = RES_QP_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *srq_list =
		&tracker->slave_list[slave].res_list[RES_SRQ];
	struct res_srq *srq;
	struct res_srq *tmp;
	int state;
	u64 in_param;
	int srqn;
	int err;

	err = move_all_busy(dev, slave, RES_SRQ);
	if (err)
		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (srq->com.owner == slave) {
			srqn = srq->com.res_id;
			state = srq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_SRQ_ALLOCATED:
					__mlx4_srq_free_icm(dev, srqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&srq->com.node,
						 &tracker->res_tree[RES_SRQ]);
					list_del(&srq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(srq);
					state = 0;
					break;

				case RES_SRQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, srqn, 1,
						       MLX4_CMD_HW2SW_SRQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
							 slave, srqn);

					atomic_dec(&srq->mtt->ref_count);
					if (srq->cq)
						atomic_dec(&srq->cq->ref_count);
					state = RES_SRQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *cq_list =
		&tracker->slave_list[slave].res_list[RES_CQ];
	struct res_cq *cq;
	struct res_cq *tmp;
	int state;
	u64 in_param;
	int cqn;
	int err;

	err = move_all_busy(dev, slave, RES_CQ);
	if (err)
		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
			cqn = cq->com.res_id;
			state = cq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_CQ_ALLOCATED:
					__mlx4_cq_free_icm(dev, cqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&cq->com.node,
						 &tracker->res_tree[RES_CQ]);
					list_del(&cq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(cq);
					state = 0;
					break;

				case RES_CQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, cqn, 1,
						       MLX4_CMD_HW2SW_CQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
							 slave, cqn);
					atomic_dec(&cq->mtt->ref_count);
					state = RES_CQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mpt_list =
		&tracker->slave_list[slave].res_list[RES_MPT];
	struct res_mpt *mpt;
	struct res_mpt *tmp;
	int state;
	u64 in_param;
	int mptn;
	int err;

	err = move_all_busy(dev, slave, RES_MPT);
	if (err)
		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mpt->com.owner == slave) {
			mptn = mpt->com.res_id;
			state = mpt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MPT_RESERVED:
					__mlx4_mr_release(dev, mpt->key);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mpt->com.node,
						 &tracker->res_tree[RES_MPT]);
					list_del(&mpt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(mpt);
					state = 0;
					break;

				case RES_MPT_MAPPED:
					__mlx4_mr_free_icm(dev, mpt->key);
					state = RES_MPT_RESERVED;
					break;

				case RES_MPT_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, mptn, 0,
						       MLX4_CMD_HW2SW_MPT,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
							 slave, mptn);
					if (mpt->mtt)
						atomic_dec(&mpt->mtt->ref_count);
					state = RES_MPT_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *mtt_list =
		&tracker->slave_list[slave].res_list[RES_MTT];
	struct res_mtt *mtt;
	struct res_mtt *tmp;
	int state;
	int base;
	int err;

	err = move_all_busy(dev, slave, RES_MTT);
	if (err)
		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mtt->com.owner == slave) {
			base = mtt->com.res_id;
			state = mtt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MTT_ALLOCATED:
					__mlx4_free_mtt_range(dev, base,
							      mtt->order);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mtt->com.node,
						 &tracker->res_tree[RES_MTT]);
					list_del(&mtt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(mtt);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *fs_rule_list =
		&tracker->slave_list[slave].res_list[RES_FS_RULE];
	struct res_fs_rule *fs_rule;
	struct res_fs_rule *tmp;
	int state;
	u64 base;
	int err;

	err = move_all_busy(dev, slave, RES_FS_RULE);
	if (err)
		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (fs_rule->com.owner == slave) {
			base = fs_rule->com.res_id;
			state = fs_rule->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_FS_RULE_ALLOCATED:
					/* detach rule */
					err = mlx4_cmd(dev, base, 0, 0,
						       MLX4_QP_FLOW_STEERING_DETACH,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);

					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&fs_rule->com.node,
						 &tracker->res_tree[RES_FS_RULE]);
					list_del(&fs_rule->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(fs_rule);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
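/*
 * EQ cleanup needs a command mailbox for HW2SW_EQ; if the allocation
 * fails, the loop reschedules and retries the same state rather than
 * leaking the EQ.
 */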
static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *eq_list =
		&tracker->slave_list[slave].res_list[RES_EQ];
	struct res_eq *eq;
	struct res_eq *tmp;
	int err;
	int state;
	int eqn;
	struct mlx4_cmd_mailbox *mailbox;

	err = move_all_busy(dev, slave, RES_EQ);
	if (err)
		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (eq->com.owner == slave) {
			eqn = eq->com.res_id;
			state = eq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_EQ_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&eq->com.node,
						 &tracker->res_tree[RES_EQ]);
					list_del(&eq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(eq);
					state = 0;
					break;

				case RES_EQ_HW:
					mailbox = mlx4_alloc_cmd_mailbox(dev);
					if (IS_ERR(mailbox)) {
						cond_resched();
						continue;
					}
					err = mlx4_cmd_box(dev, slave, 0,
							   eqn, 0,
							   MLX4_CMD_HW2SW_EQ,
							   MLX4_CMD_TIME_CLASS_A,
							   MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
							 slave, eqn);
					mlx4_free_cmd_mailbox(dev, mailbox);
					atomic_dec(&eq->mtt->ref_count);
					state = RES_EQ_RESERVED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
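/*
 * Counters and XRC domains have a single ALLOCATED state, so they are
 * unwound directly under the tracker lock with no HW command.
 */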
static void rem_slave_counters(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *counter_list =
		&tracker->slave_list[slave].res_list[RES_COUNTER];
	struct res_counter *counter;
	struct res_counter *tmp;
	int err;
	int index;

	err = move_all_busy(dev, slave, RES_COUNTER);
	if (err)
		mlx4_warn(dev, "rem_slave_counters: Could not move all counters to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
		if (counter->com.owner == slave) {
			index = counter->com.res_id;
			rb_erase(&counter->com.node,
				 &tracker->res_tree[RES_COUNTER]);
			list_del(&counter->com.list);
			kfree(counter);
			__mlx4_counter_free(dev, index);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *xrcdn_list =
		&tracker->slave_list[slave].res_list[RES_XRCD];
	struct res_xrcdn *xrcd;
	struct res_xrcdn *tmp;
	int err;
	int xrcdn;

	err = move_all_busy(dev, slave, RES_XRCD);
	if (err)
		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
		if (xrcd->com.owner == slave) {
			xrcdn = xrcd->com.res_id;
			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
			list_del(&xrcd->com.list);
			kfree(xrcd);
			__mlx4_xrcd_free(dev, xrcdn);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
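/*
 * Tear down everything a slave owns, under the slave's tracker mutex.
 * QPs are destroyed early (right after MACs) because destroying them
 * drops the references they hold on CQs, SRQs and MTTs, which are only
 * freed later in the sequence.
 */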
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
	rem_slave_macs(dev, slave);
	rem_slave_qps(dev, slave);
	rem_slave_srqs(dev, slave);
	rem_slave_cqs(dev, slave);
	rem_slave_mrs(dev, slave);
	rem_slave_eqs(dev, slave);
	rem_slave_mtts(dev, slave);
	rem_slave_counters(dev, slave);
	rem_slave_xrcdns(dev, slave);
	rem_slave_fs_rule(dev, slave);
	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}