/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4.h"
#include "fw.h"
#define MLX4_MAC_VALID		(1ull << 63)
#define MLX4_MAC_MASK		0x7fffffffffffffffULL
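/*
 * A registered MAC address is kept in the low 63 bits of a u64; the top
 * bit (MLX4_MAC_VALID) marks the entry as in use, so masking with
 * MLX4_MAC_MASK recovers the address itself.
 */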
struct mac_res {
	struct list_head list;
	u64 mac;
	u8 port;
};

struct res_common {
	struct list_head	list;
	u32			res_id;
	int			owner;
	int			state;
	int			from_state;
	int			to_state;
	int			removing;
};

enum {
	RES_ANY_BUSY = 1
};

struct res_gid {
	struct list_head	list;
	u8			gid[16];
	enum mlx4_protocol	prot;
};

enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};
static inline const char *qp_states_str(enum res_qp_states state)
{
	switch (state) {
	case RES_QP_BUSY: return "RES_QP_BUSY";
	case RES_QP_RESERVED: return "RES_QP_RESERVED";
	case RES_QP_MAPPED: return "RES_QP_MAPPED";
	case RES_QP_HW: return "RES_QP_HW";
	default: return "Unknown";
	}
}
struct res_qp {
	struct res_common	com;
	struct res_mtt	       *mtt;
	struct res_cq	       *rcq;
	struct res_cq	       *scq;
	struct res_srq	       *srq;
	struct list_head	mcg_list;
	spinlock_t		mcg_spl;
	int			local_qpn;
};
enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}
struct res_mtt {
	struct res_common	com;
	int			order;
	atomic_t		ref_count;
};
enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common	com;
	struct res_mtt	       *mtt;
	int			key;
};
enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common	com;
	struct res_mtt	       *mtt;
};
enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common	com;
	struct res_mtt	       *mtt;
	atomic_t		ref_count;
};
enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

static inline const char *srq_states_str(enum res_srq_states state)
{
	switch (state) {
	case RES_SRQ_BUSY: return "RES_SRQ_BUSY";
	case RES_SRQ_ALLOCATED: return "RES_SRQ_ALLOCATED";
	case RES_SRQ_HW: return "RES_SRQ_HW";
	default: return "Unknown";
	}
}

struct res_srq {
	struct res_common	com;
	struct res_mtt	       *mtt;
	struct res_cq	       *cq;
	atomic_t		ref_count;
};
enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

static inline const char *counter_states_str(enum res_counter_states state)
{
	switch (state) {
	case RES_COUNTER_BUSY: return "RES_COUNTER_BUSY";
	case RES_COUNTER_ALLOCATED: return "RES_COUNTER_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_counter {
	struct res_common	com;
	int			port;
};
/* Map a tracked resource type to a printable name for debug output. */
static const char *ResourceType(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	default: return "Unknown resource type";
	}
}
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;
	int t;

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0 ; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		INIT_RADIX_TREE(&priv->mfunc.master.res_tracker.res_tree[i],
				GFP_ATOMIC|__GFP_NOWARN);

	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;
}
void mlx4_free_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		for (i = 0 ; i < dev->num_slaves; i++)
			mlx4_delete_all_resources_for_slave(dev, i);

		kfree(priv->mfunc.master.res_tracker.slave_list);
	}
}
static void update_ud_gid(struct mlx4_dev *dev,
			  struct mlx4_qp_context *qp_ctx, u8 slave)
{
	u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;

	if (MLX4_QP_ST_UD == ts)
		qp_ctx->pri_path.mgid_index = 0x80 | slave;

	mlx4_dbg(dev, "slave %d, new gid index: 0x%x ",
		 slave, qp_ctx->pri_path.mgid_index);
}
static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}
static void *find_res(struct mlx4_dev *dev, int res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return radix_tree_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				 res_id);
}
static int get_res(struct mlx4_dev *dev, int slave, int res_id,
		   enum mlx4_resource type,
		   void *res)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENOENT;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;
	mlx4_dbg(dev, "res %s id 0x%x to busy\n",
		 ResourceType(type), r->res_id);

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    int res_id, int *slave)
{
	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;

	spin_lock_irq(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static void put_res(struct mlx4_dev *dev, int slave, int res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r)
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
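/*
 * get_res()/put_res() implement a simple busy-bit protocol around the
 * per-type radix trees: get_res() verifies ownership, parks the current
 * state in from_state and marks the entry RES_ANY_BUSY so that racing
 * callers back off with -EBUSY; put_res() restores from_state.  A
 * minimal usage sketch, using the names defined in this file:
 *
 *	struct res_mpt *mpt;
 *
 *	err = get_res(dev, slave, id, RES_MPT, &mpt);
 *	if (err)
 *		return err;
 *	(... use mpt; the resource cannot change state under us ...)
 *	put_res(dev, slave, id, RES_MPT);
 */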
static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);

	return &ret->com;
}
static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}
static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}
static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}
static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}
static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}
static struct res_common *alloc_counter_tr(int id)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;

	return &ret->com;
}
static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		printk(KERN_ERR "implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id);
		break;
	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}
static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct radix_tree_root *root = &tracker->res_tree[type];

	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = radix_tree_insert(root, base + i, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	/* i indexes res_arr, so delete ids base..base + i - 1 */
	for (--i; i >= 0; --i)
		radix_tree_delete(&tracker->res_tree[type], base + i);

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}
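/*
 * add_res_range() is deliberately two-phase: all tracking structs are
 * allocated with GFP_KERNEL before the tracker spinlock is taken, and
 * the radix-tree inserts done under the lock are unwound completely on
 * any -EEXIST or insert failure, so a range is either tracked in full
 * or not at all.
 */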
static int remove_qp_ok(struct res_qp *res)
{
	if (res->com.state == RES_QP_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_QP_RESERVED)
		return -EPERM;

	return 0;
}
static int remove_mtt_ok(struct res_mtt *res, int order)
{
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
		       __func__, __LINE__,
		       mtt_states_str(res->com.state),
		       atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_MTT_ALLOCATED)
		return -EPERM;
	else if (res->order != order)
		return -EINVAL;

	return 0;
}
static int remove_mpt_ok(struct res_mpt *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}
static int remove_eq_ok(struct res_eq *res)
{
	/* was a copy-paste of the MPT checks; EQs have their own states */
	if (res->com.state == RES_EQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_EQ_RESERVED)
		return -EPERM;

	return 0;
}
static int remove_counter_ok(struct res_counter *res)
{
	if (res->com.state == RES_COUNTER_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_COUNTER_ALLOCATED)
		return -EPERM;

	return 0;
}
static int remove_cq_ok(struct res_cq *res)
{
	if (res->com.state == RES_CQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_CQ_ALLOCATED)
		return -EPERM;

	return 0;
}
static int remove_srq_ok(struct res_srq *res)
{
	if (res->com.state == RES_SRQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_SRQ_ALLOCATED)
		return -EPERM;

	return 0;
}
static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
	switch (type) {
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_MAC:
		return -ENOSYS;
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	default:
		return -EINVAL;
	}
}
static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = radix_tree_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = radix_tree_lookup(&tracker->res_tree[type], i);
		radix_tree_delete(&tracker->res_tree[type], i);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_qp *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%x\n",
				 __func__, r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%x\n", r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%x\n",
					 r->com.res_id);
				err = -EINVAL;
			}
			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = (struct res_qp *)r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mpt *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_MPT], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_MPT_BUSY:
			err = -EINVAL;
			break;

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		case RES_MPT_MAPPED:
			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)
				err = -EINVAL;
			break;

		case RES_MPT_HW:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;
			if (mpt)
				*mpt = (struct res_mpt *)r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_eq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_EQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_EQ_BUSY:
			err = -EINVAL;
			break;

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)
				err = -EINVAL;
			break;

		case RES_EQ_HW:
			if (r->com.state != RES_EQ_RESERVED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;
			if (eq)
				*eq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_cq *r;
	int err;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_CQ], cqn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_CQ_BUSY:
			err = -EBUSY;
			break;

		case RES_CQ_ALLOCATED:
			if (r->com.state != RES_CQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			else
				err = 0;
			break;

		case RES_CQ_HW:
			if (r->com.state != RES_CQ_ALLOCATED)
				err = -EINVAL;
			else
				err = 0;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_CQ_BUSY;
			if (cq)
				*cq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_srq_states state, struct res_srq **srq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_srq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_SRQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_SRQ_BUSY:
			err = -EINVAL;
			break;

		case RES_SRQ_ALLOCATED:
			if (r->com.state != RES_SRQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			break;

		case RES_SRQ_HW:
			if (r->com.state != RES_SRQ_ALLOCATED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_SRQ_BUSY;
			if (srq)
				*srq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static void res_abort_move(struct mlx4_dev *dev, int slave,
			   enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
static void res_end_move(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->to_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
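/*
 * The *_res_start_move_to()/res_end_move()/res_abort_move() trio
 * brackets every firmware command that changes a resource's state.
 * The pattern, as used by the command wrappers below (sketch only):
 *
 *	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
 *	if (err)
 *		return err;
 *	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
 *	if (err)
 *		res_abort_move(dev, slave, RES_QP, qpn);  (revert to from_state)
 *	else
 *		res_end_move(dev, slave, RES_QP, qpn);    (commit to_state)
 */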
static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn);
}
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err;
	int count;
	int align;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		count = get_param_l(&in_param);
		align = get_param_h(&in_param);
		err = __mlx4_qp_reserve_range(dev, count, align, &base);
		if (err)
			return err;

		err = add_res_range(dev, slave, base, count, RES_QP, 0);
		if (err) {
			__mlx4_qp_release_range(dev, base, count);
			return err;
		}
		set_param_l(out_param, base);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		if (valid_reserved(dev, slave, qpn)) {
			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
			if (err)
				return err;
		}

		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
					   NULL, 1);
		if (err)
			return err;

		if (!valid_reserved(dev, slave, qpn)) {
			err = __mlx4_qp_alloc_icm(dev, qpn);
			if (err) {
				res_abort_move(dev, slave, RES_QP, qpn);
				return err;
			}
		}

		res_end_move(dev, slave, RES_QP, qpn);
		break;

	default:
		err = -EINVAL;
		break;
	}
	return err;
}
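/*
 * Note the asymmetry in qp_alloc_res(): RES_OP_RESERVE only claims a QP
 * number range, while RES_OP_MAP_ICM additionally maps ICM memory.  For
 * firmware-reserved QPs (valid_reserved()) the tracker entry is created
 * lazily at ICM-map time, since no explicit reservation for them ever
 * passes through this wrapper.
 */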
static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return -EINVAL;

	order = get_param_l(&in_param);
	base = __mlx4_alloc_mtt_range(dev, order);
	if (base == -1)
		return -ENOMEM;

	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
	if (err)
		__mlx4_free_mtt_range(dev, base, order);
	else
		set_param_l(out_param, base);

	return err;
}
static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = __mlx4_mr_reserve(dev);
		if (index == -1)
			break;
		id = index & mpt_mask(dev);

		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
		if (err) {
			__mlx4_mr_release(dev, index);
			break;
		}
		set_param_l(out_param, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);
		if (err)
			return err;

		err = __mlx4_mr_alloc_icm(dev, mpt->key);
		if (err) {
			res_abort_move(dev, slave, RES_MPT, id);
			return err;
		}

		res_end_move(dev, slave, RES_MPT, id);
		break;
	}
	return err;
}
static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_cq_alloc_icm(dev, &cqn);
		if (err)
			break;

		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err) {
			__mlx4_cq_free_icm(dev, cqn);
			break;
		}

		set_param_l(out_param, cqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}
static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_srq_alloc_icm(dev, &srqn);
		if (err)
			break;

		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err) {
			__mlx4_srq_free_icm(dev, srqn);
			break;
		}

		set_param_l(out_param, srqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}
static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct mac_res *res;

	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res)
		return -ENOMEM;
	res->mac = mac;
	res->port = (u8) port;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_MAC]);
	return 0;
}
static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
			       int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			list_del(&res->list);
			kfree(res);
			break;
		}
	}
}
static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		list_del(&res->list);
		__mlx4_unregister_mac(dev, res->port, res->mac);
		kfree(res);
	}
}
static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int port;
	u64 mac;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	port = get_param_l(out_param);
	mac = in_param;

	err = __mlx4_register_mac(dev, port, mac);
	if (err >= 0) {
		set_param_l(out_param, err);
		err = 0;
	}

	if (!err) {
		err = mac_add_to_slave(dev, slave, mac, port);
		if (err)
			__mlx4_unregister_mac(dev, port, mac);
	}
	return err;
}
static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	/* VLAN registrations are not tracked yet */
	return 0;
}
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
	case RES_QP:
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MTT:
		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_CQ:
		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_VLAN:
		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
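/*
 * mlx4_ALLOC_RES_wrapper() is the master-side handler for a slave's
 * ALLOC_RES command: vhcr->in_modifier selects the resource type,
 * vhcr->op_modifier the operation (reserve, map ICM, or both), and
 * in_param/out_param carry the type-specific arguments, e.g. count and
 * alignment packed into the low/high 32 bits for a QP range.
 */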
static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param)
{
	int err;
	int count;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		base = get_param_l(&in_param) & 0x7fffff;
		count = get_param_h(&in_param);
		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
		if (err)
			break;
		__mlx4_qp_release_range(dev, base, count);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
					   NULL, 0);
		if (err)
			return err;

		if (!valid_reserved(dev, slave, qpn))
			__mlx4_qp_free_icm(dev, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

		if (valid_reserved(dev, slave, qpn))
			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return -EINVAL;

	base = get_param_l(&in_param);
	order = get_param_h(&in_param);
	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
	if (!err)
		__mlx4_free_mtt_range(dev, base, order);
	return err;
}
static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = get_res(dev, slave, id, RES_MPT, &mpt);
		if (err)
			break;
		index = mpt->key;
		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
		if (err)
			break;
		__mlx4_mr_release(dev, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);
		if (err)
			return err;

		__mlx4_mr_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);
		return err;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		cqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err)
			break;

		__mlx4_cq_free_icm(dev, cqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		srqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err)
			break;

		__mlx4_srq_free_icm(dev, srqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int port;
	int err = 0;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		port = get_param_l(out_param);
		mac_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_mac(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	/* VLAN registrations are not tracked yet */
	return 0;
}
int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err = -EINVAL;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
	case RES_QP:
		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param);
		break;

	case RES_MTT:
		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param);
		break;

	case RES_CQ:
		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_VLAN:
		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	default:
		break;
	}
	return err;
}
/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
	return (be32_to_cpu(mpt->flags) >> 9) & 1;
}
static int mr_get_mtt_seg(struct mlx4_mpt_entry *mpt)
{
	return (int)be64_to_cpu(mpt->mtt_seg) & 0xfffffff8;
}

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->mtt_sz);
}

static int mr_get_pdn(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & 0xffffff;
}
static int qp_get_mtt_seg(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}

static int srq_get_mtt_seg(struct mlx4_srq_context *srqc)
{
	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}
static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
	int page_shift = (qpc->log_page_size & 0x3f) + 12;
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_stride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
	int sq_size;
	int rq_size;
	int total_mem;
	int total_pages;
	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
	rq_size = (srq | rss | xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
	total_pages =
		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
				   page_shift);

	return total_pages;
}
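/*
 * Worked example for qp_get_mtt_size(): with log_sq_size = 6 and
 * log_sq_stride = 2 (64 WQEs of 64 bytes each), no RQ (srq/rss/xrc
 * set), page_shift = 12 and page_offset = 0, total_mem is
 * 1 << (6 + 2 + 4) = 4096 bytes, so the QP needs
 * roundup_pow_of_two(4096 >> 12) = 1 MTT page.
 */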
static int qp_get_pdn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->pd) & 0xffffff;
}
static int pdn2slave(int pdn)
{
	return (pdn >> NOT_MASKED_PD_BITS) - 1;
}
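/*
 * Protection domain numbers handed to slaves embed the owning function:
 * the bits above NOT_MASKED_PD_BITS hold (slave + 1), so pdn2slave()
 * lets the wrappers below reject a context that references another
 * function's PD.
 */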
static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
			   int size, struct res_mtt *mtt)
{
	int res_start = mtt->com.res_id * dev->caps.mtts_per_seg;
	int res_size = (1 << mtt->order) * dev->caps.mtts_per_seg;

	if (start < res_start || start + size > res_start + res_size)
		return -EPERM;
	return 0;
}
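/*
 * check_mtt_range() example: a tracked MTT entry with res_id 4 and
 * order 1 on a device with mtts_per_seg = 8 covers MTT indexes
 * [32, 48); a request for start 40, size 8 fits exactly, while start
 * 40, size 9 would spill past the reservation and is refused.
 */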
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt;
	int mtt_base = (mr_get_mtt_seg(inbox->buf) / dev->caps.mtt_entry_sz) *
		dev->caps.mtts_per_seg;
	int phys;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
	if (err)
		return err;

	phys = mr_phys_mpt(inbox->buf);
	if (!phys) {
		err = get_res(dev, slave, mtt_base / dev->caps.mtts_per_seg,
			      RES_MTT, &mtt);
		if (err)
			goto ex_abort;

		err = check_mtt_range(dev, slave, mtt_base,
				      mr_get_mtt_size(inbox->buf), mtt);
		if (err)
			goto ex_put;

		mpt->mtt = mtt;
	}

	if (pdn2slave(mr_get_pdn(inbox->buf)) != slave) {
		err = -EPERM;
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	if (!phys) {
		atomic_inc(&mtt->ref_count);
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
	}

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_put:
	if (!phys)
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}
int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	if (mpt->mtt)
		atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}
int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = get_res(dev, slave, id, RES_MPT, &mpt);
	if (err)
		return err;

	if (mpt->com.from_state != RES_MPT_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

out:
	put_res(dev, slave, id, RES_MPT);
	return err;
}
static int qp_get_rcqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
}

static int qp_get_scqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
}

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_mtt *mtt;
	struct res_qp *qp;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int mtt_base = (qp_get_mtt_seg(qpc) / dev->caps.mtt_entry_sz) *
		dev->caps.mtts_per_seg;
	int mtt_size = qp_get_mtt_size(qpc);
	struct res_cq *rcq;
	struct res_cq *scq;
	int rcqn = qp_get_rcqn(qpc);
	int scqn = qp_get_scqn(qpc);
	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
	struct res_srq *srq;
	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
	if (err)
		return err;
	qp->local_qpn = local_qpn;

	err = get_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, RES_MTT,
		      &mtt);
	if (err)
		goto ex_abort;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto ex_put_mtt;

	if (pdn2slave(qp_get_pdn(qpc)) != slave) {
		err = -EPERM;
		goto ex_put_mtt;
	}

	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
	if (err)
		goto ex_put_mtt;

	if (scqn != rcqn) {
		err = get_res(dev, slave, scqn, RES_CQ, &scq);
		if (err)
			goto ex_put_rcq;
	} else
		scq = rcq;

	if (use_srq) {
		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
		if (err)
			goto ex_put_scq;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_srq;
	atomic_inc(&mtt->ref_count);
	qp->mtt = mtt;
	atomic_inc(&rcq->ref_count);
	qp->rcq = rcq;
	atomic_inc(&scq->ref_count);
	qp->scq = scq;

	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);

	if (use_srq) {
		atomic_inc(&srq->ref_count);
		put_res(dev, slave, srqn, RES_SRQ);
		qp->srq = srq;
	}
	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);

	return 0;

ex_put_srq:
	if (use_srq)
		put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
	put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
	put_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}
static int eq_get_mtt_seg(struct mlx4_eq_context *eqc)
{
	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
}

static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
{
	int log_eq_size = eqc->log_eq_size & 0x1f;
	int page_shift = (eqc->log_page_size & 0x3f) + 12;

	if (log_eq_size + 5 < page_shift)
		return 0;

	return 1 << (log_eq_size + 5 - page_shift);
}
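/*
 * eq_get_mtt_size() example: each EQE is 32 bytes (hence the "+ 5"), so
 * a 1024-entry EQ (log_eq_size = 10) with 4K pages (page_shift = 12)
 * occupies 1 << (10 + 5 - 12) = 8 MTT pages; an EQ smaller than one
 * page yields 0.  The CQ variant below follows the same arithmetic.
 */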
static int cq_get_mtt_seg(struct mlx4_cq_context *cqc)
{
	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}

static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
{
	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
	int page_shift = (cqc->log_page_size & 0x3f) + 12;

	if (log_cq_size + 5 < page_shift)
		return 0;

	return 1 << (log_cq_size + 5 - page_shift);
}
int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int eqn = vhcr->in_modifier;
	int res_id = (slave << 8) | eqn;
	struct mlx4_eq_context *eqc = inbox->buf;
	int mtt_base = (eq_get_mtt_seg(eqc) / dev->caps.mtt_entry_sz) *
		dev->caps.mtts_per_seg;
	int mtt_size = eq_get_mtt_size(eqc);
	struct res_eq *eq;
	struct res_mtt *mtt;

	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	if (err)
		return err;
	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
	if (err)
		goto out_add;

	err = get_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, RES_MTT,
		      &mtt);
	if (err)
		goto out_move;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto out_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;

	atomic_inc(&mtt->ref_count);
	eq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	return err;
}
static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
			      int len, struct res_mtt **res)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mtt *mtt;
	int err = -EINVAL;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
			    com.list) {
		if (!check_mtt_range(dev, slave, start, len, mtt)) {
			*res = mtt;
			mtt->com.from_state = mtt->com.state;
			mtt->com.state = RES_MTT_BUSY;
			err = 0;
			break;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_mtt mtt;
	__be64 *page_list = inbox->buf;
	u64 *pg_list = (u64 *)page_list;
	int err;
	struct res_mtt *rmtt = NULL;
	int start = be64_to_cpu(page_list[0]);
	int npages = vhcr->in_modifier;
	int i;

	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
	if (err)
		return err;

	/* Call the SW implementation of write_mtt:
	 * - Prepare a dummy mtt struct
	 * - Translate inbox contents to simple addresses in host endianness */
	mtt.offset = 0;
	mtt.order = 0;
	mtt.page_shift = 0;
	for (i = 0; i < npages; ++i)
		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);

	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
			       ((u64 *)page_list + 2));

	if (rmtt)
		put_res(dev, slave, rmtt->com.res_id, RES_MTT);

	return err;
}
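/*
 * The dummy mlx4_mtt above exists only to satisfy __mlx4_write_mtt()'s
 * signature: range ownership was already validated by
 * get_containing_mtt(), and the inbox page list is rewritten in place
 * from big-endian to host order (clearing the present bit via ~1ULL)
 * before being handed to the software path.
 */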
int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
	if (err)
		return err;

	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
	if (err)
		goto ex_abort;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	atomic_dec(&eq->mtt->ref_count);
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);

	return 0;

ex_put:
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_EQ, res_id);

	return err;
}
int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq;
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_modifier = 0;
	int err;
	int res_id;
	struct res_eq *req;

	if (!priv->mfunc.master.slave_state)
		return -EINVAL;

	event_eq = &priv->mfunc.master.slave_state[slave].event_eq;

	/* Create the event only if the slave is registered */
	if ((event_eq->event_type & (1 << eqe->type)) == 0)
		return 0;

	mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	res_id = (slave << 8) | event_eq->eqn;
	err = get_res(dev, slave, res_id, RES_EQ, &req);
	if (err)
		goto unlock;

	if (req->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto put;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto put;
	}

	if (eqe->type == MLX4_EVENT_TYPE_CMD) {
		++event_eq->token;
		eqe->event.cmd.token = cpu_to_be16(event_eq->token);
	}

	memcpy(mailbox->buf, (u8 *) eqe, 28);

	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);

	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_NATIVE);

	put_res(dev, slave, res_id, RES_EQ);
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;

put:
	put_res(dev, slave, res_id, RES_EQ);

unlock:
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	return err;
}
int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = get_res(dev, slave, res_id, RES_EQ, &eq);
	if (err)
		return err;

	if (eq->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

ex_put:
	put_res(dev, slave, res_id, RES_EQ);
	return err;
}
int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = (cq_get_mtt_seg(cqc) / dev->caps.mtt_entry_sz) *
		dev->caps.mtts_per_seg;
	struct res_cq *cq;
	struct res_mtt *mtt;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, RES_MTT,
		      &mtt);
	if (err)
		goto out_move;
	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto out_put;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}
int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_move;
	atomic_dec(&cq->mtt->ref_count);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}
int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}
static int handle_resize(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd,
			 struct res_cq *cq)
{
	int err;
	struct res_mtt *orig_mtt;
	struct res_mtt *mtt;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = (cq_get_mtt_seg(cqc) / dev->caps.mtt_entry_sz) *
		dev->caps.mtts_per_seg;

	err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
	if (err)
		return err;

	if (orig_mtt != cq->mtt) {
		err = -EINVAL;
		goto ex_put;
	}

	err = get_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, RES_MTT,
		      &mtt);
	if (err)
		goto ex_put;

	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto ex_put1;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put1;
	atomic_dec(&orig_mtt->ref_count);
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	return 0;

ex_put1:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_put:
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);

	return err;
}
int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	if (vhcr->op_modifier == 0) {
		err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}
static int srq_get_pdn(struct mlx4_srq_context *srqc)
{
	return be32_to_cpu(srqc->pd) & 0xffffff;
}

static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
{
	int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
	int log_rq_stride = srqc->logstride & 7;
	int page_shift = (srqc->log_page_size & 0x3f) + 12;

	if (log_srq_size + log_rq_stride + 4 < page_shift)
		return 0;

	return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
}
int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_srq *srq;
	struct mlx4_srq_context *srqc = inbox->buf;
	int mtt_base = (srq_get_mtt_seg(srqc) / dev->caps.mtt_entry_sz) *
		dev->caps.mtts_per_seg;

	if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
		return -EINVAL;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base / dev->caps.mtts_per_seg,
		      RES_MTT, &mtt);
	if (err)
		goto ex_abort;
	err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
			      mtt);
	if (err)
		goto ex_put_mtt;

	if (pdn2slave(srq_get_pdn(srqc)) != slave) {
		err = -EPERM;
		goto ex_put_mtt;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_mtt;

	atomic_inc(&mtt->ref_count);
	srq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_SRQ, srqn);
	return 0;

ex_put_mtt:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}
int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;
	atomic_dec(&srq->mtt->ref_count);
	if (srq->cq)
		atomic_dec(&srq->cq->ref_count);
	res_end_move(dev, slave, RES_SRQ, srqn);

	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}
int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;
	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}
int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;

	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}
int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = get_res(dev, slave, qpn, RES_QP, &qp);
	if (err)
		return err;
	if (qp->com.from_state != RES_QP_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}
int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *qpc = inbox->buf + 8;

	update_ud_gid(dev, qpc, (u8)slave);

	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	atomic_dec(&qp->mtt->ref_count);
	atomic_dec(&qp->rcq->ref_count);
	atomic_dec(&qp->scq->ref_count);
	if (qp->srq)
		atomic_dec(&qp->srq->ref_count);
	res_end_move(dev, slave, RES_QP, qpn);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}
static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
				struct res_qp *rqp, u8 *gid)
{
	struct res_gid *res;

	list_for_each_entry(res, &rqp->mcg_list, list) {
		if (!memcmp(res->gid, gid, 16))
			return res;
	}
	return NULL;
}
static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot)
{
	struct res_gid *res;
	int err;

	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	spin_lock_irq(&rqp->mcg_spl);
	if (find_gid(dev, slave, rqp, gid)) {
		kfree(res);
		err = -EEXIST;
	} else {
		memcpy(res->gid, gid, 16);
		res->prot = prot;
		list_add_tail(&res->list, &rqp->mcg_list);
		err = 0;
	}
	spin_unlock_irq(&rqp->mcg_spl);

	return err;
}
static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot)
{
	struct res_gid *res;
	int err;

	spin_lock_irq(&rqp->mcg_spl);
	res = find_gid(dev, slave, rqp, gid);
	if (!res || res->prot != prot)
		err = -EINVAL;
	else {
		list_del(&res->list);
		kfree(res);
		err = 0;
	}
	spin_unlock_irq(&rqp->mcg_spl);

	return err;
}
int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp qp; /* dummy for calling attach/detach */
	u8 *gid = inbox->buf;
	enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
	int err, err1;
	int qpn;
	struct res_qp *rqp;
	int attach = vhcr->op_modifier;
	int block_loopback = vhcr->in_modifier >> 31;
	u8 steer_type_mask = 2;
	enum mlx4_steer_type type = gid[7] & steer_type_mask;

	qpn = vhcr->in_modifier & 0xffffff;
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err)
		return err;

	qp.qpn = qpn;
	if (attach) {
		err = add_mcg_res(dev, slave, rqp, gid, prot);
		if (err)
			goto ex_put;

		err = mlx4_qp_attach_common(dev, &qp, gid,
					    block_loopback, prot, type);
		if (err)
			goto ex_rem;
	} else {
		err = rem_mcg_res(dev, slave, rqp, gid, prot);
		if (err)
			goto ex_put;
		err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
	}

	put_res(dev, slave, qpn, RES_QP);
	return err;

ex_rem:
	/* ignore error return below, already in error */
	err1 = rem_mcg_res(dev, slave, rqp, gid, prot);
ex_put:
	put_res(dev, slave, qpn, RES_QP);

	return err;
}
enum {
	BUSY_MAX_RETRIES = 10
};
int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier & 0xffff;

	err = get_res(dev, slave, index, RES_COUNTER, NULL);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	put_res(dev, slave, index, RES_COUNTER);
	return err;
}
static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
{
	struct res_gid *rgid;
	struct res_gid *tmp;
	int err;
	struct mlx4_qp qp; /* dummy for calling attach/detach */

	list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
		qp.qpn = rqp->local_qpn;
		err = mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
					    MLX4_MC_STEER);
		list_del(&rgid->list);
		kfree(rgid);
	}
}
static int _move_all_busy(struct mlx4_dev *dev, int slave,
			  enum mlx4_resource type, int print)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
	struct res_common *r;
	struct res_common *tmp;
	int busy;

	busy = 0;
	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(r, tmp, rlist, list) {
		if (r->owner == slave) {
			if (!r->removing) {
				if (r->state == RES_ANY_BUSY) {
					if (print)
						mlx4_dbg(dev,
							 "%s id 0x%x is busy\n",
							 ResourceType(type),
							 r->res_id);
					++busy;
				} else {
					r->from_state = r->state;
					r->state = RES_ANY_BUSY;
					r->removing = 1;
				}
			}
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return busy;
}
static int move_all_busy(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type)
{
	unsigned long begin;
	int busy;

	begin = jiffies;
	do {
		busy = _move_all_busy(dev, slave, type, 0);
		if (time_after(jiffies, begin + 5 * HZ))
			break;
		if (busy)
			cond_resched();
	} while (busy);

	if (busy)
		busy = _move_all_busy(dev, slave, type, 1);

	return busy;
}
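/*
 * move_all_busy() spins for at most five seconds waiting for in-flight
 * command wrappers to release their busy claims on the slave's
 * resources; whatever is still busy after the timeout is logged once
 * via the final print pass before the per-type cleanup below proceeds.
 */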
static void rem_slave_qps(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	int state;
	u64 in_param;
	int qpn;
	int err;

	err = move_all_busy(dev, slave, RES_QP);
	if (err)
		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy "
			  "for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == slave) {
			qpn = qp->com.res_id;
			detach_qp(dev, slave, qp);
			state = qp->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_QP_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					radix_tree_delete(&tracker->res_tree[RES_QP],
							  qp->com.res_id);
					list_del(&qp->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(qp);
					state = 0;
					break;
				case RES_QP_MAPPED:
					if (!valid_reserved(dev, slave, qpn))
						__mlx4_qp_free_icm(dev, qpn);
					state = RES_QP_RESERVED;
					break;
				case RES_QP_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param,
						       qp->local_qpn, 2,
						       MLX4_CMD_2RST_QP,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_qps: failed"
							 " to move slave %d qpn %d to"
							 " reset\n", slave,
							 qp->local_qpn);
					atomic_dec(&qp->rcq->ref_count);
					atomic_dec(&qp->scq->ref_count);
					atomic_dec(&qp->mtt->ref_count);
					if (qp->srq)
						atomic_dec(&qp->srq->ref_count);
					state = RES_QP_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *srq_list =
		&tracker->slave_list[slave].res_list[RES_SRQ];
	struct res_srq *srq;
	struct res_srq *tmp;
	int state;
	u64 in_param;
	int srqn;
	int err;

	err = move_all_busy(dev, slave, RES_SRQ);
	if (err)
		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (srq->com.owner == slave) {
			srqn = srq->com.res_id;
			state = srq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_SRQ_ALLOCATED:
					__mlx4_srq_free_icm(dev, srqn);
					spin_lock_irq(mlx4_tlock(dev));
					radix_tree_delete(&tracker->res_tree[RES_SRQ],
							  srqn);
					list_del(&srq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(srq);
					state = 0;
					break;

				case RES_SRQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, srqn, 1,
						       MLX4_CMD_HW2SW_SRQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_srqs: failed"
							 " to move slave %d srq %d to"
							 " SW ownership\n",
							 slave, srqn);

					atomic_dec(&srq->mtt->ref_count);
					if (srq->cq)
						atomic_dec(&srq->cq->ref_count);
					state = RES_SRQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *cq_list =
		&tracker->slave_list[slave].res_list[RES_CQ];
	struct res_cq *cq;
	struct res_cq *tmp;
	int state;
	u64 in_param;
	int cqn;
	int err;

	err = move_all_busy(dev, slave, RES_CQ);
	if (err)
		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
			cqn = cq->com.res_id;
			state = cq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_CQ_ALLOCATED:
					__mlx4_cq_free_icm(dev, cqn);
					spin_lock_irq(mlx4_tlock(dev));
					radix_tree_delete(&tracker->res_tree[RES_CQ],
							  cqn);
					list_del(&cq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(cq);
					state = 0;
					break;

				case RES_CQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, cqn, 1,
						       MLX4_CMD_HW2SW_CQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_cqs: failed"
							 " to move slave %d cq %d to"
							 " SW ownership\n",
							 slave, cqn);
					atomic_dec(&cq->mtt->ref_count);
					state = RES_CQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mpt_list =
		&tracker->slave_list[slave].res_list[RES_MPT];
	struct res_mpt *mpt;
	struct res_mpt *tmp;
	int state;
	u64 in_param;
	int mptn;
	int err;

	err = move_all_busy(dev, slave, RES_MPT);
	if (err)
		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mpt->com.owner == slave) {
			mptn = mpt->com.res_id;
			state = mpt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MPT_RESERVED:
					__mlx4_mr_release(dev, mpt->key);
					spin_lock_irq(mlx4_tlock(dev));
					radix_tree_delete(&tracker->res_tree[RES_MPT],
							  mptn);
					list_del(&mpt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(mpt);
					state = 0;
					break;

				case RES_MPT_MAPPED:
					__mlx4_mr_free_icm(dev, mpt->key);
					state = RES_MPT_RESERVED;
					break;

				case RES_MPT_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, mptn, 0,
						       MLX4_CMD_HW2SW_MPT,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_mrs: failed"
							 " to move slave %d mpt %d to"
							 " SW ownership\n",
							 slave, mptn);
					if (mpt->mtt)
						atomic_dec(&mpt->mtt->ref_count);
					state = RES_MPT_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *mtt_list =
		&tracker->slave_list[slave].res_list[RES_MTT];
	struct res_mtt *mtt;
	struct res_mtt *tmp;
	int state;
	int base;
	int err;

	err = move_all_busy(dev, slave, RES_MTT);
	if (err)
		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mtt->com.owner == slave) {
			base = mtt->com.res_id;
			state = mtt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MTT_ALLOCATED:
					__mlx4_free_mtt_range(dev, base,
							      mtt->order);
					spin_lock_irq(mlx4_tlock(dev));
					radix_tree_delete(&tracker->res_tree[RES_MTT],
							  base);
					list_del(&mtt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(mtt);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *eq_list =
		&tracker->slave_list[slave].res_list[RES_EQ];
	struct res_eq *eq;
	struct res_eq *tmp;
	int err;
	int state;
	int eqn;
	struct mlx4_cmd_mailbox *mailbox;

	err = move_all_busy(dev, slave, RES_EQ);
	if (err)
		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (eq->com.owner == slave) {
			eqn = eq->com.res_id;
			state = eq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_EQ_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					radix_tree_delete(&tracker->res_tree[RES_EQ],
							  eqn);
					list_del(&eq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(eq);
					state = 0;
					break;

				case RES_EQ_HW:
					mailbox = mlx4_alloc_cmd_mailbox(dev);
					if (IS_ERR(mailbox)) {
						cond_resched();
						continue;
					}
					err = mlx4_cmd_box(dev, slave, 0,
							   eqn & 0xff, 0,
							   MLX4_CMD_HW2SW_EQ,
							   MLX4_CMD_TIME_CLASS_A,
							   MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_eqs: failed"
							 " to move slave %d eqs %d to"
							 " SW ownership\n", slave, eqn);
					mlx4_free_cmd_mailbox(dev, mailbox);
					atomic_dec(&eq->mtt->ref_count);
					state = RES_EQ_RESERVED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
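/*
 * Final cleanup for a dying slave.  The order below matters: QPs are
 * destroyed before the CQs and SRQs they reference, and MRs and EQs
 * before their MTTs, so the ref_count checks in the remove paths see
 * the dependent objects released first; MACs have no tracked
 * dependencies and go first.
 */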
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
	rem_slave_macs(dev, slave);
	rem_slave_qps(dev, slave);
	rem_slave_srqs(dev, slave);
	rem_slave_cqs(dev, slave);
	rem_slave_mrs(dev, slave);
	rem_slave_eqs(dev, slave);
	rem_slave_mtts(dev, slave);
	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}