2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
5 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 #include <linux/sched.h>
37 #include <linux/pci.h>
38 #include <linux/errno.h>
39 #include <linux/kernel.h>
41 #include <linux/slab.h>
42 #include <linux/mlx4/cmd.h>
43 #include <linux/mlx4/qp.h>
48 #define MLX4_MAC_VALID (1ull << 63)
49 #define MLX4_MAC_MASK 0x7fffffffffffffffULL
53 struct list_head list;
59 struct list_head list;
73 struct list_head list;
75 enum mlx4_protocol prot;
79 RES_QP_BUSY = RES_ANY_BUSY,
81 /* QP number was allocated */
84 /* ICM memory for QP context was mapped */
87 /* QP is in hw ownership */
91 static inline const char *qp_states_str(enum res_qp_states state)
94 case RES_QP_BUSY: return "RES_QP_BUSY";
95 case RES_QP_RESERVED: return "RES_QP_RESERVED";
96 case RES_QP_MAPPED: return "RES_QP_MAPPED";
97 case RES_QP_HW: return "RES_QP_HW";
98 default: return "Unknown";
103 struct res_common com;
108 struct list_head mcg_list;
113 enum res_mtt_states {
114 RES_MTT_BUSY = RES_ANY_BUSY,
118 static inline const char *mtt_states_str(enum res_mtt_states state)
121 case RES_MTT_BUSY: return "RES_MTT_BUSY";
122 case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
123 default: return "Unknown";
128 struct res_common com;
133 enum res_mpt_states {
134 RES_MPT_BUSY = RES_ANY_BUSY,
141 struct res_common com;
147 RES_EQ_BUSY = RES_ANY_BUSY,
153 struct res_common com;
158 RES_CQ_BUSY = RES_ANY_BUSY,
164 struct res_common com;
169 enum res_srq_states {
170 RES_SRQ_BUSY = RES_ANY_BUSY,
175 static inline const char *srq_states_str(enum res_srq_states state)
178 case RES_SRQ_BUSY: return "RES_SRQ_BUSY";
179 case RES_SRQ_ALLOCATED: return "RES_SRQ_ALLOCATED";
180 case RES_SRQ_HW: return "RES_SRQ_HW";
181 default: return "Unknown";
186 struct res_common com;
192 enum res_counter_states {
193 RES_COUNTER_BUSY = RES_ANY_BUSY,
194 RES_COUNTER_ALLOCATED,
197 static inline const char *counter_states_str(enum res_counter_states state)
200 case RES_COUNTER_BUSY: return "RES_COUNTER_BUSY";
201 case RES_COUNTER_ALLOCATED: return "RES_COUNTER_ALLOCATED";
202 default: return "Unknown";
207 struct res_common com;
212 static const char *ResourceType(enum mlx4_resource rt)
215 case RES_QP: return "RES_QP";
216 case RES_CQ: return "RES_CQ";
217 case RES_SRQ: return "RES_SRQ";
218 case RES_MPT: return "RES_MPT";
219 case RES_MTT: return "RES_MTT";
220 case RES_MAC: return "RES_MAC";
221 case RES_EQ: return "RES_EQ";
222 case RES_COUNTER: return "RES_COUNTER";
223 default: return "Unknown resource type !!!";
/*
 * Allocate and initialize the master's resource tracker: one per-slave
 * list head per resource type, a per-slave mutex, one radix tree per
 * resource type, and the global tracker spinlock.
 * NOTE(review): this excerpt is line-sampled; some statements (error
 * return on allocation failure, loop-variable declarations) are not
 * visible here.
 */
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
	if (!priv->mfunc.master.res_tracker.slave_list)

	for (i = 0 ; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
	/* trees are populated under the tracker spinlock, hence GFP_ATOMIC */
	for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		INIT_RADIX_TREE(&priv->mfunc.master.res_tracker.res_tree[i],
				GFP_ATOMIC|__GFP_NOWARN);

	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
/*
 * Tear down the resource tracker: release every slave's outstanding
 * resources, then free the per-slave list array.
 */
void mlx4_free_resource_tracker(struct mlx4_dev *dev)
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (priv->mfunc.master.res_tracker.slave_list) {
		for (i = 0 ; i < dev->num_slaves; i++)
			mlx4_delete_all_resources_for_slave(dev, i);

		kfree(priv->mfunc.master.res_tracker.slave_list);
269 static void update_ud_gid(struct mlx4_dev *dev,
270 struct mlx4_qp_context *qp_ctx, u8 slave)
272 u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
274 if (MLX4_QP_ST_UD == ts)
275 qp_ctx->pri_path.mgid_index = 0x80 | slave;
277 mlx4_dbg(dev, "slave %d, new gid index: 0x%x ",
278 slave, qp_ctx->pri_path.mgid_index);
281 static int mpt_mask(struct mlx4_dev *dev)
283 return dev->caps.num_mpts - 1;
286 static void *find_res(struct mlx4_dev *dev, int res_id,
287 enum mlx4_resource type)
289 struct mlx4_priv *priv = mlx4_priv(dev);
291 return radix_tree_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
/*
 * Claim resource @res_id of @type for @slave: verify it exists, is not
 * already busy, and is owned by @slave; then park it in RES_ANY_BUSY,
 * saving the previous state in from_state, and return it via @res.
 * Paired with put_res(), which restores from_state.
 * NOTE(review): excerpt is line-sampled; error assignments and gotos
 * between the visible checks are not shown.
 */
static int get_res(struct mlx4_dev *dev, int slave, int res_id,
		   enum mlx4_resource type,
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	/* busy means another flow is transitioning this resource */
	if (r->state == RES_ANY_BUSY) {
	/* only the owning slave may claim it */
	if (r->owner != slave) {
	r->from_state = r->state;
	r->state = RES_ANY_BUSY;
	mlx4_dbg(dev, "res %s id 0x%x to busy\n",
		 ResourceType(type), r->res_id);
	/* hand the claimed entry back through the opaque out-pointer */
	*((struct res_common **)res) = r;

	spin_unlock_irq(mlx4_tlock(dev));
/*
 * Report which slave owns resource @res_id of @type.  Used by the
 * master to route events back to the owning function.
 * NOTE(review): excerpt is line-sampled; the error path for a missing
 * entry and the *slave assignment are not visible here.
 */
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    int res_id, int *slave)
	struct res_common *r;

	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);

	spin_unlock(mlx4_tlock(dev));
355 static void put_res(struct mlx4_dev *dev, int slave, int res_id,
356 enum mlx4_resource type)
358 struct res_common *r;
360 spin_lock_irq(mlx4_tlock(dev));
361 r = find_res(dev, res_id, type);
363 r->state = r->from_state;
364 spin_unlock_irq(mlx4_tlock(dev));
367 static struct res_common *alloc_qp_tr(int id)
371 ret = kzalloc(sizeof *ret, GFP_KERNEL);
375 ret->com.res_id = id;
376 ret->com.state = RES_QP_RESERVED;
377 INIT_LIST_HEAD(&ret->mcg_list);
378 spin_lock_init(&ret->mcg_spl);
383 static struct res_common *alloc_mtt_tr(int id, int order)
387 ret = kzalloc(sizeof *ret, GFP_KERNEL);
391 ret->com.res_id = id;
393 ret->com.state = RES_MTT_ALLOCATED;
394 atomic_set(&ret->ref_count, 0);
399 static struct res_common *alloc_mpt_tr(int id, int key)
403 ret = kzalloc(sizeof *ret, GFP_KERNEL);
407 ret->com.res_id = id;
408 ret->com.state = RES_MPT_RESERVED;
414 static struct res_common *alloc_eq_tr(int id)
418 ret = kzalloc(sizeof *ret, GFP_KERNEL);
422 ret->com.res_id = id;
423 ret->com.state = RES_EQ_RESERVED;
428 static struct res_common *alloc_cq_tr(int id)
432 ret = kzalloc(sizeof *ret, GFP_KERNEL);
436 ret->com.res_id = id;
437 ret->com.state = RES_CQ_ALLOCATED;
438 atomic_set(&ret->ref_count, 0);
443 static struct res_common *alloc_srq_tr(int id)
447 ret = kzalloc(sizeof *ret, GFP_KERNEL);
451 ret->com.res_id = id;
452 ret->com.state = RES_SRQ_ALLOCATED;
453 atomic_set(&ret->ref_count, 0);
458 static struct res_common *alloc_counter_tr(int id)
460 struct res_counter *ret;
462 ret = kzalloc(sizeof *ret, GFP_KERNEL);
466 ret->com.res_id = id;
467 ret->com.state = RES_COUNTER_ALLOCATED;
472 static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
475 struct res_common *ret;
479 ret = alloc_qp_tr(id);
482 ret = alloc_mpt_tr(id, extra);
485 ret = alloc_mtt_tr(id, extra);
488 ret = alloc_eq_tr(id);
491 ret = alloc_cq_tr(id);
494 ret = alloc_srq_tr(id);
497 printk(KERN_ERR "implementation missing\n");
500 ret = alloc_counter_tr(id);
/*
 * Track @count resources [base, base+count) of @type for @slave:
 * allocate a tracker entry per id, insert each into the type's radix
 * tree and append it to the slave's per-type list.  On any failure all
 * partially-completed work is undone.
 * NOTE(review): excerpt is line-sampled; error assignments, gotos and
 * the undo/free loops' bodies are partially missing.
 */
static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
			 enum mlx4_resource type, int extra)
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct radix_tree_root *root = &tracker->res_tree[type];

	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		/* allocation failed: free entries built so far */
		for (--i; i >= 0; --i)

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		/* id already tracked: conflicting owner, undo everything */
		if (find_res(dev, base + i, type)) {
		err = radix_tree_insert(root, base + i, res_arr[i]);
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	spin_unlock_irq(mlx4_tlock(dev));

	/* undo: delete the ids inserted before the failure */
	for (--i; i >= base; --i)
		radix_tree_delete(&tracker->res_tree[type], i);

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
568 static int remove_qp_ok(struct res_qp *res)
570 if (res->com.state == RES_QP_BUSY)
572 else if (res->com.state != RES_QP_RESERVED)
578 static int remove_mtt_ok(struct res_mtt *res, int order)
580 if (res->com.state == RES_MTT_BUSY ||
581 atomic_read(&res->ref_count)) {
582 printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
584 mtt_states_str(res->com.state),
585 atomic_read(&res->ref_count));
587 } else if (res->com.state != RES_MTT_ALLOCATED)
589 else if (res->order != order)
595 static int remove_mpt_ok(struct res_mpt *res)
597 if (res->com.state == RES_MPT_BUSY)
599 else if (res->com.state != RES_MPT_RESERVED)
605 static int remove_eq_ok(struct res_eq *res)
607 if (res->com.state == RES_MPT_BUSY)
609 else if (res->com.state != RES_MPT_RESERVED)
615 static int remove_counter_ok(struct res_counter *res)
617 if (res->com.state == RES_COUNTER_BUSY)
619 else if (res->com.state != RES_COUNTER_ALLOCATED)
625 static int remove_cq_ok(struct res_cq *res)
627 if (res->com.state == RES_CQ_BUSY)
629 else if (res->com.state != RES_CQ_ALLOCATED)
635 static int remove_srq_ok(struct res_srq *res)
637 if (res->com.state == RES_SRQ_BUSY)
639 else if (res->com.state != RES_SRQ_ALLOCATED)
645 static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
649 return remove_qp_ok((struct res_qp *)res);
651 return remove_cq_ok((struct res_cq *)res);
653 return remove_srq_ok((struct res_srq *)res);
655 return remove_mpt_ok((struct res_mpt *)res);
657 return remove_mtt_ok((struct res_mtt *)res, extra);
661 return remove_eq_ok((struct res_eq *)res);
663 return remove_counter_ok((struct res_counter *)res);
/*
 * Stop tracking resources [base, base+count) of @type for @slave.
 * First pass validates every id (exists, owned by @slave, removable per
 * remove_ok()); second pass deletes from the radix tree and frees.
 * All-or-nothing: any validation failure aborts before deletion.
 * NOTE(review): excerpt is line-sampled; error assignments, gotos and
 * the list_del/kfree in the second loop are not visible.
 */
static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
			 enum mlx4_resource type, int extra)
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	/* pass 1: validate every id before touching anything */
	for (i = base; i < base + count; ++i) {
		r = radix_tree_lookup(&tracker->res_tree[type], i);
		if (r->owner != slave) {
		err = remove_ok(r, type, extra);

	/* pass 2: actually remove */
	for (i = base; i < base + count; ++i) {
		r = radix_tree_lookup(&tracker->res_tree[type], i);
		radix_tree_delete(&tracker->res_tree[type], i);

	spin_unlock_irq(mlx4_tlock(dev));
/*
 * Begin a QP state transition: validate that moving from the current
 * tracker state to @state is legal, then park the entry in RES_QP_BUSY
 * with from_state/to_state recorded.  Completed by res_end_move() or
 * rolled back by res_abort_move().
 * NOTE(review): excerpt is line-sampled; the switch header, error
 * assignments and several case labels are not visible.
 */
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_QP], qpn);
	else if (r->com.owner != slave)

		mlx4_dbg(dev, "%s: failed RES_QP, 0x%x\n",
			 __func__, r->com.res_id);

	case RES_QP_RESERVED:
		/* falling back to RESERVED is valid only from MAPPED
		 * on the free path (!alloc) */
		if (r->com.state == RES_QP_MAPPED && !alloc)

		mlx4_dbg(dev, "failed RES_QP, 0x%x\n", r->com.res_id);

		/* moving to MAPPED: from RESERVED on alloc, or down from HW */
		if ((r->com.state == RES_QP_RESERVED && alloc) ||
		    r->com.state == RES_QP_HW)

		mlx4_dbg(dev, "failed RES_QP, 0x%x\n",

		/* moving to HW requires MAPPED */
		if (r->com.state != RES_QP_MAPPED)

	r->com.from_state = r->com.state;
	r->com.to_state = state;
	r->com.state = RES_QP_BUSY;

	*qp = (struct res_qp *)r;

	spin_unlock_irq(mlx4_tlock(dev));
/*
 * Begin an MPT state transition (same busy/from/to protocol as
 * qp_res_start_move_to): validate the move to @state and park the
 * entry in RES_MPT_BUSY.
 * NOTE(review): excerpt is line-sampled; switch header, error
 * assignments and some case labels are not visible.
 */
static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_MPT], index);
	else if (r->com.owner != slave)

	case RES_MPT_RESERVED:
		/* may fall back to RESERVED only from MAPPED */
		if (r->com.state != RES_MPT_MAPPED)

		/* moving to MAPPED: from RESERVED or down from HW */
		if (r->com.state != RES_MPT_RESERVED &&
		    r->com.state != RES_MPT_HW)

		/* moving to HW requires MAPPED */
		if (r->com.state != RES_MPT_MAPPED)

	r->com.from_state = r->com.state;
	r->com.to_state = state;
	r->com.state = RES_MPT_BUSY;

	*mpt = (struct res_mpt *)r;

	spin_unlock_irq(mlx4_tlock(dev));
/*
 * Begin an EQ state transition (same busy/from/to protocol as the
 * other *_res_start_move_to helpers).
 * NOTE(review): excerpt is line-sampled; switch header, error
 * assignments, the RES_EQ_HW case label and the *eq assignment are not
 * visible.
 */
static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_EQ], index);
	else if (r->com.owner != slave)

	case RES_EQ_RESERVED:
		/* may fall back to RESERVED only from HW */
		if (r->com.state != RES_EQ_HW)

		/* moving to HW requires RESERVED */
		if (r->com.state != RES_EQ_RESERVED)

	r->com.from_state = r->com.state;
	r->com.to_state = state;
	r->com.state = RES_EQ_BUSY;

	spin_unlock_irq(mlx4_tlock(dev));
/*
 * Begin a CQ state transition.  Falling back from HW to ALLOCATED is
 * refused while the CQ is still referenced by QPs (ref_count != 0).
 * NOTE(review): excerpt is line-sampled; switch header, error
 * assignments, the RES_CQ_HW case label and the *cq assignment are not
 * visible.
 */
static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_CQ], cqn);
	else if (r->com.owner != slave)

	case RES_CQ_ALLOCATED:
		if (r->com.state != RES_CQ_HW)
		/* still referenced by QPs: cannot be torn down yet */
		else if (atomic_read(&r->ref_count))

		if (r->com.state != RES_CQ_ALLOCATED)

	r->com.from_state = r->com.state;
	r->com.to_state = state;
	r->com.state = RES_CQ_BUSY;

	spin_unlock_irq(mlx4_tlock(dev));
928 static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
929 enum res_cq_states state, struct res_srq **srq)
931 struct mlx4_priv *priv = mlx4_priv(dev);
932 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
936 spin_lock_irq(mlx4_tlock(dev));
937 r = radix_tree_lookup(&tracker->res_tree[RES_SRQ], index);
940 else if (r->com.owner != slave)
948 case RES_SRQ_ALLOCATED:
949 if (r->com.state != RES_SRQ_HW)
951 else if (atomic_read(&r->ref_count))
956 if (r->com.state != RES_SRQ_ALLOCATED)
965 r->com.from_state = r->com.state;
966 r->com.to_state = state;
967 r->com.state = RES_SRQ_BUSY;
973 spin_unlock_irq(mlx4_tlock(dev));
978 static void res_abort_move(struct mlx4_dev *dev, int slave,
979 enum mlx4_resource type, int id)
981 struct mlx4_priv *priv = mlx4_priv(dev);
982 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
983 struct res_common *r;
985 spin_lock_irq(mlx4_tlock(dev));
986 r = radix_tree_lookup(&tracker->res_tree[type], id);
987 if (r && (r->owner == slave))
988 r->state = r->from_state;
989 spin_unlock_irq(mlx4_tlock(dev));
992 static void res_end_move(struct mlx4_dev *dev, int slave,
993 enum mlx4_resource type, int id)
995 struct mlx4_priv *priv = mlx4_priv(dev);
996 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
997 struct res_common *r;
999 spin_lock_irq(mlx4_tlock(dev));
1000 r = radix_tree_lookup(&tracker->res_tree[type], id);
1001 if (r && (r->owner == slave))
1002 r->state = r->to_state;
1003 spin_unlock_irq(mlx4_tlock(dev));
/* Is @qpn one of the firmware-reserved QP numbers (no ICM alloc needed)? */
static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn);
}
/*
 * ALLOC_RES handler for QPs.  RES_OP_RESERVE reserves a QPN range for
 * the slave; RES_OP_MAP_ICM maps ICM for one QP, moving the tracker
 * entry to RES_QP_MAPPED.
 * NOTE(review): excerpt is line-sampled; switch header, error checks
 * and break statements are not visible.
 */
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
	case RES_OP_RESERVE:
		count = get_param_l(&in_param);
		align = get_param_h(&in_param);
		err = __mlx4_qp_reserve_range(dev, count, align, &base);

		err = add_res_range(dev, slave, base, count, RES_QP, 0);
		/* tracker refused the range: give the QPNs back */
		__mlx4_qp_release_range(dev, base, count);

		set_param_l(out_param, base);

	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		/* FW-reserved QPs are tracked lazily on first use */
		if (valid_reserved(dev, slave, qpn)) {
			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);

		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,

		if (!valid_reserved(dev, slave, qpn)) {
			err = __mlx4_qp_alloc_icm(dev, qpn);
			/* ICM allocation failed: restore previous state */
			res_abort_move(dev, slave, RES_QP, qpn);

		res_end_move(dev, slave, RES_QP, qpn);
/*
 * ALLOC_RES handler for MTT ranges; only RES_OP_RESERVE_AND_MAP is
 * valid.  Allocates a 2^order range and records it for the slave.
 */
static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)

	if (op != RES_OP_RESERVE_AND_MAP)

	order = get_param_l(&in_param);
	base = __mlx4_alloc_mtt_range(dev, order);

	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
	/* tracking failed: return the range to the allocator */
	__mlx4_free_mtt_range(dev, base, order);

	set_param_l(out_param, base);
/*
 * ALLOC_RES handler for MPTs.  RES_OP_RESERVE reserves an MR key and
 * tracks it (the radix id is the key masked by mpt_mask());
 * RES_OP_MAP_ICM maps ICM for the MPT, moving it to RES_MPT_MAPPED.
 * NOTE(review): excerpt is line-sampled; switch header, error checks
 * and break statements are not visible.
 */
static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
	struct res_mpt *mpt;

	case RES_OP_RESERVE:
		index = __mlx4_mr_reserve(dev);

		id = index & mpt_mask(dev);

		/* the full key is kept as type-specific extra data */
		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
		__mlx4_mr_release(dev, index);

		set_param_l(out_param, index);

	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);

		err = __mlx4_mr_alloc_icm(dev, mpt->key);
		/* ICM allocation failed: restore previous state */
		res_abort_move(dev, slave, RES_MPT, id);

		res_end_move(dev, slave, RES_MPT, id);
/*
 * ALLOC_RES handler for CQs: allocate CQ ICM and track the new cqn for
 * the slave (RES_OP_RESERVE_AND_MAP only).
 */
static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)

	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_cq_alloc_icm(dev, &cqn);

		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		/* tracking failed: free the ICM again */
		__mlx4_cq_free_icm(dev, cqn);

		set_param_l(out_param, cqn);
/*
 * ALLOC_RES handler for SRQs: allocate SRQ ICM and track the new srqn
 * for the slave (RES_OP_RESERVE_AND_MAP only).
 */
static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)

	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_srq_alloc_icm(dev, &srqn);

		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		/* tracking failed: free the ICM again */
		__mlx4_srq_free_icm(dev, srqn);

		set_param_l(out_param, srqn);
1188 static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
1190 struct mlx4_priv *priv = mlx4_priv(dev);
1191 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1192 struct mac_res *res;
1194 res = kzalloc(sizeof *res, GFP_KERNEL);
1198 res->port = (u8) port;
1199 list_add_tail(&res->list,
1200 &tracker->slave_list[slave].res_list[RES_MAC]);
1204 static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1207 struct mlx4_priv *priv = mlx4_priv(dev);
1208 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1209 struct list_head *mac_list =
1210 &tracker->slave_list[slave].res_list[RES_MAC];
1211 struct mac_res *res, *tmp;
1213 list_for_each_entry_safe(res, tmp, mac_list, list) {
1214 if (res->mac == mac && res->port == (u8) port) {
1215 list_del(&res->list);
1222 static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1224 struct mlx4_priv *priv = mlx4_priv(dev);
1225 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1226 struct list_head *mac_list =
1227 &tracker->slave_list[slave].res_list[RES_MAC];
1228 struct mac_res *res, *tmp;
1230 list_for_each_entry_safe(res, tmp, mac_list, list) {
1231 list_del(&res->list);
1232 __mlx4_unregister_mac(dev, res->port, res->mac);
/*
 * ALLOC_RES handler for MACs: register the MAC with HW and record it
 * for the slave (RES_OP_RESERVE_AND_MAP only).  The port number is
 * carried in by the caller in the low dword of *out_param.
 * NOTE(review): excerpt is line-sampled; the mac extraction from
 * in_param and the error checks are not visible.
 */
static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)

	if (op != RES_OP_RESERVE_AND_MAP)

	port = get_param_l(out_param);

	err = __mlx4_register_mac(dev, port, mac);
	/* on success the returned value (an index) is passed back */
	set_param_l(out_param, err);

	err = mac_add_to_slave(dev, slave, mac, port);
	/* bookkeeping failed: undo the HW registration */
	__mlx4_unregister_mac(dev, port, mac);
/* ALLOC_RES handler for VLANs.
 * NOTE(review): body not visible in this excerpt — confirm against the
 * full file whether VLANs are tracked or this is a no-op. */
static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
/*
 * Master-side handler for a slave's ALLOC_RES command: dispatch to the
 * per-type allocator keyed on vhcr->in_modifier (resource type), with
 * op = vhcr->op_modifier.
 * NOTE(review): excerpt is line-sampled; case labels, breaks and the
 * default/return are not visible.
 */
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);

		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);

		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);

		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);

		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);

		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);

		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
/*
 * FREE_RES handler for QPs.  RES_OP_RESERVE releases a previously
 * reserved QPN range; RES_OP_MAP_ICM unmaps ICM for one QP, moving it
 * back to RES_QP_RESERVED (FW-reserved QPs are also untracked here).
 * NOTE(review): excerpt is line-sampled; switch header, error checks
 * and breaks are not visible.
 */
static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
	case RES_OP_RESERVE:
		base = get_param_l(&in_param) & 0x7fffff;
		count = get_param_h(&in_param);
		err = rem_res_range(dev, slave, base, count, RES_QP, 0);

		__mlx4_qp_release_range(dev, base, count);
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,

		/* FW-reserved QPs have no slave-allocated ICM to free */
		if (!valid_reserved(dev, slave, qpn))
			__mlx4_qp_free_icm(dev, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

		if (valid_reserved(dev, slave, qpn))
			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
/*
 * FREE_RES handler for MTT ranges (RES_OP_RESERVE_AND_MAP only): stop
 * tracking the range, then return it to the allocator.
 */
static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)

	if (op != RES_OP_RESERVE_AND_MAP)

	base = get_param_l(&in_param);
	order = get_param_h(&in_param);
	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);

	__mlx4_free_mtt_range(dev, base, order);
/*
 * FREE_RES handler for MPTs.  RES_OP_RESERVE untracks and releases the
 * MR key; RES_OP_MAP_ICM frees the MPT's ICM, moving it back to
 * RES_MPT_RESERVED.
 * NOTE(review): excerpt is line-sampled; switch header, error checks
 * and breaks are not visible.
 */
static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
	struct res_mpt *mpt;

	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		/* claim briefly to read the full key before removal */
		err = get_res(dev, slave, id, RES_MPT, &mpt);

		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);

		__mlx4_mr_release(dev, index);

	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);

		__mlx4_mr_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);
/*
 * FREE_RES handler for CQs (RES_OP_RESERVE_AND_MAP only): untrack the
 * cqn, then free its ICM.
 */
static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param, u64 *out_param)

	case RES_OP_RESERVE_AND_MAP:
		cqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);

		__mlx4_cq_free_icm(dev, cqn);
/*
 * FREE_RES handler for SRQs (RES_OP_RESERVE_AND_MAP only): untrack the
 * srqn, then free its ICM.
 */
static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)

	case RES_OP_RESERVE_AND_MAP:
		srqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);

		__mlx4_srq_free_icm(dev, srqn);
/*
 * FREE_RES handler for MACs (RES_OP_RESERVE_AND_MAP only): drop the
 * slave's bookkeeping entry and unregister the MAC from HW.  Port is
 * carried in the low dword of *out_param, the MAC in in_param.
 */
static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)

	case RES_OP_RESERVE_AND_MAP:
		port = get_param_l(out_param);
		mac_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_mac(dev, port, in_param);
/* FREE_RES handler for VLANs.
 * NOTE(review): body not visible in this excerpt — confirm against the
 * full file whether VLANs are tracked or this is a no-op. */
static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
/*
 * Master-side handler for a slave's FREE_RES command: dispatch to the
 * per-type release function keyed on vhcr->in_modifier, mirroring
 * mlx4_ALLOC_RES_wrapper().
 * NOTE(review): excerpt is line-sampled; case labels, some argument
 * lines, breaks and the default/return are not visible.
 */
int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,

		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);

		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,

		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param, &vhcr->out_param);

		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);

		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);

		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
1548 /* ugly but other choices are uglier */
1549 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
1551 return (be32_to_cpu(mpt->flags) >> 9) & 1;
1554 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
1556 return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
1559 static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
1561 return be32_to_cpu(mpt->mtt_sz);
1564 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
1566 return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
1569 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
1571 return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
/*
 * Compute how many MTT entries this QP context needs, from the SQ/RQ
 * log sizes and strides and the page size.  The RQ contributes nothing
 * when the QP uses an SRQ, RSS or XRC.
 * NOTE(review): excerpt is line-sampled; the sq_size/rq_size/total_mem
 * declarations and the final return statement are not visible.
 */
static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
	int page_shift = (qpc->log_page_size & 0x3f) + 12;
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_sride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;

	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

	/* WQE sizes are in 16-byte units, hence the "+ 4" */
	sq_size = 1 << (log_sq_size + log_sq_sride + 4);
	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
1600 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
1601 int size, struct res_mtt *mtt)
1603 int res_start = mtt->com.res_id;
1604 int res_size = (1 << mtt->order);
1606 if (start < res_start || start + size > res_start + res_size)
/*
 * SW2HW_MPT: move a slave's MPT into HW ownership.  For non-physical
 * MRs, validate that the referenced MTT range belongs to the slave and
 * take a reference on it; then forward the command to FW.
 * NOTE(review): excerpt is line-sampled; error checks, gotos, labels
 * and the return statements are not visible.
 */
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt;
	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);

	/* physical MRs carry no MTT to validate */
	phys = mr_phys_mpt(inbox->buf);

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);

	err = check_mtt_range(dev, slave, mtt_base,
			      mr_get_mtt_size(inbox->buf), mtt);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	/* the MPT now pins its MTT range */
	atomic_inc(&mtt->ref_count);
	put_res(dev, slave, mtt->com.res_id, RES_MTT);

	res_end_move(dev, slave, RES_MPT, id);

	/* error unwind: drop the MTT claim and roll back the MPT move */
	put_res(dev, slave, mtt->com.res_id, RES_MTT);

	res_abort_move(dev, slave, RES_MPT, id);
/*
 * HW2SW_MPT: take a slave's MPT out of HW ownership (back to MAPPED)
 * and release its reference on the underlying MTT range.
 * NOTE(review): excerpt is line-sampled; error checks, the mtt NULL
 * guard and return statements are not visible.
 */
int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	/* drop the pin taken by SW2HW_MPT */
	atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);

	res_abort_move(dev, slave, RES_MPT, id);
/*
 * QUERY_MPT: forward to FW only when the slave's MPT is currently in
 * HW ownership; otherwise fail without touching hardware.
 * NOTE(review): excerpt is line-sampled; error checks, gotos and the
 * return statements are not visible.
 */
int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;

	id = index & mpt_mask(dev);
	err = get_res(dev, slave, id, RES_MPT, &mpt);

	if (mpt->com.from_state != RES_MPT_HW) {

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	put_res(dev, slave, id, RES_MPT);
1725 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
1727 return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
1730 static int qp_get_scqn(struct mlx4_qp_context *qpc)
1732 return be32_to_cpu(qpc->cqn_send) & 0xffffff;
1735 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
1737 return be32_to_cpu(qpc->srqn) & 0x1ffffff;
/*
 * RST2INIT_QP: move the slave's QP into HW ownership.  Before handing
 * the command to FW, validate that the MTT range, receive CQ, send CQ
 * and (if used) SRQ referenced by the QP context all belong to the
 * slave; on success take a reference on each so they cannot be freed
 * while the QP exists.
 * NOTE(review): excerpt is line-sampled; error checks, gotos, labels
 * and return statements are not visible.
 */
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_mtt *mtt;

	/* the QP context starts 8 bytes into the mailbox */
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
	int mtt_size = qp_get_mtt_size(qpc);

	int rcqn = qp_get_rcqn(qpc);
	int scqn = qp_get_scqn(qpc);
	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
	struct res_srq *srq;
	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);

	qp->local_qpn = local_qpn;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);

	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);

	/* send CQ may equal receive CQ; claim it only once */
	err = get_res(dev, slave, scqn, RES_CQ, &scq);

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	/* success: pin every resource the QP now references */
	atomic_inc(&mtt->ref_count);

	atomic_inc(&rcq->ref_count);

	atomic_inc(&scq->ref_count);

	put_res(dev, slave, scqn, RES_CQ);

	atomic_inc(&srq->ref_count);
	put_res(dev, slave, srqn, RES_SRQ);

	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);

	/* error unwind: release claims in reverse order, abort the move */
	put_res(dev, slave, srqn, RES_SRQ);

	put_res(dev, slave, scqn, RES_CQ);

	put_res(dev, slave, rcqn, RES_CQ);

	put_res(dev, slave, mtt_base, RES_MTT);

	res_abort_move(dev, slave, RES_QP, qpn);
1832 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
1834 return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
1837 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
1839 int log_eq_size = eqc->log_eq_size & 0x1f;
1840 int page_shift = (eqc->log_page_size & 0x3f) + 12;
1842 if (log_eq_size + 5 < page_shift)
1845 return 1 << (log_eq_size + 5 - page_shift);
1848 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
1850 return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
1853 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
1855 int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
1856 int page_shift = (cqc->log_page_size & 0x3f) + 12;
1858 if (log_cq_size + 5 < page_shift)
1861 return 1 << (log_cq_size + 5 - page_shift);
/*
 * SW2HW_EQ: create an EQ for a slave.  EQ tracker ids are namespaced
 * per slave as (slave << 8) | eqn.  The entry is added on the fly,
 * moved to RES_EQ_HW, its MTT range validated and pinned, and the
 * command forwarded to FW.  Any failure fully unwinds.
 * NOTE(review): excerpt is line-sampled; error checks, gotos, labels
 * and return statements are not visible.
 */
int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
	int eqn = vhcr->in_modifier;
	/* per-slave EQ namespace: id = (slave << 8) | eqn */
	int res_id = (slave << 8) | eqn;
	struct mlx4_eq_context *eqc = inbox->buf;
	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
	int mtt_size = eq_get_mtt_size(eqc);
	struct res_mtt *mtt;

	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);

	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	/* the EQ now pins its MTT range */
	atomic_inc(&mtt->ref_count);

	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);

	/* error unwind */
	put_res(dev, slave, mtt->com.res_id, RES_MTT);

	res_abort_move(dev, slave, RES_EQ, res_id);

	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
/*
 * Find, under the tracker lock, the slave-owned MTT resource that contains
 * the range [start, start + len) and mark it busy (saving its previous state
 * in from_state).  On success *res points at the entry; the caller is
 * expected to release it via put_res().
 * NOTE(review): the found/not-found return paths are elided in this extract.
 */
1913 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
1914 int len, struct res_mtt **res)
1916 struct mlx4_priv *priv = mlx4_priv(dev);
1917 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1918 struct res_mtt *mtt;
1921 spin_lock_irq(mlx4_tlock(dev));
1922 list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
/* check_mtt_range() == 0 means the requested range fits inside this MTT. */
1924 if (!check_mtt_range(dev, slave, start, len, mtt)) {
1926 mtt->com.from_state = mtt->com.state;
1927 mtt->com.state = RES_MTT_BUSY;
1932 spin_unlock_irq(mlx4_tlock(dev));
/*
 * VHCR wrapper for WRITE_MTT: validate that the page list written by the
 * slave falls inside an MTT it owns, then perform the write through the
 * software path (__mlx4_write_mtt) instead of passing the command to FW.
 * Inbox layout: page_list[0] = start index, entries from index 2 onward are
 * big-endian page addresses.
 * NOTE(review): error checks and the final return are elided in this extract.
 */
1937 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
1938 struct mlx4_vhcr *vhcr,
1939 struct mlx4_cmd_mailbox *inbox,
1940 struct mlx4_cmd_mailbox *outbox,
1941 struct mlx4_cmd_info *cmd)
1943 struct mlx4_mtt mtt;
1944 __be64 *page_list = inbox->buf;
/* Aliases the same buffer so entries can be rewritten in host order below. */
1945 u64 *pg_list = (u64 *)page_list;
1947 struct res_mtt *rmtt = NULL;
1948 int start = be64_to_cpu(page_list[0]);
1949 int npages = vhcr->in_modifier;
1952 err = get_containing_mtt(dev, slave, start, npages, &rmtt);
1956 /* Call the SW implementation of write_mtt:
1957 * - Prepare a dummy mtt struct
1958 * - Translate inbox contents to simple addresses in host endianess */
1959 mtt.offset = 0; /* TBD this is broken but I don't handle it since
1960 we don't really use it */
/* Convert each entry in place; "& ~1ULL" clears the low bit — presumably the
 * MTT "present" flag, confirm against the PRM. */
1963 for (i = 0; i < npages; ++i)
1964 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
1966 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
1967 ((u64 *)page_list + 2));
/* Release the busy mark taken by get_containing_mtt(). */
1970 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
/*
 * VHCR wrapper for HW2SW_EQ: move the EQ back to RES_EQ_RESERVED, forward
 * the command to FW, drop the reference the EQ held on its MTT and remove
 * the EQ from the tracker.  On failure the move is aborted.
 * NOTE(review): error checks/labels/braces are elided in this extract.
 */
1975 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
1976 struct mlx4_vhcr *vhcr,
1977 struct mlx4_cmd_mailbox *inbox,
1978 struct mlx4_cmd_mailbox *outbox,
1979 struct mlx4_cmd_info *cmd)
1981 int eqn = vhcr->in_modifier;
/* Same (slave << 8) | eqn id used at SW2HW_EQ time. */
1982 int res_id = eqn | (slave << 8);
1986 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
/* NULL: only verify the MTT is accessible, no pointer needed back. */
1990 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
1994 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1998 atomic_dec(&eq->mtt->ref_count);
1999 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2000 res_end_move(dev, slave, RES_EQ, res_id);
2001 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
/* Error unwind (labels elided): */
2006 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2008 res_abort_move(dev, slave, RES_EQ, res_id);
/*
 * Deliver an event (EQE) to a slave: look up the EQ the slave registered for
 * this event type, verify it is in HW ownership, copy the 28-byte EQE into a
 * command mailbox and issue the GEN_EQE firmware command.  Serialized per
 * slave by gen_eqe_mutex.
 * NOTE(review): early returns and error labels are elided in this extract.
 */
2013 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
2015 struct mlx4_priv *priv = mlx4_priv(dev);
2016 struct mlx4_slave_event_eq_info *event_eq;
2017 struct mlx4_cmd_mailbox *mailbox;
2018 u32 in_modifier = 0;
/* Nothing to do before slave state is set up. */
2023 if (!priv->mfunc.master.slave_state)
2026 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
2028 /* Create the event only if the slave is registered */
2029 if (event_eq->eqn < 0)
2032 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2033 res_id = (slave << 8) | event_eq->eqn;
2034 err = get_res(dev, slave, res_id, RES_EQ, &req)
2038 if (req->com.from_state != RES_EQ_HW) {
2043 mailbox = mlx4_alloc_cmd_mailbox(dev);
2044 if (IS_ERR(mailbox)) {
2045 err = PTR_ERR(mailbox);
/* Command-completion events carry the slave's own token, not the master's. */
2049 if (eqe->type == MLX4_EVENT_TYPE_CMD) {
2051 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
2054 memcpy(mailbox->buf, (u8 *) eqe, 28);
2056 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
2058 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
2059 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
2062 put_res(dev, slave, res_id, RES_EQ);
2063 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2064 mlx4_free_cmd_mailbox(dev, mailbox);
/* Error unwind (labels elided): */
2068 put_res(dev, slave, res_id, RES_EQ);
2071 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
/*
 * VHCR wrapper for QUERY_EQ: verify the slave owns the EQ and that it is in
 * HW ownership, then forward the query to firmware.
 * NOTE(review): error checks and the final return are elided in this extract.
 */
2075 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
2076 struct mlx4_vhcr *vhcr,
2077 struct mlx4_cmd_mailbox *inbox,
2078 struct mlx4_cmd_mailbox *outbox,
2079 struct mlx4_cmd_info *cmd)
2081 int eqn = vhcr->in_modifier;
2082 int res_id = eqn | (slave << 8);
2086 err = get_res(dev, slave, res_id, RES_EQ, &eq);
2090 if (eq->com.from_state != RES_EQ_HW) {
2095 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2098 put_res(dev, slave, res_id, RES_EQ);
/*
 * VHCR wrapper for SW2HW_CQ: move the CQ to RES_CQ_HW, validate the MTT
 * range named by the CQ context, forward the command to FW, and on success
 * take a reference on the MTT.  Error paths release the MTT and abort the
 * state move.
 * NOTE(review): error checks/labels/braces are elided in this extract.
 */
2102 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2103 struct mlx4_vhcr *vhcr,
2104 struct mlx4_cmd_mailbox *inbox,
2105 struct mlx4_cmd_mailbox *outbox,
2106 struct mlx4_cmd_info *cmd)
2109 int cqn = vhcr->in_modifier;
2110 struct mlx4_cq_context *cqc = inbox->buf;
/* Byte address from the context -> MTT entry index. */
2111 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2113 struct res_mtt *mtt;
2115 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
2118 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2121 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2124 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
/* CQ now holds a reference on its MTT until HW2SW_CQ. */
2127 atomic_inc(&mtt->ref_count);
2129 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2130 res_end_move(dev, slave, RES_CQ, cqn);
/* Error unwind (labels elided): */
2134 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2136 res_abort_move(dev, slave, RES_CQ, cqn);
/*
 * VHCR wrapper for HW2SW_CQ: move the CQ back to RES_CQ_ALLOCATED, forward
 * the command, and drop the reference it held on its MTT.
 * NOTE(review): error checks/labels/braces are elided in this extract.
 */
2140 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2141 struct mlx4_vhcr *vhcr,
2142 struct mlx4_cmd_mailbox *inbox,
2143 struct mlx4_cmd_mailbox *outbox,
2144 struct mlx4_cmd_info *cmd)
2147 int cqn = vhcr->in_modifier;
2150 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
2153 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2156 atomic_dec(&cq->mtt->ref_count);
2157 res_end_move(dev, slave, RES_CQ, cqn);
/* Error unwind (label elided): */
2161 res_abort_move(dev, slave, RES_CQ, cqn);
/*
 * VHCR wrapper for QUERY_CQ: verify ownership and HW state, then forward.
 * NOTE(review): error checks and the final return are elided in this extract.
 */
2165 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2166 struct mlx4_vhcr *vhcr,
2167 struct mlx4_cmd_mailbox *inbox,
2168 struct mlx4_cmd_mailbox *outbox,
2169 struct mlx4_cmd_info *cmd)
2171 int cqn = vhcr->in_modifier;
2175 err = get_res(dev, slave, cqn, RES_CQ, &cq);
2179 if (cq->com.from_state != RES_CQ_HW)
2182 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2184 put_res(dev, slave, cqn, RES_CQ);
/*
 * MODIFY_CQ resize path: confirm the CQ still references the MTT we track
 * (orig_mtt must equal cq->mtt), validate the NEW MTT range from the inbox
 * context, forward the command, then transfer the CQ's reference from the
 * old MTT to the new one.
 * NOTE(review): error checks/labels and the final return are elided.
 */
2189 static int handle_resize(struct mlx4_dev *dev, int slave,
2190 struct mlx4_vhcr *vhcr,
2191 struct mlx4_cmd_mailbox *inbox,
2192 struct mlx4_cmd_mailbox *outbox,
2193 struct mlx4_cmd_info *cmd,
2197 struct res_mtt *orig_mtt;
2198 struct res_mtt *mtt;
2199 struct mlx4_cq_context *cqc = inbox->buf;
2200 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2202 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
/* Sanity: the tracked MTT must be the one the CQ currently uses. */
2206 if (orig_mtt != cq->mtt) {
2211 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2215 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2218 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
/* Move the reference: old MTT released, new MTT pinned. */
2221 atomic_dec(&orig_mtt->ref_count);
2222 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2223 atomic_inc(&mtt->ref_count);
2225 put_res(dev, slave, mtt->com.res_id, RES_MTT);
/* Error unwind (labels elided): */
2229 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2231 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
/*
 * VHCR wrapper for MODIFY_CQ: op_modifier 0 selects the resize path
 * (handle_resize), anything else is forwarded straight to firmware.  The CQ
 * must be in HW ownership.
 * NOTE(review): error checks and the final return are elided in this extract.
 */
2237 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2238 struct mlx4_vhcr *vhcr,
2239 struct mlx4_cmd_mailbox *inbox,
2240 struct mlx4_cmd_mailbox *outbox,
2241 struct mlx4_cmd_info *cmd)
2243 int cqn = vhcr->in_modifier;
2247 err = get_res(dev, slave, cqn, RES_CQ, &cq);
2251 if (cq->com.from_state != RES_CQ_HW)
2254 if (vhcr->op_modifier == 0) {
2255 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
2260 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2262 put_res(dev, slave, cqn, RES_CQ);
/* Number of MTT entries an SRQ spans: WQE bytes = 2^(log_srq_size +
 * log_rq_stride + 4), divided by the context page size.
 * NOTE(review): the early-return body of the "if" is elided in this extract. */
2267 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
2269 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
2270 int log_rq_stride = srqc->logstride & 7;
2271 int page_shift = (srqc->log_page_size & 0x3f) + 12;
2273 if (log_srq_size + log_rq_stride + 4 < page_shift)
2276 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
/*
 * VHCR wrapper for SW2HW_SRQ: sanity-check that the srqn in the context
 * matches the in_modifier, move the SRQ to RES_SRQ_HW, validate its MTT
 * range and forward to FW; on success pin the MTT.
 * NOTE(review): error checks/labels/braces are elided in this extract.
 */
2279 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2280 struct mlx4_vhcr *vhcr,
2281 struct mlx4_cmd_mailbox *inbox,
2282 struct mlx4_cmd_mailbox *outbox,
2283 struct mlx4_cmd_info *cmd)
2286 int srqn = vhcr->in_modifier;
2287 struct res_mtt *mtt;
2288 struct res_srq *srq;
2289 struct mlx4_srq_context *srqc = inbox->buf;
2290 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
/* Low 24 bits of state_logsize_srqn carry the SRQ number. */
2292 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
2295 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
2298 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2301 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
2306 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
/* SRQ now holds a reference on its MTT until HW2SW_SRQ. */
2310 atomic_inc(&mtt->ref_count);
2312 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2313 res_end_move(dev, slave, RES_SRQ, srqn);
/* Error unwind (labels elided): */
2317 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2319 res_abort_move(dev, slave, RES_SRQ, srqn);
/*
 * VHCR wrapper for HW2SW_SRQ: move the SRQ back to RES_SRQ_ALLOCATED,
 * forward the command, and drop the MTT reference (and the CQ reference —
 * line 2342 is presumably guarded by "if (srq->cq)", elided here).
 * NOTE(review): error checks/labels/braces are elided in this extract.
 */
2324 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2325 struct mlx4_vhcr *vhcr,
2326 struct mlx4_cmd_mailbox *inbox,
2327 struct mlx4_cmd_mailbox *outbox,
2328 struct mlx4_cmd_info *cmd)
2331 int srqn = vhcr->in_modifier;
2332 struct res_srq *srq;
2334 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
2337 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2340 atomic_dec(&srq->mtt->ref_count);
2342 atomic_dec(&srq->cq->ref_count);
2343 res_end_move(dev, slave, RES_SRQ, srqn);
/* Error unwind (label elided): */
2348 res_abort_move(dev, slave, RES_SRQ, srqn);
/*
 * VHCR wrapper for QUERY_SRQ: verify ownership and HW state, then forward.
 * NOTE(review): error checks and the final return are elided in this extract.
 */
2353 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2354 struct mlx4_vhcr *vhcr,
2355 struct mlx4_cmd_mailbox *inbox,
2356 struct mlx4_cmd_mailbox *outbox,
2357 struct mlx4_cmd_info *cmd)
2360 int srqn = vhcr->in_modifier;
2361 struct res_srq *srq;
2363 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2366 if (srq->com.from_state != RES_SRQ_HW) {
2370 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2372 put_res(dev, slave, srqn, RES_SRQ);
/*
 * VHCR wrapper for ARM_SRQ: same pattern as QUERY_SRQ — verify ownership
 * and HW state, then forward the command to firmware.
 * NOTE(review): error checks and the final return are elided in this extract.
 */
2376 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2377 struct mlx4_vhcr *vhcr,
2378 struct mlx4_cmd_mailbox *inbox,
2379 struct mlx4_cmd_mailbox *outbox,
2380 struct mlx4_cmd_info *cmd)
2383 int srqn = vhcr->in_modifier;
2384 struct res_srq *srq;
2386 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2390 if (srq->com.from_state != RES_SRQ_HW) {
2395 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2397 put_res(dev, slave, srqn, RES_SRQ);
/*
 * Generic QP command wrapper: verify the slave owns the QP and that it is in
 * HW ownership, then forward the command unchanged.  Used by several QP
 * state-transition wrappers.
 * NOTE(review): error checks and the final return are elided in this extract.
 */
2401 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
2402 struct mlx4_vhcr *vhcr,
2403 struct mlx4_cmd_mailbox *inbox,
2404 struct mlx4_cmd_mailbox *outbox,
2405 struct mlx4_cmd_info *cmd)
/* QP numbers are 24-bit wide minus the top flag bit. */
2408 int qpn = vhcr->in_modifier & 0x7fffff;
2411 err = get_res(dev, slave, qpn, RES_QP, &qp);
2414 if (qp->com.from_state != RES_QP_HW) {
2419 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2421 put_res(dev, slave, qpn, RES_QP);
/*
 * VHCR wrapper for INIT2RTR: rewrite the UD GID in the QP context with the
 * slave's value (update_ud_gid) before delegating to the generic QP wrapper.
 * The QP context starts 8 bytes into the inbox buffer.
 */
2425 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
2426 struct mlx4_vhcr *vhcr,
2427 struct mlx4_cmd_mailbox *inbox,
2428 struct mlx4_cmd_mailbox *outbox,
2429 struct mlx4_cmd_info *cmd)
2431 struct mlx4_qp_context *qpc = inbox->buf + 8;
2433 update_ud_gid(dev, qpc, (u8)slave);
2435 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
/*
 * VHCR wrapper for *2RST: move the QP back to RES_QP_MAPPED, forward the
 * command, then drop the references the QP held on its MTT, receive CQ,
 * send CQ (and SRQ — line 2459 is presumably guarded by "if (qp->srq)",
 * elided here).
 * NOTE(review): error checks/labels/braces are elided in this extract.
 */
2438 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
2439 struct mlx4_vhcr *vhcr,
2440 struct mlx4_cmd_mailbox *inbox,
2441 struct mlx4_cmd_mailbox *outbox,
2442 struct mlx4_cmd_info *cmd)
2445 int qpn = vhcr->in_modifier & 0x7fffff;
2448 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
2451 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2455 atomic_dec(&qp->mtt->ref_count);
2456 atomic_dec(&qp->rcq->ref_count);
2457 atomic_dec(&qp->scq->ref_count);
2459 atomic_dec(&qp->srq->ref_count);
2460 res_end_move(dev, slave, RES_QP, qpn);
/* Error unwind (label elided): */
2464 res_abort_move(dev, slave, RES_QP, qpn);
/*
 * Linear search of the QP's multicast-group list for a 16-byte GID match.
 * Caller is expected to hold the QP's mcg lock (it is taken by the callers
 * visible below).  Returns the entry, or NULL (return elided in extract).
 */
2469 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
2470 struct res_qp *rqp, u8 *gid)
2472 struct res_gid *res;
2474 list_for_each_entry(res, &rqp->mcg_list, list) {
2475 if (!memcmp(res->gid, gid, 16))
/*
 * Track a multicast attachment on a QP: allocate a res_gid, and under the
 * QP's mcg spinlock reject duplicates, then record the GID (and protocol —
 * the "res->prot = prot" line is elided) and append to rqp->mcg_list.
 * NOTE(review): the duplicate-error path and kfree on failure are elided.
 */
2481 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2482 u8 *gid, enum mlx4_protocol prot)
2484 struct res_gid *res;
2487 res = kzalloc(sizeof *res, GFP_KERNEL);
2491 spin_lock_irq(&rqp->mcg_spl);
2492 if (find_gid(dev, slave, rqp, gid)) {
2496 memcpy(res->gid, gid, 16);
2498 list_add_tail(&res->list, &rqp->mcg_list);
2501 spin_unlock_irq(&rqp->mcg_spl);
/*
 * Remove a tracked multicast attachment: under the mcg spinlock find the
 * GID entry, fail if absent or the protocol does not match, otherwise unlink
 * it (the kfree of the entry is elided in this extract).
 */
2506 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2507 u8 *gid, enum mlx4_protocol prot)
2509 struct res_gid *res;
2512 spin_lock_irq(&rqp->mcg_spl);
2513 res = find_gid(dev, slave, rqp, gid);
2514 if (!res || res->prot != prot)
2517 list_del(&res->list);
2521 spin_unlock_irq(&rqp->mcg_spl);
/*
 * VHCR wrapper for QP multicast attach/detach.  Decodes from the request:
 * GID (inbox buffer), protocol (in_modifier bits 28..30), attach vs detach
 * (op_modifier), block-loopback flag (in_modifier bit 31) and steering type
 * (bit 1 of gid[7]).  On attach: record the GID in the tracker, then call
 * mlx4_qp_attach_common; if that fails, the tracking entry is removed again
 * (error label elided).  On detach: mlx4_qp_detach_common then untrack.
 * NOTE(review): error checks/labels/braces are elided in this extract.
 */
2526 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
2527 struct mlx4_vhcr *vhcr,
2528 struct mlx4_cmd_mailbox *inbox,
2529 struct mlx4_cmd_mailbox *outbox,
2530 struct mlx4_cmd_info *cmd)
2532 struct mlx4_qp qp; /* dummy for calling attach/detach */
2533 u8 *gid = inbox->buf;
2534 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
2538 int attach = vhcr->op_modifier;
2539 int block_loopback = vhcr->in_modifier >> 31;
2540 u8 steer_type_mask = 2;
2541 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
2543 qpn = vhcr->in_modifier & 0xffffff;
2544 err = get_res(dev, slave, qpn, RES_QP, &rqp);
2550 err = add_mcg_res(dev, slave, rqp, gid, prot);
2554 err = mlx4_qp_attach_common(dev, &qp, gid,
2555 block_loopback, prot, type);
/* attach_common failed: undo the tracking entry added above. */
2559 err = rem_mcg_res(dev, slave, rqp, gid, prot);
2562 err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
2565 put_res(dev, slave, qpn, RES_QP);
2569 /* ignore error return below, already in error */
2570 err1 = rem_mcg_res(dev, slave, rqp, gid, prot);
2572 put_res(dev, slave, qpn, RES_QP);
/* Retry budget used when moving a slave's resources to busy (enum braces
 * elided in this extract).  NOTE(review): not referenced in the visible
 * code — presumably used by move_all_busy's retry loop; confirm. */
2578 BUSY_MAX_RETRIES = 10
/*
 * VHCR wrapper for QUERY_IF_STAT: take the counter resource busy (NULL —
 * only an ownership/validity check), forward the query, then release it.
 * NOTE(review): the error check after get_res and the final return are
 * elided in this extract.
 */
2581 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
2582 struct mlx4_vhcr *vhcr,
2583 struct mlx4_cmd_mailbox *inbox,
2584 struct mlx4_cmd_mailbox *outbox,
2585 struct mlx4_cmd_info *cmd)
2588 int index = vhcr->in_modifier & 0xffff;
2590 err = get_res(dev, slave, index, RES_COUNTER, NULL);
2594 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2595 put_res(dev, slave, index, RES_COUNTER);
/*
 * Detach every multicast group still tracked on a QP (used during slave
 * cleanup): for each recorded GID call mlx4_qp_detach_common with the
 * recorded protocol, unlink the entry (its kfree is elided in this extract).
 * A dummy mlx4_qp carrying only the qpn is enough for detach_common.
 */
2599 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
2601 struct res_gid *rgid;
2602 struct res_gid *tmp;
2604 struct mlx4_qp qp; /* dummy for calling attach/detach */
2606 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
2607 qp.qpn = rqp->local_qpn;
2608 err = mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
2610 list_del(&rgid->list);
/*
 * Single pass over a slave's resource list of the given type, under the
 * tracker lock: each slave-owned entry not already busy is moved to
 * RES_ANY_BUSY (previous state saved in from_state).  Entries found already
 * busy are counted (and optionally warned about when 'print' is set);
 * the busy count is the return value (return elided in this extract).
 */
2615 static int _move_all_busy(struct mlx4_dev *dev, int slave,
2616 enum mlx4_resource type, int print)
2618 struct mlx4_priv *priv = mlx4_priv(dev);
2619 struct mlx4_resource_tracker *tracker =
2620 &priv->mfunc.master.res_tracker;
2621 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
2622 struct res_common *r;
2623 struct res_common *tmp;
2627 spin_lock_irq(mlx4_tlock(dev));
2628 list_for_each_entry_safe(r, tmp, rlist, list) {
2629 if (r->owner == slave) {
2631 if (r->state == RES_ANY_BUSY) {
2634 "%s id 0x%x is busy\n",
2639 r->from_state = r->state;
2640 r->state = RES_ANY_BUSY;
2646 spin_unlock_irq(mlx4_tlock(dev));
/*
 * Repeatedly try to move all of a slave's resources of 'type' to busy,
 * giving up after 5 seconds (time_after against 'begin' — its jiffies
 * initialization is elided), then do one final pass with printing enabled
 * so stuck resources are logged.  Returns the remaining busy count.
 */
2651 static int move_all_busy(struct mlx4_dev *dev, int slave,
2652 enum mlx4_resource type)
2654 unsigned long begin;
2659 busy = _move_all_busy(dev, slave, type, 0);
2660 if (time_after(jiffies, begin + 5 * HZ))
2667 busy = _move_all_busy(dev, slave, type, 1);
/*
 * Slave-cleanup for QPs: after forcing all the slave's QPs busy, walk its QP
 * list and drive each QP down its state machine until freed — HW state is
 * moved to MAPPED via a firmware command (2RST, opcode line elided), MAPPED
 * frees the ICM, RESERVED deletes the tracker entry.  CQ/MTT/SRQ reference
 * counts taken at RST2INIT time are dropped on the HW->MAPPED step.
 * The tracker lock is dropped around each entry so commands can sleep —
 * NOTE(review): this relies on list stability while unlocked; the relock at
 * the loop bottom is visible at line 2736.
 */
2671 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
2673 struct mlx4_priv *priv = mlx4_priv(dev);
2674 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2675 struct list_head *qp_list =
2676 &tracker->slave_list[slave].res_list[RES_QP];
2684 err = move_all_busy(dev, slave, RES_QP);
2686 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy"
2687 "for slave %d\n", slave);
2689 spin_lock_irq(mlx4_tlock(dev));
2690 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
2691 spin_unlock_irq(mlx4_tlock(dev));
2692 if (qp->com.owner == slave) {
2693 qpn = qp->com.res_id;
/* Drop any multicast attachments first. */
2694 detach_qp(dev, slave, qp);
2695 state = qp->com.from_state;
2696 while (state != 0) {
2698 case RES_QP_RESERVED:
2699 spin_lock_irq(mlx4_tlock(dev));
2700 radix_tree_delete(&tracker->res_tree[RES_QP],
2702 list_del(&qp->com.list);
2703 spin_unlock_irq(mlx4_tlock(dev));
/* Reserved firmware-owned QPNs must not be freed back to the bitmap. */
2708 if (!valid_reserved(dev, slave, qpn))
2709 __mlx4_qp_free_icm(dev, qpn);
2710 state = RES_QP_RESERVED;
2714 err = mlx4_cmd(dev, in_param,
2717 MLX4_CMD_TIME_CLASS_A,
2720 mlx4_dbg(dev, "rem_slave_qps: failed"
2721 " to move slave %d qpn %d to"
2724 atomic_dec(&qp->rcq->ref_count);
2725 atomic_dec(&qp->scq->ref_count);
2726 atomic_dec(&qp->mtt->ref_count);
2728 atomic_dec(&qp->srq->ref_count);
2729 state = RES_QP_MAPPED;
2736 spin_lock_irq(mlx4_tlock(dev));
2738 spin_unlock_irq(mlx4_tlock(dev));
/*
 * Slave-cleanup for SRQs: force all busy, then walk the slave's SRQ list
 * driving each SRQ to freed — HW state goes back to ALLOCATED via a
 * firmware command (HW2SW_SRQ, opcode line elided), ALLOCATED frees the ICM
 * and removes the tracker entry.  MTT and CQ references taken at SW2HW time
 * are dropped on the HW->ALLOCATED step.
 * NOTE(review): switch/case braces and several error lines are elided.
 */
2741 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
2743 struct mlx4_priv *priv = mlx4_priv(dev);
2744 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2745 struct list_head *srq_list =
2746 &tracker->slave_list[slave].res_list[RES_SRQ];
2747 struct res_srq *srq;
2748 struct res_srq *tmp;
2755 err = move_all_busy(dev, slave, RES_SRQ);
2757 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
2758 "busy for slave %d\n", slave);
2760 spin_lock_irq(mlx4_tlock(dev));
2761 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
2762 spin_unlock_irq(mlx4_tlock(dev));
2763 if (srq->com.owner == slave) {
2764 srqn = srq->com.res_id;
2765 state = srq->com.from_state;
2766 while (state != 0) {
2768 case RES_SRQ_ALLOCATED:
2769 __mlx4_srq_free_icm(dev, srqn);
2770 spin_lock_irq(mlx4_tlock(dev));
2771 radix_tree_delete(&tracker->res_tree[RES_SRQ],
2773 list_del(&srq->com.list);
2774 spin_unlock_irq(mlx4_tlock(dev));
2781 err = mlx4_cmd(dev, in_param, srqn, 1,
2783 MLX4_CMD_TIME_CLASS_A,
2786 mlx4_dbg(dev, "rem_slave_srqs: failed"
2787 " to move slave %d srq %d to"
2791 atomic_dec(&srq->mtt->ref_count);
2793 atomic_dec(&srq->cq->ref_count);
2794 state = RES_SRQ_ALLOCATED;
2802 spin_lock_irq(mlx4_tlock(dev));
2804 spin_unlock_irq(mlx4_tlock(dev));
/*
 * Slave-cleanup for CQs: force all busy, then free each slave-owned CQ that
 * is no longer referenced (ref_count == 0 — QPs/SRQs pointing at it must be
 * cleaned first).  HW state returns to ALLOCATED via firmware (HW2SW_CQ,
 * opcode line elided); ALLOCATED frees ICM and removes the tracker entry.
 * NOTE(review): switch/case braces and several error lines are elided.
 */
2807 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
2809 struct mlx4_priv *priv = mlx4_priv(dev);
2810 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2811 struct list_head *cq_list =
2812 &tracker->slave_list[slave].res_list[RES_CQ];
2821 err = move_all_busy(dev, slave, RES_CQ);
2823 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
2824 "busy for slave %d\n", slave);
2826 spin_lock_irq(mlx4_tlock(dev));
2827 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
2828 spin_unlock_irq(mlx4_tlock(dev));
/* Skip CQs still referenced by other resources. */
2829 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
2830 cqn = cq->com.res_id;
2831 state = cq->com.from_state;
2832 while (state != 0) {
2834 case RES_CQ_ALLOCATED:
2835 __mlx4_cq_free_icm(dev, cqn);
2836 spin_lock_irq(mlx4_tlock(dev));
2837 radix_tree_delete(&tracker->res_tree[RES_CQ],
2839 list_del(&cq->com.list);
2840 spin_unlock_irq(mlx4_tlock(dev));
2847 err = mlx4_cmd(dev, in_param, cqn, 1,
2849 MLX4_CMD_TIME_CLASS_A,
2852 mlx4_dbg(dev, "rem_slave_cqs: failed"
2853 " to move slave %d cq %d to"
2856 atomic_dec(&cq->mtt->ref_count);
2857 state = RES_CQ_ALLOCATED;
2865 spin_lock_irq(mlx4_tlock(dev));
2867 spin_unlock_irq(mlx4_tlock(dev));
/*
 * Slave-cleanup for MPTs (memory regions): force all busy, then drive each
 * slave-owned MPT down its states — HW goes to MAPPED via firmware (HW2SW_MPT,
 * opcode line elided) and drops the MTT reference; MAPPED frees the ICM;
 * RESERVED releases the key and removes the tracker entry.
 * NOTE(review): switch/case braces and several error lines are elided.
 */
2870 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
2872 struct mlx4_priv *priv = mlx4_priv(dev);
2873 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2874 struct list_head *mpt_list =
2875 &tracker->slave_list[slave].res_list[RES_MPT];
2876 struct res_mpt *mpt;
2877 struct res_mpt *tmp;
2884 err = move_all_busy(dev, slave, RES_MPT);
2886 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
2887 "busy for slave %d\n", slave);
2889 spin_lock_irq(mlx4_tlock(dev));
2890 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
2891 spin_unlock_irq(mlx4_tlock(dev));
2892 if (mpt->com.owner == slave) {
2893 mptn = mpt->com.res_id;
2894 state = mpt->com.from_state;
2895 while (state != 0) {
2897 case RES_MPT_RESERVED:
2898 __mlx4_mr_release(dev, mpt->key);
2899 spin_lock_irq(mlx4_tlock(dev));
2900 radix_tree_delete(&tracker->res_tree[RES_MPT],
2902 list_del(&mpt->com.list);
2903 spin_unlock_irq(mlx4_tlock(dev));
2908 case RES_MPT_MAPPED:
2909 __mlx4_mr_free_icm(dev, mpt->key);
2910 state = RES_MPT_RESERVED;
2915 err = mlx4_cmd(dev, in_param, mptn, 0,
2917 MLX4_CMD_TIME_CLASS_A,
2920 mlx4_dbg(dev, "rem_slave_mrs: failed"
2921 " to move slave %d mpt %d to"
2925 atomic_dec(&mpt->mtt->ref_count);
2926 state = RES_MPT_MAPPED;
2933 spin_lock_irq(mlx4_tlock(dev));
2935 spin_unlock_irq(mlx4_tlock(dev));
/*
 * Slave-cleanup for MTT ranges: force all busy, then free each slave-owned
 * range (__mlx4_free_mtt_range — the order/size argument line is elided)
 * and drop it from the tracker.  Runs after the MR/QP/CQ/EQ cleanups so the
 * MTT reference counts have been released.
 * NOTE(review): switch/case braces and several lines are elided.
 */
2938 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
2940 struct mlx4_priv *priv = mlx4_priv(dev);
2941 struct mlx4_resource_tracker *tracker =
2942 &priv->mfunc.master.res_tracker;
2943 struct list_head *mtt_list =
2944 &tracker->slave_list[slave].res_list[RES_MTT];
2945 struct res_mtt *mtt;
2946 struct res_mtt *tmp;
2952 err = move_all_busy(dev, slave, RES_MTT);
2954 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
2955 "busy for slave %d\n", slave);
2957 spin_lock_irq(mlx4_tlock(dev));
2958 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
2959 spin_unlock_irq(mlx4_tlock(dev));
2960 if (mtt->com.owner == slave) {
2961 base = mtt->com.res_id;
2962 state = mtt->com.from_state;
2963 while (state != 0) {
2965 case RES_MTT_ALLOCATED:
2966 __mlx4_free_mtt_range(dev, base,
2968 spin_lock_irq(mlx4_tlock(dev));
2969 radix_tree_delete(&tracker->res_tree[RES_MTT],
2971 list_del(&mtt->com.list);
2972 spin_unlock_irq(mlx4_tlock(dev));
2982 spin_lock_irq(mlx4_tlock(dev));
2984 spin_unlock_irq(mlx4_tlock(dev));
/*
 * Slave-cleanup for EQs: force all busy, then drive each slave-owned EQ to
 * freed — HW state is taken down via mlx4_cmd_box (HW2SW_EQ, opcode line
 * elided; the mailbox receives the query output) and drops the MTT
 * reference; RESERVED removes the tracker entry.
 * NOTE(review): switch/case braces and several lines are elided.
 */
2987 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
2989 struct mlx4_priv *priv = mlx4_priv(dev);
2990 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2991 struct list_head *eq_list =
2992 &tracker->slave_list[slave].res_list[RES_EQ];
2999 struct mlx4_cmd_mailbox *mailbox;
3001 err = move_all_busy(dev, slave, RES_EQ);
3003 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
3004 "busy for slave %d\n", slave);
3006 spin_lock_irq(mlx4_tlock(dev));
3007 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
3008 spin_unlock_irq(mlx4_tlock(dev));
3009 if (eq->com.owner == slave) {
3010 eqn = eq->com.res_id;
3011 state = eq->com.from_state;
3012 while (state != 0) {
3014 case RES_EQ_RESERVED:
3015 spin_lock_irq(mlx4_tlock(dev));
3016 radix_tree_delete(&tracker->res_tree[RES_EQ],
3018 list_del(&eq->com.list);
3019 spin_unlock_irq(mlx4_tlock(dev));
3025 mailbox = mlx4_alloc_cmd_mailbox(dev);
3026 if (IS_ERR(mailbox)) {
/* Tracker id is (slave << 8) | eqn elsewhere; only the low byte of eqn is
 * passed to firmware (in_modifier construction elided here). */
3030 err = mlx4_cmd_box(dev, slave, 0,
3033 MLX4_CMD_TIME_CLASS_A,
3035 mlx4_dbg(dev, "rem_slave_eqs: failed"
3036 " to move slave %d eqs %d to"
3037 " SW ownership\n", slave, eqn);
3038 mlx4_free_cmd_mailbox(dev, mailbox);
3040 atomic_dec(&eq->mtt->ref_count);
3041 state = RES_EQ_RESERVED;
3050 spin_lock_irq(mlx4_tlock(dev));
3052 spin_unlock_irq(mlx4_tlock(dev));
/*
 * Remove everything a slave owns, in dependency order: MACs first, then
 * QPs (which drop CQ/SRQ/MTT references), SRQs, CQs, MRs, EQs, and finally
 * the MTT ranges everything else referenced.  The whole sweep is serialized
 * by the slave's resource-tracker mutex.
 */
3055 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
3057 struct mlx4_priv *priv = mlx4_priv(dev);
3059 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3061 rem_slave_macs(dev, slave);
3062 rem_slave_qps(dev, slave);
3063 rem_slave_srqs(dev, slave);
3064 rem_slave_cqs(dev, slave);
3065 rem_slave_mrs(dev, slave);
3066 rem_slave_eqs(dev, slave);
3067 rem_slave_mtts(dev, slave);
3068 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);