/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 *	All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4.h"
#include "fw.h"

#define MLX4_MAC_VALID		(1ull << 63)
#define MLX4_MAC_MASK		0x7fffffffffffffffULL
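/*
 * Resource tracker overview (a summary of the code below): the master
 * function tracks every HCA resource (QPs, CQs, SRQs, MPTs, MTTs, EQs,
 * MACs, counters) that a slave allocates through the wrapped firmware
 * commands.  Each resource is represented by a res_common kept both in
 * a per-type radix tree (keyed by resource id) for fast lookup and on a
 * per-slave list for bulk cleanup when the slave goes away.  A resource
 * is parked in a BUSY state while a command that touches it is in
 * flight, which serializes conflicting operations without holding the
 * tracker spinlock across firmware commands.
 */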
struct mac_res {
	struct list_head list;
	u64 mac;
	u8 port;
};

struct res_common {
	struct list_head list;
	u32 res_id;
	int owner;
	int state;
	int from_state;
	int to_state;
	int removing;
};

enum {
	RES_ANY_BUSY = 1
};

struct res_gid {
	struct list_head list;
	u8 gid[16];
	enum mlx4_protocol prot;
};
enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

static inline const char *qp_states_str(enum res_qp_states state)
{
	switch (state) {
	case RES_QP_BUSY: return "RES_QP_BUSY";
	case RES_QP_RESERVED: return "RES_QP_RESERVED";
	case RES_QP_MAPPED: return "RES_QP_MAPPED";
	case RES_QP_HW: return "RES_QP_HW";
	default: return "Unknown";
	}
}
struct res_qp {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *rcq;
	struct res_cq *scq;
	struct res_srq *srq;
	struct list_head mcg_list;
	spinlock_t mcg_spl;
	int local_qpn;
};
enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}
struct res_mtt {
	struct res_common com;
	int order;
	atomic_t ref_count;
};
enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common com;
	struct res_mtt *mtt;
	int key;
};
enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common com;
	struct res_mtt *mtt;
};
enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common com;
	struct res_mtt *mtt;
	atomic_t ref_count;
};
enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

static inline const char *srq_states_str(enum res_srq_states state)
{
	switch (state) {
	case RES_SRQ_BUSY: return "RES_SRQ_BUSY";
	case RES_SRQ_ALLOCATED: return "RES_SRQ_ALLOCATED";
	case RES_SRQ_HW: return "RES_SRQ_HW";
	default: return "Unknown";
	}
}

struct res_srq {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *cq;
	atomic_t ref_count;
};
enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

static inline const char *counter_states_str(enum res_counter_states state)
{
	switch (state) {
	case RES_COUNTER_BUSY: return "RES_COUNTER_BUSY";
	case RES_COUNTER_ALLOCATED: return "RES_COUNTER_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_counter {
	struct res_common com;
	int port;
};
static const char *ResourceType(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	default: return "Unknown resource type !!!";
	}
}
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;
	int t;

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0 ; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		INIT_RADIX_TREE(&priv->mfunc.master.res_tracker.res_tree[i],
				GFP_ATOMIC|__GFP_NOWARN);

	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;
}
void mlx4_free_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		for (i = 0 ; i < dev->num_slaves; i++)
			mlx4_delete_all_resources_for_slave(dev, i);

		kfree(priv->mfunc.master.res_tracker.slave_list);
		priv->mfunc.master.res_tracker.slave_list = NULL;
	}
}
static void update_ud_gid(struct mlx4_dev *dev,
			  struct mlx4_qp_context *qp_ctx, u8 slave)
{
	u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;

	if (MLX4_QP_ST_UD == ts)
		qp_ctx->pri_path.mgid_index = 0x80 | slave;

	mlx4_dbg(dev, "slave %d, new gid index: 0x%x\n",
		 slave, qp_ctx->pri_path.mgid_index);
}
static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}
static void *find_res(struct mlx4_dev *dev, int res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return radix_tree_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				 res_id);
}
static int get_res(struct mlx4_dev *dev, int slave, int res_id,
		   enum mlx4_resource type,
		   void *res)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENONET;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;
	mlx4_dbg(dev, "res %s id 0x%x to busy\n",
		 ResourceType(type), r->res_id);

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}
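/*
 * get_res()/put_res() act as a lightweight per-resource lock: get_res()
 * verifies ownership, saves the current state in from_state and parks
 * the resource in RES_ANY_BUSY; put_res() restores from_state.  Callers
 * pair them around any firmware command that relies on the resource
 * staying valid while the tracker spinlock is dropped.
 */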
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    int res_id, int *slave)
{
	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}
static void put_res(struct mlx4_dev *dev, int slave, int res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r)
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	ret->local_qpn = id;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);

	return &ret->com;
}
static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}
static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}
static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}
static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}
static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}
static struct res_common *alloc_counter_tr(int id)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;

	return &ret->com;
}
static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		printk(KERN_ERR "implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id);
		break;
	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}
static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct radix_tree_root *root = &tracker->res_tree[type];

	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = radix_tree_insert(root, base + i, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	/* entries 0..i-1 were inserted and list-linked; unwind both */
	for (--i; i >= 0; --i) {
		radix_tree_delete(&tracker->res_tree[type], base + i);
		list_del(&res_arr[i]->list);
	}

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}
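/*
 * Note the two-phase structure of add_res_range(): all tracker entries
 * are allocated outside the lock first, then inserted under a single
 * spin_lock_irq so a range either becomes visible in full or is rolled
 * back in full (the undo path deletes radix keys base..base+i-1 and
 * unlinks the same entries from the slave list).
 */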
static int remove_qp_ok(struct res_qp *res)
{
	if (res->com.state == RES_QP_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_QP_RESERVED)
		return -EPERM;

	return 0;
}
static int remove_mtt_ok(struct res_mtt *res, int order)
{
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
		       __func__, __LINE__,
		       mtt_states_str(res->com.state),
		       atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_MTT_ALLOCATED)
		return -EPERM;
	else if (res->order != order)
		return -EINVAL;

	return 0;
}
static int remove_mpt_ok(struct res_mpt *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}
static int remove_eq_ok(struct res_eq *res)
{
	if (res->com.state == RES_EQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_EQ_RESERVED)
		return -EPERM;

	return 0;
}
static int remove_counter_ok(struct res_counter *res)
{
	if (res->com.state == RES_COUNTER_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_COUNTER_ALLOCATED)
		return -EPERM;

	return 0;
}
static int remove_cq_ok(struct res_cq *res)
{
	if (res->com.state == RES_CQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_CQ_ALLOCATED)
		return -EPERM;

	return 0;
}
static int remove_srq_ok(struct res_srq *res)
{
	if (res->com.state == RES_SRQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_SRQ_ALLOCATED)
		return -EPERM;

	return 0;
}
static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
	switch (type) {
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_MAC:
		return -ENOSYS;
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	default:
		return -EINVAL;
	}
}
static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = radix_tree_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = radix_tree_lookup(&tracker->res_tree[type], i);
		radix_tree_delete(&tracker->res_tree[type], i);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_qp *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%x\n",
				 __func__, r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%x\n", r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%x\n",
					 r->com.res_id);
				err = -EINVAL;
			}
			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = (struct res_qp *)r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
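/*
 * The *_res_start_move_to() helpers below all implement the same
 * three-step transition: validate the requested target against the
 * current state under the tracker lock, park the resource in BUSY with
 * from_state/to_state recorded, and then let the caller either commit
 * with res_end_move() or roll back with res_abort_move() once the
 * firmware command has completed or failed.
 */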
static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mpt *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_MPT], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_MPT_BUSY:
			err = -EINVAL;
			break;

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		case RES_MPT_MAPPED:
			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)
				err = -EINVAL;
			break;

		case RES_MPT_HW:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;
			if (mpt)
				*mpt = (struct res_mpt *)r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_eq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_EQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_EQ_BUSY:
			err = -EINVAL;
			break;

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)
				err = -EINVAL;
			break;

		case RES_EQ_HW:
			if (r->com.state != RES_EQ_RESERVED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;
			if (eq)
				*eq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_cq *r;
	int err;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_CQ], cqn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_CQ_BUSY:
			err = -EBUSY;
			break;

		case RES_CQ_ALLOCATED:
			if (r->com.state != RES_CQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			else
				err = 0;
			break;

		case RES_CQ_HW:
			if (r->com.state != RES_CQ_ALLOCATED)
				err = -EINVAL;
			else
				err = 0;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_CQ_BUSY;
			if (cq)
				*cq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_srq_states state, struct res_srq **srq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_srq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_SRQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_SRQ_BUSY:
			err = -EINVAL;
			break;

		case RES_SRQ_ALLOCATED:
			if (r->com.state != RES_SRQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			break;

		case RES_SRQ_HW:
			if (r->com.state != RES_SRQ_ALLOCATED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_SRQ_BUSY;
			if (srq)
				*srq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static void res_abort_move(struct mlx4_dev *dev, int slave,
			   enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
static void res_end_move(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->to_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn);
}
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err;
	int count;
	int align;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		count = get_param_l(&in_param);
		align = get_param_h(&in_param);
		err = __mlx4_qp_reserve_range(dev, count, align, &base);
		if (err)
			return err;

		err = add_res_range(dev, slave, base, count, RES_QP, 0);
		if (err) {
			__mlx4_qp_release_range(dev, base, count);
			return err;
		}
		set_param_l(out_param, base);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		if (valid_reserved(dev, slave, qpn)) {
			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
			if (err)
				return err;
		}

		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
					   NULL, 1);
		if (err)
			return err;

		if (!valid_reserved(dev, slave, qpn)) {
			err = __mlx4_qp_alloc_icm(dev, qpn);
			if (err) {
				res_abort_move(dev, slave, RES_QP, qpn);
				return err;
			}
		}

		res_end_move(dev, slave, RES_QP, qpn);
		break;

	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	order = get_param_l(&in_param);
	base = __mlx4_alloc_mtt_range(dev, order);
	if (base == -1)
		return -ENOMEM;

	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
	if (err)
		__mlx4_free_mtt_range(dev, base, order);
	else
		set_param_l(out_param, base);

	return err;
}
static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = __mlx4_mr_reserve(dev);
		if (index == -1)
			break;
		id = index & mpt_mask(dev);

		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
		if (err) {
			__mlx4_mr_release(dev, index);
			break;
		}
		set_param_l(out_param, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);
		if (err)
			return err;

		err = __mlx4_mr_alloc_icm(dev, mpt->key);
		if (err) {
			res_abort_move(dev, slave, RES_MPT, id);
			return err;
		}

		res_end_move(dev, slave, RES_MPT, id);
		break;
	}
	return err;
}
static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_cq_alloc_icm(dev, &cqn);
		if (err)
			break;

		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err) {
			__mlx4_cq_free_icm(dev, cqn);
			break;
		}

		set_param_l(out_param, cqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}
static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_srq_alloc_icm(dev, &srqn);
		if (err)
			break;

		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err) {
			__mlx4_srq_free_icm(dev, srqn);
			break;
		}

		set_param_l(out_param, srqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}
static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct mac_res *res;

	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res)
		return -ENOMEM;
	res->mac = mac;
	res->port = (u8) port;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_MAC]);
	return 0;
}
static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
			       int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			list_del(&res->list);
			kfree(res);
			break;
		}
	}
}
static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		list_del(&res->list);
		__mlx4_unregister_mac(dev, res->port, res->mac);
		kfree(res);
	}
}
static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int port;
	u64 mac;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	port = get_param_l(out_param);
	mac = in_param;

	err = __mlx4_register_mac(dev, port, mac);
	if (err >= 0) {
		set_param_l(out_param, err);
		err = 0;
	}

	if (!err) {
		err = mac_add_to_slave(dev, slave, mac, port);
		if (err)
			__mlx4_unregister_mac(dev, port, mac);
	}
	return err;
}
static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	/* VLANs are not tracked yet */
	return 0;
}
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
	case RES_QP:
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MTT:
		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_CQ:
		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_VLAN:
		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param)
{
	int err;
	int count;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		base = get_param_l(&in_param) & 0x7fffff;
		count = get_param_h(&in_param);
		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
		if (err)
			break;
		__mlx4_qp_release_range(dev, base, count);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
					   NULL, 0);
		if (err)
			return err;

		if (!valid_reserved(dev, slave, qpn))
			__mlx4_qp_free_icm(dev, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

		if (valid_reserved(dev, slave, qpn))
			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	base = get_param_l(&in_param);
	order = get_param_h(&in_param);
	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
	if (!err)
		__mlx4_free_mtt_range(dev, base, order);
	return err;
}
static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = get_res(dev, slave, id, RES_MPT, &mpt);
		if (err)
			break;
		index = mpt->key;
		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
		if (err)
			break;
		__mlx4_mr_release(dev, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);
		if (err)
			return err;

		__mlx4_mr_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);
		return err;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		cqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err)
			break;

		__mlx4_cq_free_icm(dev, cqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		srqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err)
			break;

		__mlx4_srq_free_icm(dev, srqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int port;
	int err = 0;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		port = get_param_l(out_param);
		mac_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_mac(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	/* VLANs are not tracked yet */
	return 0;
}
int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err = -EINVAL;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
	case RES_QP:
		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param);
		break;

	case RES_MTT:
		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param);
		break;

	case RES_CQ:
		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_VLAN:
		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	default:
		break;
	}
	return err;
}
/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
	return (be32_to_cpu(mpt->flags) >> 9) & 1;
}

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
{
	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
}

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->mtt_sz);
}
static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
{
	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}
static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
	int page_shift = (qpc->log_page_size & 0x3f) + 12;
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_stride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
	int sq_size;
	int rq_size;
	int total_mem;
	int total_pages;
	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
	total_pages =
		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
				   page_shift);

	return total_pages;
}
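/*
 * Worked example for qp_get_mtt_size(): with log_sq_size = 6 and
 * log_sq_stride = 2 (64 WQEs of 64 bytes, so sq_size = 1 << (6 + 2 + 4)
 * = 4096), no receive queue (srq/rss/xrc set), page_offset = 0 and a
 * 4KB page (page_shift = 12), total_mem is 4096 and the QP needs
 * roundup_pow_of_two(4096 >> 12) = 1 MTT entry.
 */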
static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
			   int size, struct res_mtt *mtt)
{
	int res_start = mtt->com.res_id;
	int res_size = (1 << mtt->order);

	if (start < res_start || start + size > res_start + res_size)
		return -EPERM;
	return 0;
}
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt;
	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
	int phys;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
	if (err)
		return err;

	phys = mr_phys_mpt(inbox->buf);
	if (!phys) {
		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
		if (err)
			goto ex_abort;

		err = check_mtt_range(dev, slave, mtt_base,
				      mr_get_mtt_size(inbox->buf), mtt);
		if (err)
			goto ex_put;

		mpt->mtt = mtt;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	if (!phys) {
		atomic_inc(&mtt->ref_count);
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
	}

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_put:
	if (!phys)
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}
int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	if (mpt->mtt)
		atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}
int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = get_res(dev, slave, id, RES_MPT, &mpt);
	if (err)
		return err;

	if (mpt->com.from_state != RES_MPT_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

out:
	put_res(dev, slave, id, RES_MPT);
	return err;
}
static int qp_get_rcqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
}

static int qp_get_scqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
}

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_mtt *mtt;
	struct res_qp *qp;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
	int mtt_size = qp_get_mtt_size(qpc);
	struct res_cq *rcq;
	struct res_cq *scq;
	int rcqn = qp_get_rcqn(qpc);
	int scqn = qp_get_scqn(qpc);
	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
	struct res_srq *srq;
	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
	if (err)
		return err;
	qp->local_qpn = local_qpn;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto ex_put_mtt;

	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
	if (err)
		goto ex_put_mtt;

	if (scqn != rcqn) {
		err = get_res(dev, slave, scqn, RES_CQ, &scq);
		if (err)
			goto ex_put_rcq;
	} else
		scq = rcq;

	if (use_srq) {
		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
		if (err)
			goto ex_put_scq;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_srq;
	atomic_inc(&mtt->ref_count);
	qp->mtt = mtt;
	atomic_inc(&rcq->ref_count);
	qp->rcq = rcq;
	atomic_inc(&scq->ref_count);
	qp->scq = scq;

	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);

	if (use_srq) {
		atomic_inc(&srq->ref_count);
		put_res(dev, slave, srqn, RES_SRQ);
		qp->srq = srq;
	}
	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);

	return 0;

ex_put_srq:
	if (use_srq)
		put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
	put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
	put_res(dev, slave, mtt_base, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}
static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
{
	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
}

static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
{
	int log_eq_size = eqc->log_eq_size & 0x1f;
	int page_shift = (eqc->log_page_size & 0x3f) + 12;

	if (log_eq_size + 5 < page_shift)
		return 1;

	return 1 << (log_eq_size + 5 - page_shift);
}

static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
{
	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}

static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
{
	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
	int page_shift = (cqc->log_page_size & 0x3f) + 12;

	if (log_cq_size + 5 < page_shift)
		return 1;

	return 1 << (log_cq_size + 5 - page_shift);
}
int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int eqn = vhcr->in_modifier;
	int res_id = (slave << 8) | eqn;
	struct mlx4_eq_context *eqc = inbox->buf;
	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
	int mtt_size = eq_get_mtt_size(eqc);
	struct res_eq *eq;
	struct res_mtt *mtt;

	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	if (err)
		return err;
	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
	if (err)
		goto out_add;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto out_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;

	atomic_inc(&mtt->ref_count);
	eq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	return err;
}
static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
			      int len, struct res_mtt **res)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mtt *mtt;
	int err = -EINVAL;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
			    com.list) {
		if (!check_mtt_range(dev, slave, start, len, mtt)) {
			*res = mtt;
			mtt->com.from_state = mtt->com.state;
			mtt->com.state = RES_MTT_BUSY;
			err = 0;
			break;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
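/*
 * WRITE_MTT inbox layout assumed by the wrapper below: the first 64-bit
 * word carries the starting MTT index, the second is reserved, and the
 * page addresses follow from entry 2 onwards.  The low "present" bit of
 * each address is cleared before handing them to __mlx4_write_mtt(),
 * which expects plain host-endian addresses.
 */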
int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_mtt mtt;
	__be64 *page_list = inbox->buf;
	u64 *pg_list = (u64 *)page_list;
	int i;
	struct res_mtt *rmtt = NULL;
	int start = be64_to_cpu(page_list[0]);
	int npages = vhcr->in_modifier;

	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
	if (err)
		return err;

	/* Call the SW implementation of write_mtt:
	 * - Prepare a dummy mtt struct
	 * - Translate inbox contents to simple addresses in host endianness */
	mtt.offset = 0;	/* TBD this is broken but I don't handle it since
			   we don't really use it */
	mtt.order = 0;
	mtt.page_shift = 0;
	for (i = 0; i < npages; ++i)
		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);

	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
			       ((u64 *)page_list + 2));

	if (rmtt)
		put_res(dev, slave, rmtt->com.res_id, RES_MTT);

	return err;
}
int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
	if (err)
		return err;

	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
	if (err)
		goto ex_abort;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	atomic_dec(&eq->mtt->ref_count);
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);

	return 0;

ex_put:
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_EQ, res_id);

	return err;
}
int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq;
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_modifier = 0;
	int err;
	int res_id;
	struct res_eq *req;

	if (!priv->mfunc.master.slave_state)
		return -EINVAL;

	event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];

	/* Create the event only if the slave is registered */
	if (event_eq->eqn < 0)
		return 0;

	mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	res_id = (slave << 8) | event_eq->eqn;
	err = get_res(dev, slave, res_id, RES_EQ, &req);
	if (err)
		goto unlock;

	if (req->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto put;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto put;
	}

	if (eqe->type == MLX4_EVENT_TYPE_CMD) {
		++event_eq->token;
		eqe->event.cmd.token = cpu_to_be16(event_eq->token);
	}

	memcpy(mailbox->buf, (u8 *) eqe, 28);

	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);

	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_NATIVE);

	put_res(dev, slave, res_id, RES_EQ);
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;

put:
	put_res(dev, slave, res_id, RES_EQ);

unlock:
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	return err;
}
int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = get_res(dev, slave, res_id, RES_EQ, &eq);
	if (err)
		return err;

	if (eq->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

ex_put:
	put_res(dev, slave, res_id, RES_EQ);
	return err;
}
int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
	struct res_cq *cq;
	struct res_mtt *mtt;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;
	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto out_put;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}
int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_move;
	atomic_dec(&cq->mtt->ref_count);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}
int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}
static int handle_resize(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd,
			 struct res_cq *cq)
{
	int err;
	struct res_mtt *orig_mtt;
	struct res_mtt *mtt;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;

	err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
	if (err)
		return err;

	if (orig_mtt != cq->mtt) {
		err = -EINVAL;
		goto ex_put;
	}

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_put;

	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto ex_put1;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put1;
	atomic_dec(&orig_mtt->ref_count);
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	return 0;

ex_put1:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_put:
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);

	return err;
}
int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	if (vhcr->op_modifier == 0) {
		err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}
static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
{
	int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
	int log_rq_stride = srqc->logstride & 7;
	int page_shift = (srqc->log_page_size & 0x3f) + 12;

	if (log_srq_size + log_rq_stride + 4 < page_shift)
		return 1;

	return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
}
int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_srq *srq;
	struct mlx4_srq_context *srqc = inbox->buf;
	int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;

	if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
		return -EINVAL;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;
	err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
			      mtt);
	if (err)
		goto ex_put_mtt;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_mtt;

	atomic_inc(&mtt->ref_count);
	srq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_SRQ, srqn);
	return 0;

ex_put_mtt:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}
int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;
	atomic_dec(&srq->mtt->ref_count);
	if (srq->cq)
		atomic_dec(&srq->cq->ref_count);
	res_end_move(dev, slave, RES_SRQ, srqn);

	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}
int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;
	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}
int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;

	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}
int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = get_res(dev, slave, qpn, RES_QP, &qp);
	if (err)
		return err;
	if (qp->com.from_state != RES_QP_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}
int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *qpc = inbox->buf + 8;

	update_ud_gid(dev, qpc, (u8)slave);

	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	atomic_dec(&qp->mtt->ref_count);
	atomic_dec(&qp->rcq->ref_count);
	atomic_dec(&qp->scq->ref_count);
	if (qp->srq)
		atomic_dec(&qp->srq->ref_count);
	res_end_move(dev, slave, RES_QP, qpn);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}
static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
				struct res_qp *rqp, u8 *gid)
{
	struct res_gid *res;

	list_for_each_entry(res, &rqp->mcg_list, list) {
		if (!memcmp(res->gid, gid, 16))
			return res;
	}
	return NULL;
}
static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot)
{
	struct res_gid *res;
	int err;

	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	spin_lock_irq(&rqp->mcg_spl);
	if (find_gid(dev, slave, rqp, gid)) {
		kfree(res);
		err = -EEXIST;
	} else {
		memcpy(res->gid, gid, 16);
		res->prot = prot;
		list_add_tail(&res->list, &rqp->mcg_list);
		err = 0;
	}
	spin_unlock_irq(&rqp->mcg_spl);

	return err;
}
static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot)
{
	struct res_gid *res;
	int err;

	spin_lock_irq(&rqp->mcg_spl);
	res = find_gid(dev, slave, rqp, gid);
	if (!res || res->prot != prot)
		err = -EINVAL;
	else {
		list_del(&res->list);
		kfree(res);
		err = 0;
	}
	spin_unlock_irq(&rqp->mcg_spl);

	return err;
}
int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp qp; /* dummy for calling attach/detach */
	u8 *gid = inbox->buf;
	enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
	int err, err1;
	int qpn;
	struct res_qp *rqp;
	int attach = vhcr->op_modifier;
	int block_loopback = vhcr->in_modifier >> 31;
	u8 steer_type_mask = 2;
	enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;

	qpn = vhcr->in_modifier & 0xffffff;
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err)
		return err;

	qp.qpn = qpn;
	if (attach) {
		err = add_mcg_res(dev, slave, rqp, gid, prot);
		if (err)
			goto ex_put;

		err = mlx4_qp_attach_common(dev, &qp, gid,
					    block_loopback, prot, type);
		if (err)
			goto ex_rem;
	} else {
		err = rem_mcg_res(dev, slave, rqp, gid, prot);
		if (err)
			goto ex_put;
		err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
	}

	put_res(dev, slave, qpn, RES_QP);
	return 0;

ex_rem:
	/* ignore error return below, already in error */
	err1 = rem_mcg_res(dev, slave, rqp, gid, prot);
ex_put:
	put_res(dev, slave, qpn, RES_QP);

	return err;
}
enum {
	BUSY_MAX_RETRIES = 10
};
int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier & 0xffff;

	err = get_res(dev, slave, index, RES_COUNTER, NULL);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	put_res(dev, slave, index, RES_COUNTER);
	return err;
}
static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
{
	struct res_gid *rgid;
	struct res_gid *tmp;
	int err;
	struct mlx4_qp qp; /* dummy for calling attach/detach */

	list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
		qp.qpn = rqp->local_qpn;
		err = mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
					    MLX4_MC_STEER);
		list_del(&rgid->list);
		kfree(rgid);
	}
}
static int _move_all_busy(struct mlx4_dev *dev, int slave,
			  enum mlx4_resource type, int print)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
	struct res_common *r;
	struct res_common *tmp;
	int busy;

	busy = 0;
	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(r, tmp, rlist, list) {
		if (r->owner == slave) {
			if (!r->removing) {
				if (r->state == RES_ANY_BUSY) {
					if (print)
						mlx4_dbg(dev,
							 "%s id 0x%x is busy\n",
							 ResourceType(type),
							 r->res_id);
					++busy;
				} else {
					r->from_state = r->state;
					r->state = RES_ANY_BUSY;
					r->removing = 1;
				}
			}
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return busy;
}
static int move_all_busy(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type)
{
	unsigned long begin;
	int busy;

	begin = jiffies;
	do {
		busy = _move_all_busy(dev, slave, type, 0);
		if (time_after(jiffies, begin + 5 * HZ))
			break;
		if (busy)
			cond_resched();
	} while (busy);

	if (busy)
		busy = _move_all_busy(dev, slave, type, 1);

	return busy;
}
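/*
 * The rem_slave_*() functions below share one pattern: move_all_busy()
 * first fences out in-flight commands by marking every resource of the
 * type busy (retrying for up to 5 seconds), then each resource is
 * walked backwards through its state machine: for example, a QP in
 * RES_QP_HW is first reset in firmware, then its ICM is freed, and only
 * then is the tracker entry deleted and freed.
 */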
static void rem_slave_qps(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	int state;
	u64 in_param;
	int qpn;
	int err;

	err = move_all_busy(dev, slave, RES_QP);
	if (err)
		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == slave) {
			qpn = qp->com.res_id;
			detach_qp(dev, slave, qp);
			state = qp->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_QP_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					radix_tree_delete(&tracker->res_tree[RES_QP],
							  qp->com.res_id);
					list_del(&qp->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(qp);
					state = 0;
					break;
				case RES_QP_MAPPED:
					if (!valid_reserved(dev, slave, qpn))
						__mlx4_qp_free_icm(dev, qpn);
					state = RES_QP_RESERVED;
					break;
				case RES_QP_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param,
						       qp->local_qpn, 2,
						       MLX4_CMD_2RST_QP,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
							 slave, qp->local_qpn);
					atomic_dec(&qp->rcq->ref_count);
					atomic_dec(&qp->scq->ref_count);
					atomic_dec(&qp->mtt->ref_count);
					if (qp->srq)
						atomic_dec(&qp->srq->ref_count);
					state = RES_QP_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *srq_list =
		&tracker->slave_list[slave].res_list[RES_SRQ];
	struct res_srq *srq;
	struct res_srq *tmp;
	int state;
	u64 in_param;
	int srqn;
	int err;

	err = move_all_busy(dev, slave, RES_SRQ);
	if (err)
		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (srq->com.owner == slave) {
			srqn = srq->com.res_id;
			state = srq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_SRQ_ALLOCATED:
					__mlx4_srq_free_icm(dev, srqn);
					spin_lock_irq(mlx4_tlock(dev));
					radix_tree_delete(&tracker->res_tree[RES_SRQ],
							  srqn);
					list_del(&srq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(srq);
					state = 0;
					break;

				case RES_SRQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, srqn, 1,
						       MLX4_CMD_HW2SW_SRQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
							 slave, srqn);

					atomic_dec(&srq->mtt->ref_count);
					if (srq->cq)
						atomic_dec(&srq->cq->ref_count);
					state = RES_SRQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *cq_list =
		&tracker->slave_list[slave].res_list[RES_CQ];
	struct res_cq *cq;
	struct res_cq *tmp;
	int state;
	u64 in_param;
	int cqn;
	int err;

	err = move_all_busy(dev, slave, RES_CQ);
	if (err)
		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
			cqn = cq->com.res_id;
			state = cq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_CQ_ALLOCATED:
					__mlx4_cq_free_icm(dev, cqn);
					spin_lock_irq(mlx4_tlock(dev));
					radix_tree_delete(&tracker->res_tree[RES_CQ],
							  cqn);
					list_del(&cq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(cq);
					state = 0;
					break;

				case RES_CQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, cqn, 1,
						       MLX4_CMD_HW2SW_CQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
							 slave, cqn);
					atomic_dec(&cq->mtt->ref_count);
					state = RES_CQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mpt_list =
		&tracker->slave_list[slave].res_list[RES_MPT];
	struct res_mpt *mpt;
	struct res_mpt *tmp;
	int state;
	u64 in_param;
	int mptn;
	int err;

	err = move_all_busy(dev, slave, RES_MPT);
	if (err)
		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mpt->com.owner == slave) {
			mptn = mpt->com.res_id;
			state = mpt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MPT_RESERVED:
					__mlx4_mr_release(dev, mpt->key);
					spin_lock_irq(mlx4_tlock(dev));
					radix_tree_delete(&tracker->res_tree[RES_MPT],
							  mptn);
					list_del(&mpt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(mpt);
					state = 0;
					break;

				case RES_MPT_MAPPED:
					__mlx4_mr_free_icm(dev, mpt->key);
					state = RES_MPT_RESERVED;
					break;

				case RES_MPT_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, mptn, 0,
						       MLX4_CMD_HW2SW_MPT,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
							 slave, mptn);
					if (mpt->mtt)
						atomic_dec(&mpt->mtt->ref_count);
					state = RES_MPT_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *mtt_list =
		&tracker->slave_list[slave].res_list[RES_MTT];
	struct res_mtt *mtt;
	struct res_mtt *tmp;
	int state;
	int base;
	int err;

	err = move_all_busy(dev, slave, RES_MTT);
	if (err)
		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mtt->com.owner == slave) {
			base = mtt->com.res_id;
			state = mtt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MTT_ALLOCATED:
					__mlx4_free_mtt_range(dev, base,
							      mtt->order);
					spin_lock_irq(mlx4_tlock(dev));
					radix_tree_delete(&tracker->res_tree[RES_MTT],
							  base);
					list_del(&mtt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(mtt);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *eq_list =
		&tracker->slave_list[slave].res_list[RES_EQ];
	struct res_eq *eq;
	struct res_eq *tmp;
	int err;
	int state;
	int eqn;
	struct mlx4_cmd_mailbox *mailbox;

	err = move_all_busy(dev, slave, RES_EQ);
	if (err)
		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (eq->com.owner == slave) {
			eqn = eq->com.res_id;
			state = eq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_EQ_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					radix_tree_delete(&tracker->res_tree[RES_EQ],
							  eqn);
					list_del(&eq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(eq);
					state = 0;
					break;

				case RES_EQ_HW:
					mailbox = mlx4_alloc_cmd_mailbox(dev);
					if (IS_ERR(mailbox)) {
						cond_resched();
						continue;
					}
					err = mlx4_cmd_box(dev, slave, 0,
							   eqn & 0xff, 0,
							   MLX4_CMD_HW2SW_EQ,
							   MLX4_CMD_TIME_CLASS_A,
							   MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_eqs: failed"
							 " to move slave %d eqs %d to"
							 " SW ownership\n", slave, eqn);
					mlx4_free_cmd_mailbox(dev, mailbox);
					atomic_dec(&eq->mtt->ref_count);
					state = RES_EQ_RESERVED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
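/*
 * Teardown order in mlx4_delete_all_resources_for_slave() matters: QPs
 * go first since they hold references on CQs, SRQs and MTTs; MRs are
 * destroyed before the MTTs backing them; and MTTs go last, once all
 * ref_counts have dropped to zero.
 */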
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
	rem_slave_macs(dev, slave);
	rem_slave_qps(dev, slave);
	rem_slave_srqs(dev, slave);
	rem_slave_cqs(dev, slave);
	rem_slave_mrs(dev, slave);
	rem_slave_eqs(dev, slave);
	rem_slave_mtts(dev, slave);
	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}