993a2ef13866a97ae4309d560f6568d88611c5f4
[cascardo/linux.git] / drivers / net / ethernet / mellanox / mlx4 / resource_tracker.c
1 /*
2  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
4  * All rights reserved.
5  * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  */
35
36 #include <linux/sched.h>
37 #include <linux/pci.h>
38 #include <linux/errno.h>
39 #include <linux/kernel.h>
40 #include <linux/io.h>
41 #include <linux/slab.h>
42 #include <linux/mlx4/cmd.h>
43 #include <linux/mlx4/qp.h>
44 #include <linux/if_ether.h>
45 #include <linux/etherdevice.h>
46
47 #include "mlx4.h"
48 #include "fw.h"
49
#define MLX4_MAC_VALID          (1ull << 63)

/* A MAC address registered on behalf of a slave on a given port. */
struct mac_res {
        struct list_head list;
        u64 mac;
        u8 port;
};

/* State common to every tracked resource type.  Embedded as the first
 * member of each res_* struct below so container_of() on either the
 * list entry or the rb_node recovers the full resource.
 */
struct res_common {
        struct list_head        list;   /* entry in the owning slave's per-type list */
        struct rb_node          node;   /* entry in the per-type res_id tree */
        u64                     res_id;
        int                     owner;  /* slave (function) that owns this resource */
        int                     state;  /* current res_*_states value, or RES_ANY_BUSY */
        int                     from_state;     /* state saved while BUSY, restored by put_res() */
        int                     to_state;       /* target state of an in-flight transition */
        int                     removing;
};

enum {
        /* shared "busy" value: every per-type state enum starts with it */
        RES_ANY_BUSY = 1
};

/* A multicast group registration attached to a tracked QP. */
struct res_gid {
        struct list_head        list;
        u8                      gid[16];
        enum mlx4_protocol      prot;
        enum mlx4_steer_type    steer;
        u64                     reg_id;
};
80
enum res_qp_states {
        RES_QP_BUSY = RES_ANY_BUSY,

        /* QP number was allocated */
        RES_QP_RESERVED,

        /* ICM memory for QP context was mapped */
        RES_QP_MAPPED,

        /* QP is in hw ownership */
        RES_QP_HW
};

struct res_qp {
        struct res_common       com;
        struct res_mtt         *mtt;
        struct res_cq          *rcq;    /* receive CQ */
        struct res_cq          *scq;    /* send CQ */
        struct res_srq         *srq;
        struct list_head        mcg_list;       /* attached multicast groups (res_gid) */
        spinlock_t              mcg_spl;        /* protects mcg_list */
        int                     local_qpn;
        atomic_t                ref_count;      /* must drop to 0 before removal (remove_qp_ok) */
        u32                     qpc_flags;
        u8                      sched_queue;
};

enum res_mtt_states {
        RES_MTT_BUSY = RES_ANY_BUSY,
        RES_MTT_ALLOCATED,
};
112
113 static inline const char *mtt_states_str(enum res_mtt_states state)
114 {
115         switch (state) {
116         case RES_MTT_BUSY: return "RES_MTT_BUSY";
117         case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
118         default: return "Unknown";
119         }
120 }
121
struct res_mtt {
        struct res_common       com;
        int                     order;          /* must match on removal (remove_mtt_ok) */
        atomic_t                ref_count;      /* must drop to 0 before removal */
};

enum res_mpt_states {
        RES_MPT_BUSY = RES_ANY_BUSY,
        RES_MPT_RESERVED,
        RES_MPT_MAPPED,
        RES_MPT_HW,
};

struct res_mpt {
        struct res_common       com;
        struct res_mtt         *mtt;
        int                     key;
};

enum res_eq_states {
        RES_EQ_BUSY = RES_ANY_BUSY,
        RES_EQ_RESERVED,
        RES_EQ_HW,
};

struct res_eq {
        struct res_common       com;
        struct res_mtt         *mtt;
};

enum res_cq_states {
        RES_CQ_BUSY = RES_ANY_BUSY,
        RES_CQ_ALLOCATED,
        RES_CQ_HW,
};

struct res_cq {
        struct res_common       com;
        struct res_mtt         *mtt;
        atomic_t                ref_count;      /* presumably users attached to this CQ — confirm */
};

enum res_srq_states {
        RES_SRQ_BUSY = RES_ANY_BUSY,
        RES_SRQ_ALLOCATED,
        RES_SRQ_HW,
};

struct res_srq {
        struct res_common       com;
        struct res_mtt         *mtt;
        struct res_cq          *cq;
        atomic_t                ref_count;      /* must drop to 0 before removal */
};

enum res_counter_states {
        RES_COUNTER_BUSY = RES_ANY_BUSY,
        RES_COUNTER_ALLOCATED,
};

struct res_counter {
        struct res_common       com;
        int                     port;
};

enum res_xrcdn_states {
        RES_XRCD_BUSY = RES_ANY_BUSY,
        RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
        struct res_common       com;
        int                     port;
};

enum res_fs_rule_states {
        RES_FS_RULE_BUSY = RES_ANY_BUSY,
        RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
        struct res_common       com;
        int                     qpn;    /* QP the flow-steering rule targets */
};
206
207 static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
208 {
209         struct rb_node *node = root->rb_node;
210
211         while (node) {
212                 struct res_common *res = container_of(node, struct res_common,
213                                                       node);
214
215                 if (res_id < res->res_id)
216                         node = node->rb_left;
217                 else if (res_id > res->res_id)
218                         node = node->rb_right;
219                 else
220                         return res;
221         }
222         return NULL;
223 }
224
225 static int res_tracker_insert(struct rb_root *root, struct res_common *res)
226 {
227         struct rb_node **new = &(root->rb_node), *parent = NULL;
228
229         /* Figure out where to put new node */
230         while (*new) {
231                 struct res_common *this = container_of(*new, struct res_common,
232                                                        node);
233
234                 parent = *new;
235                 if (res->res_id < this->res_id)
236                         new = &((*new)->rb_left);
237                 else if (res->res_id > this->res_id)
238                         new = &((*new)->rb_right);
239                 else
240                         return -EEXIST;
241         }
242
243         /* Add new node and rebalance tree. */
244         rb_link_node(&res->node, parent, new);
245         rb_insert_color(&res->node, root);
246
247         return 0;
248 }
249
/* Modify-QP verb transitions distinguished when handling slave QP
 * commands (handlers are later in this file, outside this chunk).
 */
enum qp_transition {
        QP_TRANS_INIT2RTR,
        QP_TRANS_RTR2RTS,
        QP_TRANS_RTS2RTS,
        QP_TRANS_SQERR2RTS,
        QP_TRANS_SQD2SQD,
        QP_TRANS_SQD2RTS
};
258
259 /* For Debug uses */
260 static const char *ResourceType(enum mlx4_resource rt)
261 {
262         switch (rt) {
263         case RES_QP: return "RES_QP";
264         case RES_CQ: return "RES_CQ";
265         case RES_SRQ: return "RES_SRQ";
266         case RES_MPT: return "RES_MPT";
267         case RES_MTT: return "RES_MTT";
268         case RES_MAC: return  "RES_MAC";
269         case RES_EQ: return "RES_EQ";
270         case RES_COUNTER: return "RES_COUNTER";
271         case RES_FS_RULE: return "RES_FS_RULE";
272         case RES_XRCD: return "RES_XRCD";
273         default: return "Unknown resource type !!!";
274         };
275 }
276
277 int mlx4_init_resource_tracker(struct mlx4_dev *dev)
278 {
279         struct mlx4_priv *priv = mlx4_priv(dev);
280         int i;
281         int t;
282
283         priv->mfunc.master.res_tracker.slave_list =
284                 kzalloc(dev->num_slaves * sizeof(struct slave_list),
285                         GFP_KERNEL);
286         if (!priv->mfunc.master.res_tracker.slave_list)
287                 return -ENOMEM;
288
289         for (i = 0 ; i < dev->num_slaves; i++) {
290                 for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
291                         INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
292                                        slave_list[i].res_list[t]);
293                 mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
294         }
295
296         mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
297                  dev->num_slaves);
298         for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
299                 priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
300
301         spin_lock_init(&priv->mfunc.master.res_tracker.lock);
302         return 0 ;
303 }
304
305 void mlx4_free_resource_tracker(struct mlx4_dev *dev,
306                                 enum mlx4_res_tracker_free_type type)
307 {
308         struct mlx4_priv *priv = mlx4_priv(dev);
309         int i;
310
311         if (priv->mfunc.master.res_tracker.slave_list) {
312                 if (type != RES_TR_FREE_STRUCTS_ONLY)
313                         for (i = 0 ; i < dev->num_slaves; i++)
314                                 if (type == RES_TR_FREE_ALL ||
315                                     dev->caps.function != i)
316                                         mlx4_delete_all_resources_for_slave(dev, i);
317
318                 if (type != RES_TR_FREE_SLAVES_ONLY) {
319                         kfree(priv->mfunc.master.res_tracker.slave_list);
320                         priv->mfunc.master.res_tracker.slave_list = NULL;
321                 }
322         }
323 }
324
/* Rewrite the pkey index in a slave's modify-QP mailbox, translating the
 * slave's virtual pkey index to the physical one via the master's
 * virt2phys_pkey table.
 */
static void update_pkey_index(struct mlx4_dev *dev, int slave,
                              struct mlx4_cmd_mailbox *inbox)
{
        /* NOTE(review): raw byte offsets into the mailbox — 64 is read as
         * the sched_queue byte (bit 6 = port) and 35 as the pkey index;
         * confirm against the QP context layout (8-byte optpar prefix).
         */
        u8 sched = *(u8 *)(inbox->buf + 64);
        u8 orig_index = *(u8 *)(inbox->buf + 35);
        u8 new_index;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int port;

        port = (sched >> 6 & 1) + 1;

        new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
        *(u8 *)(inbox->buf + 35) = new_index;
}
339
/* Rewrite the mgid_index fields in a slave's modify-QP mailbox so the
 * slave is confined to its own GID entries.
 */
static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
                       u8 slave)
{
        /* +8: the QP context follows the optparam dword and a reserved
         * dword at the start of the mailbox — presumably; confirm against
         * the modify-QP mailbox layout.
         */
        struct mlx4_qp_context  *qp_ctx = inbox->buf + 8;
        enum mlx4_qp_optpar     optpar = be32_to_cpu(*(__be32 *) inbox->buf);
        u32                     ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;

        if (MLX4_QP_ST_UD == ts)
                qp_ctx->pri_path.mgid_index = 0x80 | slave;

        if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
                /* only rewrite the paths the optparam mask says are valid */
                if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
                        qp_ctx->pri_path.mgid_index = slave & 0x7F;
                if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
                        qp_ctx->alt_path.mgid_index = slave & 0x7F;
        }
}
357
/* Enforce the admin-configured per-VF vport settings (VST vlan, QoS,
 * spoof check) on the QP context in a slave's modify-QP mailbox.
 * Returns 0, or -EINVAL when vlan enforcement is impossible for this QP.
 */
static int update_vport_qp_param(struct mlx4_dev *dev,
                                 struct mlx4_cmd_mailbox *inbox,
                                 u8 slave, u32 qpn)
{
        struct mlx4_qp_context  *qpc = inbox->buf + 8;
        struct mlx4_vport_oper_state *vp_oper;
        struct mlx4_priv *priv;
        u32 qp_type;
        int port;

        /* bit 6 of sched_queue selects the physical port (0 -> 1, 1 -> 2) */
        port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
        priv = mlx4_priv(dev);
        vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];

        if (MLX4_VGT != vp_oper->state.default_vlan) {
                /* VST mode: a vlan must be enforced on this VF */
                qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
                if (MLX4_QP_ST_RC == qp_type ||
                    (MLX4_QP_ST_UD == qp_type &&
                     !mlx4_is_qp_reserved(dev, qpn)))
                        return -EINVAL;

                /* the reserved QPs (special, proxy, tunnel)
                 * do not operate over vlans
                 */
                if (mlx4_is_qp_reserved(dev, qpn))
                        return 0;

                /* force strip vlan by clear vsd */
                qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);

                if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
                    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
                        /* admin link-down: block everything in both directions */
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
                } else if (0 != vp_oper->state.default_vlan) {
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
                } else { /* priority tagged */
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
                }

                /* force the admin vlan/QoS into the primary path */
                qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
                qpc->pri_path.vlan_index = vp_oper->vlan_idx;
                qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
                qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
                /* bits 3-5 of sched_queue carry the QoS priority */
                qpc->pri_path.sched_queue &= 0xC7;
                qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
        }
        if (vp_oper->state.spoofchk) {
                /* pin the source MAC to the admin-assigned index */
                qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
                qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
        }
        return 0;
}
421
422 static int mpt_mask(struct mlx4_dev *dev)
423 {
424         return dev->caps.num_mpts - 1;
425 }
426
427 static void *find_res(struct mlx4_dev *dev, u64 res_id,
428                       enum mlx4_resource type)
429 {
430         struct mlx4_priv *priv = mlx4_priv(dev);
431
432         return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
433                                   res_id);
434 }
435
436 static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
437                    enum mlx4_resource type,
438                    void *res)
439 {
440         struct res_common *r;
441         int err = 0;
442
443         spin_lock_irq(mlx4_tlock(dev));
444         r = find_res(dev, res_id, type);
445         if (!r) {
446                 err = -ENONET;
447                 goto exit;
448         }
449
450         if (r->state == RES_ANY_BUSY) {
451                 err = -EBUSY;
452                 goto exit;
453         }
454
455         if (r->owner != slave) {
456                 err = -EPERM;
457                 goto exit;
458         }
459
460         r->from_state = r->state;
461         r->state = RES_ANY_BUSY;
462
463         if (res)
464                 *((struct res_common **)res) = r;
465
466 exit:
467         spin_unlock_irq(mlx4_tlock(dev));
468         return err;
469 }
470
/* Report in *slave the owner of resource @res_id of @type.  For QPs the
 * id is first masked down to the 24-bit QP number.  Returns 0 on
 * success, -ENOENT if the resource is not tracked.
 */
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
                                    enum mlx4_resource type,
                                    u64 res_id, int *slave)
{

        struct res_common *r;
        int err = -ENOENT;
        int id = res_id;

        if (type == RES_QP)
                id &= 0x7fffff;
        /* NOTE(review): plain spin_lock() here while every other user of
         * this lock in the file uses spin_lock_irq() — confirm this path
         * cannot race with an irq-disabled holder.
         */
        spin_lock(mlx4_tlock(dev));

        r = find_res(dev, id, type);
        if (r) {
                *slave = r->owner;
                err = 0;
        }
        spin_unlock(mlx4_tlock(dev));

        return err;
}
493
494 static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
495                     enum mlx4_resource type)
496 {
497         struct res_common *r;
498
499         spin_lock_irq(mlx4_tlock(dev));
500         r = find_res(dev, res_id, type);
501         if (r)
502                 r->state = r->from_state;
503         spin_unlock_irq(mlx4_tlock(dev));
504 }
505
506 static struct res_common *alloc_qp_tr(int id)
507 {
508         struct res_qp *ret;
509
510         ret = kzalloc(sizeof *ret, GFP_KERNEL);
511         if (!ret)
512                 return NULL;
513
514         ret->com.res_id = id;
515         ret->com.state = RES_QP_RESERVED;
516         ret->local_qpn = id;
517         INIT_LIST_HEAD(&ret->mcg_list);
518         spin_lock_init(&ret->mcg_spl);
519         atomic_set(&ret->ref_count, 0);
520
521         return &ret->com;
522 }
523
524 static struct res_common *alloc_mtt_tr(int id, int order)
525 {
526         struct res_mtt *ret;
527
528         ret = kzalloc(sizeof *ret, GFP_KERNEL);
529         if (!ret)
530                 return NULL;
531
532         ret->com.res_id = id;
533         ret->order = order;
534         ret->com.state = RES_MTT_ALLOCATED;
535         atomic_set(&ret->ref_count, 0);
536
537         return &ret->com;
538 }
539
540 static struct res_common *alloc_mpt_tr(int id, int key)
541 {
542         struct res_mpt *ret;
543
544         ret = kzalloc(sizeof *ret, GFP_KERNEL);
545         if (!ret)
546                 return NULL;
547
548         ret->com.res_id = id;
549         ret->com.state = RES_MPT_RESERVED;
550         ret->key = key;
551
552         return &ret->com;
553 }
554
555 static struct res_common *alloc_eq_tr(int id)
556 {
557         struct res_eq *ret;
558
559         ret = kzalloc(sizeof *ret, GFP_KERNEL);
560         if (!ret)
561                 return NULL;
562
563         ret->com.res_id = id;
564         ret->com.state = RES_EQ_RESERVED;
565
566         return &ret->com;
567 }
568
569 static struct res_common *alloc_cq_tr(int id)
570 {
571         struct res_cq *ret;
572
573         ret = kzalloc(sizeof *ret, GFP_KERNEL);
574         if (!ret)
575                 return NULL;
576
577         ret->com.res_id = id;
578         ret->com.state = RES_CQ_ALLOCATED;
579         atomic_set(&ret->ref_count, 0);
580
581         return &ret->com;
582 }
583
584 static struct res_common *alloc_srq_tr(int id)
585 {
586         struct res_srq *ret;
587
588         ret = kzalloc(sizeof *ret, GFP_KERNEL);
589         if (!ret)
590                 return NULL;
591
592         ret->com.res_id = id;
593         ret->com.state = RES_SRQ_ALLOCATED;
594         atomic_set(&ret->ref_count, 0);
595
596         return &ret->com;
597 }
598
599 static struct res_common *alloc_counter_tr(int id)
600 {
601         struct res_counter *ret;
602
603         ret = kzalloc(sizeof *ret, GFP_KERNEL);
604         if (!ret)
605                 return NULL;
606
607         ret->com.res_id = id;
608         ret->com.state = RES_COUNTER_ALLOCATED;
609
610         return &ret->com;
611 }
612
613 static struct res_common *alloc_xrcdn_tr(int id)
614 {
615         struct res_xrcdn *ret;
616
617         ret = kzalloc(sizeof *ret, GFP_KERNEL);
618         if (!ret)
619                 return NULL;
620
621         ret->com.res_id = id;
622         ret->com.state = RES_XRCD_ALLOCATED;
623
624         return &ret->com;
625 }
626
627 static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
628 {
629         struct res_fs_rule *ret;
630
631         ret = kzalloc(sizeof *ret, GFP_KERNEL);
632         if (!ret)
633                 return NULL;
634
635         ret->com.res_id = id;
636         ret->com.state = RES_FS_RULE_ALLOCATED;
637         ret->qpn = qpn;
638         return &ret->com;
639 }
640
641 static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
642                                    int extra)
643 {
644         struct res_common *ret;
645
646         switch (type) {
647         case RES_QP:
648                 ret = alloc_qp_tr(id);
649                 break;
650         case RES_MPT:
651                 ret = alloc_mpt_tr(id, extra);
652                 break;
653         case RES_MTT:
654                 ret = alloc_mtt_tr(id, extra);
655                 break;
656         case RES_EQ:
657                 ret = alloc_eq_tr(id);
658                 break;
659         case RES_CQ:
660                 ret = alloc_cq_tr(id);
661                 break;
662         case RES_SRQ:
663                 ret = alloc_srq_tr(id);
664                 break;
665         case RES_MAC:
666                 printk(KERN_ERR "implementation missing\n");
667                 return NULL;
668         case RES_COUNTER:
669                 ret = alloc_counter_tr(id);
670                 break;
671         case RES_XRCD:
672                 ret = alloc_xrcdn_tr(id);
673                 break;
674         case RES_FS_RULE:
675                 ret = alloc_fs_rule_tr(id, extra);
676                 break;
677         default:
678                 return NULL;
679         }
680         if (ret)
681                 ret->owner = slave;
682
683         return ret;
684 }
685
686 static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
687                          enum mlx4_resource type, int extra)
688 {
689         int i;
690         int err;
691         struct mlx4_priv *priv = mlx4_priv(dev);
692         struct res_common **res_arr;
693         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
694         struct rb_root *root = &tracker->res_tree[type];
695
696         res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
697         if (!res_arr)
698                 return -ENOMEM;
699
700         for (i = 0; i < count; ++i) {
701                 res_arr[i] = alloc_tr(base + i, type, slave, extra);
702                 if (!res_arr[i]) {
703                         for (--i; i >= 0; --i)
704                                 kfree(res_arr[i]);
705
706                         kfree(res_arr);
707                         return -ENOMEM;
708                 }
709         }
710
711         spin_lock_irq(mlx4_tlock(dev));
712         for (i = 0; i < count; ++i) {
713                 if (find_res(dev, base + i, type)) {
714                         err = -EEXIST;
715                         goto undo;
716                 }
717                 err = res_tracker_insert(root, res_arr[i]);
718                 if (err)
719                         goto undo;
720                 list_add_tail(&res_arr[i]->list,
721                               &tracker->slave_list[slave].res_list[type]);
722         }
723         spin_unlock_irq(mlx4_tlock(dev));
724         kfree(res_arr);
725
726         return 0;
727
728 undo:
729         for (--i; i >= base; --i)
730                 rb_erase(&res_arr[i]->node, root);
731
732         spin_unlock_irq(mlx4_tlock(dev));
733
734         for (i = 0; i < count; ++i)
735                 kfree(res_arr[i]);
736
737         kfree(res_arr);
738
739         return err;
740 }
741
742 static int remove_qp_ok(struct res_qp *res)
743 {
744         if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
745             !list_empty(&res->mcg_list)) {
746                 pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
747                        res->com.state, atomic_read(&res->ref_count));
748                 return -EBUSY;
749         } else if (res->com.state != RES_QP_RESERVED) {
750                 return -EPERM;
751         }
752
753         return 0;
754 }
755
756 static int remove_mtt_ok(struct res_mtt *res, int order)
757 {
758         if (res->com.state == RES_MTT_BUSY ||
759             atomic_read(&res->ref_count)) {
760                 printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
761                        __func__, __LINE__,
762                        mtt_states_str(res->com.state),
763                        atomic_read(&res->ref_count));
764                 return -EBUSY;
765         } else if (res->com.state != RES_MTT_ALLOCATED)
766                 return -EPERM;
767         else if (res->order != order)
768                 return -EINVAL;
769
770         return 0;
771 }
772
773 static int remove_mpt_ok(struct res_mpt *res)
774 {
775         if (res->com.state == RES_MPT_BUSY)
776                 return -EBUSY;
777         else if (res->com.state != RES_MPT_RESERVED)
778                 return -EPERM;
779
780         return 0;
781 }
782
783 static int remove_eq_ok(struct res_eq *res)
784 {
785         if (res->com.state == RES_MPT_BUSY)
786                 return -EBUSY;
787         else if (res->com.state != RES_MPT_RESERVED)
788                 return -EPERM;
789
790         return 0;
791 }
792
793 static int remove_counter_ok(struct res_counter *res)
794 {
795         if (res->com.state == RES_COUNTER_BUSY)
796                 return -EBUSY;
797         else if (res->com.state != RES_COUNTER_ALLOCATED)
798                 return -EPERM;
799
800         return 0;
801 }
802
803 static int remove_xrcdn_ok(struct res_xrcdn *res)
804 {
805         if (res->com.state == RES_XRCD_BUSY)
806                 return -EBUSY;
807         else if (res->com.state != RES_XRCD_ALLOCATED)
808                 return -EPERM;
809
810         return 0;
811 }
812
813 static int remove_fs_rule_ok(struct res_fs_rule *res)
814 {
815         if (res->com.state == RES_FS_RULE_BUSY)
816                 return -EBUSY;
817         else if (res->com.state != RES_FS_RULE_ALLOCATED)
818                 return -EPERM;
819
820         return 0;
821 }
822
823 static int remove_cq_ok(struct res_cq *res)
824 {
825         if (res->com.state == RES_CQ_BUSY)
826                 return -EBUSY;
827         else if (res->com.state != RES_CQ_ALLOCATED)
828                 return -EPERM;
829
830         return 0;
831 }
832
833 static int remove_srq_ok(struct res_srq *res)
834 {
835         if (res->com.state == RES_SRQ_BUSY)
836                 return -EBUSY;
837         else if (res->com.state != RES_SRQ_ALLOCATED)
838                 return -EPERM;
839
840         return 0;
841 }
842
843 static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
844 {
845         switch (type) {
846         case RES_QP:
847                 return remove_qp_ok((struct res_qp *)res);
848         case RES_CQ:
849                 return remove_cq_ok((struct res_cq *)res);
850         case RES_SRQ:
851                 return remove_srq_ok((struct res_srq *)res);
852         case RES_MPT:
853                 return remove_mpt_ok((struct res_mpt *)res);
854         case RES_MTT:
855                 return remove_mtt_ok((struct res_mtt *)res, extra);
856         case RES_MAC:
857                 return -ENOSYS;
858         case RES_EQ:
859                 return remove_eq_ok((struct res_eq *)res);
860         case RES_COUNTER:
861                 return remove_counter_ok((struct res_counter *)res);
862         case RES_XRCD:
863                 return remove_xrcdn_ok((struct res_xrcdn *)res);
864         case RES_FS_RULE:
865                 return remove_fs_rule_ok((struct res_fs_rule *)res);
866         default:
867                 return -EINVAL;
868         }
869 }
870
871 static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
872                          enum mlx4_resource type, int extra)
873 {
874         u64 i;
875         int err;
876         struct mlx4_priv *priv = mlx4_priv(dev);
877         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
878         struct res_common *r;
879
880         spin_lock_irq(mlx4_tlock(dev));
881         for (i = base; i < base + count; ++i) {
882                 r = res_tracker_lookup(&tracker->res_tree[type], i);
883                 if (!r) {
884                         err = -ENOENT;
885                         goto out;
886                 }
887                 if (r->owner != slave) {
888                         err = -EPERM;
889                         goto out;
890                 }
891                 err = remove_ok(r, type, extra);
892                 if (err)
893                         goto out;
894         }
895
896         for (i = base; i < base + count; ++i) {
897                 r = res_tracker_lookup(&tracker->res_tree[type], i);
898                 rb_erase(&r->node, &tracker->res_tree[type]);
899                 list_del(&r->list);
900                 kfree(r);
901         }
902         err = 0;
903
904 out:
905         spin_unlock_irq(mlx4_tlock(dev));
906
907         return err;
908 }
909
/* Atomically validate and begin a QP tracker-state transition.  On
 * success the QP is left RES_QP_BUSY with to_state recording the target;
 * the caller commits or rolls back after the firmware command.  @alloc
 * distinguishes real alloc/free moves from passes through the same
 * states (see the RESERVED/MAPPED cases).
 */
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
                                enum res_qp_states state, struct res_qp **qp,
                                int alloc)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_qp *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_QP_BUSY:
                        /* BUSY is never a valid target state */
                        mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
                                 __func__, r->com.res_id);
                        err = -EBUSY;
                        break;

                case RES_QP_RESERVED:
                        /* only legal as a free: MAPPED -> RESERVED, !alloc */
                        if (r->com.state == RES_QP_MAPPED && !alloc)
                                break;

                        mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
                        err = -EINVAL;
                        break;

                case RES_QP_MAPPED:
                        /* reached by allocating (RESERVED, alloc) or by
                         * tearing down hw state (HW -> MAPPED)
                         */
                        if ((r->com.state == RES_QP_RESERVED && alloc) ||
                            r->com.state == RES_QP_HW)
                                break;
                        else {
                                mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
                                          r->com.res_id);
                                err = -EINVAL;
                        }

                        break;

                case RES_QP_HW:
                        if (r->com.state != RES_QP_MAPPED)
                                err = -EINVAL;
                        break;
                default:
                        err = -EINVAL;
                }

                if (!err) {
                        /* mark busy; from_state/to_state let the caller
                         * commit or abort the transition later
                         */
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_QP_BUSY;
                        if (qp)
                                *qp = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}
974
/*
 * Atomically begin moving an MPT tracker entry owned by @slave to @state.
 *
 * Under the tracker lock: look up the entry, check ownership, validate the
 * requested transition against the current state, then park the entry in
 * RES_MPT_BUSY with from_state/to_state recorded.  The caller completes
 * the move with res_end_move() or rolls it back with res_abort_move().
 * On success, *@mpt (if non-NULL) receives the entry.
 *
 * Returns 0 on success, -ENOENT if no such entry, -EPERM if owned by
 * another slave, -EINVAL on an illegal transition.
 */
static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                enum res_mpt_states state, struct res_mpt **mpt)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_mpt *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_MPT_BUSY:
                        /* BUSY is only ever a transient state; never a target. */
                        err = -EINVAL;
                        break;

                case RES_MPT_RESERVED:
                        /* Only a mapped MPT may drop back to reserved. */
                        if (r->com.state != RES_MPT_MAPPED)
                                err = -EINVAL;
                        break;

                case RES_MPT_MAPPED:
                        /* Reachable from RESERVED (map) or HW (unmap). */
                        if (r->com.state != RES_MPT_RESERVED &&
                            r->com.state != RES_MPT_HW)
                                err = -EINVAL;
                        break;

                case RES_MPT_HW:
                        if (r->com.state != RES_MPT_MAPPED)
                                err = -EINVAL;
                        break;
                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        /* BUSY blocks concurrent moves until end/abort. */
                        r->com.state = RES_MPT_BUSY;
                        if (mpt)
                                *mpt = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}
1027
/*
 * Atomically begin moving an EQ tracker entry owned by @slave to @state.
 *
 * EQs only move between RES_EQ_RESERVED and RES_EQ_HW; any other
 * transition (including targeting BUSY directly) is rejected.  On
 * success the entry is parked in RES_EQ_BUSY with from_state/to_state
 * recorded, to be finalized by res_end_move() or undone by
 * res_abort_move().  *@eq (if non-NULL) receives the entry.
 */
static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                enum res_eq_states state, struct res_eq **eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_eq *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_EQ_BUSY:
                        err = -EINVAL;
                        break;

                case RES_EQ_RESERVED:
                        if (r->com.state != RES_EQ_HW)
                                err = -EINVAL;
                        break;

                case RES_EQ_HW:
                        if (r->com.state != RES_EQ_RESERVED)
                                err = -EINVAL;
                        break;

                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        /* BUSY blocks concurrent moves until end/abort. */
                        r->com.state = RES_EQ_BUSY;
                        if (eq)
                                *eq = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}
1075
/*
 * Atomically begin moving a CQ tracker entry owned by @slave to @state.
 *
 * A CQ may only leave RES_CQ_HW for RES_CQ_ALLOCATED when its ref_count
 * is zero (i.e. no QP/SRQ still references it); otherwise -EBUSY.
 * On success the entry is parked in RES_CQ_BUSY with from_state/to_state
 * recorded, to be finalized by res_end_move() or undone by
 * res_abort_move().  *@cq (if non-NULL) receives the entry.
 */
static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
                                enum res_cq_states state, struct res_cq **cq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_cq *r;
        int err;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_CQ_BUSY:
                        err = -EBUSY;
                        break;

                case RES_CQ_ALLOCATED:
                        if (r->com.state != RES_CQ_HW)
                                err = -EINVAL;
                        else if (atomic_read(&r->ref_count))
                                /* still referenced by other resources */
                                err = -EBUSY;
                        else
                                err = 0;
                        break;

                case RES_CQ_HW:
                        if (r->com.state != RES_CQ_ALLOCATED)
                                err = -EINVAL;
                        else
                                err = 0;
                        break;

                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        /* BUSY blocks concurrent moves until end/abort. */
                        r->com.state = RES_CQ_BUSY;
                        if (cq)
                                *cq = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}
1129
1130 static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1131                                  enum res_cq_states state, struct res_srq **srq)
1132 {
1133         struct mlx4_priv *priv = mlx4_priv(dev);
1134         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1135         struct res_srq *r;
1136         int err = 0;
1137
1138         spin_lock_irq(mlx4_tlock(dev));
1139         r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1140         if (!r)
1141                 err = -ENOENT;
1142         else if (r->com.owner != slave)
1143                 err = -EPERM;
1144         else {
1145                 switch (state) {
1146                 case RES_SRQ_BUSY:
1147                         err = -EINVAL;
1148                         break;
1149
1150                 case RES_SRQ_ALLOCATED:
1151                         if (r->com.state != RES_SRQ_HW)
1152                                 err = -EINVAL;
1153                         else if (atomic_read(&r->ref_count))
1154                                 err = -EBUSY;
1155                         break;
1156
1157                 case RES_SRQ_HW:
1158                         if (r->com.state != RES_SRQ_ALLOCATED)
1159                                 err = -EINVAL;
1160                         break;
1161
1162                 default:
1163                         err = -EINVAL;
1164                 }
1165
1166                 if (!err) {
1167                         r->com.from_state = r->com.state;
1168                         r->com.to_state = state;
1169                         r->com.state = RES_SRQ_BUSY;
1170                         if (srq)
1171                                 *srq = r;
1172                 }
1173         }
1174
1175         spin_unlock_irq(mlx4_tlock(dev));
1176
1177         return err;
1178 }
1179
1180 static void res_abort_move(struct mlx4_dev *dev, int slave,
1181                            enum mlx4_resource type, int id)
1182 {
1183         struct mlx4_priv *priv = mlx4_priv(dev);
1184         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1185         struct res_common *r;
1186
1187         spin_lock_irq(mlx4_tlock(dev));
1188         r = res_tracker_lookup(&tracker->res_tree[type], id);
1189         if (r && (r->owner == slave))
1190                 r->state = r->from_state;
1191         spin_unlock_irq(mlx4_tlock(dev));
1192 }
1193
1194 static void res_end_move(struct mlx4_dev *dev, int slave,
1195                          enum mlx4_resource type, int id)
1196 {
1197         struct mlx4_priv *priv = mlx4_priv(dev);
1198         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1199         struct res_common *r;
1200
1201         spin_lock_irq(mlx4_tlock(dev));
1202         r = res_tracker_lookup(&tracker->res_tree[type], id);
1203         if (r && (r->owner == slave))
1204                 r->state = r->to_state;
1205         spin_unlock_irq(mlx4_tlock(dev));
1206 }
1207
/*
 * True if @qpn lies in the reserved QP range and @slave is allowed to
 * touch it (the master always is; a guest only via its proxy QPs).
 */
static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
        if (!mlx4_is_qp_reserved(dev, qpn))
                return 0;

        return mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn);
}
1213
1214 static int fw_reserved(struct mlx4_dev *dev, int qpn)
1215 {
1216         return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
1217 }
1218
/*
 * ALLOC_RES handler for QP resources.
 *
 * RES_OP_RESERVE: reserve @count QPNs (alignment in the high param word)
 * and register the range with the tracker; the base QPN is returned in
 * the low word of *out_param.
 *
 * RES_OP_MAP_ICM: back a single already-reserved QPN with ICM and move
 * its tracker entry RESERVED -> MAPPED.  QPNs in the reserved range (see
 * valid_reserved()) are first added to the tracker here, since they were
 * never reserved through RES_OP_RESERVE.
 *
 * NOTE(review): in the MAP_ICM path, if qp_res_start_move_to() fails
 * after a valid_reserved() QPN was added via add_res_range(), that
 * tracker entry is not removed - looks like a leak on the error path;
 * confirm against later upstream fixes.
 */
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
{
        int err;
        int count;
        int align;
        int base;
        int qpn;

        switch (op) {
        case RES_OP_RESERVE:
                count = get_param_l(&in_param);
                align = get_param_h(&in_param);
                err = __mlx4_qp_reserve_range(dev, count, align, &base);
                if (err)
                        return err;

                err = add_res_range(dev, slave, base, count, RES_QP, 0);
                if (err) {
                        /* tracking failed - give the range back to the HW */
                        __mlx4_qp_release_range(dev, base, count);
                        return err;
                }
                set_param_l(out_param, base);
                break;
        case RES_OP_MAP_ICM:
                qpn = get_param_l(&in_param) & 0x7fffff;
                if (valid_reserved(dev, slave, qpn)) {
                        err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
                        if (err)
                                return err;
                }

                err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
                                           NULL, 1);
                if (err)
                        return err;

                /* FW-reserved QPs already have ICM; skip allocation */
                if (!fw_reserved(dev, qpn)) {
                        err = __mlx4_qp_alloc_icm(dev, qpn);
                        if (err) {
                                res_abort_move(dev, slave, RES_QP, qpn);
                                return err;
                        }
                }

                res_end_move(dev, slave, RES_QP, qpn);
                break;

        default:
                err = -EINVAL;
                break;
        }
        return err;
}
1273
1274 static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1275                          u64 in_param, u64 *out_param)
1276 {
1277         int err = -EINVAL;
1278         int base;
1279         int order;
1280
1281         if (op != RES_OP_RESERVE_AND_MAP)
1282                 return err;
1283
1284         order = get_param_l(&in_param);
1285         base = __mlx4_alloc_mtt_range(dev, order);
1286         if (base == -1)
1287                 return -ENOMEM;
1288
1289         err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1290         if (err)
1291                 __mlx4_free_mtt_range(dev, base, order);
1292         else
1293                 set_param_l(out_param, base);
1294
1295         return err;
1296 }
1297
/*
 * ALLOC_RES handler for MPT (memory protection table) entries.
 *
 * RES_OP_RESERVE: reserve an MPT index and track it under the id
 * (index & mpt_mask()); the full index is returned in *out_param.
 *
 * RES_OP_MAP_ICM: allocate ICM backing for an already reserved MPT and
 * move it RESERVED -> MAPPED via the tracker state machine.
 *
 * Note: err is pre-set to -EINVAL; since the switch has no default
 * label, that is also what an unknown op returns.
 */
static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
{
        int err = -EINVAL;
        int index;
        int id;
        struct res_mpt *mpt;

        switch (op) {
        case RES_OP_RESERVE:
                index = __mlx4_mpt_reserve(dev);
                if (index == -1)
                        break;
                id = index & mpt_mask(dev);

                err = add_res_range(dev, slave, id, 1, RES_MPT, index);
                if (err) {
                        /* tracking failed - give the index back */
                        __mlx4_mpt_release(dev, index);
                        break;
                }
                set_param_l(out_param, index);
                break;
        case RES_OP_MAP_ICM:
                index = get_param_l(&in_param);
                id = index & mpt_mask(dev);
                err = mr_res_start_move_to(dev, slave, id,
                                           RES_MPT_MAPPED, &mpt);
                if (err)
                        return err;

                err = __mlx4_mpt_alloc_icm(dev, mpt->key);
                if (err) {
                        res_abort_move(dev, slave, RES_MPT, id);
                        return err;
                }

                res_end_move(dev, slave, RES_MPT, id);
                break;
        }
        return err;
}
1339
1340 static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1341                         u64 in_param, u64 *out_param)
1342 {
1343         int cqn;
1344         int err;
1345
1346         switch (op) {
1347         case RES_OP_RESERVE_AND_MAP:
1348                 err = __mlx4_cq_alloc_icm(dev, &cqn);
1349                 if (err)
1350                         break;
1351
1352                 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1353                 if (err) {
1354                         __mlx4_cq_free_icm(dev, cqn);
1355                         break;
1356                 }
1357
1358                 set_param_l(out_param, cqn);
1359                 break;
1360
1361         default:
1362                 err = -EINVAL;
1363         }
1364
1365         return err;
1366 }
1367
1368 static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1369                          u64 in_param, u64 *out_param)
1370 {
1371         int srqn;
1372         int err;
1373
1374         switch (op) {
1375         case RES_OP_RESERVE_AND_MAP:
1376                 err = __mlx4_srq_alloc_icm(dev, &srqn);
1377                 if (err)
1378                         break;
1379
1380                 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1381                 if (err) {
1382                         __mlx4_srq_free_icm(dev, srqn);
1383                         break;
1384                 }
1385
1386                 set_param_l(out_param, srqn);
1387                 break;
1388
1389         default:
1390                 err = -EINVAL;
1391         }
1392
1393         return err;
1394 }
1395
1396 static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
1397 {
1398         struct mlx4_priv *priv = mlx4_priv(dev);
1399         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1400         struct mac_res *res;
1401
1402         res = kzalloc(sizeof *res, GFP_KERNEL);
1403         if (!res)
1404                 return -ENOMEM;
1405         res->mac = mac;
1406         res->port = (u8) port;
1407         list_add_tail(&res->list,
1408                       &tracker->slave_list[slave].res_list[RES_MAC]);
1409         return 0;
1410 }
1411
1412 static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1413                                int port)
1414 {
1415         struct mlx4_priv *priv = mlx4_priv(dev);
1416         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1417         struct list_head *mac_list =
1418                 &tracker->slave_list[slave].res_list[RES_MAC];
1419         struct mac_res *res, *tmp;
1420
1421         list_for_each_entry_safe(res, tmp, mac_list, list) {
1422                 if (res->mac == mac && res->port == (u8) port) {
1423                         list_del(&res->list);
1424                         kfree(res);
1425                         break;
1426                 }
1427         }
1428 }
1429
1430 static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1431 {
1432         struct mlx4_priv *priv = mlx4_priv(dev);
1433         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1434         struct list_head *mac_list =
1435                 &tracker->slave_list[slave].res_list[RES_MAC];
1436         struct mac_res *res, *tmp;
1437
1438         list_for_each_entry_safe(res, tmp, mac_list, list) {
1439                 list_del(&res->list);
1440                 __mlx4_unregister_mac(dev, res->port, res->mac);
1441                 kfree(res);
1442         }
1443 }
1444
1445 static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1446                          u64 in_param, u64 *out_param, int in_port)
1447 {
1448         int err = -EINVAL;
1449         int port;
1450         u64 mac;
1451
1452         if (op != RES_OP_RESERVE_AND_MAP)
1453                 return err;
1454
1455         port = !in_port ? get_param_l(out_param) : in_port;
1456         mac = in_param;
1457
1458         err = __mlx4_register_mac(dev, port, mac);
1459         if (err >= 0) {
1460                 set_param_l(out_param, err);
1461                 err = 0;
1462         }
1463
1464         if (!err) {
1465                 err = mac_add_to_slave(dev, slave, mac, port);
1466                 if (err)
1467                         __mlx4_unregister_mac(dev, port, mac);
1468         }
1469         return err;
1470 }
1471
/* VLAN allocations are not tracked yet; accept the request as a no-op. */
static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param, int port)
{
        return 0;
}
1477
1478 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1479                              u64 in_param, u64 *out_param)
1480 {
1481         u32 index;
1482         int err;
1483
1484         if (op != RES_OP_RESERVE)
1485                 return -EINVAL;
1486
1487         err = __mlx4_counter_alloc(dev, &index);
1488         if (err)
1489                 return err;
1490
1491         err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
1492         if (err)
1493                 __mlx4_counter_free(dev, index);
1494         else
1495                 set_param_l(out_param, index);
1496
1497         return err;
1498 }
1499
1500 static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1501                            u64 in_param, u64 *out_param)
1502 {
1503         u32 xrcdn;
1504         int err;
1505
1506         if (op != RES_OP_RESERVE)
1507                 return -EINVAL;
1508
1509         err = __mlx4_xrcd_alloc(dev, &xrcdn);
1510         if (err)
1511                 return err;
1512
1513         err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
1514         if (err)
1515                 __mlx4_xrcd_free(dev, xrcdn);
1516         else
1517                 set_param_l(out_param, xrcdn);
1518
1519         return err;
1520 }
1521
/*
 * Master-side dispatcher for a slave's ALLOC_RES command.
 *
 * The low byte of vhcr->in_modifier selects the resource type; bits 15:8
 * carry the port for MAC/VLAN allocations.  vhcr->op_modifier selects
 * the per-type operation (RES_OP_*) and results are returned through
 * vhcr->out_param.
 */
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
{
        int err;
        int alop = vhcr->op_modifier;

        switch (vhcr->in_modifier & 0xFF) {
        case RES_QP:
                err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                   vhcr->in_param, &vhcr->out_param);
                break;

        case RES_MTT:
                err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param);
                break;

        case RES_MPT:
                err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param);
                break;

        case RES_CQ:
                err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                   vhcr->in_param, &vhcr->out_param);
                break;

        case RES_SRQ:
                err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param);
                break;

        case RES_MAC:
                err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param,
                                    (vhcr->in_modifier >> 8) & 0xFF);
                break;

        case RES_VLAN:
                err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                     vhcr->in_param, &vhcr->out_param,
                                     (vhcr->in_modifier >> 8) & 0xFF);
                break;

        case RES_COUNTER:
                err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                        vhcr->in_param, &vhcr->out_param);
                break;

        case RES_XRCD:
                err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                      vhcr->in_param, &vhcr->out_param);
                break;

        default:
                err = -EINVAL;
                break;
        }

        return err;
}
1586
/*
 * FREE_RES handler for QP resources (inverse of qp_alloc_res).
 *
 * RES_OP_RESERVE: drop tracking for a QPN range (base in the low param
 * word, count in the high word) and release it to the HW.
 *
 * RES_OP_MAP_ICM: move the QP MAPPED -> RESERVED, freeing its ICM unless
 * the QPN is FW-reserved.  QPNs in the reserved range (see
 * valid_reserved()) are also removed from the tracker entirely, mirroring
 * their on-demand addition in qp_alloc_res().
 */
static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                       u64 in_param)
{
        int err;
        int count;
        int base;
        int qpn;

        switch (op) {
        case RES_OP_RESERVE:
                base = get_param_l(&in_param) & 0x7fffff;
                count = get_param_h(&in_param);
                err = rem_res_range(dev, slave, base, count, RES_QP, 0);
                if (err)
                        break;
                __mlx4_qp_release_range(dev, base, count);
                break;
        case RES_OP_MAP_ICM:
                qpn = get_param_l(&in_param) & 0x7fffff;
                err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
                                           NULL, 0);
                if (err)
                        return err;

                /* FW-reserved QPs never had ICM allocated by us */
                if (!fw_reserved(dev, qpn))
                        __mlx4_qp_free_icm(dev, qpn);

                res_end_move(dev, slave, RES_QP, qpn);

                if (valid_reserved(dev, slave, qpn))
                        err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
                break;
        default:
                err = -EINVAL;
                break;
        }
        return err;
}
1625
1626 static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1627                         u64 in_param, u64 *out_param)
1628 {
1629         int err = -EINVAL;
1630         int base;
1631         int order;
1632
1633         if (op != RES_OP_RESERVE_AND_MAP)
1634                 return err;
1635
1636         base = get_param_l(&in_param);
1637         order = get_param_h(&in_param);
1638         err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
1639         if (!err)
1640                 __mlx4_free_mtt_range(dev, base, order);
1641         return err;
1642 }
1643
1644 static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1645                         u64 in_param)
1646 {
1647         int err = -EINVAL;
1648         int index;
1649         int id;
1650         struct res_mpt *mpt;
1651
1652         switch (op) {
1653         case RES_OP_RESERVE:
1654                 index = get_param_l(&in_param);
1655                 id = index & mpt_mask(dev);
1656                 err = get_res(dev, slave, id, RES_MPT, &mpt);
1657                 if (err)
1658                         break;
1659                 index = mpt->key;
1660                 put_res(dev, slave, id, RES_MPT);
1661
1662                 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
1663                 if (err)
1664                         break;
1665                 __mlx4_mpt_release(dev, index);
1666                 break;
1667         case RES_OP_MAP_ICM:
1668                         index = get_param_l(&in_param);
1669                         id = index & mpt_mask(dev);
1670                         err = mr_res_start_move_to(dev, slave, id,
1671                                                    RES_MPT_RESERVED, &mpt);
1672                         if (err)
1673                                 return err;
1674
1675                         __mlx4_mpt_free_icm(dev, mpt->key);
1676                         res_end_move(dev, slave, RES_MPT, id);
1677                         return err;
1678                 break;
1679         default:
1680                 err = -EINVAL;
1681                 break;
1682         }
1683         return err;
1684 }
1685
1686 static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1687                        u64 in_param, u64 *out_param)
1688 {
1689         int cqn;
1690         int err;
1691
1692         switch (op) {
1693         case RES_OP_RESERVE_AND_MAP:
1694                 cqn = get_param_l(&in_param);
1695                 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1696                 if (err)
1697                         break;
1698
1699                 __mlx4_cq_free_icm(dev, cqn);
1700                 break;
1701
1702         default:
1703                 err = -EINVAL;
1704                 break;
1705         }
1706
1707         return err;
1708 }
1709
1710 static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1711                         u64 in_param, u64 *out_param)
1712 {
1713         int srqn;
1714         int err;
1715
1716         switch (op) {
1717         case RES_OP_RESERVE_AND_MAP:
1718                 srqn = get_param_l(&in_param);
1719                 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1720                 if (err)
1721                         break;
1722
1723                 __mlx4_srq_free_icm(dev, srqn);
1724                 break;
1725
1726         default:
1727                 err = -EINVAL;
1728                 break;
1729         }
1730
1731         return err;
1732 }
1733
1734 static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1735                             u64 in_param, u64 *out_param, int in_port)
1736 {
1737         int port;
1738         int err = 0;
1739
1740         switch (op) {
1741         case RES_OP_RESERVE_AND_MAP:
1742                 port = !in_port ? get_param_l(out_param) : in_port;
1743                 mac_del_from_slave(dev, slave, in_param, port);
1744                 __mlx4_unregister_mac(dev, port, in_param);
1745                 break;
1746         default:
1747                 err = -EINVAL;
1748                 break;
1749         }
1750
1751         return err;
1752
1753 }
1754
/* VLAN allocations are not tracked yet; freeing is likewise a no-op. */
static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                            u64 in_param, u64 *out_param, int port)
{
        return 0;
}
1760
1761 static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1762                             u64 in_param, u64 *out_param)
1763 {
1764         int index;
1765         int err;
1766
1767         if (op != RES_OP_RESERVE)
1768                 return -EINVAL;
1769
1770         index = get_param_l(&in_param);
1771         err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
1772         if (err)
1773                 return err;
1774
1775         __mlx4_counter_free(dev, index);
1776
1777         return err;
1778 }
1779
1780 static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1781                           u64 in_param, u64 *out_param)
1782 {
1783         int xrcdn;
1784         int err;
1785
1786         if (op != RES_OP_RESERVE)
1787                 return -EINVAL;
1788
1789         xrcdn = get_param_l(&in_param);
1790         err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
1791         if (err)
1792                 return err;
1793
1794         __mlx4_xrcd_free(dev, xrcdn);
1795
1796         return err;
1797 }
1798
1799 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
1800                           struct mlx4_vhcr *vhcr,
1801                           struct mlx4_cmd_mailbox *inbox,
1802                           struct mlx4_cmd_mailbox *outbox,
1803                           struct mlx4_cmd_info *cmd)
1804 {
1805         int err = -EINVAL;
1806         int alop = vhcr->op_modifier;
1807
1808         switch (vhcr->in_modifier & 0xFF) {
1809         case RES_QP:
1810                 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
1811                                   vhcr->in_param);
1812                 break;
1813
1814         case RES_MTT:
1815                 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
1816                                    vhcr->in_param, &vhcr->out_param);
1817                 break;
1818
1819         case RES_MPT:
1820                 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
1821                                    vhcr->in_param);
1822                 break;
1823
1824         case RES_CQ:
1825                 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
1826                                   vhcr->in_param, &vhcr->out_param);
1827                 break;
1828
1829         case RES_SRQ:
1830                 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
1831                                    vhcr->in_param, &vhcr->out_param);
1832                 break;
1833
1834         case RES_MAC:
1835                 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
1836                                    vhcr->in_param, &vhcr->out_param,
1837                                    (vhcr->in_modifier >> 8) & 0xFF);
1838                 break;
1839
1840         case RES_VLAN:
1841                 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
1842                                     vhcr->in_param, &vhcr->out_param,
1843                                     (vhcr->in_modifier >> 8) & 0xFF);
1844                 break;
1845
1846         case RES_COUNTER:
1847                 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
1848                                        vhcr->in_param, &vhcr->out_param);
1849                 break;
1850
1851         case RES_XRCD:
1852                 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
1853                                      vhcr->in_param, &vhcr->out_param);
1854
1855         default:
1856                 break;
1857         }
1858         return err;
1859 }
1860
1861 /* ugly but other choices are uglier */
1862 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
1863 {
1864         return (be32_to_cpu(mpt->flags) >> 9) & 1;
1865 }
1866
1867 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
1868 {
1869         return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
1870 }
1871
1872 static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
1873 {
1874         return be32_to_cpu(mpt->mtt_sz);
1875 }
1876
1877 static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
1878 {
1879         return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
1880 }
1881
1882 static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
1883 {
1884         return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
1885 }
1886
1887 static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
1888 {
1889         return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
1890 }
1891
1892 static int mr_is_region(struct mlx4_mpt_entry *mpt)
1893 {
1894         return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
1895 }
1896
1897 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
1898 {
1899         return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
1900 }
1901
1902 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
1903 {
1904         return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
1905 }
1906
/* Compute the number of MTT pages a QP context requires, rounded up to a
 * power of two.  All sizes are decoded from packed fields of the (big-endian)
 * QP context supplied by the guest.
 */
static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
	int page_shift = (qpc->log_page_size & 0x3f) + 12;
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_sride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	/* bit 24 of srqn: QP is attached to an SRQ */
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	/* transport/service type lives in bits 16..23 of flags */
	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
	int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
	int sq_size;
	int rq_size;
	int total_pages;
	int total_mem;
	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

	sq_size = 1 << (log_sq_size + log_sq_sride + 4);
	/* SRQ-attached, RSS and XRC QPs have no receive queue of their own */
	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
	/* account for the page offset (units of 64 bytes) before paging */
	total_pages =
		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
				   page_shift);

	return total_pages;
}
1933
1934 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
1935                            int size, struct res_mtt *mtt)
1936 {
1937         int res_start = mtt->com.res_id;
1938         int res_size = (1 << mtt->order);
1939
1940         if (start < res_start || start + size > res_start + res_size)
1941                 return -EPERM;
1942         return 0;
1943 }
1944
/* Paravirtual wrapper for the SW2HW_MPT command issued by a slave (VF).
 * Validates the MPT entry in the inbox (no memory windows, no foreign PD,
 * no bind-enabled FMRs) and, for virtual MRs, checks that the referenced
 * MTT range belongs to the slave before forwarding the command to FW.
 * On success the MPT resource ends up in the RES_MPT_HW state.
 */
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt;
	/* MTT base from the inbox MPT entry, in MTT-entry units */
	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
	int phys;
	int id;
	u32 pd;
	int pd_slave;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
	if (err)
		return err;

	/* Disable memory windows for VFs. */
	if (!mr_is_region(inbox->buf)) {
		err = -EPERM;
		goto ex_abort;
	}

	/* Make sure that the PD bits related to the slave id are zeros. */
	pd = mr_get_pd(inbox->buf);
	pd_slave = (pd >> 17) & 0x7f;
	if (pd_slave != 0 && pd_slave != slave) {
		err = -EPERM;
		goto ex_abort;
	}

	if (mr_is_fmr(inbox->buf)) {
		/* FMR and Bind Enable are forbidden in slave devices. */
		if (mr_is_bind_enabled(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
		/* FMR and Memory Windows are also forbidden. */
		if (!mr_is_region(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
	}

	phys = mr_phys_mpt(inbox->buf);
	if (!phys) {
		/* virtual MR: the backing MTT range must be owned by this
		 * slave and large enough for the registration */
		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
		if (err)
			goto ex_abort;

		err = check_mtt_range(dev, slave, mtt_base,
				      mr_get_mtt_size(inbox->buf), mtt);
		if (err)
			goto ex_put;

		mpt->mtt = mtt;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	if (!phys) {
		/* pin the MTT while the MPT references it */
		atomic_inc(&mtt->ref_count);
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
	}

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_put:
	if (!phys)
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}
2027
/* Paravirtual wrapper for HW2SW_MPT: forwards the command to FW and, on
 * success, drops the MPT's reference on its MTT (taken in SW2HW_MPT) and
 * moves the resource back to the RES_MPT_MAPPED state.
 */
int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	/* mtt is NULL for physical MPTs, which never took a reference */
	if (mpt->mtt)
		atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}
2059
/* Paravirtual wrapper for QUERY_MPT: the query is only forwarded to FW
 * when the slave's MPT is currently in the HW state; otherwise -EBUSY.
 */
int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = get_res(dev, slave, id, RES_MPT, &mpt);
	if (err)
		return err;

	if (mpt->com.from_state != RES_MPT_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

out:
	put_res(dev, slave, id, RES_MPT);
	return err;
}
2087
2088 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2089 {
2090         return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2091 }
2092
2093 static int qp_get_scqn(struct mlx4_qp_context *qpc)
2094 {
2095         return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2096 }
2097
2098 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2099 {
2100         return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2101 }
2102
2103 static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2104                                   struct mlx4_qp_context *context)
2105 {
2106         u32 qpn = vhcr->in_modifier & 0xffffff;
2107         u32 qkey = 0;
2108
2109         if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2110                 return;
2111
2112         /* adjust qkey in qp context */
2113         context->qkey = cpu_to_be32(qkey);
2114 }
2115
/* Paravirtual wrapper for the RST2INIT_QP command.  Validates that every
 * resource the QP context references (MTT range, receive/send CQs and,
 * optionally, an SRQ) is owned by the slave, forwards the command to FW,
 * and on success records references from the QP to those resources.
 * Acquisition order: QP (moved to HW) -> MTT -> rcq -> scq -> srq; the
 * error paths release them in exact reverse order.
 */
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_mtt *mtt;
	struct res_qp *qp;
	/* the QP context starts 8 bytes into the inbox */
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
	int mtt_size = qp_get_mtt_size(qpc);
	struct res_cq *rcq;
	struct res_cq *scq;
	int rcqn = qp_get_rcqn(qpc);
	int scqn = qp_get_scqn(qpc);
	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
	/* bit 24 of the srqn field: QP is attached to an SRQ */
	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
	struct res_srq *srq;
	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
	if (err)
		return err;
	qp->local_qpn = local_qpn;
	qp->sched_queue = 0;
	qp->qpc_flags = be32_to_cpu(qpc->flags);

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto ex_put_mtt;

	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
	if (err)
		goto ex_put_mtt;

	/* send and receive CQ may be the same CQ; only take it once */
	if (scqn != rcqn) {
		err = get_res(dev, slave, scqn, RES_CQ, &scq);
		if (err)
			goto ex_put_rcq;
	} else
		scq = rcq;

	if (use_srq) {
		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
		if (err)
			goto ex_put_scq;
	}

	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	update_pkey_index(dev, slave, inbox);
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_srq;
	/* success: the QP now holds a reference on each resource */
	atomic_inc(&mtt->ref_count);
	qp->mtt = mtt;
	atomic_inc(&rcq->ref_count);
	qp->rcq = rcq;
	atomic_inc(&scq->ref_count);
	qp->scq = scq;

	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);

	if (use_srq) {
		atomic_inc(&srq->ref_count);
		put_res(dev, slave, srqn, RES_SRQ);
		qp->srq = srq;
	}
	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);

	return 0;

ex_put_srq:
	if (use_srq)
		put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
	put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
	put_res(dev, slave, mtt_base, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}
2211
2212 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2213 {
2214         return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2215 }
2216
2217 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2218 {
2219         int log_eq_size = eqc->log_eq_size & 0x1f;
2220         int page_shift = (eqc->log_page_size & 0x3f) + 12;
2221
2222         if (log_eq_size + 5 < page_shift)
2223                 return 1;
2224
2225         return 1 << (log_eq_size + 5 - page_shift);
2226 }
2227
2228 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
2229 {
2230         return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2231 }
2232
2233 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2234 {
2235         int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
2236         int page_shift = (cqc->log_page_size & 0x3f) + 12;
2237
2238         if (log_cq_size + 5 < page_shift)
2239                 return 1;
2240
2241         return 1 << (log_cq_size + 5 - page_shift);
2242 }
2243
/* Paravirtual wrapper for SW2HW_EQ.  EQ resources are tracked per slave
 * under the composite id (slave << 8) | eqn; the range is added here and
 * removed again on any failure.  Validates the EQ's MTT range before
 * forwarding the command, then pins the MTT for the lifetime of the EQ.
 */
int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int eqn = vhcr->in_modifier;
	int res_id = (slave << 8) | eqn;
	struct mlx4_eq_context *eqc = inbox->buf;
	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
	int mtt_size = eq_get_mtt_size(eqc);
	struct res_eq *eq;
	struct res_mtt *mtt;

	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	if (err)
		return err;
	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
	if (err)
		goto out_add;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto out_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;

	/* pin the MTT while the EQ references it */
	atomic_inc(&mtt->ref_count);
	eq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	return err;
}
2292
/* Find an MTT resource owned by @slave that fully contains the range
 * [start, start + len).  On success the resource is marked BUSY (the
 * caller must release it with put_res()) and returned through *res.
 * Returns -EINVAL when no owned range covers the requested window.
 */
static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
			      int len, struct res_mtt **res)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mtt *mtt;
	int err = -EINVAL;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
			    com.list) {
		if (!check_mtt_range(dev, slave, start, len, mtt)) {
			/* reserve the range for the caller */
			*res = mtt;
			mtt->com.from_state = mtt->com.state;
			mtt->com.state = RES_MTT_BUSY;
			err = 0;
			break;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
2316
2317 static int verify_qp_parameters(struct mlx4_dev *dev,
2318                                 struct mlx4_cmd_mailbox *inbox,
2319                                 enum qp_transition transition, u8 slave)
2320 {
2321         u32                     qp_type;
2322         struct mlx4_qp_context  *qp_ctx;
2323         enum mlx4_qp_optpar     optpar;
2324
2325         qp_ctx  = inbox->buf + 8;
2326         qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2327         optpar  = be32_to_cpu(*(__be32 *) inbox->buf);
2328
2329         switch (qp_type) {
2330         case MLX4_QP_ST_RC:
2331         case MLX4_QP_ST_UC:
2332                 switch (transition) {
2333                 case QP_TRANS_INIT2RTR:
2334                 case QP_TRANS_RTR2RTS:
2335                 case QP_TRANS_RTS2RTS:
2336                 case QP_TRANS_SQD2SQD:
2337                 case QP_TRANS_SQD2RTS:
2338                         if (slave != mlx4_master_func_num(dev))
2339                                 /* slaves have only gid index 0 */
2340                                 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
2341                                         if (qp_ctx->pri_path.mgid_index)
2342                                                 return -EINVAL;
2343                                 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
2344                                         if (qp_ctx->alt_path.mgid_index)
2345                                                 return -EINVAL;
2346                         break;
2347                 default:
2348                         break;
2349                 }
2350
2351                 break;
2352         default:
2353                 break;
2354         }
2355
2356         return 0;
2357 }
2358
/* Paravirtual wrapper for WRITE_MTT.  Verifies the target MTT window is
 * owned by the slave, then performs the write through the software
 * implementation (__mlx4_write_mtt) instead of forwarding the command.
 * Inbox layout: page_list[0] = start index, entries begin at index 2.
 */
int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_mtt mtt;
	__be64 *page_list = inbox->buf;
	u64 *pg_list = (u64 *)page_list;
	int i;
	struct res_mtt *rmtt = NULL;
	/* NOTE(review): start is truncated to int here — presumably MTT
	 * indexes fit in 31 bits; confirm against caps */
	int start = be64_to_cpu(page_list[0]);
	int npages = vhcr->in_modifier;
	int err;

	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
	if (err)
		return err;

	/* Call the SW implementation of write_mtt:
	 * - Prepare a dummy mtt struct
	 * - Translate inbox contents to simple addresses in host endianess */
	mtt.offset = 0;  /* TBD this is broken but I don't handle it since
			    we don't really use it */
	mtt.order = 0;
	mtt.page_shift = 0;
	/* convert addresses in place; bit 0 (present flag) is cleared */
	for (i = 0; i < npages; ++i)
		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);

	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
			       ((u64 *)page_list + 2));

	/* release the BUSY state taken by get_containing_mtt() */
	if (rmtt)
		put_res(dev, slave, rmtt->com.res_id, RES_MTT);

	return err;
}
2396
/* Paravirtual wrapper for HW2SW_EQ: forwards the teardown to FW, drops
 * the EQ's reference on its MTT and removes the per-slave EQ resource
 * (the reverse of mlx4_SW2HW_EQ_wrapper).
 */
int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	/* per-slave composite id, matching SW2HW_EQ */
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
	if (err)
		return err;

	/* lock the MTT (NULL out param: we only need the busy state) */
	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
	if (err)
		goto ex_abort;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	atomic_dec(&eq->mtt->ref_count);
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);

	return 0;

ex_put:
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_EQ, res_id);

	return err;
}
2434
/* Generate an event on a slave's event queue on its behalf.  Looks up the
 * EQ the slave registered for this event type, copies the EQE into a
 * command mailbox and issues GEN_EQE to FW.  Serialized per slave by
 * gen_eqe_mutex.  Returns 0 (without generating anything) when the slave
 * has not registered an EQ for this event type.
 */
int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq;
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_modifier = 0;
	int err;
	int res_id;
	struct res_eq *req;

	if (!priv->mfunc.master.slave_state)
		return -EINVAL;

	event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];

	/* Create the event only if the slave is registered */
	if (event_eq->eqn < 0)
		return 0;

	mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	res_id = (slave << 8) | event_eq->eqn;
	err = get_res(dev, slave, res_id, RES_EQ, &req);
	if (err)
		goto unlock;

	if (req->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto put;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto put;
	}

	/* command-completion events carry a per-EQ token for matching */
	if (eqe->type == MLX4_EVENT_TYPE_CMD) {
		++event_eq->token;
		eqe->event.cmd.token = cpu_to_be16(event_eq->token);
	}

	/* 28 bytes: the EQE payload handed to FW — presumably excludes the
	 * trailing ownership bytes; confirm against the EQE layout */
	memcpy(mailbox->buf, (u8 *) eqe, 28);

	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);

	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_NATIVE);

	put_res(dev, slave, res_id, RES_EQ);
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;

put:
	put_res(dev, slave, res_id, RES_EQ);

unlock:
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	return err;
}
2496
/* Paravirtual wrapper for QUERY_EQ: forwards the query to FW only when
 * the slave's EQ is currently in the HW state; otherwise -EINVAL.
 */
int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	/* per-slave composite id, matching SW2HW_EQ */
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = get_res(dev, slave, res_id, RES_EQ, &eq);
	if (err)
		return err;

	if (eq->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

ex_put:
	put_res(dev, slave, res_id, RES_EQ);
	return err;
}
2523
/* Paravirtual wrapper for SW2HW_CQ.  Validates that the CQ's MTT range
 * belongs to the slave, forwards the command to FW and, on success, pins
 * the MTT and moves the CQ to the RES_CQ_HW state.
 */
int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
	struct res_cq *cq;
	struct res_mtt *mtt;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;
	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto out_put;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;
	/* pin the MTT while the CQ references it */
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}
2561
/* Paravirtual wrapper for HW2SW_CQ: forwards the teardown to FW, drops
 * the CQ's reference on its MTT and moves the CQ back to the ALLOCATED
 * state (reverse of mlx4_SW2HW_CQ_wrapper).
 */
int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_move;
	atomic_dec(&cq->mtt->ref_count);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}
2586
/* Paravirtual wrapper for QUERY_CQ: forwards the query to FW only when
 * the slave's CQ is currently in the HW state.
 * NOTE(review): unlike QUERY_MPT/QUERY_SRQ, err is left at 0 when the CQ
 * is not in the HW state, so the call returns success with an unfilled
 * outbox — confirm this is intended.
 */
int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}
2610
/* Handle MODIFY_CQ with op_modifier 0 (CQ resize) for a slave: validate
 * that the new MTT range in the inbox belongs to the slave, forward the
 * command to FW and swap the CQ's MTT reference from the old range to
 * the new one.
 */
static int handle_resize(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd,
			 struct res_cq *cq)
{
	int err;
	struct res_mtt *orig_mtt;
	struct res_mtt *mtt;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;

	err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
	if (err)
		return err;

	/* sanity: the tracked resource must still be the CQ's current MTT */
	if (orig_mtt != cq->mtt) {
		err = -EINVAL;
		goto ex_put;
	}

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_put;

	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto ex_put1;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put1;
	/* success: move the pin from the old MTT to the new one */
	atomic_dec(&orig_mtt->ref_count);
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	return 0;

ex_put1:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_put:
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);

	return err;

}
2658
/* Paravirtual wrapper for MODIFY_CQ.  op_modifier 0 is a resize and goes
 * through handle_resize() (which revalidates the MTT range); any other
 * modifier is forwarded directly.  Requires the CQ to be in the HW state.
 */
int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	/* err stays 0 (from get_res) when the CQ is not in the HW state */
	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	if (vhcr->op_modifier == 0) {
		err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}
2687
2688 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
2689 {
2690         int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
2691         int log_rq_stride = srqc->logstride & 7;
2692         int page_shift = (srqc->log_page_size & 0x3f) + 12;
2693
2694         if (log_srq_size + log_rq_stride + 4 < page_shift)
2695                 return 1;
2696
2697         return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
2698 }
2699
/* Paravirtual wrapper for SW2HW_SRQ.  Checks that the SRQ number in the
 * context matches the command modifier, validates the SRQ's MTT range,
 * forwards the command to FW and, on success, pins the MTT and moves the
 * SRQ to the RES_SRQ_HW state.
 */
int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_srq *srq;
	struct mlx4_srq_context *srqc = inbox->buf;
	int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;

	/* the context's embedded SRQ number must match the command target */
	if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
		return -EINVAL;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;
	err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
			      mtt);
	if (err)
		goto ex_put_mtt;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_mtt;

	/* pin the MTT while the SRQ references it */
	atomic_inc(&mtt->ref_count);
	srq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_SRQ, srqn);
	return 0;

ex_put_mtt:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}
2744
/* Paravirtual wrapper for HW2SW_SRQ: forwards the teardown to FW, drops
 * the SRQ's references (its MTT and, when set, its CQ) and moves the SRQ
 * back to the ALLOCATED state (reverse of mlx4_SW2HW_SRQ_wrapper).
 */
int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;
	atomic_dec(&srq->mtt->ref_count);
	if (srq->cq)
		atomic_dec(&srq->cq->ref_count);
	res_end_move(dev, slave, RES_SRQ, srqn);

	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}
2773
2774 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2775                            struct mlx4_vhcr *vhcr,
2776                            struct mlx4_cmd_mailbox *inbox,
2777                            struct mlx4_cmd_mailbox *outbox,
2778                            struct mlx4_cmd_info *cmd)
2779 {
2780         int err;
2781         int srqn = vhcr->in_modifier;
2782         struct res_srq *srq;
2783
2784         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2785         if (err)
2786                 return err;
2787         if (srq->com.from_state != RES_SRQ_HW) {
2788                 err = -EBUSY;
2789                 goto out;
2790         }
2791         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2792 out:
2793         put_res(dev, slave, srqn, RES_SRQ);
2794         return err;
2795 }
2796
2797 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2798                          struct mlx4_vhcr *vhcr,
2799                          struct mlx4_cmd_mailbox *inbox,
2800                          struct mlx4_cmd_mailbox *outbox,
2801                          struct mlx4_cmd_info *cmd)
2802 {
2803         int err;
2804         int srqn = vhcr->in_modifier;
2805         struct res_srq *srq;
2806
2807         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2808         if (err)
2809                 return err;
2810
2811         if (srq->com.from_state != RES_SRQ_HW) {
2812                 err = -EBUSY;
2813                 goto out;
2814         }
2815
2816         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2817 out:
2818         put_res(dev, slave, srqn, RES_SRQ);
2819         return err;
2820 }
2821
2822 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
2823                         struct mlx4_vhcr *vhcr,
2824                         struct mlx4_cmd_mailbox *inbox,
2825                         struct mlx4_cmd_mailbox *outbox,
2826                         struct mlx4_cmd_info *cmd)
2827 {
2828         int err;
2829         int qpn = vhcr->in_modifier & 0x7fffff;
2830         struct res_qp *qp;
2831
2832         err = get_res(dev, slave, qpn, RES_QP, &qp);
2833         if (err)
2834                 return err;
2835         if (qp->com.from_state != RES_QP_HW) {
2836                 err = -EBUSY;
2837                 goto out;
2838         }
2839
2840         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2841 out:
2842         put_res(dev, slave, qpn, RES_QP);
2843         return err;
2844 }
2845
2846 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2847                               struct mlx4_vhcr *vhcr,
2848                               struct mlx4_cmd_mailbox *inbox,
2849                               struct mlx4_cmd_mailbox *outbox,
2850                               struct mlx4_cmd_info *cmd)
2851 {
2852         struct mlx4_qp_context *context = inbox->buf + 8;
2853         adjust_proxy_tun_qkey(dev, vhcr, context);
2854         update_pkey_index(dev, slave, inbox);
2855         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2856 }
2857
/* INIT->RTR transition: validate the slave-supplied QP context,
 * para-virtualize it (pkey, GID, qkey, vport params), then execute the
 * command while the QP is held busy. */
int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;
	u8 orig_sched_queue;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
	if (err)
		return err;

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	/* update_vport_qp_param() may overwrite sched_queue below, so
	 * remember the value the VF originally asked for. */
	orig_sched_queue = qpc->pri_path.sched_queue;
	err = update_vport_qp_param(dev, inbox, slave, qpn);
	if (err)
		return err;

	err = get_res(dev, slave, qpn, RES_QP, &qp);
	if (err)
		return err;
	if (qp->com.from_state != RES_QP_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	/* if no error, save sched queue value passed in by VF. This is
	 * essentially the QOS value provided by the VF. This will be useful
	 * if we allow dynamic changes from VST back to VGT
	 */
	if (!err)
		qp->sched_queue = orig_sched_queue;

	put_res(dev, slave, qpn, RES_QP);
	return err;
}
2902
2903 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2904                             struct mlx4_vhcr *vhcr,
2905                             struct mlx4_cmd_mailbox *inbox,
2906                             struct mlx4_cmd_mailbox *outbox,
2907                             struct mlx4_cmd_info *cmd)
2908 {
2909         int err;
2910         struct mlx4_qp_context *context = inbox->buf + 8;
2911
2912         err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
2913         if (err)
2914                 return err;
2915
2916         update_pkey_index(dev, slave, inbox);
2917         update_gid(dev, inbox, (u8)slave);
2918         adjust_proxy_tun_qkey(dev, vhcr, context);
2919         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2920 }
2921
2922 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2923                             struct mlx4_vhcr *vhcr,
2924                             struct mlx4_cmd_mailbox *inbox,
2925                             struct mlx4_cmd_mailbox *outbox,
2926                             struct mlx4_cmd_info *cmd)
2927 {
2928         int err;
2929         struct mlx4_qp_context *context = inbox->buf + 8;
2930
2931         err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
2932         if (err)
2933                 return err;
2934
2935         update_pkey_index(dev, slave, inbox);
2936         update_gid(dev, inbox, (u8)slave);
2937         adjust_proxy_tun_qkey(dev, vhcr, context);
2938         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2939 }
2940
2941
2942 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2943                               struct mlx4_vhcr *vhcr,
2944                               struct mlx4_cmd_mailbox *inbox,
2945                               struct mlx4_cmd_mailbox *outbox,
2946                               struct mlx4_cmd_info *cmd)
2947 {
2948         struct mlx4_qp_context *context = inbox->buf + 8;
2949         adjust_proxy_tun_qkey(dev, vhcr, context);
2950         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2951 }
2952
2953 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
2954                             struct mlx4_vhcr *vhcr,
2955                             struct mlx4_cmd_mailbox *inbox,
2956                             struct mlx4_cmd_mailbox *outbox,
2957                             struct mlx4_cmd_info *cmd)
2958 {
2959         int err;
2960         struct mlx4_qp_context *context = inbox->buf + 8;
2961
2962         err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
2963         if (err)
2964                 return err;
2965
2966         adjust_proxy_tun_qkey(dev, vhcr, context);
2967         update_gid(dev, inbox, (u8)slave);
2968         update_pkey_index(dev, slave, inbox);
2969         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2970 }
2971
2972 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2973                             struct mlx4_vhcr *vhcr,
2974                             struct mlx4_cmd_mailbox *inbox,
2975                             struct mlx4_cmd_mailbox *outbox,
2976                             struct mlx4_cmd_info *cmd)
2977 {
2978         int err;
2979         struct mlx4_qp_context *context = inbox->buf + 8;
2980
2981         err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
2982         if (err)
2983                 return err;
2984
2985         adjust_proxy_tun_qkey(dev, vhcr, context);
2986         update_gid(dev, inbox, (u8)slave);
2987         update_pkey_index(dev, slave, inbox);
2988         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2989 }
2990
/* Any-state->RESET transition: move the QP back to MAPPED and release the
 * references it held on its MTT, CQs and (optional) SRQ while in HW. */
int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	/* QP is out of HW: drop the references taken when it moved to HW */
	atomic_dec(&qp->mtt->ref_count);
	atomic_dec(&qp->rcq->ref_count);
	atomic_dec(&qp->scq->ref_count);
	if (qp->srq)
		atomic_dec(&qp->srq->ref_count);
	res_end_move(dev, slave, RES_QP, qpn);
	return 0;

ex_abort:
	/* firmware command failed - roll the QP back to its prior state */
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}
3021
3022 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3023                                 struct res_qp *rqp, u8 *gid)
3024 {
3025         struct res_gid *res;
3026
3027         list_for_each_entry(res, &rqp->mcg_list, list) {
3028                 if (!memcmp(res->gid, gid, 16))
3029                         return res;
3030         }
3031         return NULL;
3032 }
3033
3034 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3035                        u8 *gid, enum mlx4_protocol prot,
3036                        enum mlx4_steer_type steer, u64 reg_id)
3037 {
3038         struct res_gid *res;
3039         int err;
3040
3041         res = kzalloc(sizeof *res, GFP_KERNEL);
3042         if (!res)
3043                 return -ENOMEM;
3044
3045         spin_lock_irq(&rqp->mcg_spl);
3046         if (find_gid(dev, slave, rqp, gid)) {
3047                 kfree(res);
3048                 err = -EEXIST;
3049         } else {
3050                 memcpy(res->gid, gid, 16);
3051                 res->prot = prot;
3052                 res->steer = steer;
3053                 res->reg_id = reg_id;
3054                 list_add_tail(&res->list, &rqp->mcg_list);
3055                 err = 0;
3056         }
3057         spin_unlock_irq(&rqp->mcg_spl);
3058
3059         return err;
3060 }
3061
3062 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3063                        u8 *gid, enum mlx4_protocol prot,
3064                        enum mlx4_steer_type steer, u64 *reg_id)
3065 {
3066         struct res_gid *res;
3067         int err;
3068
3069         spin_lock_irq(&rqp->mcg_spl);
3070         res = find_gid(dev, slave, rqp, gid);
3071         if (!res || res->prot != prot || res->steer != steer)
3072                 err = -EINVAL;
3073         else {
3074                 *reg_id = res->reg_id;
3075                 list_del(&res->list);
3076                 kfree(res);
3077                 err = 0;
3078         }
3079         spin_unlock_irq(&rqp->mcg_spl);
3080
3081         return err;
3082 }
3083
3084 static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3085                      int block_loopback, enum mlx4_protocol prot,
3086                      enum mlx4_steer_type type, u64 *reg_id)
3087 {
3088         switch (dev->caps.steering_mode) {
3089         case MLX4_STEERING_MODE_DEVICE_MANAGED:
3090                 return mlx4_trans_to_dmfs_attach(dev, qp, gid, gid[5],
3091                                                 block_loopback, prot,
3092                                                 reg_id);
3093         case MLX4_STEERING_MODE_B0:
3094                 return mlx4_qp_attach_common(dev, qp, gid,
3095                                             block_loopback, prot, type);
3096         default:
3097                 return -EINVAL;
3098         }
3099 }
3100
3101 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3102                      enum mlx4_protocol prot, enum mlx4_steer_type type,
3103                      u64 reg_id)
3104 {
3105         switch (dev->caps.steering_mode) {
3106         case MLX4_STEERING_MODE_DEVICE_MANAGED:
3107                 return mlx4_flow_detach(dev, reg_id);
3108         case MLX4_STEERING_MODE_B0:
3109                 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
3110         default:
3111                 return -EINVAL;
3112         }
3113 }
3114
/* Para-virtualized multicast attach/detach for a slave's QP.  op_modifier
 * selects attach (non-zero) vs detach; in_modifier packs the QPN (low 24
 * bits), protocol (bits 28-30) and block-loopback flag (bit 31). */
int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp qp; /* dummy for calling attach/detach */
	u8 *gid = inbox->buf;
	enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
	int err;
	int qpn;
	struct res_qp *rqp;
	u64 reg_id = 0;
	int attach = vhcr->op_modifier;
	int block_loopback = vhcr->in_modifier >> 31;
	u8 steer_type_mask = 2;
	/* steer type is carried in bit 1 of gid byte 7 */
	enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;

	qpn = vhcr->in_modifier & 0xffffff;
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err)
		return err;

	qp.qpn = qpn;
	if (attach) {
		err = qp_attach(dev, &qp, gid, block_loopback, prot,
				type, &reg_id);
		if (err) {
			pr_err("Fail to attach rule to qp 0x%x\n", qpn);
			goto ex_put;
		}
		/* track the attachment so slave cleanup can undo it;
		 * on tracking failure, detach again to stay consistent */
		err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
		if (err)
			goto ex_detach;
	} else {
		err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
		if (err)
			goto ex_put;

		err = qp_detach(dev, &qp, gid, prot, type, reg_id);
		if (err)
			pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",
			       qpn, reg_id);
	}
	put_res(dev, slave, qpn, RES_QP);
	return err;

ex_detach:
	qp_detach(dev, &qp, gid, prot, type, reg_id);
ex_put:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}
3168
3169 /*
3170  * MAC validation for Flow Steering rules.
3171  * VF can attach rules only with a mac address which is assigned to it.
3172  */
3173 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3174                                    struct list_head *rlist)
3175 {
3176         struct mac_res *res, *tmp;
3177         __be64 be_mac;
3178
3179         /* make sure it isn't multicast or broadcast mac*/
3180         if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3181             !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3182                 list_for_each_entry_safe(res, tmp, rlist, list) {
3183                         be_mac = cpu_to_be64(res->mac << 16);
3184                         if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
3185                                 return 0;
3186                 }
3187                 pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
3188                        eth_header->eth.dst_mac, slave);
3189                 return -EINVAL;
3190         }
3191         return 0;
3192 }
3193
3194 /*
3195  * In case of missing eth header, append eth header with a MAC address
3196  * assigned to the VF.
3197  */
static int add_eth_header(struct mlx4_dev *dev, int slave,
			  struct mlx4_cmd_mailbox *inbox,
			  struct list_head *rlist, int header_id)
{
	struct mac_res *res, *tmp;
	u8 port;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	struct mlx4_net_trans_rule_hw_eth *eth_header;
	struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
	struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
	__be64 be_mac = 0;
	__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);

	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
	port = ctrl->port;
	eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);

	/* Clear a space in the inbox for eth header */
	switch (header_id) {
	case MLX4_NET_TRANS_RULE_ID_IPV4:
		/* shift the IPv4 + L4 headers down one eth-header slot;
		 * NOTE(review): assumes the mailbox carries IPv4 followed
		 * by an L4 header - confirm against the rule format */
		ip_header =
			(struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
		memmove(ip_header, eth_header,
			sizeof(*ip_header) + sizeof(*l4_header));
		break;
	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		/* shift the lone L4 header down one eth-header slot */
		l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
			    (eth_header + 1);
		memmove(l4_header, eth_header, sizeof(*l4_header));
		break;
	default:
		return -EINVAL;
	}
	/* pick a MAC registered to this VF on the rule's port */
	list_for_each_entry_safe(res, tmp, rlist, list) {
		if (port == res->port) {
			be_mac = cpu_to_be64(res->mac << 16);
			break;
		}
	}
	if (!be_mac) {
		pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d .\n",
		       port);
		return -EINVAL;
	}

	/* build the eth header in the space cleared above */
	memset(eth_header, 0, sizeof(*eth_header));
	eth_header->size = sizeof(*eth_header) >> 2;
	eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
	memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
	memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);

	return 0;

}
3253
/* Para-virtualized flow-steering attach for a slave: validate/fix the
 * rule in the mailbox, forward it to the firmware, and register the new
 * rule in the resource tracker (tied to its QP). */
int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd)
{

	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
	int err;
	int qpn;
	struct res_qp *rqp;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	struct _rule_hw  *rule_header;
	int header_id;

	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return -EOPNOTSUPP;

	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
	qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err) {
		pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
		return err;
	}
	rule_header = (struct _rule_hw *)(ctrl + 1);
	header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));

	switch (header_id) {
	case MLX4_NET_TRANS_RULE_ID_ETH:
		/* unicast rules may only target MACs owned by this VF */
		if (validate_eth_header_mac(slave, rule_header, rlist)) {
			err = -EINVAL;
			goto err_put;
		}
		break;
	case MLX4_NET_TRANS_RULE_ID_IB:
		break;
	case MLX4_NET_TRANS_RULE_ID_IPV4:
	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
		if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
			err = -EINVAL;
			goto err_put;
		}
		/* grow the command size by the prepended eth header;
		 * the >>2 suggests in_modifier counts dwords - confirm */
		vhcr->in_modifier +=
			sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
		break;
	default:
		pr_err("Corrupted mailbox.\n");
		err = -EINVAL;
		goto err_put;
	}

	err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
			   vhcr->in_modifier, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (err)
		goto err_put;

	/* track the new rule (out_param holds its id) against its QP */
	err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
	if (err) {
		mlx4_err(dev, "Fail to add flow steering resources.\n ");
		/* detach rule*/
		mlx4_cmd(dev, vhcr->out_param, 0, 0,
			 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
			 MLX4_CMD_NATIVE);
		goto err_put;
	}
	atomic_inc(&rqp->ref_count);
err_put:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}
3332
3333 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
3334                                          struct mlx4_vhcr *vhcr,
3335                                          struct mlx4_cmd_mailbox *inbox,
3336                                          struct mlx4_cmd_mailbox *outbox,
3337                                          struct mlx4_cmd_info *cmd)
3338 {
3339         int err;
3340         struct res_qp *rqp;
3341         struct res_fs_rule *rrule;
3342
3343         if (dev->caps.steering_mode !=
3344             MLX4_STEERING_MODE_DEVICE_MANAGED)
3345                 return -EOPNOTSUPP;
3346
3347         err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
3348         if (err)
3349                 return err;
3350         /* Release the rule form busy state before removal */
3351         put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
3352         err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
3353         if (err)
3354                 return err;
3355
3356         err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
3357         if (err) {
3358                 mlx4_err(dev, "Fail to remove flow steering resources.\n ");
3359                 goto out;
3360         }
3361
3362         err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
3363                        MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3364                        MLX4_CMD_NATIVE);
3365         if (!err)
3366                 atomic_dec(&rqp->ref_count);
3367 out:
3368         put_res(dev, slave, rrule->qpn, RES_QP);
3369         return err;
3370 }
3371
/* Upper bound on retries when a resource stays busy.
 * NOTE(review): not referenced within this chunk (move_all_busy() uses a
 * 5-second jiffies deadline instead) - confirm where this is used. */
enum {
	BUSY_MAX_RETRIES = 10
};
3375
3376 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
3377                                struct mlx4_vhcr *vhcr,
3378                                struct mlx4_cmd_mailbox *inbox,
3379                                struct mlx4_cmd_mailbox *outbox,
3380                                struct mlx4_cmd_info *cmd)
3381 {
3382         int err;
3383         int index = vhcr->in_modifier & 0xffff;
3384
3385         err = get_res(dev, slave, index, RES_COUNTER, NULL);
3386         if (err)
3387                 return err;
3388
3389         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3390         put_res(dev, slave, index, RES_COUNTER);
3391         return err;
3392 }
3393
/* Undo every multicast/steering attachment tracked for this QP, freeing
 * the tracking entries.  Used during slave resource cleanup. */
static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
{
	struct res_gid *rgid;
	struct res_gid *tmp;
	struct mlx4_qp qp; /* dummy for calling attach/detach */

	list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
		switch (dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			mlx4_flow_detach(dev, rgid->reg_id);
			break;
		case MLX4_STEERING_MODE_B0:
			qp.qpn = rqp->local_qpn;
			/* best effort: errors ignored during cleanup */
			(void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
						     rgid->prot, rgid->steer);
			break;
		}
		list_del(&rgid->list);
		kfree(rgid);
	}
}
3415
3416 static int _move_all_busy(struct mlx4_dev *dev, int slave,
3417                           enum mlx4_resource type, int print)
3418 {
3419         struct mlx4_priv *priv = mlx4_priv(dev);
3420         struct mlx4_resource_tracker *tracker =
3421                 &priv->mfunc.master.res_tracker;
3422         struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
3423         struct res_common *r;
3424         struct res_common *tmp;
3425         int busy;
3426
3427         busy = 0;
3428         spin_lock_irq(mlx4_tlock(dev));
3429         list_for_each_entry_safe(r, tmp, rlist, list) {
3430                 if (r->owner == slave) {
3431                         if (!r->removing) {
3432                                 if (r->state == RES_ANY_BUSY) {
3433                                         if (print)
3434                                                 mlx4_dbg(dev,
3435                                                          "%s id 0x%llx is busy\n",
3436                                                           ResourceType(type),
3437                                                           r->res_id);
3438                                         ++busy;
3439                                 } else {
3440                                         r->from_state = r->state;
3441                                         r->state = RES_ANY_BUSY;
3442                                         r->removing = 1;
3443                                 }
3444                         }
3445                 }
3446         }
3447         spin_unlock_irq(mlx4_tlock(dev));
3448
3449         return busy;
3450 }
3451
3452 static int move_all_busy(struct mlx4_dev *dev, int slave,
3453                          enum mlx4_resource type)
3454 {
3455         unsigned long begin;
3456         int busy;
3457
3458         begin = jiffies;
3459         do {
3460                 busy = _move_all_busy(dev, slave, type, 0);
3461                 if (time_after(jiffies, begin + 5 * HZ))
3462                         break;
3463                 if (busy)
3464                         cond_resched();
3465         } while (busy);
3466
3467         if (busy)
3468                 busy = _move_all_busy(dev, slave, type, 1);
3469
3470         return busy;
3471 }
/* Tear down all QPs owned by a slave: claim them, detach their steering
 * rules, then walk each one down its state ladder
 * (HW -> MAPPED -> RESERVED -> freed), releasing references and ICM. */
static void rem_slave_qps(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	int state;
	u64 in_param;
	int qpn;
	int err;

	err = move_all_busy(dev, slave, RES_QP);
	if (err)
		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy"
			  "for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		/* drop the lock while issuing commands for this QP */
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == slave) {
			qpn = qp->com.res_id;
			detach_qp(dev, slave, qp);
			state = qp->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_QP_RESERVED:
					/* final step: remove from tracker
					 * and free the tracking struct */
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&qp->com.node,
						 &tracker->res_tree[RES_QP]);
					list_del(&qp->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(qp);
					state = 0;
					break;
				case RES_QP_MAPPED:
					if (!valid_reserved(dev, slave, qpn))
						__mlx4_qp_free_icm(dev, qpn);
					state = RES_QP_RESERVED;
					break;
				case RES_QP_HW:
					/* force the QP to reset in FW, then
					 * drop the references it held */
					in_param = slave;
					err = mlx4_cmd(dev, in_param,
						       qp->local_qpn, 2,
						       MLX4_CMD_2RST_QP,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_qps: failed"
							 " to move slave %d qpn %d to"
							 " reset\n", slave,
							 qp->local_qpn);
					atomic_dec(&qp->rcq->ref_count);
					atomic_dec(&qp->scq->ref_count);
					atomic_dec(&qp->mtt->ref_count);
					if (qp->srq)
						atomic_dec(&qp->srq->ref_count);
					state = RES_QP_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
3541
3542 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
3543 {
3544         struct mlx4_priv *priv = mlx4_priv(dev);
3545         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3546         struct list_head *srq_list =
3547                 &tracker->slave_list[slave].res_list[RES_SRQ];
3548         struct res_srq *srq;
3549         struct res_srq *tmp;
3550         int state;
3551         u64 in_param;
3552         LIST_HEAD(tlist);
3553         int srqn;
3554         int err;
3555
3556         err = move_all_busy(dev, slave, RES_SRQ);
3557         if (err)
3558                 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
3559                           "busy for slave %d\n", slave);
3560
3561         spin_lock_irq(mlx4_tlock(dev));
3562         list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
3563                 spin_unlock_irq(mlx4_tlock(dev));
3564                 if (srq->com.owner == slave) {
3565                         srqn = srq->com.res_id;
3566                         state = srq->com.from_state;
3567                         while (state != 0) {
3568                                 switch (state) {
3569                                 case RES_SRQ_ALLOCATED:
3570                                         __mlx4_srq_free_icm(dev, srqn);
3571                                         spin_lock_irq(mlx4_tlock(dev));
3572                                         rb_erase(&srq->com.node,
3573                                                  &tracker->res_tree[RES_SRQ]);
3574                                         list_del(&srq->com.list);
3575                                         spin_unlock_irq(mlx4_tlock(dev));
3576                                         kfree(srq);
3577                                         state = 0;
3578                                         break;
3579
3580                                 case RES_SRQ_HW:
3581                                         in_param = slave;
3582                                         err = mlx4_cmd(dev, in_param, srqn, 1,
3583                                                        MLX4_CMD_HW2SW_SRQ,
3584                                                        MLX4_CMD_TIME_CLASS_A,
3585                                                        MLX4_CMD_NATIVE);
3586                                         if (err)
3587                                                 mlx4_dbg(dev, "rem_slave_srqs: failed"
3588                                                          " to move slave %d srq %d to"
3589                                                          " SW ownership\n",
3590                                                          slave, srqn);
3591
3592                                         atomic_dec(&srq->mtt->ref_count);
3593                                         if (srq->cq)
3594                                                 atomic_dec(&srq->cq->ref_count);
3595                                         state = RES_SRQ_ALLOCATED;
3596                                         break;
3597
3598                                 default:
3599                                         state = 0;
3600                                 }
3601                         }
3602                 }
3603                 spin_lock_irq(mlx4_tlock(dev));
3604         }
3605         spin_unlock_irq(mlx4_tlock(dev));
3606 }
3607
3608 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
3609 {
3610         struct mlx4_priv *priv = mlx4_priv(dev);
3611         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3612         struct list_head *cq_list =
3613                 &tracker->slave_list[slave].res_list[RES_CQ];
3614         struct res_cq *cq;
3615         struct res_cq *tmp;
3616         int state;
3617         u64 in_param;
3618         LIST_HEAD(tlist);
3619         int cqn;
3620         int err;
3621
3622         err = move_all_busy(dev, slave, RES_CQ);
3623         if (err)
3624                 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
3625                           "busy for slave %d\n", slave);
3626
3627         spin_lock_irq(mlx4_tlock(dev));
3628         list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
3629                 spin_unlock_irq(mlx4_tlock(dev));
3630                 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
3631                         cqn = cq->com.res_id;
3632                         state = cq->com.from_state;
3633                         while (state != 0) {
3634                                 switch (state) {
3635                                 case RES_CQ_ALLOCATED:
3636                                         __mlx4_cq_free_icm(dev, cqn);
3637                                         spin_lock_irq(mlx4_tlock(dev));
3638                                         rb_erase(&cq->com.node,
3639                                                  &tracker->res_tree[RES_CQ]);
3640                                         list_del(&cq->com.list);
3641                                         spin_unlock_irq(mlx4_tlock(dev));
3642                                         kfree(cq);
3643                                         state = 0;
3644                                         break;
3645
3646                                 case RES_CQ_HW:
3647                                         in_param = slave;
3648                                         err = mlx4_cmd(dev, in_param, cqn, 1,
3649                                                        MLX4_CMD_HW2SW_CQ,
3650                                                        MLX4_CMD_TIME_CLASS_A,
3651                                                        MLX4_CMD_NATIVE);
3652                                         if (err)
3653                                                 mlx4_dbg(dev, "rem_slave_cqs: failed"
3654                                                          " to move slave %d cq %d to"
3655                                                          " SW ownership\n",
3656                                                          slave, cqn);
3657                                         atomic_dec(&cq->mtt->ref_count);
3658                                         state = RES_CQ_ALLOCATED;
3659                                         break;
3660
3661                                 default:
3662                                         state = 0;
3663                                 }
3664                         }
3665                 }
3666                 spin_lock_irq(mlx4_tlock(dev));
3667         }
3668         spin_unlock_irq(mlx4_tlock(dev));
3669 }
3670
3671 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
3672 {
3673         struct mlx4_priv *priv = mlx4_priv(dev);
3674         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3675         struct list_head *mpt_list =
3676                 &tracker->slave_list[slave].res_list[RES_MPT];
3677         struct res_mpt *mpt;
3678         struct res_mpt *tmp;
3679         int state;
3680         u64 in_param;
3681         LIST_HEAD(tlist);
3682         int mptn;
3683         int err;
3684
3685         err = move_all_busy(dev, slave, RES_MPT);
3686         if (err)
3687                 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
3688                           "busy for slave %d\n", slave);
3689
3690         spin_lock_irq(mlx4_tlock(dev));
3691         list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
3692                 spin_unlock_irq(mlx4_tlock(dev));
3693                 if (mpt->com.owner == slave) {
3694                         mptn = mpt->com.res_id;
3695                         state = mpt->com.from_state;
3696                         while (state != 0) {
3697                                 switch (state) {
3698                                 case RES_MPT_RESERVED:
3699                                         __mlx4_mpt_release(dev, mpt->key);
3700                                         spin_lock_irq(mlx4_tlock(dev));
3701                                         rb_erase(&mpt->com.node,
3702                                                  &tracker->res_tree[RES_MPT]);
3703                                         list_del(&mpt->com.list);
3704                                         spin_unlock_irq(mlx4_tlock(dev));
3705                                         kfree(mpt);
3706                                         state = 0;
3707                                         break;
3708
3709                                 case RES_MPT_MAPPED:
3710                                         __mlx4_mpt_free_icm(dev, mpt->key);
3711                                         state = RES_MPT_RESERVED;
3712                                         break;
3713
3714                                 case RES_MPT_HW:
3715                                         in_param = slave;
3716                                         err = mlx4_cmd(dev, in_param, mptn, 0,
3717                                                      MLX4_CMD_HW2SW_MPT,
3718                                                      MLX4_CMD_TIME_CLASS_A,
3719                                                      MLX4_CMD_NATIVE);
3720                                         if (err)
3721                                                 mlx4_dbg(dev, "rem_slave_mrs: failed"
3722                                                          " to move slave %d mpt %d to"
3723                                                          " SW ownership\n",
3724                                                          slave, mptn);
3725                                         if (mpt->mtt)
3726                                                 atomic_dec(&mpt->mtt->ref_count);
3727                                         state = RES_MPT_MAPPED;
3728                                         break;
3729                                 default:
3730                                         state = 0;
3731                                 }
3732                         }
3733                 }
3734                 spin_lock_irq(mlx4_tlock(dev));
3735         }
3736         spin_unlock_irq(mlx4_tlock(dev));
3737 }
3738
3739 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
3740 {
3741         struct mlx4_priv *priv = mlx4_priv(dev);
3742         struct mlx4_resource_tracker *tracker =
3743                 &priv->mfunc.master.res_tracker;
3744         struct list_head *mtt_list =
3745                 &tracker->slave_list[slave].res_list[RES_MTT];
3746         struct res_mtt *mtt;
3747         struct res_mtt *tmp;
3748         int state;
3749         LIST_HEAD(tlist);
3750         int base;
3751         int err;
3752
3753         err = move_all_busy(dev, slave, RES_MTT);
3754         if (err)
3755                 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
3756                           "busy for slave %d\n", slave);
3757
3758         spin_lock_irq(mlx4_tlock(dev));
3759         list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
3760                 spin_unlock_irq(mlx4_tlock(dev));
3761                 if (mtt->com.owner == slave) {
3762                         base = mtt->com.res_id;
3763                         state = mtt->com.from_state;
3764                         while (state != 0) {
3765                                 switch (state) {
3766                                 case RES_MTT_ALLOCATED:
3767                                         __mlx4_free_mtt_range(dev, base,
3768                                                               mtt->order);
3769                                         spin_lock_irq(mlx4_tlock(dev));
3770                                         rb_erase(&mtt->com.node,
3771                                                  &tracker->res_tree[RES_MTT]);
3772                                         list_del(&mtt->com.list);
3773                                         spin_unlock_irq(mlx4_tlock(dev));
3774                                         kfree(mtt);
3775                                         state = 0;
3776                                         break;
3777
3778                                 default:
3779                                         state = 0;
3780                                 }
3781                         }
3782                 }
3783                 spin_lock_irq(mlx4_tlock(dev));
3784         }
3785         spin_unlock_irq(mlx4_tlock(dev));
3786 }
3787
3788 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
3789 {
3790         struct mlx4_priv *priv = mlx4_priv(dev);
3791         struct mlx4_resource_tracker *tracker =
3792                 &priv->mfunc.master.res_tracker;
3793         struct list_head *fs_rule_list =
3794                 &tracker->slave_list[slave].res_list[RES_FS_RULE];
3795         struct res_fs_rule *fs_rule;
3796         struct res_fs_rule *tmp;
3797         int state;
3798         u64 base;
3799         int err;
3800
3801         err = move_all_busy(dev, slave, RES_FS_RULE);
3802         if (err)
3803                 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all mtts to busy for slave %d\n",
3804                           slave);
3805
3806         spin_lock_irq(mlx4_tlock(dev));
3807         list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
3808                 spin_unlock_irq(mlx4_tlock(dev));
3809                 if (fs_rule->com.owner == slave) {
3810                         base = fs_rule->com.res_id;
3811                         state = fs_rule->com.from_state;
3812                         while (state != 0) {
3813                                 switch (state) {
3814                                 case RES_FS_RULE_ALLOCATED:
3815                                         /* detach rule */
3816                                         err = mlx4_cmd(dev, base, 0, 0,
3817                                                        MLX4_QP_FLOW_STEERING_DETACH,
3818                                                        MLX4_CMD_TIME_CLASS_A,
3819                                                        MLX4_CMD_NATIVE);
3820
3821                                         spin_lock_irq(mlx4_tlock(dev));
3822                                         rb_erase(&fs_rule->com.node,
3823                                                  &tracker->res_tree[RES_FS_RULE]);
3824                                         list_del(&fs_rule->com.list);
3825                                         spin_unlock_irq(mlx4_tlock(dev));
3826                                         kfree(fs_rule);
3827                                         state = 0;
3828                                         break;
3829
3830                                 default:
3831                                         state = 0;
3832                                 }
3833                         }
3834                 }
3835                 spin_lock_irq(mlx4_tlock(dev));
3836         }
3837         spin_unlock_irq(mlx4_tlock(dev));
3838 }
3839
3840 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
3841 {
3842         struct mlx4_priv *priv = mlx4_priv(dev);
3843         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3844         struct list_head *eq_list =
3845                 &tracker->slave_list[slave].res_list[RES_EQ];
3846         struct res_eq *eq;
3847         struct res_eq *tmp;
3848         int err;
3849         int state;
3850         LIST_HEAD(tlist);
3851         int eqn;
3852         struct mlx4_cmd_mailbox *mailbox;
3853
3854         err = move_all_busy(dev, slave, RES_EQ);
3855         if (err)
3856                 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
3857                           "busy for slave %d\n", slave);
3858
3859         spin_lock_irq(mlx4_tlock(dev));
3860         list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
3861                 spin_unlock_irq(mlx4_tlock(dev));
3862                 if (eq->com.owner == slave) {
3863                         eqn = eq->com.res_id;
3864                         state = eq->com.from_state;
3865                         while (state != 0) {
3866                                 switch (state) {
3867                                 case RES_EQ_RESERVED:
3868                                         spin_lock_irq(mlx4_tlock(dev));
3869                                         rb_erase(&eq->com.node,
3870                                                  &tracker->res_tree[RES_EQ]);
3871                                         list_del(&eq->com.list);
3872                                         spin_unlock_irq(mlx4_tlock(dev));
3873                                         kfree(eq);
3874                                         state = 0;
3875                                         break;
3876
3877                                 case RES_EQ_HW:
3878                                         mailbox = mlx4_alloc_cmd_mailbox(dev);
3879                                         if (IS_ERR(mailbox)) {
3880                                                 cond_resched();
3881                                                 continue;
3882                                         }
3883                                         err = mlx4_cmd_box(dev, slave, 0,
3884                                                            eqn & 0xff, 0,
3885                                                            MLX4_CMD_HW2SW_EQ,
3886                                                            MLX4_CMD_TIME_CLASS_A,
3887                                                            MLX4_CMD_NATIVE);
3888                                         if (err)
3889                                                 mlx4_dbg(dev, "rem_slave_eqs: failed"
3890                                                          " to move slave %d eqs %d to"
3891                                                          " SW ownership\n", slave, eqn);
3892                                         mlx4_free_cmd_mailbox(dev, mailbox);
3893                                         atomic_dec(&eq->mtt->ref_count);
3894                                         state = RES_EQ_RESERVED;
3895                                         break;
3896
3897                                 default:
3898                                         state = 0;
3899                                 }
3900                         }
3901                 }
3902                 spin_lock_irq(mlx4_tlock(dev));
3903         }
3904         spin_unlock_irq(mlx4_tlock(dev));
3905 }
3906
3907 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
3908 {
3909         struct mlx4_priv *priv = mlx4_priv(dev);
3910         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3911         struct list_head *counter_list =
3912                 &tracker->slave_list[slave].res_list[RES_COUNTER];
3913         struct res_counter *counter;
3914         struct res_counter *tmp;
3915         int err;
3916         int index;
3917
3918         err = move_all_busy(dev, slave, RES_COUNTER);
3919         if (err)
3920                 mlx4_warn(dev, "rem_slave_counters: Could not move all counters to "
3921                           "busy for slave %d\n", slave);
3922
3923         spin_lock_irq(mlx4_tlock(dev));
3924         list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
3925                 if (counter->com.owner == slave) {
3926                         index = counter->com.res_id;
3927                         rb_erase(&counter->com.node,
3928                                  &tracker->res_tree[RES_COUNTER]);
3929                         list_del(&counter->com.list);
3930                         kfree(counter);
3931                         __mlx4_counter_free(dev, index);
3932                 }
3933         }
3934         spin_unlock_irq(mlx4_tlock(dev));
3935 }
3936
3937 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
3938 {
3939         struct mlx4_priv *priv = mlx4_priv(dev);
3940         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3941         struct list_head *xrcdn_list =
3942                 &tracker->slave_list[slave].res_list[RES_XRCD];
3943         struct res_xrcdn *xrcd;
3944         struct res_xrcdn *tmp;
3945         int err;
3946         int xrcdn;
3947
3948         err = move_all_busy(dev, slave, RES_XRCD);
3949         if (err)
3950                 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to "
3951                           "busy for slave %d\n", slave);
3952
3953         spin_lock_irq(mlx4_tlock(dev));
3954         list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
3955                 if (xrcd->com.owner == slave) {
3956                         xrcdn = xrcd->com.res_id;
3957                         rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
3958                         list_del(&xrcd->com.list);
3959                         kfree(xrcd);
3960                         __mlx4_xrcd_free(dev, xrcdn);
3961                 }
3962         }
3963         spin_unlock_irq(mlx4_tlock(dev));
3964 }
3965
/* Tear down every resource still registered to @slave in the tracker.
 *
 * Ordering is significant: rem_slave_qps() drops the references QPs
 * hold on their CQs/SRQs/MTTs, and rem_slave_cqs() skips CQs whose
 * ref_count is still non-zero — so QPs must be torn down before CQs,
 * and MTTs are freed only after everything that pins them.  The
 * per-slave mutex serializes this teardown against concurrent resource
 * operations for the same slave.
 */
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
	/*VLAN*/
	rem_slave_macs(dev, slave);
	rem_slave_fs_rule(dev, slave);
	rem_slave_qps(dev, slave);
	rem_slave_srqs(dev, slave);
	rem_slave_cqs(dev, slave);
	rem_slave_mrs(dev, slave);
	rem_slave_eqs(dev, slave);
	rem_slave_mtts(dev, slave);
	rem_slave_counters(dev, slave);
	rem_slave_xrcdns(dev, slave);
	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}
3984
/* Deferred work: push an immediate VLAN/QoS change down to all of a
 * VF's active QPs via UPDATE_QP.
 *
 * Runs on the PF (master) only.  For every QP the slave owns that has
 * reached RES_QP_HW with a valid sched_queue (i.e. went through
 * INIT2RTR), is not a reserved QP and is not an RSS QP, the handler
 * rewrites the primary-path VLAN control, VLAN index and the QoS bits
 * of sched_queue.  Frees @_work's container on exit.
 */
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
{
	struct mlx4_vf_immed_vlan_work *work =
		container_of(_work, struct mlx4_vf_immed_vlan_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_update_qp_context *upd_context;
	struct mlx4_dev *dev = &work->priv->dev;
	struct mlx4_resource_tracker *tracker =
		&work->priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[work->slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	/* Field mask telling UPDATE_QP which primary-path attributes to
	 * apply from the context below.
	 */
	u64 qp_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));

	int err;
	int port, errors = 0;
	u8 vlan_control;

	/* UPDATE_QP for other functions is a master-only operation. */
	if (mlx4_is_slave(dev)) {
		mlx4_warn(dev, "Trying to update-qp in slave %d\n",
			  work->slave);
		goto out;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto out;
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else if (!work->vlan_id)
		/* No VLAN assigned: only block tagged traffic. */
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else
		/* VLAN assigned: accept only the tagged traffic that the
		 * hardware retags; block untagged/prio-tagged on RX.
		 */
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;

	upd_context = mailbox->buf;
	upd_context->primary_addr_path_mask = cpu_to_be64(qp_mask);
	upd_context->qp_context.pri_path.vlan_control = vlan_control;
	upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		/* Drop the tracker lock around the (sleeping) command. */
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == work->slave) {
			if (qp->com.from_state != RES_QP_HW ||
			    !qp->sched_queue ||  /* no INIT2RTR trans yet */
			    mlx4_is_qp_reserved(dev, qp->local_qpn) ||
			    qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			/* Bit 6 of sched_queue encodes the port (1-based). */
			port = (qp->sched_queue >> 6 & 1) + 1;
			if (port != work->port) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			/* Keep bits 7:6 and 2:0, replace the QoS priority in
			 * bits 5:3 with the new value.
			 */
			upd_context->qp_context.pri_path.sched_queue =
				qp->sched_queue & 0xC7;
			upd_context->qp_context.pri_path.sched_queue |=
				((work->qos & 0x7) << 3);

			err = mlx4_cmd(dev, mailbox->dma,
				       qp->local_qpn & 0xffffff,
				       0, MLX4_CMD_UPDATE_QP,
				       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
			if (err) {
				mlx4_info(dev, "UPDATE_QP failed for slave %d, "
					  "port %d, qpn %d (%d)\n",
					  work->slave, port, qp->local_qpn,
					  err);
				errors++;
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
	mlx4_free_cmd_mailbox(dev, mailbox);

	if (errors)
		mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
			 errors, work->slave, work->port);

	/* unregister previous vlan_id if needed and we had no errors
	 * while updating the QPs
	 */
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
	    NO_INDX != work->orig_vlan_ix)
		__mlx4_unregister_vlan(&work->priv->dev, work->port,
				       work->orig_vlan_id);
out:
	kfree(work);
	return;
}