mlx4: Implement QP paravirtualization and maintain phys_pkey_cache for smp_snoop
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"

#define MLX4_MAC_VALID          (1ull << 63)

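/* MAC address registered on behalf of a slave, kept on the slave's
 * RES_MAC list so it can be unregistered when the slave goes away.
 */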
struct mac_res {
        struct list_head list;
        u64 mac;
        u8 port;
};

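/* State shared by every tracked resource: the rb-tree node keyed by
 * res_id, the owning slave, and the state-machine bookkeeping
 * (state/from_state/to_state) used by the *_res_start_move_to(),
 * res_end_move() and res_abort_move() helpers below.
 */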
struct res_common {
        struct list_head        list;
        struct rb_node          node;
        u64                     res_id;
        int                     owner;
        int                     state;
        int                     from_state;
        int                     to_state;
        int                     removing;
};

enum {
        RES_ANY_BUSY = 1
};

struct res_gid {
        struct list_head        list;
        u8                      gid[16];
        enum mlx4_protocol      prot;
        enum mlx4_steer_type    steer;
};

enum res_qp_states {
        RES_QP_BUSY = RES_ANY_BUSY,

        /* QP number was allocated */
        RES_QP_RESERVED,

        /* ICM memory for QP context was mapped */
        RES_QP_MAPPED,

        /* QP is in hw ownership */
        RES_QP_HW
};

struct res_qp {
        struct res_common       com;
        struct res_mtt         *mtt;
        struct res_cq          *rcq;
        struct res_cq          *scq;
        struct res_srq         *srq;
        struct list_head        mcg_list;
        spinlock_t              mcg_spl;
        int                     local_qpn;
};

enum res_mtt_states {
        RES_MTT_BUSY = RES_ANY_BUSY,
        RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
        switch (state) {
        case RES_MTT_BUSY: return "RES_MTT_BUSY";
        case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
        default: return "Unknown";
        }
}

struct res_mtt {
        struct res_common       com;
        int                     order;
        atomic_t                ref_count;
};

enum res_mpt_states {
        RES_MPT_BUSY = RES_ANY_BUSY,
        RES_MPT_RESERVED,
        RES_MPT_MAPPED,
        RES_MPT_HW,
};

struct res_mpt {
        struct res_common       com;
        struct res_mtt         *mtt;
        int                     key;
};

enum res_eq_states {
        RES_EQ_BUSY = RES_ANY_BUSY,
        RES_EQ_RESERVED,
        RES_EQ_HW,
};

struct res_eq {
        struct res_common       com;
        struct res_mtt         *mtt;
};

enum res_cq_states {
        RES_CQ_BUSY = RES_ANY_BUSY,
        RES_CQ_ALLOCATED,
        RES_CQ_HW,
};

struct res_cq {
        struct res_common       com;
        struct res_mtt         *mtt;
        atomic_t                ref_count;
};

enum res_srq_states {
        RES_SRQ_BUSY = RES_ANY_BUSY,
        RES_SRQ_ALLOCATED,
        RES_SRQ_HW,
};

struct res_srq {
        struct res_common       com;
        struct res_mtt         *mtt;
        struct res_cq          *cq;
        atomic_t                ref_count;
};

enum res_counter_states {
        RES_COUNTER_BUSY = RES_ANY_BUSY,
        RES_COUNTER_ALLOCATED,
};

struct res_counter {
        struct res_common       com;
        int                     port;
};

enum res_xrcdn_states {
        RES_XRCD_BUSY = RES_ANY_BUSY,
        RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
        struct res_common       com;
        int                     port;
};

enum res_fs_rule_states {
        RES_FS_RULE_BUSY = RES_ANY_BUSY,
        RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
        struct res_common       com;
};

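/* The tracker keeps one rb-tree per resource type, keyed by res_id;
 * res_tracker_lookup() and res_tracker_insert() are the standard
 * rbtree search/insert idiom over that key.
 */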
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
        struct rb_node *node = root->rb_node;

        while (node) {
                struct res_common *res = container_of(node, struct res_common,
                                                      node);

                if (res_id < res->res_id)
                        node = node->rb_left;
                else if (res_id > res->res_id)
                        node = node->rb_right;
                else
                        return res;
        }
        return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
        struct rb_node **new = &(root->rb_node), *parent = NULL;

        /* Figure out where to put new node */
        while (*new) {
                struct res_common *this = container_of(*new, struct res_common,
                                                       node);

                parent = *new;
                if (res->res_id < this->res_id)
                        new = &((*new)->rb_left);
                else if (res->res_id > this->res_id)
                        new = &((*new)->rb_right);
                else
                        return -EEXIST;
        }

        /* Add new node and rebalance tree. */
        rb_link_node(&res->node, parent, new);
        rb_insert_color(&res->node, root);

        return 0;
}

enum qp_transition {
        QP_TRANS_INIT2RTR,
        QP_TRANS_RTR2RTS,
        QP_TRANS_RTS2RTS,
        QP_TRANS_SQERR2RTS,
        QP_TRANS_SQD2SQD,
        QP_TRANS_SQD2RTS
};

/* For Debug uses */
static const char *ResourceType(enum mlx4_resource rt)
{
        switch (rt) {
        case RES_QP: return "RES_QP";
        case RES_CQ: return "RES_CQ";
        case RES_SRQ: return "RES_SRQ";
        case RES_MPT: return "RES_MPT";
        case RES_MTT: return "RES_MTT";
        case RES_MAC: return "RES_MAC";
        case RES_EQ: return "RES_EQ";
        case RES_COUNTER: return "RES_COUNTER";
        case RES_FS_RULE: return "RES_FS_RULE";
        case RES_XRCD: return "RES_XRCD";
        default: return "Unknown resource type !!!";
        }
}

int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;
        int t;

        priv->mfunc.master.res_tracker.slave_list =
                kzalloc(dev->num_slaves * sizeof(struct slave_list),
                        GFP_KERNEL);
        if (!priv->mfunc.master.res_tracker.slave_list)
                return -ENOMEM;

        for (i = 0; i < dev->num_slaves; i++) {
                for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
                        INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
                                       slave_list[i].res_list[t]);
                mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
        }

        mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
                 dev->num_slaves);
        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
                priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

        spin_lock_init(&priv->mfunc.master.res_tracker.lock);
        return 0;
}

void mlx4_free_resource_tracker(struct mlx4_dev *dev,
                                enum mlx4_res_tracker_free_type type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;

        if (priv->mfunc.master.res_tracker.slave_list) {
                if (type != RES_TR_FREE_STRUCTS_ONLY)
                        for (i = 0; i < dev->num_slaves; i++)
                                if (type == RES_TR_FREE_ALL ||
                                    dev->caps.function != i)
                                        mlx4_delete_all_resources_for_slave(dev, i);

                if (type != RES_TR_FREE_SLAVES_ONLY) {
                        kfree(priv->mfunc.master.res_tracker.slave_list);
                        priv->mfunc.master.res_tracker.slave_list = NULL;
                }
        }
}

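/*
 * Paravirtualize the pkey index in a QP context mailbox: byte 64 of the
 * QPC holds the sched_queue field (bit 6 selects the port) and byte 35
 * holds the slave's virtual pkey index, which is translated to the
 * physical index through the master's virt2phys_pkey table.
 */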
static void update_pkey_index(struct mlx4_dev *dev, int slave,
                              struct mlx4_cmd_mailbox *inbox)
{
        u8 sched = *(u8 *)(inbox->buf + 64);
        u8 orig_index = *(u8 *)(inbox->buf + 35);
        u8 new_index;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int port;

        port = (sched >> 6 & 1) + 1;

        new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
        *(u8 *)(inbox->buf + 35) = new_index;

        mlx4_dbg(dev, "port = %d, orig pkey index = %d, "
                 "new pkey index = %d\n", port, orig_index, new_index);
}

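/*
 * Paravirtualize the GID index in a QP context mailbox: UD QPs are forced
 * to the slave's proxy GID entry (0x80 | slave), while RC/UC QPs get the
 * slave index in whichever address path the optpar mask says is being
 * modified.
 */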
static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
                       u8 slave)
{
        struct mlx4_qp_context  *qp_ctx = inbox->buf + 8;
        enum mlx4_qp_optpar     optpar = be32_to_cpu(*(__be32 *) inbox->buf);
        u32                     ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;

        if (MLX4_QP_ST_UD == ts)
                qp_ctx->pri_path.mgid_index = 0x80 | slave;

        if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
                if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
                        qp_ctx->pri_path.mgid_index = slave & 0x7F;
                if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
                        qp_ctx->alt_path.mgid_index = slave & 0x7F;
        }

        mlx4_dbg(dev, "slave %d, new gid index: 0x%x\n",
                 slave, qp_ctx->pri_path.mgid_index);
}

static int mpt_mask(struct mlx4_dev *dev)
{
        return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, int res_id,
                      enum mlx4_resource type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
                                  res_id);
}

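/*
 * Look up a resource, verify it is owned by @slave and not in transition,
 * and mark it busy under the tracker lock.  Returns -ENOENT if the
 * resource does not exist, -EBUSY if it is already busy and -EPERM if it
 * belongs to another slave.  The caller must release it with put_res().
 */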
static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
                   enum mlx4_resource type,
                   void *res)
{
        struct res_common *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (!r) {
                err = -ENOENT;
                goto exit;
        }

        if (r->state == RES_ANY_BUSY) {
                err = -EBUSY;
                goto exit;
        }

        if (r->owner != slave) {
                err = -EPERM;
                goto exit;
        }

        r->from_state = r->state;
        r->state = RES_ANY_BUSY;
        mlx4_dbg(dev, "res %s id 0x%llx to busy\n",
                 ResourceType(type), r->res_id);

        if (res)
                *((struct res_common **)res) = r;

exit:
        spin_unlock_irq(mlx4_tlock(dev));
        return err;
}

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
                                    enum mlx4_resource type,
                                    u64 res_id, int *slave)
{
        struct res_common *r;
        int err = -ENOENT;
        int id = res_id;

        if (type == RES_QP)
                id &= 0x7fffff;
        spin_lock(mlx4_tlock(dev));

        r = find_res(dev, id, type);
        if (r) {
                *slave = r->owner;
                err = 0;
        }
        spin_unlock(mlx4_tlock(dev));

        return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
                    enum mlx4_resource type)
{
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (r)
                r->state = r->from_state;
        spin_unlock_irq(mlx4_tlock(dev));
}

static struct res_common *alloc_qp_tr(int id)
{
        struct res_qp *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_QP_RESERVED;
        ret->local_qpn = id;
        INIT_LIST_HEAD(&ret->mcg_list);
        spin_lock_init(&ret->mcg_spl);

        return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
        struct res_mtt *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->order = order;
        ret->com.state = RES_MTT_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
        struct res_mpt *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_MPT_RESERVED;
        ret->key = key;

        return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
        struct res_eq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_EQ_RESERVED;

        return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
        struct res_cq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_CQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
        struct res_srq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_SRQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
        struct res_counter *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_COUNTER_ALLOCATED;

        return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
        struct res_xrcdn *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_XRCD_ALLOCATED;

        return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id)
{
        struct res_fs_rule *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_FS_RULE_ALLOCATED;

        return &ret->com;
}

static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
                                   int extra)
{
        struct res_common *ret;

        switch (type) {
        case RES_QP:
                ret = alloc_qp_tr(id);
                break;
        case RES_MPT:
                ret = alloc_mpt_tr(id, extra);
                break;
        case RES_MTT:
                ret = alloc_mtt_tr(id, extra);
                break;
        case RES_EQ:
                ret = alloc_eq_tr(id);
                break;
        case RES_CQ:
                ret = alloc_cq_tr(id);
                break;
        case RES_SRQ:
                ret = alloc_srq_tr(id);
                break;
        case RES_MAC:
                printk(KERN_ERR "implementation missing\n");
                return NULL;
        case RES_COUNTER:
                ret = alloc_counter_tr(id);
                break;
        case RES_XRCD:
                ret = alloc_xrcdn_tr(id);
                break;
        case RES_FS_RULE:
                ret = alloc_fs_rule_tr(id);
                break;
        default:
                return NULL;
        }
        if (ret)
                ret->owner = slave;

        return ret;
}

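/*
 * Allocate tracker entries for ids [base, base + count) and, under the
 * tracker lock, insert each into the global rb-tree and the slave's
 * per-type list.  If any id is already tracked, every insertion made so
 * far is rolled back and the entries are freed.
 */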
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
{
        int i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct res_common **res_arr;
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct rb_root *root = &tracker->res_tree[type];

        res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
        if (!res_arr)
                return -ENOMEM;

        for (i = 0; i < count; ++i) {
                res_arr[i] = alloc_tr(base + i, type, slave, extra);
                if (!res_arr[i]) {
                        for (--i; i >= 0; --i)
                                kfree(res_arr[i]);

                        kfree(res_arr);
                        return -ENOMEM;
                }
        }

        spin_lock_irq(mlx4_tlock(dev));
        for (i = 0; i < count; ++i) {
                if (find_res(dev, base + i, type)) {
                        err = -EEXIST;
                        goto undo;
                }
                err = res_tracker_insert(root, res_arr[i]);
                if (err)
                        goto undo;
                list_add_tail(&res_arr[i]->list,
                              &tracker->slave_list[slave].res_list[type]);
        }
        spin_unlock_irq(mlx4_tlock(dev));
        kfree(res_arr);

        return 0;

undo:
        /* i indexes res_arr, so walk back from the last entry that was
         * actually inserted; the entry that failed was never added.
         */
        for (--i; i >= 0; --i) {
                rb_erase(&res_arr[i]->node, root);
                list_del(&res_arr[i]->list);
        }

        spin_unlock_irq(mlx4_tlock(dev));

        for (i = 0; i < count; ++i)
                kfree(res_arr[i]);

        kfree(res_arr);

        return err;
}

static int remove_qp_ok(struct res_qp *res)
{
        if (res->com.state == RES_QP_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_QP_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
        if (res->com.state == RES_MTT_BUSY ||
            atomic_read(&res->ref_count)) {
                printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
                       __func__, __LINE__,
                       mtt_states_str(res->com.state),
                       atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_MTT_ALLOCATED)
                return -EPERM;
        else if (res->order != order)
                return -EINVAL;

        return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
        if (res->com.state == RES_MPT_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_MPT_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
        if (res->com.state == RES_EQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_EQ_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
        if (res->com.state == RES_COUNTER_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_COUNTER_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
        if (res->com.state == RES_XRCD_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_XRCD_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
        if (res->com.state == RES_FS_RULE_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_FS_RULE_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
        if (res->com.state == RES_CQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_CQ_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
        if (res->com.state == RES_SRQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_SRQ_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
        switch (type) {
        case RES_QP:
                return remove_qp_ok((struct res_qp *)res);
        case RES_CQ:
                return remove_cq_ok((struct res_cq *)res);
        case RES_SRQ:
                return remove_srq_ok((struct res_srq *)res);
        case RES_MPT:
                return remove_mpt_ok((struct res_mpt *)res);
        case RES_MTT:
                return remove_mtt_ok((struct res_mtt *)res, extra);
        case RES_MAC:
                return -ENOSYS;
        case RES_EQ:
                return remove_eq_ok((struct res_eq *)res);
        case RES_COUNTER:
                return remove_counter_ok((struct res_counter *)res);
        case RES_XRCD:
                return remove_xrcdn_ok((struct res_xrcdn *)res);
        case RES_FS_RULE:
                return remove_fs_rule_ok((struct res_fs_rule *)res);
        default:
                return -EINVAL;
        }
}

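/*
 * Remove ids [base, base + count) from the tracker in two passes: first
 * verify that every id exists, is owned by @slave and is in a removable
 * state, then erase and free them, so the operation is all-or-nothing.
 */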
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
{
        u64 i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        for (i = base; i < base + count; ++i) {
                r = res_tracker_lookup(&tracker->res_tree[type], i);
                if (!r) {
                        err = -ENOENT;
                        goto out;
                }
                if (r->owner != slave) {
                        err = -EPERM;
                        goto out;
                }
                err = remove_ok(r, type, extra);
                if (err)
                        goto out;
        }

        for (i = base; i < base + count; ++i) {
                r = res_tracker_lookup(&tracker->res_tree[type], i);
                rb_erase(&r->node, &tracker->res_tree[type]);
                list_del(&r->list);
                kfree(r);
        }
        err = 0;

out:
        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

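/*
 * Begin a guarded QP state transition: validate the requested move
 * against the current tracker state, remember from/to, and park the QP
 * in RES_QP_BUSY.  The caller commits with res_end_move() or rolls back
 * with res_abort_move().  @alloc distinguishes the allocation direction
 * for the RESERVED <-> MAPPED moves.
 */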
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
                                enum res_qp_states state, struct res_qp **qp,
                                int alloc)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_qp *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_QP_BUSY:
                        mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
                                 __func__, r->com.res_id);
                        err = -EBUSY;
                        break;

                case RES_QP_RESERVED:
                        if (r->com.state == RES_QP_MAPPED && !alloc)
                                break;

                        mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
                        err = -EINVAL;
                        break;

                case RES_QP_MAPPED:
                        if ((r->com.state == RES_QP_RESERVED && alloc) ||
                            r->com.state == RES_QP_HW)
                                break;
                        else {
                                mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
                                         r->com.res_id);
                                err = -EINVAL;
                        }

                        break;

                case RES_QP_HW:
                        if (r->com.state != RES_QP_MAPPED)
                                err = -EINVAL;
                        break;
                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_QP_BUSY;
                        if (qp)
                                *qp = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                enum res_mpt_states state, struct res_mpt **mpt)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_mpt *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_MPT_BUSY:
                        err = -EINVAL;
                        break;

                case RES_MPT_RESERVED:
                        if (r->com.state != RES_MPT_MAPPED)
                                err = -EINVAL;
                        break;

                case RES_MPT_MAPPED:
                        if (r->com.state != RES_MPT_RESERVED &&
                            r->com.state != RES_MPT_HW)
                                err = -EINVAL;
                        break;

                case RES_MPT_HW:
                        if (r->com.state != RES_MPT_MAPPED)
                                err = -EINVAL;
                        break;
                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_MPT_BUSY;
                        if (mpt)
                                *mpt = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                enum res_eq_states state, struct res_eq **eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_eq *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_EQ_BUSY:
                        err = -EINVAL;
                        break;

                case RES_EQ_RESERVED:
                        if (r->com.state != RES_EQ_HW)
                                err = -EINVAL;
                        break;

                case RES_EQ_HW:
                        if (r->com.state != RES_EQ_RESERVED)
                                err = -EINVAL;
                        break;

                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_EQ_BUSY;
                        if (eq)
                                *eq = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
                                enum res_cq_states state, struct res_cq **cq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_cq *r;
        int err;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_CQ_BUSY:
                        err = -EBUSY;
                        break;

                case RES_CQ_ALLOCATED:
                        if (r->com.state != RES_CQ_HW)
                                err = -EINVAL;
                        else if (atomic_read(&r->ref_count))
                                err = -EBUSY;
                        else
                                err = 0;
                        break;

                case RES_CQ_HW:
                        if (r->com.state != RES_CQ_ALLOCATED)
                                err = -EINVAL;
                        else
                                err = 0;
                        break;

                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_CQ_BUSY;
                        if (cq)
                                *cq = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                 enum res_srq_states state, struct res_srq **srq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_srq *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_SRQ_BUSY:
                        err = -EINVAL;
                        break;

                case RES_SRQ_ALLOCATED:
                        if (r->com.state != RES_SRQ_HW)
                                err = -EINVAL;
                        else if (atomic_read(&r->ref_count))
                                err = -EBUSY;
                        break;

                case RES_SRQ_HW:
                        if (r->com.state != RES_SRQ_ALLOCATED)
                                err = -EINVAL;
                        break;

                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_SRQ_BUSY;
                        if (srq)
                                *srq = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

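/*
 * Cancel or commit a transition started by one of the *_res_start_move_to()
 * helpers: res_abort_move() restores the saved from_state, res_end_move()
 * installs the requested to_state.
 */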
static void res_abort_move(struct mlx4_dev *dev, int slave,
                           enum mlx4_resource type, int id)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[type], id);
        if (r && (r->owner == slave))
                r->state = r->from_state;
        spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
                         enum mlx4_resource type, int id)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[type], id);
        if (r && (r->owner == slave))
                r->state = r->to_state;
        spin_unlock_irq(mlx4_tlock(dev));
}

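/*
 * valid_reserved(): true if @qpn is a reserved QP number that @slave may
 * legitimately touch (the master, or a slave operating on its own proxy
 * QPs).  fw_reserved(): true if @qpn lies in the firmware-owned range,
 * whose ICM is never allocated or freed by the driver.
 */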
static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
        return mlx4_is_qp_reserved(dev, qpn) &&
                (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}

static int fw_reserved(struct mlx4_dev *dev, int qpn)
{
        return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
}

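/*
 * Slave QP allocation: RES_OP_RESERVE reserves a QPN range and adds it to
 * the tracker; RES_OP_MAP_ICM moves a QPN from RESERVED to MAPPED and
 * backs it with ICM memory (unless the QPN is firmware-reserved).
 */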
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
{
        int err;
        int count;
        int align;
        int base;
        int qpn;

        switch (op) {
        case RES_OP_RESERVE:
                count = get_param_l(&in_param);
                align = get_param_h(&in_param);
                err = __mlx4_qp_reserve_range(dev, count, align, &base);
                if (err)
                        return err;

                err = add_res_range(dev, slave, base, count, RES_QP, 0);
                if (err) {
                        __mlx4_qp_release_range(dev, base, count);
                        return err;
                }
                set_param_l(out_param, base);
                break;
        case RES_OP_MAP_ICM:
                qpn = get_param_l(&in_param) & 0x7fffff;
                if (valid_reserved(dev, slave, qpn)) {
                        err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
                        if (err)
                                return err;
                }

                err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
                                           NULL, 1);
                if (err)
                        return err;

                if (!fw_reserved(dev, qpn)) {
                        err = __mlx4_qp_alloc_icm(dev, qpn);
                        if (err) {
                                res_abort_move(dev, slave, RES_QP, qpn);
                                return err;
                        }
                }

                res_end_move(dev, slave, RES_QP, qpn);
                break;

        default:
                err = -EINVAL;
                break;
        }
        return err;
}

static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
{
        int err = -EINVAL;
        int base;
        int order;

        if (op != RES_OP_RESERVE_AND_MAP)
                return err;

        order = get_param_l(&in_param);
        base = __mlx4_alloc_mtt_range(dev, order);
        if (base == -1)
                return -ENOMEM;

        err = add_res_range(dev, slave, base, 1, RES_MTT, order);
        if (err)
                __mlx4_free_mtt_range(dev, base, order);
        else
                set_param_l(out_param, base);

        return err;
}

static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
{
        int err = -EINVAL;
        int index;
        int id;
        struct res_mpt *mpt;

        switch (op) {
        case RES_OP_RESERVE:
                index = __mlx4_mr_reserve(dev);
                if (index == -1)
                        break;
                id = index & mpt_mask(dev);

                err = add_res_range(dev, slave, id, 1, RES_MPT, index);
                if (err) {
                        __mlx4_mr_release(dev, index);
                        break;
                }
                set_param_l(out_param, index);
                break;
        case RES_OP_MAP_ICM:
                index = get_param_l(&in_param);
                id = index & mpt_mask(dev);
                err = mr_res_start_move_to(dev, slave, id,
                                           RES_MPT_MAPPED, &mpt);
                if (err)
                        return err;

                err = __mlx4_mr_alloc_icm(dev, mpt->key);
                if (err) {
                        res_abort_move(dev, slave, RES_MPT, id);
                        return err;
                }

                res_end_move(dev, slave, RES_MPT, id);
                break;
        }
        return err;
}

static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
{
        int cqn;
        int err;

        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                err = __mlx4_cq_alloc_icm(dev, &cqn);
                if (err)
                        break;

                err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
                if (err) {
                        __mlx4_cq_free_icm(dev, cqn);
                        break;
                }

                set_param_l(out_param, cqn);
                break;

        default:
                err = -EINVAL;
        }

        return err;
}

static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
{
        int srqn;
        int err;

        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                err = __mlx4_srq_alloc_icm(dev, &srqn);
                if (err)
                        break;

                err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
                if (err) {
                        __mlx4_srq_free_icm(dev, srqn);
                        break;
                }

                set_param_l(out_param, srqn);
                break;

        default:
                err = -EINVAL;
        }

        return err;
}

static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct mac_res *res;

        res = kzalloc(sizeof *res, GFP_KERNEL);
        if (!res)
                return -ENOMEM;
        res->mac = mac;
        res->port = (u8) port;
        list_add_tail(&res->list,
                      &tracker->slave_list[slave].res_list[RES_MAC]);
        return 0;
}

static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
                               int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *mac_list =
                &tracker->slave_list[slave].res_list[RES_MAC];
        struct mac_res *res, *tmp;

        list_for_each_entry_safe(res, tmp, mac_list, list) {
                if (res->mac == mac && res->port == (u8) port) {
                        list_del(&res->list);
                        kfree(res);
                        break;
                }
        }
}

static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *mac_list =
                &tracker->slave_list[slave].res_list[RES_MAC];
        struct mac_res *res, *tmp;

        list_for_each_entry_safe(res, tmp, mac_list, list) {
                list_del(&res->list);
                __mlx4_unregister_mac(dev, res->port, res->mac);
                kfree(res);
        }
}

static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
{
        int err = -EINVAL;
        int port;
        u64 mac;

        if (op != RES_OP_RESERVE_AND_MAP)
                return err;

        port = get_param_l(out_param);
        mac = in_param;

        err = __mlx4_register_mac(dev, port, mac);
        if (err >= 0) {
                set_param_l(out_param, err);
                err = 0;
        }

        if (!err) {
                err = mac_add_to_slave(dev, slave, mac, port);
                if (err)
                        __mlx4_unregister_mac(dev, port, mac);
        }
        return err;
}

static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                          u64 in_param, u64 *out_param)
{
        return 0;
}

static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                             u64 in_param, u64 *out_param)
{
        u32 index;
        int err;

        if (op != RES_OP_RESERVE)
                return -EINVAL;

        err = __mlx4_counter_alloc(dev, &index);
        if (err)
                return err;

        err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
        if (err)
                __mlx4_counter_free(dev, index);
        else
                set_param_l(out_param, index);

        return err;
}

static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                           u64 in_param, u64 *out_param)
{
        u32 xrcdn;
        int err;

        if (op != RES_OP_RESERVE)
                return -EINVAL;

        err = __mlx4_xrcd_alloc(dev, &xrcdn);
        if (err)
                return err;

        err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
        if (err)
                __mlx4_xrcd_free(dev, xrcdn);
        else
                set_param_l(out_param, xrcdn);

        return err;
}

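/*
 * Dispatcher for the ALLOC_RES command issued by slaves: the resource
 * type is carried in vhcr->in_modifier and the RES_OP_* phase in
 * vhcr->op_modifier.
 */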
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
{
        int err;
        int alop = vhcr->op_modifier;

        switch (vhcr->in_modifier) {
        case RES_QP:
                err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                   vhcr->in_param, &vhcr->out_param);
                break;

        case RES_MTT:
                err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param);
                break;

        case RES_MPT:
                err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param);
                break;

        case RES_CQ:
                err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                   vhcr->in_param, &vhcr->out_param);
                break;

        case RES_SRQ:
                err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param);
                break;

        case RES_MAC:
                err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param);
                break;

        case RES_VLAN:
                err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                     vhcr->in_param, &vhcr->out_param);
                break;

        case RES_COUNTER:
                err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                        vhcr->in_param, &vhcr->out_param);
                break;

        case RES_XRCD:
                err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                      vhcr->in_param, &vhcr->out_param);
                break;

        default:
                err = -EINVAL;
                break;
        }

        return err;
}

static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                       u64 in_param)
{
        int err;
        int count;
        int base;
        int qpn;

        switch (op) {
        case RES_OP_RESERVE:
                base = get_param_l(&in_param) & 0x7fffff;
                count = get_param_h(&in_param);
                err = rem_res_range(dev, slave, base, count, RES_QP, 0);
                if (err)
                        break;
                __mlx4_qp_release_range(dev, base, count);
                break;
        case RES_OP_MAP_ICM:
                qpn = get_param_l(&in_param) & 0x7fffff;
                err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
                                           NULL, 0);
                if (err)
                        return err;

                if (!fw_reserved(dev, qpn))
                        __mlx4_qp_free_icm(dev, qpn);

                res_end_move(dev, slave, RES_QP, qpn);

                if (valid_reserved(dev, slave, qpn))
                        err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
                break;
        default:
                err = -EINVAL;
                break;
        }
        return err;
}

static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
{
        int err = -EINVAL;
        int base;
        int order;

        if (op != RES_OP_RESERVE_AND_MAP)
                return err;

        base = get_param_l(&in_param);
        order = get_param_h(&in_param);
        err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
        if (!err)
                __mlx4_free_mtt_range(dev, base, order);
        return err;
}

static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param)
{
        int err = -EINVAL;
        int index;
        int id;
        struct res_mpt *mpt;

        switch (op) {
        case RES_OP_RESERVE:
                index = get_param_l(&in_param);
                id = index & mpt_mask(dev);
                err = get_res(dev, slave, id, RES_MPT, &mpt);
                if (err)
                        break;
                index = mpt->key;
                put_res(dev, slave, id, RES_MPT);

                err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
                if (err)
                        break;
                __mlx4_mr_release(dev, index);
                break;
        case RES_OP_MAP_ICM:
                index = get_param_l(&in_param);
                id = index & mpt_mask(dev);
                err = mr_res_start_move_to(dev, slave, id,
                                           RES_MPT_RESERVED, &mpt);
                if (err)
                        return err;

                __mlx4_mr_free_icm(dev, mpt->key);
                res_end_move(dev, slave, RES_MPT, id);
                return err;
        default:
                err = -EINVAL;
                break;
        }
        return err;
}

static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                       u64 in_param, u64 *out_param)
{
        int cqn;
        int err;

        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                cqn = get_param_l(&in_param);
                err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
                if (err)
                        break;

                __mlx4_cq_free_icm(dev, cqn);
                break;

        default:
                err = -EINVAL;
                break;
        }

        return err;
}

static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
{
        int srqn;
        int err;

        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                srqn = get_param_l(&in_param);
                err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
                if (err)
                        break;

                __mlx4_srq_free_icm(dev, srqn);
                break;

        default:
                err = -EINVAL;
                break;
        }

        return err;
}

static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
{
        int port;
        int err = 0;

        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                port = get_param_l(out_param);
                mac_del_from_slave(dev, slave, in_param, port);
                __mlx4_unregister_mac(dev, port, in_param);
                break;
        default:
                err = -EINVAL;
                break;
        }

        return err;
}

static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
{
        return 0;
}

static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                            u64 in_param, u64 *out_param)
{
        int index;
        int err;

        if (op != RES_OP_RESERVE)
                return -EINVAL;

        index = get_param_l(&in_param);
        err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
        if (err)
                return err;

        __mlx4_counter_free(dev, index);

        return err;
}

static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                          u64 in_param, u64 *out_param)
{
        int xrcdn;
        int err;

        if (op != RES_OP_RESERVE)
                return -EINVAL;

        xrcdn = get_param_l(&in_param);
        err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
        if (err)
                return err;

        __mlx4_xrcd_free(dev, xrcdn);

        return err;
}

1731 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
1732                           struct mlx4_vhcr *vhcr,
1733                           struct mlx4_cmd_mailbox *inbox,
1734                           struct mlx4_cmd_mailbox *outbox,
1735                           struct mlx4_cmd_info *cmd)
1736 {
1737         int err = -EINVAL;
1738         int alop = vhcr->op_modifier;
1739
1740         switch (vhcr->in_modifier) {
1741         case RES_QP:
1742                 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
1743                                   vhcr->in_param);
1744                 break;
1745
1746         case RES_MTT:
1747                 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
1748                                    vhcr->in_param, &vhcr->out_param);
1749                 break;
1750
1751         case RES_MPT:
1752                 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
1753                                    vhcr->in_param);
1754                 break;
1755
1756         case RES_CQ:
1757                 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
1758                                   vhcr->in_param, &vhcr->out_param);
1759                 break;
1760
1761         case RES_SRQ:
1762                 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
1763                                    vhcr->in_param, &vhcr->out_param);
1764                 break;
1765
1766         case RES_MAC:
1767                 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
1768                                    vhcr->in_param, &vhcr->out_param);
1769                 break;
1770
1771         case RES_VLAN:
1772                 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
1773                                    vhcr->in_param, &vhcr->out_param);
1774                 break;
1775
1776         case RES_COUNTER:
1777                 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
1778                                        vhcr->in_param, &vhcr->out_param);
1779                 break;
1780
1781         case RES_XRCD:
1782                 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
1783                                      vhcr->in_param, &vhcr->out_param);
1784                 break;

1785         default:
1786                 break;
1787         }
1788         return err;
1789 }
1790
1791 /* ugly but other choices are uglier */
1792 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
1793 {
1794         return (be32_to_cpu(mpt->flags) >> 9) & 1;
1795 }
1796
1797 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
1798 {
1799         return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
1800 }
1801
1802 static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
1803 {
1804         return be32_to_cpu(mpt->mtt_sz);
1805 }
1806
1807 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
1808 {
1809         return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
1810 }
1811
1812 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
1813 {
1814         return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
1815 }
1816
1817 static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
1818 {
1819         int page_shift = (qpc->log_page_size & 0x3f) + 12;
1820         int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
1821         int log_sq_stride = qpc->sq_size_stride & 7;
1822         int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
1823         int log_rq_stride = qpc->rq_size_stride & 7;
1824         int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
1825         int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
1826         int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
1827         int sq_size;
1828         int rq_size;
1829         int total_pages;
1830         int total_mem;
1831         int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
1832
1833         sq_size = 1 << (log_sq_size + log_sq_stride + 4);
1834         rq_size = (srq | rss | xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
1835         total_mem = sq_size + rq_size;
1836         total_pages =
1837                 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
1838                                    page_shift);
1839
1840         return total_pages;
1841 }
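
/* Note: a worked example of the sizing math above, with illustrative values
 * not taken from any particular caller: log_sq_size = 10 and
 * log_sq_stride = 2 give sq_size = 1 << (10 + 2 + 4) = 64KB; an identical RQ
 * doubles that to 128KB.  With 4KB pages (page_shift = 12) and a page_offset
 * of 0, total_pages = roundup_pow_of_two(131072 >> 12) = 32 MTT entries.
 * For SRQ-, RSS- or XRC-attached QPs the RQ term drops out, since its buffer
 * is not covered by this QP's MTT range.
 */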
1842
1843 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
1844                            int size, struct res_mtt *mtt)
1845 {
1846         int res_start = mtt->com.res_id;
1847         int res_size = (1 << mtt->order);
1848
1849         if (start < res_start || start + size > res_start + res_size)
1850                 return -EPERM;
1851         return 0;
1852 }
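
/* Note: a containment example with illustrative numbers: a reservation with
 * com.res_id = 0x100 and order = 4 spans MTT entries 0x100..0x10f, so a
 * request for start = 0x108, size = 8 fits exactly, while start = 0x108,
 * size = 16 runs past the end of the reservation and is refused with -EPERM.
 */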
1853
1854 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
1855                            struct mlx4_vhcr *vhcr,
1856                            struct mlx4_cmd_mailbox *inbox,
1857                            struct mlx4_cmd_mailbox *outbox,
1858                            struct mlx4_cmd_info *cmd)
1859 {
1860         int err;
1861         int index = vhcr->in_modifier;
1862         struct res_mtt *mtt;
1863         struct res_mpt *mpt;
1864         int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
1865         int phys;
1866         int id;
1867
1868         id = index & mpt_mask(dev);
1869         err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
1870         if (err)
1871                 return err;
1872
1873         phys = mr_phys_mpt(inbox->buf);
1874         if (!phys) {
1875                 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
1876                 if (err)
1877                         goto ex_abort;
1878
1879                 err = check_mtt_range(dev, slave, mtt_base,
1880                                       mr_get_mtt_size(inbox->buf), mtt);
1881                 if (err)
1882                         goto ex_put;
1883
1884                 mpt->mtt = mtt;
1885         }
1886
1887         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1888         if (err)
1889                 goto ex_put;
1890
1891         if (!phys) {
1892                 atomic_inc(&mtt->ref_count);
1893                 put_res(dev, slave, mtt->com.res_id, RES_MTT);
1894         }
1895
1896         res_end_move(dev, slave, RES_MPT, id);
1897         return 0;
1898
1899 ex_put:
1900         if (!phys)
1901                 put_res(dev, slave, mtt->com.res_id, RES_MTT);
1902 ex_abort:
1903         res_abort_move(dev, slave, RES_MPT, id);
1904
1905         return err;
1906 }
1907
1908 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
1909                            struct mlx4_vhcr *vhcr,
1910                            struct mlx4_cmd_mailbox *inbox,
1911                            struct mlx4_cmd_mailbox *outbox,
1912                            struct mlx4_cmd_info *cmd)
1913 {
1914         int err;
1915         int index = vhcr->in_modifier;
1916         struct res_mpt *mpt;
1917         int id;
1918
1919         id = index & mpt_mask(dev);
1920         err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
1921         if (err)
1922                 return err;
1923
1924         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1925         if (err)
1926                 goto ex_abort;
1927
1928         if (mpt->mtt)
1929                 atomic_dec(&mpt->mtt->ref_count);
1930
1931         res_end_move(dev, slave, RES_MPT, id);
1932         return 0;
1933
1934 ex_abort:
1935         res_abort_move(dev, slave, RES_MPT, id);
1936
1937         return err;
1938 }
1939
1940 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
1941                            struct mlx4_vhcr *vhcr,
1942                            struct mlx4_cmd_mailbox *inbox,
1943                            struct mlx4_cmd_mailbox *outbox,
1944                            struct mlx4_cmd_info *cmd)
1945 {
1946         int err;
1947         int index = vhcr->in_modifier;
1948         struct res_mpt *mpt;
1949         int id;
1950
1951         id = index & mpt_mask(dev);
1952         err = get_res(dev, slave, id, RES_MPT, &mpt);
1953         if (err)
1954                 return err;
1955
1956         if (mpt->com.from_state != RES_MPT_HW) {
1957                 err = -EBUSY;
1958                 goto out;
1959         }
1960
1961         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1962
1963 out:
1964         put_res(dev, slave, id, RES_MPT);
1965         return err;
1966 }
1967
1968 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
1969 {
1970         return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
1971 }
1972
1973 static int qp_get_scqn(struct mlx4_qp_context *qpc)
1974 {
1975         return be32_to_cpu(qpc->cqn_send) & 0xffffff;
1976 }
1977
1978 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
1979 {
1980         return be32_to_cpu(qpc->srqn) & 0x1ffffff;
1981 }
1982
1983 static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
1984                                   struct mlx4_qp_context *context)
1985 {
1986         u32 qpn = vhcr->in_modifier & 0xffffff;
1987         u32 qkey = 0;
1988
1989         if (mlx4_get_parav_qkey(dev, qpn, &qkey))
1990                 return;
1991
1992         /* adjust qkey in qp context */
1993         context->qkey = cpu_to_be32(qkey);
1994 }
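
/* Note: this is the qkey leg of QP paravirtualization.  For QP numbers that
 * fall in the proxy/tunnel special-QP range, mlx4_get_parav_qkey() reports
 * the master-chosen qkey and the context headed for hw is patched in place,
 * so a slave never sees (or needs to supply) the real value; for any other
 * QPN the lookup fails and the context is left untouched.
 */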
1995
1996 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
1997                              struct mlx4_vhcr *vhcr,
1998                              struct mlx4_cmd_mailbox *inbox,
1999                              struct mlx4_cmd_mailbox *outbox,
2000                              struct mlx4_cmd_info *cmd)
2001 {
2002         int err;
2003         int qpn = vhcr->in_modifier & 0x7fffff;
2004         struct res_mtt *mtt;
2005         struct res_qp *qp;
2006         struct mlx4_qp_context *qpc = inbox->buf + 8;
2007         int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2008         int mtt_size = qp_get_mtt_size(qpc);
2009         struct res_cq *rcq;
2010         struct res_cq *scq;
2011         int rcqn = qp_get_rcqn(qpc);
2012         int scqn = qp_get_scqn(qpc);
2013         u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2014         int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2015         struct res_srq *srq;
2016         int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2017
2018         err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2019         if (err)
2020                 return err;
2021         qp->local_qpn = local_qpn;
2022
2023         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2024         if (err)
2025                 goto ex_abort;
2026
2027         err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2028         if (err)
2029                 goto ex_put_mtt;
2030
2031         err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2032         if (err)
2033                 goto ex_put_mtt;
2034
2035         if (scqn != rcqn) {
2036                 err = get_res(dev, slave, scqn, RES_CQ, &scq);
2037                 if (err)
2038                         goto ex_put_rcq;
2039         } else
2040                 scq = rcq;
2041
2042         if (use_srq) {
2043                 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2044                 if (err)
2045                         goto ex_put_scq;
2046         }
2047
2048         adjust_proxy_tun_qkey(dev, vhcr, qpc);
2049         update_pkey_index(dev, slave, inbox);
2050         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2051         if (err)
2052                 goto ex_put_srq;
2053         atomic_inc(&mtt->ref_count);
2054         qp->mtt = mtt;
2055         atomic_inc(&rcq->ref_count);
2056         qp->rcq = rcq;
2057         atomic_inc(&scq->ref_count);
2058         qp->scq = scq;
2059
2060         if (scqn != rcqn)
2061                 put_res(dev, slave, scqn, RES_CQ);
2062
2063         if (use_srq) {
2064                 atomic_inc(&srq->ref_count);
2065                 put_res(dev, slave, srqn, RES_SRQ);
2066                 qp->srq = srq;
2067         }
2068         put_res(dev, slave, rcqn, RES_CQ);
2069         put_res(dev, slave, mtt_base, RES_MTT);
2070         res_end_move(dev, slave, RES_QP, qpn);
2071
2072         return 0;
2073
2074 ex_put_srq:
2075         if (use_srq)
2076                 put_res(dev, slave, srqn, RES_SRQ);
2077 ex_put_scq:
2078         if (scqn != rcqn)
2079                 put_res(dev, slave, scqn, RES_CQ);
2080 ex_put_rcq:
2081         put_res(dev, slave, rcqn, RES_CQ);
2082 ex_put_mtt:
2083         put_res(dev, slave, mtt_base, RES_MTT);
2084 ex_abort:
2085         res_abort_move(dev, slave, RES_QP, qpn);
2086
2087         return err;
2088 }
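
/* Note on the reference counting above: RST2INIT pins the MTT range, both
 * CQs and (optionally) the SRQ while the QP is in hw ownership -- when send
 * and receive CQ coincide, the single CQ is simply counted twice.  The
 * matching atomic_dec()s happen on the 2RST transition and in the
 * rem_slave_* teardown paths.
 */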
2089
2090 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2091 {
2092         return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2093 }
2094
2095 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2096 {
2097         int log_eq_size = eqc->log_eq_size & 0x1f;
2098         int page_shift = (eqc->log_page_size & 0x3f) + 12;
2099
2100         if (log_eq_size + 5 < page_shift)
2101                 return 1;
2102
2103         return 1 << (log_eq_size + 5 - page_shift);
2104 }
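
/* Note: EQ entries are 32 bytes, hence the "+ 5" above.  As an illustrative
 * example, log_eq_size = 10 means a 32KB queue, which with 4KB pages
 * (page_shift = 12) needs 1 << (10 + 5 - 12) = 8 MTT entries.  The same
 * "log entries + log entry size - page shift" shape recurs in
 * cq_get_mtt_size() and srq_get_mtt_size() below.
 */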
2105
2106 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
2107 {
2108         return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2109 }
2110
2111 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2112 {
2113         int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
2114         int page_shift = (cqc->log_page_size & 0x3f) + 12;
2115
2116         if (log_cq_size + 5 < page_shift)
2117                 return 1;
2118
2119         return 1 << (log_cq_size + 5 - page_shift);
2120 }
2121
2122 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2123                           struct mlx4_vhcr *vhcr,
2124                           struct mlx4_cmd_mailbox *inbox,
2125                           struct mlx4_cmd_mailbox *outbox,
2126                           struct mlx4_cmd_info *cmd)
2127 {
2128         int err;
2129         int eqn = vhcr->in_modifier;
2130         int res_id = (slave << 8) | eqn;
2131         struct mlx4_eq_context *eqc = inbox->buf;
2132         int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
2133         int mtt_size = eq_get_mtt_size(eqc);
2134         struct res_eq *eq;
2135         struct res_mtt *mtt;
2136
2137         err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2138         if (err)
2139                 return err;
2140         err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
2141         if (err)
2142                 goto out_add;
2143
2144         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2145         if (err)
2146                 goto out_move;
2147
2148         err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2149         if (err)
2150                 goto out_put;
2151
2152         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2153         if (err)
2154                 goto out_put;
2155
2156         atomic_inc(&mtt->ref_count);
2157         eq->mtt = mtt;
2158         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2159         res_end_move(dev, slave, RES_EQ, res_id);
2160         return 0;
2161
2162 out_put:
2163         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2164 out_move:
2165         res_abort_move(dev, slave, RES_EQ, res_id);
2166 out_add:
2167         rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2168         return err;
2169 }
2170
2171 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2172                               int len, struct res_mtt **res)
2173 {
2174         struct mlx4_priv *priv = mlx4_priv(dev);
2175         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2176         struct res_mtt *mtt;
2177         int err = -EINVAL;
2178
2179         spin_lock_irq(mlx4_tlock(dev));
2180         list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
2181                             com.list) {
2182                 if (!check_mtt_range(dev, slave, start, len, mtt)) {
2183                         *res = mtt;
2184                         mtt->com.from_state = mtt->com.state;
2185                         mtt->com.state = RES_MTT_BUSY;
2186                         err = 0;
2187                         break;
2188                 }
2189         }
2190         spin_unlock_irq(mlx4_tlock(dev));
2191
2192         return err;
2193 }
2194
2195 static int verify_qp_parameters(struct mlx4_dev *dev,
2196                                 struct mlx4_cmd_mailbox *inbox,
2197                                 enum qp_transition transition, u8 slave)
2198 {
2199         u32                     qp_type;
2200         struct mlx4_qp_context  *qp_ctx;
2201         enum mlx4_qp_optpar     optpar;
2202
2203         qp_ctx  = inbox->buf + 8;
2204         qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2205         optpar  = be32_to_cpu(*(__be32 *) inbox->buf);
2206
2207         switch (qp_type) {
2208         case MLX4_QP_ST_RC:
2209         case MLX4_QP_ST_UC:
2210                 switch (transition) {
2211                 case QP_TRANS_INIT2RTR:
2212                 case QP_TRANS_RTR2RTS:
2213                 case QP_TRANS_RTS2RTS:
2214                 case QP_TRANS_SQD2SQD:
2215                 case QP_TRANS_SQD2RTS:
2216                         if (slave != mlx4_master_func_num(dev)) {
2217                                 /* slaves have only gid index 0 */
2218                                 if (((optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) &&
2219                                      qp_ctx->pri_path.mgid_index) ||
2220                                     ((optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) &&
2221                                      qp_ctx->alt_path.mgid_index))
2222                                         return -EINVAL;
2223                         }
2224                         break;
2225                 default:
2226                         break;
2227                 }
2228
2229                 break;
2230         default:
2231                 break;
2232         }
2233
2234         return 0;
2235 }
2236
2237 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
2238                            struct mlx4_vhcr *vhcr,
2239                            struct mlx4_cmd_mailbox *inbox,
2240                            struct mlx4_cmd_mailbox *outbox,
2241                            struct mlx4_cmd_info *cmd)
2242 {
2243         struct mlx4_mtt mtt;
2244         __be64 *page_list = inbox->buf;
2245         u64 *pg_list = (u64 *)page_list;
2246         int i;
2247         struct res_mtt *rmtt = NULL;
2248         int start = be64_to_cpu(page_list[0]);
2249         int npages = vhcr->in_modifier;
2250         int err;
2251
2252         err = get_containing_mtt(dev, slave, start, npages, &rmtt);
2253         if (err)
2254                 return err;
2255
2256         /* Call the SW implementation of write_mtt:
2257          * - Prepare a dummy mtt struct
2258          * - Translate inbox contents to simple addresses in host endianness */
2259         mtt.offset = 0;  /* TBD: this is broken, but not handled here since
2260                             the offset is never actually used */
2261         mtt.order = 0;
2262         mtt.page_shift = 0;
2263         for (i = 0; i < npages; ++i)
2264                 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
2265
2266         err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
2267                                ((u64 *)page_list + 2));
2268
2269         if (rmtt)
2270                 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
2271
2272         return err;
2273 }
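
/* Note on the WRITE_MTT mailbox layout relied on above: page_list[0] holds
 * the first MTT index and page_list[1] appears to be reserved, so the page
 * addresses themselves start at index 2; the low bit of each address looks
 * to be the hardware "present" flag, which is why it is cleared (& ~1ULL)
 * before the addresses are handed to __mlx4_write_mtt().
 */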
2274
2275 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2276                           struct mlx4_vhcr *vhcr,
2277                           struct mlx4_cmd_mailbox *inbox,
2278                           struct mlx4_cmd_mailbox *outbox,
2279                           struct mlx4_cmd_info *cmd)
2280 {
2281         int eqn = vhcr->in_modifier;
2282         int res_id = eqn | (slave << 8);
2283         struct res_eq *eq;
2284         int err;
2285
2286         err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
2287         if (err)
2288                 return err;
2289
2290         err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
2291         if (err)
2292                 goto ex_abort;
2293
2294         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2295         if (err)
2296                 goto ex_put;
2297
2298         atomic_dec(&eq->mtt->ref_count);
2299         put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2300         res_end_move(dev, slave, RES_EQ, res_id);
2301         rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2302
2303         return 0;
2304
2305 ex_put:
2306         put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2307 ex_abort:
2308         res_abort_move(dev, slave, RES_EQ, res_id);
2309
2310         return err;
2311 }
2312
2313 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
2314 {
2315         struct mlx4_priv *priv = mlx4_priv(dev);
2316         struct mlx4_slave_event_eq_info *event_eq;
2317         struct mlx4_cmd_mailbox *mailbox;
2318         u32 in_modifier = 0;
2319         int err;
2320         int res_id;
2321         struct res_eq *req;
2322
2323         if (!priv->mfunc.master.slave_state)
2324                 return -EINVAL;
2325
2326         event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
2327
2328         /* Create the event only if the slave is registered */
2329         if (event_eq->eqn < 0)
2330                 return 0;
2331
2332         mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2333         res_id = (slave << 8) | event_eq->eqn;
2334         err = get_res(dev, slave, res_id, RES_EQ, &req);
2335         if (err)
2336                 goto unlock;
2337
2338         if (req->com.from_state != RES_EQ_HW) {
2339                 err = -EINVAL;
2340                 goto put;
2341         }
2342
2343         mailbox = mlx4_alloc_cmd_mailbox(dev);
2344         if (IS_ERR(mailbox)) {
2345                 err = PTR_ERR(mailbox);
2346                 goto put;
2347         }
2348
2349         if (eqe->type == MLX4_EVENT_TYPE_CMD) {
2350                 ++event_eq->token;
2351                 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
2352         }
2353
2354         memcpy(mailbox->buf, (u8 *) eqe, 28);
2355
2356         in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
2357
2358         err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
2359                        MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
2360                        MLX4_CMD_NATIVE);
2361
2362         put_res(dev, slave, res_id, RES_EQ);
2363         mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2364         mlx4_free_cmd_mailbox(dev, mailbox);
2365         return err;
2366
2367 put:
2368         put_res(dev, slave, res_id, RES_EQ);
2369
2370 unlock:
2371         mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2372         return err;
2373 }
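
/* Note: this helper delivers an async event to a slave by asking the
 * firmware to post the EQE on the slave's registered event EQ.  Only the
 * first 28 bytes of the 32-byte EQE are copied; the tail (including the
 * ownership bit) is left for the firmware to fill.  The in_modifier packs
 * the slave number into bits 0..7 and the target EQN into bits 16..23.
 */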
2374
2375 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
2376                           struct mlx4_vhcr *vhcr,
2377                           struct mlx4_cmd_mailbox *inbox,
2378                           struct mlx4_cmd_mailbox *outbox,
2379                           struct mlx4_cmd_info *cmd)
2380 {
2381         int eqn = vhcr->in_modifier;
2382         int res_id = eqn | (slave << 8);
2383         struct res_eq *eq;
2384         int err;
2385
2386         err = get_res(dev, slave, res_id, RES_EQ, &eq);
2387         if (err)
2388                 return err;
2389
2390         if (eq->com.from_state != RES_EQ_HW) {
2391                 err = -EINVAL;
2392                 goto ex_put;
2393         }
2394
2395         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2396
2397 ex_put:
2398         put_res(dev, slave, res_id, RES_EQ);
2399         return err;
2400 }
2401
2402 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2403                           struct mlx4_vhcr *vhcr,
2404                           struct mlx4_cmd_mailbox *inbox,
2405                           struct mlx4_cmd_mailbox *outbox,
2406                           struct mlx4_cmd_info *cmd)
2407 {
2408         int err;
2409         int cqn = vhcr->in_modifier;
2410         struct mlx4_cq_context *cqc = inbox->buf;
2411         int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2412         struct res_cq *cq;
2413         struct res_mtt *mtt;
2414
2415         err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
2416         if (err)
2417                 return err;
2418         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2419         if (err)
2420                 goto out_move;
2421         err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2422         if (err)
2423                 goto out_put;
2424         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2425         if (err)
2426                 goto out_put;
2427         atomic_inc(&mtt->ref_count);
2428         cq->mtt = mtt;
2429         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2430         res_end_move(dev, slave, RES_CQ, cqn);
2431         return 0;
2432
2433 out_put:
2434         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2435 out_move:
2436         res_abort_move(dev, slave, RES_CQ, cqn);
2437         return err;
2438 }
2439
2440 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2441                           struct mlx4_vhcr *vhcr,
2442                           struct mlx4_cmd_mailbox *inbox,
2443                           struct mlx4_cmd_mailbox *outbox,
2444                           struct mlx4_cmd_info *cmd)
2445 {
2446         int err;
2447         int cqn = vhcr->in_modifier;
2448         struct res_cq *cq;
2449
2450         err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
2451         if (err)
2452                 return err;
2453         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2454         if (err)
2455                 goto out_move;
2456         atomic_dec(&cq->mtt->ref_count);
2457         res_end_move(dev, slave, RES_CQ, cqn);
2458         return 0;
2459
2460 out_move:
2461         res_abort_move(dev, slave, RES_CQ, cqn);
2462         return err;
2463 }
2464
2465 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2466                           struct mlx4_vhcr *vhcr,
2467                           struct mlx4_cmd_mailbox *inbox,
2468                           struct mlx4_cmd_mailbox *outbox,
2469                           struct mlx4_cmd_info *cmd)
2470 {
2471         int cqn = vhcr->in_modifier;
2472         struct res_cq *cq;
2473         int err;
2474
2475         err = get_res(dev, slave, cqn, RES_CQ, &cq);
2476         if (err)
2477                 return err;
2478
2479         if (cq->com.from_state != RES_CQ_HW)
2480                 goto ex_put;
2481
2482         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2483 ex_put:
2484         put_res(dev, slave, cqn, RES_CQ);
2485
2486         return err;
2487 }
2488
2489 static int handle_resize(struct mlx4_dev *dev, int slave,
2490                          struct mlx4_vhcr *vhcr,
2491                          struct mlx4_cmd_mailbox *inbox,
2492                          struct mlx4_cmd_mailbox *outbox,
2493                          struct mlx4_cmd_info *cmd,
2494                          struct res_cq *cq)
2495 {
2496         int err;
2497         struct res_mtt *orig_mtt;
2498         struct res_mtt *mtt;
2499         struct mlx4_cq_context *cqc = inbox->buf;
2500         int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2501
2502         err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
2503         if (err)
2504                 return err;
2505
2506         if (orig_mtt != cq->mtt) {
2507                 err = -EINVAL;
2508                 goto ex_put;
2509         }
2510
2511         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2512         if (err)
2513                 goto ex_put;
2514
2515         err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2516         if (err)
2517                 goto ex_put1;
2518         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2519         if (err)
2520                 goto ex_put1;
2521         atomic_dec(&orig_mtt->ref_count);
2522         put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2523         atomic_inc(&mtt->ref_count);
2524         cq->mtt = mtt;
2525         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2526         return 0;
2527
2528 ex_put1:
2529         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2530 ex_put:
2531         put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2532
2533         return err;
2535 }
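
/* Note: a resize (MODIFY_CQ with op_modifier 0) moves the CQ onto a new
 * buffer, so the tracker drops its reference on the old MTT range and takes
 * one on the new range before recording cq->mtt -- mirroring the
 * inc-on-SW2HW / dec-on-HW2SW pairing used elsewhere in this file.
 */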
2536
2537 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2538                            struct mlx4_vhcr *vhcr,
2539                            struct mlx4_cmd_mailbox *inbox,
2540                            struct mlx4_cmd_mailbox *outbox,
2541                            struct mlx4_cmd_info *cmd)
2542 {
2543         int cqn = vhcr->in_modifier;
2544         struct res_cq *cq;
2545         int err;
2546
2547         err = get_res(dev, slave, cqn, RES_CQ, &cq);
2548         if (err)
2549                 return err;
2550
2551         if (cq->com.from_state != RES_CQ_HW)
2552                 goto ex_put;
2553
2554         if (vhcr->op_modifier == 0) {
2555                 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
2556                 goto ex_put;
2557         }
2558
2559         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2560 ex_put:
2561         put_res(dev, slave, cqn, RES_CQ);
2562
2563         return err;
2564 }
2565
2566 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
2567 {
2568         int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
2569         int log_rq_stride = srqc->logstride & 7;
2570         int page_shift = (srqc->log_page_size & 0x3f) + 12;
2571
2572         if (log_srq_size + log_rq_stride + 4 < page_shift)
2573                 return 1;
2574
2575         return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
2576 }
2577
2578 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2579                            struct mlx4_vhcr *vhcr,
2580                            struct mlx4_cmd_mailbox *inbox,
2581                            struct mlx4_cmd_mailbox *outbox,
2582                            struct mlx4_cmd_info *cmd)
2583 {
2584         int err;
2585         int srqn = vhcr->in_modifier;
2586         struct res_mtt *mtt;
2587         struct res_srq *srq;
2588         struct mlx4_srq_context *srqc = inbox->buf;
2589         int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
2590
2591         if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
2592                 return -EINVAL;
2593
2594         err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
2595         if (err)
2596                 return err;
2597         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2598         if (err)
2599                 goto ex_abort;
2600         err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
2601                               mtt);
2602         if (err)
2603                 goto ex_put_mtt;
2604
2605         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2606         if (err)
2607                 goto ex_put_mtt;
2608
2609         atomic_inc(&mtt->ref_count);
2610         srq->mtt = mtt;
2611         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2612         res_end_move(dev, slave, RES_SRQ, srqn);
2613         return 0;
2614
2615 ex_put_mtt:
2616         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2617 ex_abort:
2618         res_abort_move(dev, slave, RES_SRQ, srqn);
2619
2620         return err;
2621 }
2622
2623 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2624                            struct mlx4_vhcr *vhcr,
2625                            struct mlx4_cmd_mailbox *inbox,
2626                            struct mlx4_cmd_mailbox *outbox,
2627                            struct mlx4_cmd_info *cmd)
2628 {
2629         int err;
2630         int srqn = vhcr->in_modifier;
2631         struct res_srq *srq;
2632
2633         err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
2634         if (err)
2635                 return err;
2636         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2637         if (err)
2638                 goto ex_abort;
2639         atomic_dec(&srq->mtt->ref_count);
2640         if (srq->cq)
2641                 atomic_dec(&srq->cq->ref_count);
2642         res_end_move(dev, slave, RES_SRQ, srqn);
2643
2644         return 0;
2645
2646 ex_abort:
2647         res_abort_move(dev, slave, RES_SRQ, srqn);
2648
2649         return err;
2650 }
2651
2652 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2653                            struct mlx4_vhcr *vhcr,
2654                            struct mlx4_cmd_mailbox *inbox,
2655                            struct mlx4_cmd_mailbox *outbox,
2656                            struct mlx4_cmd_info *cmd)
2657 {
2658         int err;
2659         int srqn = vhcr->in_modifier;
2660         struct res_srq *srq;
2661
2662         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2663         if (err)
2664                 return err;
2665         if (srq->com.from_state != RES_SRQ_HW) {
2666                 err = -EBUSY;
2667                 goto out;
2668         }
2669         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2670 out:
2671         put_res(dev, slave, srqn, RES_SRQ);
2672         return err;
2673 }
2674
2675 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2676                          struct mlx4_vhcr *vhcr,
2677                          struct mlx4_cmd_mailbox *inbox,
2678                          struct mlx4_cmd_mailbox *outbox,
2679                          struct mlx4_cmd_info *cmd)
2680 {
2681         int err;
2682         int srqn = vhcr->in_modifier;
2683         struct res_srq *srq;
2684
2685         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2686         if (err)
2687                 return err;
2688
2689         if (srq->com.from_state != RES_SRQ_HW) {
2690                 err = -EBUSY;
2691                 goto out;
2692         }
2693
2694         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2695 out:
2696         put_res(dev, slave, srqn, RES_SRQ);
2697         return err;
2698 }
2699
2700 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
2701                         struct mlx4_vhcr *vhcr,
2702                         struct mlx4_cmd_mailbox *inbox,
2703                         struct mlx4_cmd_mailbox *outbox,
2704                         struct mlx4_cmd_info *cmd)
2705 {
2706         int err;
2707         int qpn = vhcr->in_modifier & 0x7fffff;
2708         struct res_qp *qp;
2709
2710         err = get_res(dev, slave, qpn, RES_QP, &qp);
2711         if (err)
2712                 return err;
2713         if (qp->com.from_state != RES_QP_HW) {
2714                 err = -EBUSY;
2715                 goto out;
2716         }
2717
2718         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2719 out:
2720         put_res(dev, slave, qpn, RES_QP);
2721         return err;
2722 }
2723
2724 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2725                               struct mlx4_vhcr *vhcr,
2726                               struct mlx4_cmd_mailbox *inbox,
2727                               struct mlx4_cmd_mailbox *outbox,
2728                               struct mlx4_cmd_info *cmd)
2729 {
2730         struct mlx4_qp_context *context = inbox->buf + 8;
2731         adjust_proxy_tun_qkey(dev, vhcr, context);
2732         update_pkey_index(dev, slave, inbox);
2733         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2734 }
2735
2736 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
2737                              struct mlx4_vhcr *vhcr,
2738                              struct mlx4_cmd_mailbox *inbox,
2739                              struct mlx4_cmd_mailbox *outbox,
2740                              struct mlx4_cmd_info *cmd)
2741 {
2742         int err;
2743         struct mlx4_qp_context *qpc = inbox->buf + 8;
2744
2745         err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
2746         if (err)
2747                 return err;
2748
2749         update_pkey_index(dev, slave, inbox);
2750         update_gid(dev, inbox, (u8)slave);
2751         adjust_proxy_tun_qkey(dev, vhcr, qpc);
2752
2753         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2754 }
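
/* Note: the transition wrappers below all follow the recipe just shown --
 * verify_qp_parameters() rejects GID indices a slave may not use,
 * update_pkey_index() maps the slave's virtual pkey index onto the physical
 * table, update_gid() points the address path at the slave's own GID entry,
 * and the paravirtual qkey fix-up is applied -- before the command is
 * passed through to firmware.
 */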
2755
2756 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2757                             struct mlx4_vhcr *vhcr,
2758                             struct mlx4_cmd_mailbox *inbox,
2759                             struct mlx4_cmd_mailbox *outbox,
2760                             struct mlx4_cmd_info *cmd)
2761 {
2762         int err;
2763         struct mlx4_qp_context *context = inbox->buf + 8;
2764
2765         err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
2766         if (err)
2767                 return err;
2768
2769         update_pkey_index(dev, slave, inbox);
2770         update_gid(dev, inbox, (u8)slave);
2771         adjust_proxy_tun_qkey(dev, vhcr, context);
2772         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2773 }
2774
2775 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2776                             struct mlx4_vhcr *vhcr,
2777                             struct mlx4_cmd_mailbox *inbox,
2778                             struct mlx4_cmd_mailbox *outbox,
2779                             struct mlx4_cmd_info *cmd)
2780 {
2781         int err;
2782         struct mlx4_qp_context *context = inbox->buf + 8;
2783
2784         err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
2785         if (err)
2786                 return err;
2787
2788         update_pkey_index(dev, slave, inbox);
2789         update_gid(dev, inbox, (u8)slave);
2790         adjust_proxy_tun_qkey(dev, vhcr, context);
2791         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2792 }
2793
2795 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2796                               struct mlx4_vhcr *vhcr,
2797                               struct mlx4_cmd_mailbox *inbox,
2798                               struct mlx4_cmd_mailbox *outbox,
2799                               struct mlx4_cmd_info *cmd)
2800 {
2801         struct mlx4_qp_context *context = inbox->buf + 8;
2802         adjust_proxy_tun_qkey(dev, vhcr, context);
2803         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2804 }
2805
2806 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
2807                             struct mlx4_vhcr *vhcr,
2808                             struct mlx4_cmd_mailbox *inbox,
2809                             struct mlx4_cmd_mailbox *outbox,
2810                             struct mlx4_cmd_info *cmd)
2811 {
2812         int err;
2813         struct mlx4_qp_context *context = inbox->buf + 8;
2814
2815         err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
2816         if (err)
2817                 return err;
2818
2819         adjust_proxy_tun_qkey(dev, vhcr, context);
2820         update_gid(dev, inbox, (u8)slave);
2821         update_pkey_index(dev, slave, inbox);
2822         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2823 }
2824
2825 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2826                             struct mlx4_vhcr *vhcr,
2827                             struct mlx4_cmd_mailbox *inbox,
2828                             struct mlx4_cmd_mailbox *outbox,
2829                             struct mlx4_cmd_info *cmd)
2830 {
2831         int err;
2832         struct mlx4_qp_context *context = inbox->buf + 8;
2833
2834         err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
2835         if (err)
2836                 return err;
2837
2838         adjust_proxy_tun_qkey(dev, vhcr, context);
2839         update_gid(dev, inbox, (u8)slave);
2840         update_pkey_index(dev, slave, inbox);
2841         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2842 }
2843
2844 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
2845                          struct mlx4_vhcr *vhcr,
2846                          struct mlx4_cmd_mailbox *inbox,
2847                          struct mlx4_cmd_mailbox *outbox,
2848                          struct mlx4_cmd_info *cmd)
2849 {
2850         int err;
2851         int qpn = vhcr->in_modifier & 0x7fffff;
2852         struct res_qp *qp;
2853
2854         err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
2855         if (err)
2856                 return err;
2857         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2858         if (err)
2859                 goto ex_abort;
2860
2861         atomic_dec(&qp->mtt->ref_count);
2862         atomic_dec(&qp->rcq->ref_count);
2863         atomic_dec(&qp->scq->ref_count);
2864         if (qp->srq)
2865                 atomic_dec(&qp->srq->ref_count);
2866         res_end_move(dev, slave, RES_QP, qpn);
2867         return 0;
2868
2869 ex_abort:
2870         res_abort_move(dev, slave, RES_QP, qpn);
2871
2872         return err;
2873 }
2874
2875 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
2876                                 struct res_qp *rqp, u8 *gid)
2877 {
2878         struct res_gid *res;
2879
2880         list_for_each_entry(res, &rqp->mcg_list, list) {
2881                 if (!memcmp(res->gid, gid, 16))
2882                         return res;
2883         }
2884         return NULL;
2885 }
2886
2887 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2888                        u8 *gid, enum mlx4_protocol prot,
2889                        enum mlx4_steer_type steer)
2890 {
2891         struct res_gid *res;
2892         int err;
2893
2894         res = kzalloc(sizeof *res, GFP_KERNEL);
2895         if (!res)
2896                 return -ENOMEM;
2897
2898         spin_lock_irq(&rqp->mcg_spl);
2899         if (find_gid(dev, slave, rqp, gid)) {
2900                 kfree(res);
2901                 err = -EEXIST;
2902         } else {
2903                 memcpy(res->gid, gid, 16);
2904                 res->prot = prot;
2905                 res->steer = steer;
2906                 list_add_tail(&res->list, &rqp->mcg_list);
2907                 err = 0;
2908         }
2909         spin_unlock_irq(&rqp->mcg_spl);
2910
2911         return err;
2912 }
2913
2914 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2915                        u8 *gid, enum mlx4_protocol prot,
2916                        enum mlx4_steer_type steer)
2917 {
2918         struct res_gid *res;
2919         int err;
2920
2921         spin_lock_irq(&rqp->mcg_spl);
2922         res = find_gid(dev, slave, rqp, gid);
2923         if (!res || res->prot != prot || res->steer != steer)
2924                 err = -EINVAL;
2925         else {
2926                 list_del(&res->list);
2927                 kfree(res);
2928                 err = 0;
2929         }
2930         spin_unlock_irq(&rqp->mcg_spl);
2931
2932         return err;
2933 }
2934
2935 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
2936                                struct mlx4_vhcr *vhcr,
2937                                struct mlx4_cmd_mailbox *inbox,
2938                                struct mlx4_cmd_mailbox *outbox,
2939                                struct mlx4_cmd_info *cmd)
2940 {
2941         struct mlx4_qp qp; /* dummy for calling attach/detach */
2942         u8 *gid = inbox->buf;
2943         enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
2944         int err;
2945         int qpn;
2946         struct res_qp *rqp;
2947         int attach = vhcr->op_modifier;
2948         int block_loopback = vhcr->in_modifier >> 31;
2949         u8 steer_type_mask = 2;
2950         enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
2951
2952         qpn = vhcr->in_modifier & 0xffffff;
2953         err = get_res(dev, slave, qpn, RES_QP, &rqp);
2954         if (err)
2955                 return err;
2956
2957         qp.qpn = qpn;
2958         if (attach) {
2959                 err = add_mcg_res(dev, slave, rqp, gid, prot, type);
2960                 if (err)
2961                         goto ex_put;
2962
2963                 err = mlx4_qp_attach_common(dev, &qp, gid,
2964                                             block_loopback, prot, type);
2965                 if (err)
2966                         goto ex_rem;
2967         } else {
2968                 err = rem_mcg_res(dev, slave, rqp, gid, prot, type);
2969                 if (err)
2970                         goto ex_put;
2971                 err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
2972         }
2973
2974         put_res(dev, slave, qpn, RES_QP);
2975         return 0;
2976
2977 ex_rem:
2978         /* ignore error return below, already in error */
2979         (void) rem_mcg_res(dev, slave, rqp, gid, prot, type);
2980 ex_put:
2981         put_res(dev, slave, qpn, RES_QP);
2982
2983         return err;
2984 }
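
/* Note on the ATTACH decoding above: in_modifier carries the QPN in bits
 * 0..23, the protocol in bits 28..30 and the block-loopback flag in bit 31,
 * while bit 1 of gid[7] selects the steering type.  The shadow mcg_list
 * maintained by add_mcg_res()/rem_mcg_res() is what later allows
 * detach_qp() to unwind a dead slave's attachments.
 */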
2985
2986 /*
2987  * MAC validation for Flow Steering rules.
2988  * VF can attach rules only with a mac address which is assigned to it.
2989  */
2990 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
2991                                    struct list_head *rlist)
2992 {
2993         struct mac_res *res, *tmp;
2994         __be64 be_mac;
2995
2996         /* make sure it isn't a multicast or broadcast mac */
2997         if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
2998             !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
2999                 list_for_each_entry_safe(res, tmp, rlist, list) {
3000                         be_mac = cpu_to_be64(res->mac << 16);
3001                         if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
3002                                 return 0;
3003                 }
3004                 pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
3005                        eth_header->eth.dst_mac, slave);
3006                 return -EINVAL;
3007         }
3008         return 0;
3009 }
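
/* Note on the "<< 16" above: the tracker keeps a MAC as the low 48 bits of
 * a u64, e.g. 00:11:22:33:44:55 is 0x001122334455.  Shifting left by 16 and
 * converting with cpu_to_be64() lands the six address bytes at the start of
 * the __be64, so the memcmp() against the rule's dst_mac can compare
 * exactly ETH_ALEN bytes.
 */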
3010
3011 /*
3012  * In case of missing eth header, append eth header with a MAC address
3013  * assigned to the VF.
3014  */
3015 static int add_eth_header(struct mlx4_dev *dev, int slave,
3016                           struct mlx4_cmd_mailbox *inbox,
3017                           struct list_head *rlist, int header_id)
3018 {
3019         struct mac_res *res, *tmp;
3020         u8 port;
3021         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3022         struct mlx4_net_trans_rule_hw_eth *eth_header;
3023         struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3024         struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3025         __be64 be_mac = 0;
3026         __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3027
3028         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3029         port = be32_to_cpu(ctrl->vf_vep_port) & 0xff;
3030         eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3031
3032         /* Clear a space in the inbox for eth header */
3033         switch (header_id) {
3034         case MLX4_NET_TRANS_RULE_ID_IPV4:
3035                 ip_header =
3036                         (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3037                 memmove(ip_header, eth_header,
3038                         sizeof(*ip_header) + sizeof(*l4_header));
3039                 break;
3040         case MLX4_NET_TRANS_RULE_ID_TCP:
3041         case MLX4_NET_TRANS_RULE_ID_UDP:
3042                 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
3043                             (eth_header + 1);
3044                 memmove(l4_header, eth_header, sizeof(*l4_header));
3045                 break;
3046         default:
3047                 return -EINVAL;
3048         }
3049         list_for_each_entry_safe(res, tmp, rlist, list) {
3050                 if (port == res->port) {
3051                         be_mac = cpu_to_be64(res->mac << 16);
3052                         break;
3053                 }
3054         }
3055         if (!be_mac) {
3056                 pr_err("Failed adding eth header to FS rule, can't find matching MAC for port %d\n",
3057                        port);
3058                 return -EINVAL;
3059         }
3060
3061         memset(eth_header, 0, sizeof(*eth_header));
3062         eth_header->size = sizeof(*eth_header) >> 2;
3063         eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
3064         memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
3065         memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
3066
3067         return 0;
3069 }
3070
3071 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3072                                          struct mlx4_vhcr *vhcr,
3073                                          struct mlx4_cmd_mailbox *inbox,
3074                                          struct mlx4_cmd_mailbox *outbox,
3075                                          struct mlx4_cmd_info *cmd)
3076 {
3078         struct mlx4_priv *priv = mlx4_priv(dev);
3079         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3080         struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
3081         int err;
3082         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3083         struct _rule_hw  *rule_header;
3084         int header_id;
3085
3086         if (dev->caps.steering_mode !=
3087             MLX4_STEERING_MODE_DEVICE_MANAGED)
3088                 return -EOPNOTSUPP;
3089
3090         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3091         rule_header = (struct _rule_hw *)(ctrl + 1);
3092         header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
3093
3094         switch (header_id) {
3095         case MLX4_NET_TRANS_RULE_ID_ETH:
3096                 if (validate_eth_header_mac(slave, rule_header, rlist))
3097                         return -EINVAL;
3098                 break;
3099         case MLX4_NET_TRANS_RULE_ID_IPV4:
3100         case MLX4_NET_TRANS_RULE_ID_TCP:
3101         case MLX4_NET_TRANS_RULE_ID_UDP:
3102                 pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
3103                 if (add_eth_header(dev, slave, inbox, rlist, header_id))
3104                         return -EINVAL;
3105                 vhcr->in_modifier +=
3106                         sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
3107                 break;
3108         default:
3109                 pr_err("Corrupted mailbox.\n");
3110                 return -EINVAL;
3111         }
3112
3113         err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
3114                            vhcr->in_modifier, 0,
3115                            MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
3116                            MLX4_CMD_NATIVE);
3117         if (err)
3118                 return err;
3119
3120         err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, 0);
3121         if (err) {
3122                 mlx4_err(dev, "Failed to add flow steering resources\n");
3123                 /* detach rule*/
3124                 mlx4_cmd(dev, vhcr->out_param, 0, 0,
3125                          MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
3126                          MLX4_CMD_NATIVE);
3127         }
3128         return err;
3129 }
3130
3131 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
3132                                          struct mlx4_vhcr *vhcr,
3133                                          struct mlx4_cmd_mailbox *inbox,
3134                                          struct mlx4_cmd_mailbox *outbox,
3135                                          struct mlx4_cmd_info *cmd)
3136 {
3137         int err;
3138
3139         if (dev->caps.steering_mode !=
3140             MLX4_STEERING_MODE_DEVICE_MANAGED)
3141                 return -EOPNOTSUPP;
3142
3143         err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
3144         if (err) {
3145                 mlx4_err(dev, "Failed to remove flow steering resources\n");
3146                 return err;
3147         }
3148
3149         err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
3150                        MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3151                        MLX4_CMD_NATIVE);
3152         return err;
3153 }
3154
3155 enum {
3156         BUSY_MAX_RETRIES = 10
3157 };
3158
3159 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
3160                                struct mlx4_vhcr *vhcr,
3161                                struct mlx4_cmd_mailbox *inbox,
3162                                struct mlx4_cmd_mailbox *outbox,
3163                                struct mlx4_cmd_info *cmd)
3164 {
3165         int err;
3166         int index = vhcr->in_modifier & 0xffff;
3167
3168         err = get_res(dev, slave, index, RES_COUNTER, NULL);
3169         if (err)
3170                 return err;
3171
3172         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3173         put_res(dev, slave, index, RES_COUNTER);
3174         return err;
3175 }
3176
3177 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
3178 {
3179         struct res_gid *rgid;
3180         struct res_gid *tmp;
3181         struct mlx4_qp qp; /* dummy for calling attach/detach */
3182
3183         list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
3184                 qp.qpn = rqp->local_qpn;
3185                 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
3186                                              rgid->steer);
3187                 list_del(&rgid->list);
3188                 kfree(rgid);
3189         }
3190 }
3191
3192 static int _move_all_busy(struct mlx4_dev *dev, int slave,
3193                           enum mlx4_resource type, int print)
3194 {
3195         struct mlx4_priv *priv = mlx4_priv(dev);
3196         struct mlx4_resource_tracker *tracker =
3197                 &priv->mfunc.master.res_tracker;
3198         struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
3199         struct res_common *r;
3200         struct res_common *tmp;
3201         int busy;
3202
3203         busy = 0;
3204         spin_lock_irq(mlx4_tlock(dev));
3205         list_for_each_entry_safe(r, tmp, rlist, list) {
3206                 if (r->owner == slave) {
3207                         if (!r->removing) {
3208                                 if (r->state == RES_ANY_BUSY) {
3209                                         if (print)
3210                                                 mlx4_dbg(dev,
3211                                                          "%s id 0x%llx is busy\n",
3212                                                           ResourceType(type),
3213                                                           r->res_id);
3214                                         ++busy;
3215                                 } else {
3216                                         r->from_state = r->state;
3217                                         r->state = RES_ANY_BUSY;
3218                                         r->removing = 1;
3219                                 }
3220                         }
3221                 }
3222         }
3223         spin_unlock_irq(mlx4_tlock(dev));
3224
3225         return busy;
3226 }
3227
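/*
 * Retry _move_all_busy() for up to five seconds, rescheduling between
 * passes, then make one final pass with printing enabled so anything
 * still stuck busy gets logged.
 */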
3228 static int move_all_busy(struct mlx4_dev *dev, int slave,
3229                          enum mlx4_resource type)
3230 {
3231         unsigned long begin;
3232         int busy;
3233
3234         begin = jiffies;
3235         do {
3236                 busy = _move_all_busy(dev, slave, type, 0);
3237                 if (time_after(jiffies, begin + 5 * HZ))
3238                         break;
3239                 if (busy)
3240                         cond_resched();
3241         } while (busy);
3242
3243         if (busy)
3244                 busy = _move_all_busy(dev, slave, type, 1);
3245
3246         return busy;
3247 }
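
/*
 * The rem_slave_*() helpers below share one shape: claim the slave's
 * resources with move_all_busy(), then walk the per-slave list and unwind
 * each entry one tracked state at a time, dropping the tracker lock
 * around firmware commands.  For QPs the chain is RES_QP_HW ->
 * RES_QP_MAPPED -> RES_QP_RESERVED -> freed; leaving RES_QP_HW also
 * releases the references the QP held on its CQs, SRQ and MTT.
 */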
3248 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
3249 {
3250         struct mlx4_priv *priv = mlx4_priv(dev);
3251         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3252         struct list_head *qp_list =
3253                 &tracker->slave_list[slave].res_list[RES_QP];
3254         struct res_qp *qp;
3255         struct res_qp *tmp;
3256         int state;
3257         u64 in_param;
3258         int qpn;
3259         int err;
3260
3261         err = move_all_busy(dev, slave, RES_QP);
3262         if (err)
		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
			  slave);
3265
3266         spin_lock_irq(mlx4_tlock(dev));
3267         list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
3268                 spin_unlock_irq(mlx4_tlock(dev));
3269                 if (qp->com.owner == slave) {
3270                         qpn = qp->com.res_id;
3271                         detach_qp(dev, slave, qp);
3272                         state = qp->com.from_state;
3273                         while (state != 0) {
3274                                 switch (state) {
3275                                 case RES_QP_RESERVED:
3276                                         spin_lock_irq(mlx4_tlock(dev));
3277                                         rb_erase(&qp->com.node,
3278                                                  &tracker->res_tree[RES_QP]);
3279                                         list_del(&qp->com.list);
3280                                         spin_unlock_irq(mlx4_tlock(dev));
3281                                         kfree(qp);
3282                                         state = 0;
3283                                         break;
3284                                 case RES_QP_MAPPED:
3285                                         if (!valid_reserved(dev, slave, qpn))
3286                                                 __mlx4_qp_free_icm(dev, qpn);
3287                                         state = RES_QP_RESERVED;
3288                                         break;
3289                                 case RES_QP_HW:
3290                                         in_param = slave;
3291                                         err = mlx4_cmd(dev, in_param,
3292                                                        qp->local_qpn, 2,
3293                                                        MLX4_CMD_2RST_QP,
3294                                                        MLX4_CMD_TIME_CLASS_A,
3295                                                        MLX4_CMD_NATIVE);
3296                                         if (err)
3297                                                 mlx4_dbg(dev, "rem_slave_qps: failed"
3298                                                          " to move slave %d qpn %d to"
3299                                                          " reset\n", slave,
3300                                                          qp->local_qpn);
3301                                         atomic_dec(&qp->rcq->ref_count);
3302                                         atomic_dec(&qp->scq->ref_count);
3303                                         atomic_dec(&qp->mtt->ref_count);
3304                                         if (qp->srq)
3305                                                 atomic_dec(&qp->srq->ref_count);
3306                                         state = RES_QP_MAPPED;
3307                                         break;
3308                                 default:
3309                                         state = 0;
3310                                 }
3311                         }
3312                 }
3313                 spin_lock_irq(mlx4_tlock(dev));
3314         }
3315         spin_unlock_irq(mlx4_tlock(dev));
3316 }
3317
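/*
 * SRQ unwind: HW2SW_SRQ hands the SRQ back to software ownership and the
 * MTT (and any CQ) references are dropped; RES_SRQ_ALLOCATED then frees
 * the ICM backing and the tracker entry.
 */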
3318 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
3319 {
3320         struct mlx4_priv *priv = mlx4_priv(dev);
3321         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3322         struct list_head *srq_list =
3323                 &tracker->slave_list[slave].res_list[RES_SRQ];
3324         struct res_srq *srq;
3325         struct res_srq *tmp;
3326         int state;
3327         u64 in_param;
3329         int srqn;
3330         int err;
3331
3332         err = move_all_busy(dev, slave, RES_SRQ);
3333         if (err)
3334                 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
3335                           "busy for slave %d\n", slave);
3336
3337         spin_lock_irq(mlx4_tlock(dev));
3338         list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
3339                 spin_unlock_irq(mlx4_tlock(dev));
3340                 if (srq->com.owner == slave) {
3341                         srqn = srq->com.res_id;
3342                         state = srq->com.from_state;
3343                         while (state != 0) {
3344                                 switch (state) {
3345                                 case RES_SRQ_ALLOCATED:
3346                                         __mlx4_srq_free_icm(dev, srqn);
3347                                         spin_lock_irq(mlx4_tlock(dev));
3348                                         rb_erase(&srq->com.node,
3349                                                  &tracker->res_tree[RES_SRQ]);
3350                                         list_del(&srq->com.list);
3351                                         spin_unlock_irq(mlx4_tlock(dev));
3352                                         kfree(srq);
3353                                         state = 0;
3354                                         break;
3355
3356                                 case RES_SRQ_HW:
3357                                         in_param = slave;
3358                                         err = mlx4_cmd(dev, in_param, srqn, 1,
3359                                                        MLX4_CMD_HW2SW_SRQ,
3360                                                        MLX4_CMD_TIME_CLASS_A,
3361                                                        MLX4_CMD_NATIVE);
3362                                         if (err)
3363                                                 mlx4_dbg(dev, "rem_slave_srqs: failed"
3364                                                          " to move slave %d srq %d to"
3365                                                          " SW ownership\n",
3366                                                          slave, srqn);
3367
3368                                         atomic_dec(&srq->mtt->ref_count);
3369                                         if (srq->cq)
3370                                                 atomic_dec(&srq->cq->ref_count);
3371                                         state = RES_SRQ_ALLOCATED;
3372                                         break;
3373
3374                                 default:
3375                                         state = 0;
3376                                 }
3377                         }
3378                 }
3379                 spin_lock_irq(mlx4_tlock(dev));
3380         }
3381         spin_unlock_irq(mlx4_tlock(dev));
3382 }
3383
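/*
 * CQ unwind; a CQ is skipped while its ref_count is still non-zero.
 * rem_slave_qps() and rem_slave_srqs() run first in the teardown path, so
 * by this point the QPs and SRQs pointing at the CQ have dropped their
 * references.
 */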
3384 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
3385 {
3386         struct mlx4_priv *priv = mlx4_priv(dev);
3387         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3388         struct list_head *cq_list =
3389                 &tracker->slave_list[slave].res_list[RES_CQ];
3390         struct res_cq *cq;
3391         struct res_cq *tmp;
3392         int state;
3393         u64 in_param;
3395         int cqn;
3396         int err;
3397
3398         err = move_all_busy(dev, slave, RES_CQ);
3399         if (err)
3400                 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
3401                           "busy for slave %d\n", slave);
3402
3403         spin_lock_irq(mlx4_tlock(dev));
3404         list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
3405                 spin_unlock_irq(mlx4_tlock(dev));
3406                 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
3407                         cqn = cq->com.res_id;
3408                         state = cq->com.from_state;
3409                         while (state != 0) {
3410                                 switch (state) {
3411                                 case RES_CQ_ALLOCATED:
3412                                         __mlx4_cq_free_icm(dev, cqn);
3413                                         spin_lock_irq(mlx4_tlock(dev));
3414                                         rb_erase(&cq->com.node,
3415                                                  &tracker->res_tree[RES_CQ]);
3416                                         list_del(&cq->com.list);
3417                                         spin_unlock_irq(mlx4_tlock(dev));
3418                                         kfree(cq);
3419                                         state = 0;
3420                                         break;
3421
3422                                 case RES_CQ_HW:
3423                                         in_param = slave;
3424                                         err = mlx4_cmd(dev, in_param, cqn, 1,
3425                                                        MLX4_CMD_HW2SW_CQ,
3426                                                        MLX4_CMD_TIME_CLASS_A,
3427                                                        MLX4_CMD_NATIVE);
3428                                         if (err)
3429                                                 mlx4_dbg(dev, "rem_slave_cqs: failed"
3430                                                          " to move slave %d cq %d to"
3431                                                          " SW ownership\n",
3432                                                          slave, cqn);
3433                                         atomic_dec(&cq->mtt->ref_count);
3434                                         state = RES_CQ_ALLOCATED;
3435                                         break;
3436
3437                                 default:
3438                                         state = 0;
3439                                 }
3440                         }
3441                 }
3442                 spin_lock_irq(mlx4_tlock(dev));
3443         }
3444         spin_unlock_irq(mlx4_tlock(dev));
3445 }
3446
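/*
 * MPT unwind: HW2SW_MPT -> free the ICM mapping -> release the MPT key,
 * dropping the MTT reference once the region has left hardware ownership.
 */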
3447 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
3448 {
3449         struct mlx4_priv *priv = mlx4_priv(dev);
3450         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3451         struct list_head *mpt_list =
3452                 &tracker->slave_list[slave].res_list[RES_MPT];
3453         struct res_mpt *mpt;
3454         struct res_mpt *tmp;
3455         int state;
3456         u64 in_param;
3458         int mptn;
3459         int err;
3460
3461         err = move_all_busy(dev, slave, RES_MPT);
3462         if (err)
3463                 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
3464                           "busy for slave %d\n", slave);
3465
3466         spin_lock_irq(mlx4_tlock(dev));
3467         list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
3468                 spin_unlock_irq(mlx4_tlock(dev));
3469                 if (mpt->com.owner == slave) {
3470                         mptn = mpt->com.res_id;
3471                         state = mpt->com.from_state;
3472                         while (state != 0) {
3473                                 switch (state) {
3474                                 case RES_MPT_RESERVED:
3475                                         __mlx4_mr_release(dev, mpt->key);
3476                                         spin_lock_irq(mlx4_tlock(dev));
3477                                         rb_erase(&mpt->com.node,
3478                                                  &tracker->res_tree[RES_MPT]);
3479                                         list_del(&mpt->com.list);
3480                                         spin_unlock_irq(mlx4_tlock(dev));
3481                                         kfree(mpt);
3482                                         state = 0;
3483                                         break;
3484
3485                                 case RES_MPT_MAPPED:
3486                                         __mlx4_mr_free_icm(dev, mpt->key);
3487                                         state = RES_MPT_RESERVED;
3488                                         break;
3489
3490                                 case RES_MPT_HW:
3491                                         in_param = slave;
3492                                         err = mlx4_cmd(dev, in_param, mptn, 0,
3493                                                      MLX4_CMD_HW2SW_MPT,
3494                                                      MLX4_CMD_TIME_CLASS_A,
3495                                                      MLX4_CMD_NATIVE);
3496                                         if (err)
3497                                                 mlx4_dbg(dev, "rem_slave_mrs: failed"
3498                                                          " to move slave %d mpt %d to"
3499                                                          " SW ownership\n",
3500                                                          slave, mptn);
3501                                         if (mpt->mtt)
3502                                                 atomic_dec(&mpt->mtt->ref_count);
3503                                         state = RES_MPT_MAPPED;
3504                                         break;
3505                                 default:
3506                                         state = 0;
3507                                 }
3508                         }
3509                 }
3510                 spin_lock_irq(mlx4_tlock(dev));
3511         }
3512         spin_unlock_irq(mlx4_tlock(dev));
3513 }
3514
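/*
 * MTTs carry a single tracked state; freeing just returns the range of
 * 1 << order entries starting at the base to the MTT allocator.
 */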
3515 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
3516 {
3517         struct mlx4_priv *priv = mlx4_priv(dev);
3518         struct mlx4_resource_tracker *tracker =
3519                 &priv->mfunc.master.res_tracker;
3520         struct list_head *mtt_list =
3521                 &tracker->slave_list[slave].res_list[RES_MTT];
3522         struct res_mtt *mtt;
3523         struct res_mtt *tmp;
3524         int state;
3526         int base;
3527         int err;
3528
3529         err = move_all_busy(dev, slave, RES_MTT);
3530         if (err)
3531                 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
3532                           "busy for slave %d\n", slave);
3533
3534         spin_lock_irq(mlx4_tlock(dev));
3535         list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
3536                 spin_unlock_irq(mlx4_tlock(dev));
3537                 if (mtt->com.owner == slave) {
3538                         base = mtt->com.res_id;
3539                         state = mtt->com.from_state;
3540                         while (state != 0) {
3541                                 switch (state) {
3542                                 case RES_MTT_ALLOCATED:
3543                                         __mlx4_free_mtt_range(dev, base,
3544                                                               mtt->order);
3545                                         spin_lock_irq(mlx4_tlock(dev));
3546                                         rb_erase(&mtt->com.node,
3547                                                  &tracker->res_tree[RES_MTT]);
3548                                         list_del(&mtt->com.list);
3549                                         spin_unlock_irq(mlx4_tlock(dev));
3550                                         kfree(mtt);
3551                                         state = 0;
3552                                         break;
3553
3554                                 default:
3555                                         state = 0;
3556                                 }
3557                         }
3558                 }
3559                 spin_lock_irq(mlx4_tlock(dev));
3560         }
3561         spin_unlock_irq(mlx4_tlock(dev));
3562 }
3563
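/*
 * Device-managed flow-steering rules: the tracker res_id doubles as the
 * rule's registration id, so each rule is detached in firmware via
 * MLX4_QP_FLOW_STEERING_DETACH before its tracker entry is freed.
 */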
3564 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
3565 {
3566         struct mlx4_priv *priv = mlx4_priv(dev);
3567         struct mlx4_resource_tracker *tracker =
3568                 &priv->mfunc.master.res_tracker;
3569         struct list_head *fs_rule_list =
3570                 &tracker->slave_list[slave].res_list[RES_FS_RULE];
3571         struct res_fs_rule *fs_rule;
3572         struct res_fs_rule *tmp;
3573         int state;
3574         u64 base;
3575         int err;
3576
3577         err = move_all_busy(dev, slave, RES_FS_RULE);
3578         if (err)
		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs_rules to busy for slave %d\n",
			  slave);
3581
3582         spin_lock_irq(mlx4_tlock(dev));
3583         list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
3584                 spin_unlock_irq(mlx4_tlock(dev));
3585                 if (fs_rule->com.owner == slave) {
3586                         base = fs_rule->com.res_id;
3587                         state = fs_rule->com.from_state;
3588                         while (state != 0) {
3589                                 switch (state) {
3590                                 case RES_FS_RULE_ALLOCATED:
3591                                         /* detach rule */
3592                                         err = mlx4_cmd(dev, base, 0, 0,
3593                                                        MLX4_QP_FLOW_STEERING_DETACH,
3594                                                        MLX4_CMD_TIME_CLASS_A,
3595                                                        MLX4_CMD_NATIVE);
3596
3597                                         spin_lock_irq(mlx4_tlock(dev));
3598                                         rb_erase(&fs_rule->com.node,
3599                                                  &tracker->res_tree[RES_FS_RULE]);
3600                                         list_del(&fs_rule->com.list);
3601                                         spin_unlock_irq(mlx4_tlock(dev));
3602                                         kfree(fs_rule);
3603                                         state = 0;
3604                                         break;
3605
3606                                 default:
3607                                         state = 0;
3608                                 }
3609                         }
3610                 }
3611                 spin_lock_irq(mlx4_tlock(dev));
3612         }
3613         spin_unlock_irq(mlx4_tlock(dev));
3614 }
3615
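/*
 * EQ unwind: HW2SW_EQ returns the EQ to software ownership.  If mailbox
 * allocation fails the loop simply reschedules and retries.  Note the
 * mailbox is freed without ever being passed to mlx4_cmd_box(), so any
 * EQ context HW2SW_EQ could return is discarded; on this teardown path
 * only the ownership transition appears to matter.
 */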
3616 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
3617 {
3618         struct mlx4_priv *priv = mlx4_priv(dev);
3619         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3620         struct list_head *eq_list =
3621                 &tracker->slave_list[slave].res_list[RES_EQ];
3622         struct res_eq *eq;
3623         struct res_eq *tmp;
3624         int err;
3625         int state;
3627         int eqn;
3628         struct mlx4_cmd_mailbox *mailbox;
3629
3630         err = move_all_busy(dev, slave, RES_EQ);
3631         if (err)
3632                 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
3633                           "busy for slave %d\n", slave);
3634
3635         spin_lock_irq(mlx4_tlock(dev));
3636         list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
3637                 spin_unlock_irq(mlx4_tlock(dev));
3638                 if (eq->com.owner == slave) {
3639                         eqn = eq->com.res_id;
3640                         state = eq->com.from_state;
3641                         while (state != 0) {
3642                                 switch (state) {
3643                                 case RES_EQ_RESERVED:
3644                                         spin_lock_irq(mlx4_tlock(dev));
3645                                         rb_erase(&eq->com.node,
3646                                                  &tracker->res_tree[RES_EQ]);
3647                                         list_del(&eq->com.list);
3648                                         spin_unlock_irq(mlx4_tlock(dev));
3649                                         kfree(eq);
3650                                         state = 0;
3651                                         break;
3652
3653                                 case RES_EQ_HW:
3654                                         mailbox = mlx4_alloc_cmd_mailbox(dev);
3655                                         if (IS_ERR(mailbox)) {
3656                                                 cond_resched();
3657                                                 continue;
3658                                         }
3659                                         err = mlx4_cmd_box(dev, slave, 0,
3660                                                            eqn & 0xff, 0,
3661                                                            MLX4_CMD_HW2SW_EQ,
3662                                                            MLX4_CMD_TIME_CLASS_A,
3663                                                            MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eq %d to SW ownership\n",
							 slave, eqn);
3668                                         mlx4_free_cmd_mailbox(dev, mailbox);
3669                                         atomic_dec(&eq->mtt->ref_count);
3670                                         state = RES_EQ_RESERVED;
3671                                         break;
3672
3673                                 default:
3674                                         state = 0;
3675                                 }
3676                         }
3677                 }
3678                 spin_lock_irq(mlx4_tlock(dev));
3679         }
3680         spin_unlock_irq(mlx4_tlock(dev));
3681 }
3682
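/*
 * Counters and XRC domains (below) have no hardware state machine to
 * unwind: their tracker entries are unlinked and the indices handed back
 * to the allocator directly under the tracker lock.
 */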
3683 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
3684 {
3685         struct mlx4_priv *priv = mlx4_priv(dev);
3686         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3687         struct list_head *counter_list =
3688                 &tracker->slave_list[slave].res_list[RES_COUNTER];
3689         struct res_counter *counter;
3690         struct res_counter *tmp;
3691         int err;
3692         int index;
3693
3694         err = move_all_busy(dev, slave, RES_COUNTER);
3695         if (err)
3696                 mlx4_warn(dev, "rem_slave_counters: Could not move all counters to "
3697                           "busy for slave %d\n", slave);
3698
3699         spin_lock_irq(mlx4_tlock(dev));
3700         list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
3701                 if (counter->com.owner == slave) {
3702                         index = counter->com.res_id;
3703                         rb_erase(&counter->com.node,
3704                                  &tracker->res_tree[RES_COUNTER]);
3705                         list_del(&counter->com.list);
3706                         kfree(counter);
3707                         __mlx4_counter_free(dev, index);
3708                 }
3709         }
3710         spin_unlock_irq(mlx4_tlock(dev));
3711 }
3712
3713 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
3714 {
3715         struct mlx4_priv *priv = mlx4_priv(dev);
3716         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3717         struct list_head *xrcdn_list =
3718                 &tracker->slave_list[slave].res_list[RES_XRCD];
3719         struct res_xrcdn *xrcd;
3720         struct res_xrcdn *tmp;
3721         int err;
3722         int xrcdn;
3723
3724         err = move_all_busy(dev, slave, RES_XRCD);
3725         if (err)
3726                 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to "
3727                           "busy for slave %d\n", slave);
3728
3729         spin_lock_irq(mlx4_tlock(dev));
3730         list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
3731                 if (xrcd->com.owner == slave) {
3732                         xrcdn = xrcd->com.res_id;
3733                         rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
3734                         list_del(&xrcd->com.list);
3735                         kfree(xrcd);
3736                         __mlx4_xrcd_free(dev, xrcdn);
3737                 }
3738         }
3739         spin_unlock_irq(mlx4_tlock(dev));
3740 }
3741
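/*
 * Full teardown for one slave, serialized by the per-slave tracker mutex.
 * The ordering looks deliberate: QPs are destroyed before the SRQs, CQs
 * and MTTs they reference, and MRs and EQs before MTTs, so each
 * ref_count should have dropped to zero by the time the referenced
 * object is freed.
 */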
3742 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
3743 {
3744         struct mlx4_priv *priv = mlx4_priv(dev);
3745
3746         mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
	/* only MAC entries are tracked here; no VLAN cleanup is performed */
3748         rem_slave_macs(dev, slave);
3749         rem_slave_qps(dev, slave);
3750         rem_slave_srqs(dev, slave);
3751         rem_slave_cqs(dev, slave);
3752         rem_slave_mrs(dev, slave);
3753         rem_slave_eqs(dev, slave);
3754         rem_slave_mtts(dev, slave);
3755         rem_slave_counters(dev, slave);
3756         rem_slave_xrcdns(dev, slave);
3757         rem_slave_fs_rule(dev, slave);
3758         mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3759 }