mlx4_core: Adjust flow steering attach wrapper so that IB works on SR-IOV VFs
[cascardo/linux.git] drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"

#define MLX4_MAC_VALID		(1ull << 63)

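/* One entry per MAC registered by a slave, kept on that slave's RES_MAC list */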
struct mac_res {
	struct list_head list;
	u64 mac;
	u8 port;
};

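/*
 * Fields common to every tracked resource: an rb-tree node keyed by
 * res_id for global lookup, a list node on the owning slave's per-type
 * list, the owning slave, and the state-machine bookkeeping used while
 * a resource is being moved between states.
 */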
struct res_common {
	struct list_head	list;
	struct rb_node		node;
	u64			res_id;
	int			owner;
	int			state;
	int			from_state;
	int			to_state;
	int			removing;
};

enum {
	RES_ANY_BUSY = 1
};

struct res_gid {
	struct list_head	list;
	u8			gid[16];
	enum mlx4_protocol	prot;
	enum mlx4_steer_type	steer;
};

enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

struct res_qp {
	struct res_common	com;
	struct res_mtt	       *mtt;
	struct res_cq	       *rcq;
	struct res_cq	       *scq;
	struct res_srq	       *srq;
	struct list_head	mcg_list;
	spinlock_t		mcg_spl;
	int			local_qpn;
};

enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_mtt {
	struct res_common	com;
	int			order;
	atomic_t		ref_count;
};

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common	com;
	struct res_mtt	       *mtt;
	int			key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common	com;
	struct res_mtt	       *mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common	com;
	struct res_mtt	       *mtt;
	atomic_t		ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

struct res_srq {
	struct res_common	com;
	struct res_mtt	       *mtt;
	struct res_cq	       *cq;
	atomic_t		ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

struct res_counter {
	struct res_common	com;
	int			port;
};

enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,
	RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
	struct res_common	com;
	int			port;
};

enum res_fs_rule_states {
	RES_FS_RULE_BUSY = RES_ANY_BUSY,
	RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
	struct res_common	com;
};

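/* Standard rb-tree walk: find the tracked resource with the given res_id */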
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct res_common *res = container_of(node, struct res_common,
						      node);

		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;
		else
			return res;
	}
	return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct res_common *this = container_of(*new, struct res_common,
						       node);

		parent = *new;
		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id > this->res_id)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);

	return 0;
}

enum qp_transition {
	QP_TRANS_INIT2RTR,
	QP_TRANS_RTR2RTS,
	QP_TRANS_RTS2RTS,
	QP_TRANS_SQERR2RTS,
	QP_TRANS_SQD2SQD,
	QP_TRANS_SQD2RTS
};

/* For Debug uses */
static const char *ResourceType(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
	}
}

int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;
	int t;

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;
}

void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		if (type != RES_TR_FREE_STRUCTS_ONLY)
			for (i = 0; i < dev->num_slaves; i++)
				if (type == RES_TR_FREE_ALL ||
				    dev->caps.function != i)
					mlx4_delete_all_resources_for_slave(dev, i);

		if (type != RES_TR_FREE_SLAVES_ONLY) {
			kfree(priv->mfunc.master.res_tracker.slave_list);
			priv->mfunc.master.res_tracker.slave_list = NULL;
		}
	}
}

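/*
 * The mailbox holds an 8-byte optparam mask followed by the QP context;
 * buf + 64 is pri_path.sched_queue (bit 6 selects the port) and buf + 35
 * is pri_path.pkey_index.  Replace the pkey index the guest wrote with
 * the physical index from the master's virt2phys_pkey table.
 */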
static void update_pkey_index(struct mlx4_dev *dev, int slave,
			      struct mlx4_cmd_mailbox *inbox)
{
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 orig_index = *(u8 *)(inbox->buf + 35);
	u8 new_index;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port;

	port = (sched >> 6 & 1) + 1;

	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
	*(u8 *)(inbox->buf + 35) = new_index;

	mlx4_dbg(dev, "port = %d, orig pkey index = %d, new pkey index = %d\n",
		 port, orig_index, new_index);
}

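/*
 * Pin the GID index a slave may use in its QP context: UD QPs are forced
 * to index (0x80 | slave); for RC/UC QPs, whichever address path the
 * optpar mask says is being modified is forced to the slave's own index.
 */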
static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
		       u8 slave)
{
	struct mlx4_qp_context	*qp_ctx = inbox->buf + 8;
	enum mlx4_qp_optpar	optpar = be32_to_cpu(*(__be32 *) inbox->buf);
	u32			ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;

	if (MLX4_QP_ST_UD == ts)
		qp_ctx->pri_path.mgid_index = 0x80 | slave;

	if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
			qp_ctx->pri_path.mgid_index = slave & 0x7F;
		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
			qp_ctx->alt_path.mgid_index = slave & 0x7F;
	}

	mlx4_dbg(dev, "slave %d, new gid index: 0x%x\n",
		 slave, qp_ctx->pri_path.mgid_index);
}

static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, int res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				  res_id);
}

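/*
 * Look up a resource, verify the caller owns it, and mark it busy so no
 * one else can grab it until put_res() restores the saved state.
 */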
static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
		   enum mlx4_resource type,
		   void *res)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENOENT;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    u64 res_id, int *slave)
{
	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r)
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

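/*
 * Constructors for the per-type tracker entries.  Each returns the
 * embedded res_common so alloc_tr() can handle all types uniformly.
 */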
static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	ret->local_qpn = id;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);

	return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
	struct res_xrcdn *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_XRCD_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id)
{
	struct res_fs_rule *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_FS_RULE_ALLOCATED;

	return &ret->com;
}

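/*
 * Allocate a tracker entry of the given type; "extra" carries the MPT
 * key for RES_MPT and the MTT order for RES_MTT.
 */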
static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		printk(KERN_ERR "implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id);
		break;
	case RES_XRCD:
		ret = alloc_xrcdn_tr(id);
		break;
	case RES_FS_RULE:
		ret = alloc_fs_rule_tr(id);
		break;
	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}

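/*
 * Grant [base, base + count) of the given type to a slave: allocate the
 * tracker entries first, then, under the tracker lock, insert them into
 * the global rb-tree and onto the slave's list; on any collision undo
 * everything already inserted.
 */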
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[type];

	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = res_tracker_insert(root, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	for (--i; i >= 0; --i) {
		rb_erase(&res_arr[i]->node, root);
		list_del(&res_arr[i]->list);
	}

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}

static int remove_qp_ok(struct res_qp *res)
{
	if (res->com.state == RES_QP_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_QP_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
		       __func__, __LINE__,
		       mtt_states_str(res->com.state),
		       atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_MTT_ALLOCATED)
		return -EPERM;
	else if (res->order != order)
		return -EINVAL;

	return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
	if (res->com.state == RES_EQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_EQ_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
	if (res->com.state == RES_COUNTER_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_COUNTER_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
	if (res->com.state == RES_XRCD_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_XRCD_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
	if (res->com.state == RES_FS_RULE_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_FS_RULE_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
	if (res->com.state == RES_CQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_CQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
	if (res->com.state == RES_SRQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_SRQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
	switch (type) {
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_MAC:
		return -ENOSYS;
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	case RES_XRCD:
		return remove_xrcdn_ok((struct res_xrcdn *)res);
	case RES_FS_RULE:
		return remove_fs_rule_ok((struct res_fs_rule *)res);
	default:
		return -EINVAL;
	}
}

static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	u64 i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		rb_erase(&r->node, &tracker->res_tree[type]);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

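/*
 * Begin moving a QP's tracker entry to a new state.  The entry is left
 * in RES_QP_BUSY with from_state/to_state recorded; the caller must
 * finish with res_end_move() on success or res_abort_move() on failure.
 */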
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_qp *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
				 __func__, r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
					 r->com.res_id);
				err = -EINVAL;
			}

			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mpt *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_MPT_BUSY:
			err = -EINVAL;
			break;

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		case RES_MPT_MAPPED:
			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)
				err = -EINVAL;
			break;

		case RES_MPT_HW:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;
			if (mpt)
				*mpt = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_eq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_EQ_BUSY:
			err = -EINVAL;
			break;

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)
				err = -EINVAL;
			break;

		case RES_EQ_HW:
			if (r->com.state != RES_EQ_RESERVED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;
			if (eq)
				*eq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_cq *r;
	int err;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_CQ_BUSY:
			err = -EBUSY;
			break;

		case RES_CQ_ALLOCATED:
			if (r->com.state != RES_CQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			else
				err = 0;
			break;

		case RES_CQ_HW:
			if (r->com.state != RES_CQ_ALLOCATED)
				err = -EINVAL;
			else
				err = 0;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_CQ_BUSY;
			if (cq)
				*cq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_srq_states state, struct res_srq **srq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_srq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_SRQ_BUSY:
			err = -EINVAL;
			break;

		case RES_SRQ_ALLOCATED:
			if (r->com.state != RES_SRQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			break;

		case RES_SRQ_HW:
			if (r->com.state != RES_SRQ_ALLOCATED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_SRQ_BUSY;
			if (srq)
				*srq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

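/*
 * Roll back (abort) or commit (end) a state move started by one of the
 * *_res_start_move_to() helpers: restore from_state or install to_state.
 */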
static void res_abort_move(struct mlx4_dev *dev, int slave,
			   enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->to_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

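/*
 * A slave may add a tracker entry for an already-reserved QPN only if
 * the QPN lies in the init-time reserved range and either the caller is
 * the master or the QPN is one of that slave's proxy QPs.
 */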
static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn) &&
		(mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}

static int fw_reserved(struct mlx4_dev *dev, int qpn)
{
	return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
}

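/*
 * QP allocation is two-phased: RES_OP_RESERVE reserves a range of QP
 * numbers, RES_OP_MAP_ICM then maps ICM for one QPN (skipped for
 * firmware-reserved QPNs, whose ICM is already mapped).
 */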
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err;
	int count;
	int align;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		count = get_param_l(&in_param);
		align = get_param_h(&in_param);
		err = __mlx4_qp_reserve_range(dev, count, align, &base);
		if (err)
			return err;

		err = add_res_range(dev, slave, base, count, RES_QP, 0);
		if (err) {
			__mlx4_qp_release_range(dev, base, count);
			return err;
		}
		set_param_l(out_param, base);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		if (valid_reserved(dev, slave, qpn)) {
			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
			if (err)
				return err;
		}

		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
					   NULL, 1);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn)) {
			err = __mlx4_qp_alloc_icm(dev, qpn);
			if (err) {
				res_abort_move(dev, slave, RES_QP, qpn);
				return err;
			}
		}

		res_end_move(dev, slave, RES_QP, qpn);
		break;

	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	order = get_param_l(&in_param);
	base = __mlx4_alloc_mtt_range(dev, order);
	if (base == -1)
		return -ENOMEM;

	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
	if (err)
		__mlx4_free_mtt_range(dev, base, order);
	else
		set_param_l(out_param, base);

	return err;
}

static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = __mlx4_mr_reserve(dev);
		if (index == -1)
			break;
		id = index & mpt_mask(dev);

		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
		if (err) {
			__mlx4_mr_release(dev, index);
			break;
		}
		set_param_l(out_param, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);
		if (err)
			return err;

		err = __mlx4_mr_alloc_icm(dev, mpt->key);
		if (err) {
			res_abort_move(dev, slave, RES_MPT, id);
			return err;
		}

		res_end_move(dev, slave, RES_MPT, id);
		break;
	}
	return err;
}

static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_cq_alloc_icm(dev, &cqn);
		if (err)
			break;

		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err) {
			__mlx4_cq_free_icm(dev, cqn);
			break;
		}

		set_param_l(out_param, cqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_srq_alloc_icm(dev, &srqn);
		if (err)
			break;

		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err) {
			__mlx4_srq_free_icm(dev, srqn);
			break;
		}

		set_param_l(out_param, srqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

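/*
 * MACs are tracked on a plain per-slave list rather than in the rb-tree,
 * so rem_slave_macs() can unregister everything a slave leaves behind.
 */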
static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct mac_res *res;

	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res)
		return -ENOMEM;
	res->mac = mac;
	res->port = (u8) port;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_MAC]);
	return 0;
}

static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
			       int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			list_del(&res->list);
			kfree(res);
			break;
		}
	}
}

static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		list_del(&res->list);
		__mlx4_unregister_mac(dev, res->port, res->mac);
		kfree(res);
	}
}

static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int port;
	u64 mac;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	port = get_param_l(out_param);
	mac = in_param;

	err = __mlx4_register_mac(dev, port, mac);
	if (err >= 0) {
		set_param_l(out_param, err);
		err = 0;
	}

	if (!err) {
		err = mac_add_to_slave(dev, slave, mac, port);
		if (err)
			__mlx4_unregister_mac(dev, port, mac);
	}
	return err;
}

static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	return 0;
}

static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param)
{
	u32 index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = __mlx4_counter_alloc(dev, &index);
	if (err)
		return err;

	err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err)
		__mlx4_counter_free(dev, index);
	else
		set_param_l(out_param, index);

	return err;
}

static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			   u64 in_param, u64 *out_param)
{
	u32 xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = __mlx4_xrcd_alloc(dev, &xrcdn);
	if (err)
		return err;

	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		__mlx4_xrcd_free(dev, xrcdn);
	else
		set_param_l(out_param, xrcdn);

	return err;
}

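/*
 * Command wrapper for ALLOC_RES issued by a slave: vhcr->in_modifier
 * selects the resource type, vhcr->op_modifier the allocation op, and
 * parameters travel in vhcr->in_param/out_param.
 */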
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
	case RES_QP:
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MTT:
		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_CQ:
		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_VLAN:
		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	case RES_COUNTER:
		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
					vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
				      vhcr->in_param, &vhcr->out_param);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param)
{
	int err;
	int count;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		base = get_param_l(&in_param) & 0x7fffff;
		count = get_param_h(&in_param);
		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
		if (err)
			break;
		__mlx4_qp_release_range(dev, base, count);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
					   NULL, 0);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn))
			__mlx4_qp_free_icm(dev, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

		if (valid_reserved(dev, slave, qpn))
			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	base = get_param_l(&in_param);
	order = get_param_h(&in_param);
	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
	if (!err)
		__mlx4_free_mtt_range(dev, base, order);
	return err;
}

static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = get_res(dev, slave, id, RES_MPT, &mpt);
		if (err)
			break;
		index = mpt->key;
		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
		if (err)
			break;
		__mlx4_mr_release(dev, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);
		if (err)
			return err;

		__mlx4_mr_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);
		return err;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		cqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err)
			break;

		__mlx4_cq_free_icm(dev, cqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		srqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err)
			break;

		__mlx4_srq_free_icm(dev, srqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int port;
	int err = 0;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		port = get_param_l(out_param);
		mac_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_mac(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	return 0;
}

static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			    u64 in_param, u64 *out_param)
{
	int index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	index = get_param_l(&in_param);
	err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err)
		return err;

	__mlx4_counter_free(dev, index);

	return err;
}

static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	int xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	xrcdn = get_param_l(&in_param);
	err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		return err;

	__mlx4_xrcd_free(dev, xrcdn);

	return err;
}

1729 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
1730                           struct mlx4_vhcr *vhcr,
1731                           struct mlx4_cmd_mailbox *inbox,
1732                           struct mlx4_cmd_mailbox *outbox,
1733                           struct mlx4_cmd_info *cmd)
1734 {
1735         int err = -EINVAL;
1736         int alop = vhcr->op_modifier;
1737
1738         switch (vhcr->in_modifier) {
1739         case RES_QP:
1740                 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
1741                                   vhcr->in_param);
1742                 break;
1743
1744         case RES_MTT:
1745                 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
1746                                    vhcr->in_param, &vhcr->out_param);
1747                 break;
1748
1749         case RES_MPT:
1750                 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
1751                                    vhcr->in_param);
1752                 break;
1753
1754         case RES_CQ:
1755                 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
1756                                   vhcr->in_param, &vhcr->out_param);
1757                 break;
1758
1759         case RES_SRQ:
1760                 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
1761                                    vhcr->in_param, &vhcr->out_param);
1762                 break;
1763
1764         case RES_MAC:
1765                 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
1766                                    vhcr->in_param, &vhcr->out_param);
1767                 break;
1768
1769         case RES_VLAN:
1770                 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
1771                                    vhcr->in_param, &vhcr->out_param);
1772                 break;
1773
1774         case RES_COUNTER:
1775                 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
1776                                        vhcr->in_param, &vhcr->out_param);
1777                 break;
1778
1779         case RES_XRCD:
1780                 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
1781                                      vhcr->in_param, &vhcr->out_param);
1782                 break;

1783         default:
1784                 break;
1785         }
1786         return err;
1787 }
1788
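     /*
      * MPTs flagged as physical (MLX4_MPT_FLAG_PHYSICAL, bit 9 of the
      * flags word) carry no MTT translation, so no MTT range needs to be
      * looked up or validated for them.
      */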
1789 /* ugly but other choices are uglier */
1790 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
1791 {
1792         return (be32_to_cpu(mpt->flags) >> 9) & 1;
1793 }
1794
1795 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
1796 {
1797         return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
1798 }
1799
1800 static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
1801 {
1802         return be32_to_cpu(mpt->mtt_sz);
1803 }
1804
1805 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
1806 {
1807         return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
1808 }
1809
1810 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
1811 {
1812         return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
1813 }
1814
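     /*
      * Number of MTT pages a QP needs, recomputed from the sizes and
      * strides encoded in its context: each work queue occupies
      * (1 << log_size) * (1 << (log_stride + 4)) bytes, and the RQ
      * contributes nothing for SRQ-attached, RSS or XRC QPs.  As an
      * illustration (values assumed, not taken from a real context):
      * log_sq_size = 10, log_sq_stride = 2 and page_shift = 12 give a
      * 64 KB SQ, i.e. 16 pages before rounding up to a power of two.
      */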
1815 static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
1816 {
1817         int page_shift = (qpc->log_page_size & 0x3f) + 12;
1818         int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
1819         int log_sq_stride = qpc->sq_size_stride & 7;
1820         int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
1821         int log_rq_stride = qpc->rq_size_stride & 7;
1822         int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
1823         int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
1824         int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
1825         int sq_size;
1826         int rq_size;
1827         int total_pages;
1828         int total_mem;
1829         int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
1830
1831         sq_size = 1 << (log_sq_size + log_sq_stride + 4);
1832         rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
1833         total_mem = sq_size + rq_size;
1834         total_pages =
1835                 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
1836                                    page_shift);
1837
1838         return total_pages;
1839 }
1840
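     /*
      * An MTT range is tracked by its first entry and power-of-two order;
      * a reference from another resource is legal only if it lies entirely
      * inside a range the slave owns.
      */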
1841 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
1842                            int size, struct res_mtt *mtt)
1843 {
1844         int res_start = mtt->com.res_id;
1845         int res_size = (1 << mtt->order);
1846
1847         if (start < res_start || start + size > res_start + res_size)
1848                 return -EPERM;
1849         return 0;
1850 }
1851
1852 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
1853                            struct mlx4_vhcr *vhcr,
1854                            struct mlx4_cmd_mailbox *inbox,
1855                            struct mlx4_cmd_mailbox *outbox,
1856                            struct mlx4_cmd_info *cmd)
1857 {
1858         int err;
1859         int index = vhcr->in_modifier;
1860         struct res_mtt *mtt;
1861         struct res_mpt *mpt;
1862         int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
1863         int phys;
1864         int id;
1865
1866         id = index & mpt_mask(dev);
1867         err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
1868         if (err)
1869                 return err;
1870
1871         phys = mr_phys_mpt(inbox->buf);
1872         if (!phys) {
1873                 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
1874                 if (err)
1875                         goto ex_abort;
1876
1877                 err = check_mtt_range(dev, slave, mtt_base,
1878                                       mr_get_mtt_size(inbox->buf), mtt);
1879                 if (err)
1880                         goto ex_put;
1881
1882                 mpt->mtt = mtt;
1883         }
1884
1885         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1886         if (err)
1887                 goto ex_put;
1888
1889         if (!phys) {
1890                 atomic_inc(&mtt->ref_count);
1891                 put_res(dev, slave, mtt->com.res_id, RES_MTT);
1892         }
1893
1894         res_end_move(dev, slave, RES_MPT, id);
1895         return 0;
1896
1897 ex_put:
1898         if (!phys)
1899                 put_res(dev, slave, mtt->com.res_id, RES_MTT);
1900 ex_abort:
1901         res_abort_move(dev, slave, RES_MPT, id);
1902
1903         return err;
1904 }
1905
1906 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
1907                            struct mlx4_vhcr *vhcr,
1908                            struct mlx4_cmd_mailbox *inbox,
1909                            struct mlx4_cmd_mailbox *outbox,
1910                            struct mlx4_cmd_info *cmd)
1911 {
1912         int err;
1913         int index = vhcr->in_modifier;
1914         struct res_mpt *mpt;
1915         int id;
1916
1917         id = index & mpt_mask(dev);
1918         err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
1919         if (err)
1920                 return err;
1921
1922         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1923         if (err)
1924                 goto ex_abort;
1925
1926         if (mpt->mtt)
1927                 atomic_dec(&mpt->mtt->ref_count);
1928
1929         res_end_move(dev, slave, RES_MPT, id);
1930         return 0;
1931
1932 ex_abort:
1933         res_abort_move(dev, slave, RES_MPT, id);
1934
1935         return err;
1936 }
1937
1938 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
1939                            struct mlx4_vhcr *vhcr,
1940                            struct mlx4_cmd_mailbox *inbox,
1941                            struct mlx4_cmd_mailbox *outbox,
1942                            struct mlx4_cmd_info *cmd)
1943 {
1944         int err;
1945         int index = vhcr->in_modifier;
1946         struct res_mpt *mpt;
1947         int id;
1948
1949         id = index & mpt_mask(dev);
1950         err = get_res(dev, slave, id, RES_MPT, &mpt);
1951         if (err)
1952                 return err;
1953
1954         if (mpt->com.from_state != RES_MPT_HW) {
1955                 err = -EBUSY;
1956                 goto out;
1957         }
1958
1959         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1960
1961 out:
1962         put_res(dev, slave, id, RES_MPT);
1963         return err;
1964 }
1965
1966 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
1967 {
1968         return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
1969 }
1970
1971 static int qp_get_scqn(struct mlx4_qp_context *qpc)
1972 {
1973         return be32_to_cpu(qpc->cqn_send) & 0xffffff;
1974 }
1975
1976 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
1977 {
1978         return be32_to_cpu(qpc->srqn) & 0x1ffffff;
1979 }
1980
1981 static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
1982                                   struct mlx4_qp_context *context)
1983 {
1984         u32 qpn = vhcr->in_modifier & 0xffffff;
1985         u32 qkey = 0;
1986
1987         if (mlx4_get_parav_qkey(dev, qpn, &qkey))
1988                 return;
1989
1990         /* adjust qkey in qp context */
1991         context->qkey = cpu_to_be32(qkey);
1992 }
1993
1994 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
1995                              struct mlx4_vhcr *vhcr,
1996                              struct mlx4_cmd_mailbox *inbox,
1997                              struct mlx4_cmd_mailbox *outbox,
1998                              struct mlx4_cmd_info *cmd)
1999 {
2000         int err;
2001         int qpn = vhcr->in_modifier & 0x7fffff;
2002         struct res_mtt *mtt;
2003         struct res_qp *qp;
2004         struct mlx4_qp_context *qpc = inbox->buf + 8;
2005         int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2006         int mtt_size = qp_get_mtt_size(qpc);
2007         struct res_cq *rcq;
2008         struct res_cq *scq;
2009         int rcqn = qp_get_rcqn(qpc);
2010         int scqn = qp_get_scqn(qpc);
2011         u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2012         int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2013         struct res_srq *srq;
2014         int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2015
2016         err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2017         if (err)
2018                 return err;
2019         qp->local_qpn = local_qpn;
2020
2021         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2022         if (err)
2023                 goto ex_abort;
2024
2025         err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2026         if (err)
2027                 goto ex_put_mtt;
2028
2029         err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2030         if (err)
2031                 goto ex_put_mtt;
2032
2033         if (scqn != rcqn) {
2034                 err = get_res(dev, slave, scqn, RES_CQ, &scq);
2035                 if (err)
2036                         goto ex_put_rcq;
2037         } else {
2038                 scq = rcq;
             }
2039
2040         if (use_srq) {
2041                 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2042                 if (err)
2043                         goto ex_put_scq;
2044         }
2045
2046         adjust_proxy_tun_qkey(dev, vhcr, qpc);
2047         update_pkey_index(dev, slave, inbox);
2048         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2049         if (err)
2050                 goto ex_put_srq;
2051         atomic_inc(&mtt->ref_count);
2052         qp->mtt = mtt;
2053         atomic_inc(&rcq->ref_count);
2054         qp->rcq = rcq;
2055         atomic_inc(&scq->ref_count);
2056         qp->scq = scq;
2057
2058         if (scqn != rcqn)
2059                 put_res(dev, slave, scqn, RES_CQ);
2060
2061         if (use_srq) {
2062                 atomic_inc(&srq->ref_count);
2063                 put_res(dev, slave, srqn, RES_SRQ);
2064                 qp->srq = srq;
2065         }
2066         put_res(dev, slave, rcqn, RES_CQ);
2067         put_res(dev, slave, mtt_base, RES_MTT);
2068         res_end_move(dev, slave, RES_QP, qpn);
2069
2070         return 0;
2071
2072 ex_put_srq:
2073         if (use_srq)
2074                 put_res(dev, slave, srqn, RES_SRQ);
2075 ex_put_scq:
2076         if (scqn != rcqn)
2077                 put_res(dev, slave, scqn, RES_CQ);
2078 ex_put_rcq:
2079         put_res(dev, slave, rcqn, RES_CQ);
2080 ex_put_mtt:
2081         put_res(dev, slave, mtt_base, RES_MTT);
2082 ex_abort:
2083         res_abort_move(dev, slave, RES_QP, qpn);
2084
2085         return err;
2086 }
2087
2088 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2089 {
2090         return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2091 }
2092
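     /*
      * EQ and CQ entries are 32 bytes each, hence the "+ 5" (log2(32))
      * below when a log entry count is converted into a page count.
      */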
2093 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2094 {
2095         int log_eq_size = eqc->log_eq_size & 0x1f;
2096         int page_shift = (eqc->log_page_size & 0x3f) + 12;
2097
2098         if (log_eq_size + 5 < page_shift)
2099                 return 1;
2100
2101         return 1 << (log_eq_size + 5 - page_shift);
2102 }
2103
2104 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
2105 {
2106         return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2107 }
2108
2109 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2110 {
2111         int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
2112         int page_shift = (cqc->log_page_size & 0x3f) + 12;
2113
2114         if (log_cq_size + 5 < page_shift)
2115                 return 1;
2116
2117         return 1 << (log_cq_size + 5 - page_shift);
2118 }
2119
2120 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2121                           struct mlx4_vhcr *vhcr,
2122                           struct mlx4_cmd_mailbox *inbox,
2123                           struct mlx4_cmd_mailbox *outbox,
2124                           struct mlx4_cmd_info *cmd)
2125 {
2126         int err;
2127         int eqn = vhcr->in_modifier;
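             /*
              * EQ numbers are only unique per slave, so tracker ids for
              * EQs are qualified with the slave number.
              */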
2128         int res_id = (slave << 8) | eqn;
2129         struct mlx4_eq_context *eqc = inbox->buf;
2130         int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
2131         int mtt_size = eq_get_mtt_size(eqc);
2132         struct res_eq *eq;
2133         struct res_mtt *mtt;
2134
2135         err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2136         if (err)
2137                 return err;
2138         err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
2139         if (err)
2140                 goto out_add;
2141
2142         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2143         if (err)
2144                 goto out_move;
2145
2146         err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2147         if (err)
2148                 goto out_put;
2149
2150         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2151         if (err)
2152                 goto out_put;
2153
2154         atomic_inc(&mtt->ref_count);
2155         eq->mtt = mtt;
2156         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2157         res_end_move(dev, slave, RES_EQ, res_id);
2158         return 0;
2159
2160 out_put:
2161         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2162 out_move:
2163         res_abort_move(dev, slave, RES_EQ, res_id);
2164 out_add:
2165         rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2166         return err;
2167 }
2168
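     /*
      * Find the slave-owned MTT range that fully contains
      * [start, start + len) and mark it busy under the tracker lock; the
      * caller releases it again with put_res().
      */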
2169 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2170                               int len, struct res_mtt **res)
2171 {
2172         struct mlx4_priv *priv = mlx4_priv(dev);
2173         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2174         struct res_mtt *mtt;
2175         int err = -EINVAL;
2176
2177         spin_lock_irq(mlx4_tlock(dev));
2178         list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
2179                             com.list) {
2180                 if (!check_mtt_range(dev, slave, start, len, mtt)) {
2181                         *res = mtt;
2182                         mtt->com.from_state = mtt->com.state;
2183                         mtt->com.state = RES_MTT_BUSY;
2184                         err = 0;
2185                         break;
2186                 }
2187         }
2188         spin_unlock_irq(mlx4_tlock(dev));
2189
2190         return err;
2191 }
2192
2193 static int verify_qp_parameters(struct mlx4_dev *dev,
2194                                 struct mlx4_cmd_mailbox *inbox,
2195                                 enum qp_transition transition, u8 slave)
2196 {
2197         u32                     qp_type;
2198         struct mlx4_qp_context  *qp_ctx;
2199         enum mlx4_qp_optpar     optpar;
2200
2201         qp_ctx  = inbox->buf + 8;
2202         qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2203         optpar  = be32_to_cpu(*(__be32 *) inbox->buf);
2204
2205         switch (qp_type) {
2206         case MLX4_QP_ST_RC:
2207         case MLX4_QP_ST_UC:
2208                 switch (transition) {
2209                 case QP_TRANS_INIT2RTR:
2210                 case QP_TRANS_RTR2RTS:
2211                 case QP_TRANS_RTS2RTS:
2212                 case QP_TRANS_SQD2SQD:
2213                 case QP_TRANS_SQD2RTS:
2214                         if (slave != mlx4_master_func_num(dev)) {
2215                                 /* slaves have only gid index 0 */
2216                                 if ((optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) &&
2217                                      qp_ctx->pri_path.mgid_index)
2218                                         return -EINVAL;
2219                                 if ((optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) &&
2220                                      qp_ctx->alt_path.mgid_index)
2221                                         return -EINVAL;
                             }
2222                         break;
2223                 default:
2224                         break;
2225                 }
2226
2227                 break;
2228         default:
2229                 break;
2230         }
2231
2232         return 0;
2233 }
2234
2235 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
2236                            struct mlx4_vhcr *vhcr,
2237                            struct mlx4_cmd_mailbox *inbox,
2238                            struct mlx4_cmd_mailbox *outbox,
2239                            struct mlx4_cmd_info *cmd)
2240 {
2241         struct mlx4_mtt mtt;
2242         __be64 *page_list = inbox->buf;
2243         u64 *pg_list = (u64 *)page_list;
2244         int i;
2245         struct res_mtt *rmtt = NULL;
2246         int start = be64_to_cpu(page_list[0]);
2247         int npages = vhcr->in_modifier;
2248         int err;
2249
2250         err = get_containing_mtt(dev, slave, start, npages, &rmtt);
2251         if (err)
2252                 return err;
2253
2254         /* Call the SW implementation of write_mtt:
2255          * - Prepare a dummy mtt struct
2256          * - Translate inbox contents to simple addresses in host endianness */
2257         mtt.offset = 0;  /* TBD this is broken but I don't handle it since
2258                             we don't really use it */
2259         mtt.order = 0;
2260         mtt.page_shift = 0;
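             /*
              * Guest entries carry the MTT present flag in bit 0
              * (MLX4_MTT_FLAG_PRESENT); clear it and convert to host order
              * here, since __mlx4_write_mtt() re-applies the flag when it
              * writes the entries out.
              */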
2261         for (i = 0; i < npages; ++i)
2262                 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
2263
2264         err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
2265                                ((u64 *)page_list + 2));
2266
2267         if (rmtt)
2268                 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
2269
2270         return err;
2271 }
2272
2273 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2274                           struct mlx4_vhcr *vhcr,
2275                           struct mlx4_cmd_mailbox *inbox,
2276                           struct mlx4_cmd_mailbox *outbox,
2277                           struct mlx4_cmd_info *cmd)
2278 {
2279         int eqn = vhcr->in_modifier;
2280         int res_id = eqn | (slave << 8);
2281         struct res_eq *eq;
2282         int err;
2283
2284         err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
2285         if (err)
2286                 return err;
2287
2288         err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
2289         if (err)
2290                 goto ex_abort;
2291
2292         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2293         if (err)
2294                 goto ex_put;
2295
2296         atomic_dec(&eq->mtt->ref_count);
2297         put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2298         res_end_move(dev, slave, RES_EQ, res_id);
2299         rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2300
2301         return 0;
2302
2303 ex_put:
2304         put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2305 ex_abort:
2306         res_abort_move(dev, slave, RES_EQ, res_id);
2307
2308         return err;
2309 }
2310
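     /*
      * Forward an event to a slave by generating an EQE on the event queue
      * the slave registered for this event type, via the GEN_EQE firmware
      * command.
      */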
2311 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
2312 {
2313         struct mlx4_priv *priv = mlx4_priv(dev);
2314         struct mlx4_slave_event_eq_info *event_eq;
2315         struct mlx4_cmd_mailbox *mailbox;
2316         u32 in_modifier = 0;
2317         int err;
2318         int res_id;
2319         struct res_eq *req;
2320
2321         if (!priv->mfunc.master.slave_state)
2322                 return -EINVAL;
2323
2324         event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
2325
2326         /* Create the event only if the slave is registered */
2327         if (event_eq->eqn < 0)
2328                 return 0;
2329
2330         mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2331         res_id = (slave << 8) | event_eq->eqn;
2332         err = get_res(dev, slave, res_id, RES_EQ, &req);
2333         if (err)
2334                 goto unlock;
2335
2336         if (req->com.from_state != RES_EQ_HW) {
2337                 err = -EINVAL;
2338                 goto put;
2339         }
2340
2341         mailbox = mlx4_alloc_cmd_mailbox(dev);
2342         if (IS_ERR(mailbox)) {
2343                 err = PTR_ERR(mailbox);
2344                 goto put;
2345         }
2346
2347         if (eqe->type == MLX4_EVENT_TYPE_CMD) {
2348                 ++event_eq->token;
2349                 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
2350         }
2351
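             /*
              * An EQE is 32 bytes but only the first 28 are copied; the
              * trailing bytes, which include the ownership bit, are left
              * for the EQE delivery path to fill in.
              */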
2352         memcpy(mailbox->buf, (u8 *) eqe, 28);
2353
2354         in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
2355
2356         err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
2357                        MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
2358                        MLX4_CMD_NATIVE);
2359
2360         put_res(dev, slave, res_id, RES_EQ);
2361         mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2362         mlx4_free_cmd_mailbox(dev, mailbox);
2363         return err;
2364
2365 put:
2366         put_res(dev, slave, res_id, RES_EQ);
2367
2368 unlock:
2369         mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2370         return err;
2371 }
2372
2373 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
2374                           struct mlx4_vhcr *vhcr,
2375                           struct mlx4_cmd_mailbox *inbox,
2376                           struct mlx4_cmd_mailbox *outbox,
2377                           struct mlx4_cmd_info *cmd)
2378 {
2379         int eqn = vhcr->in_modifier;
2380         int res_id = eqn | (slave << 8);
2381         struct res_eq *eq;
2382         int err;
2383
2384         err = get_res(dev, slave, res_id, RES_EQ, &eq);
2385         if (err)
2386                 return err;
2387
2388         if (eq->com.from_state != RES_EQ_HW) {
2389                 err = -EINVAL;
2390                 goto ex_put;
2391         }
2392
2393         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2394
2395 ex_put:
2396         put_res(dev, slave, res_id, RES_EQ);
2397         return err;
2398 }
2399
2400 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2401                           struct mlx4_vhcr *vhcr,
2402                           struct mlx4_cmd_mailbox *inbox,
2403                           struct mlx4_cmd_mailbox *outbox,
2404                           struct mlx4_cmd_info *cmd)
2405 {
2406         int err;
2407         int cqn = vhcr->in_modifier;
2408         struct mlx4_cq_context *cqc = inbox->buf;
2409         int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2410         struct res_cq *cq;
2411         struct res_mtt *mtt;
2412
2413         err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
2414         if (err)
2415                 return err;
2416         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2417         if (err)
2418                 goto out_move;
2419         err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2420         if (err)
2421                 goto out_put;
2422         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2423         if (err)
2424                 goto out_put;
2425         atomic_inc(&mtt->ref_count);
2426         cq->mtt = mtt;
2427         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2428         res_end_move(dev, slave, RES_CQ, cqn);
2429         return 0;
2430
2431 out_put:
2432         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2433 out_move:
2434         res_abort_move(dev, slave, RES_CQ, cqn);
2435         return err;
2436 }
2437
2438 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2439                           struct mlx4_vhcr *vhcr,
2440                           struct mlx4_cmd_mailbox *inbox,
2441                           struct mlx4_cmd_mailbox *outbox,
2442                           struct mlx4_cmd_info *cmd)
2443 {
2444         int err;
2445         int cqn = vhcr->in_modifier;
2446         struct res_cq *cq;
2447
2448         err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
2449         if (err)
2450                 return err;
2451         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2452         if (err)
2453                 goto out_move;
2454         atomic_dec(&cq->mtt->ref_count);
2455         res_end_move(dev, slave, RES_CQ, cqn);
2456         return 0;
2457
2458 out_move:
2459         res_abort_move(dev, slave, RES_CQ, cqn);
2460         return err;
2461 }
2462
2463 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2464                           struct mlx4_vhcr *vhcr,
2465                           struct mlx4_cmd_mailbox *inbox,
2466                           struct mlx4_cmd_mailbox *outbox,
2467                           struct mlx4_cmd_info *cmd)
2468 {
2469         int cqn = vhcr->in_modifier;
2470         struct res_cq *cq;
2471         int err;
2472
2473         err = get_res(dev, slave, cqn, RES_CQ, &cq);
2474         if (err)
2475                 return err;
2476
2477         if (cq->com.from_state != RES_CQ_HW) {
2478                 err = -EBUSY;
                     goto ex_put;
             }
2479
2480         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2481 ex_put:
2482         put_res(dev, slave, cqn, RES_CQ);
2483
2484         return err;
2485 }
2486
2487 static int handle_resize(struct mlx4_dev *dev, int slave,
2488                          struct mlx4_vhcr *vhcr,
2489                          struct mlx4_cmd_mailbox *inbox,
2490                          struct mlx4_cmd_mailbox *outbox,
2491                          struct mlx4_cmd_info *cmd,
2492                          struct res_cq *cq)
2493 {
2494         int err;
2495         struct res_mtt *orig_mtt;
2496         struct res_mtt *mtt;
2497         struct mlx4_cq_context *cqc = inbox->buf;
2498         int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2499
2500         err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
2501         if (err)
2502                 return err;
2503
2504         if (orig_mtt != cq->mtt) {
2505                 err = -EINVAL;
2506                 goto ex_put;
2507         }
2508
2509         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2510         if (err)
2511                 goto ex_put;
2512
2513         err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2514         if (err)
2515                 goto ex_put1;
2516         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2517         if (err)
2518                 goto ex_put1;
2519         atomic_dec(&orig_mtt->ref_count);
2520         put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2521         atomic_inc(&mtt->ref_count);
2522         cq->mtt = mtt;
2523         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2524         return 0;
2525
2526 ex_put1:
2527         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2528 ex_put:
2529         put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2530
2531         return err;
2533 }
2534
2535 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2536                            struct mlx4_vhcr *vhcr,
2537                            struct mlx4_cmd_mailbox *inbox,
2538                            struct mlx4_cmd_mailbox *outbox,
2539                            struct mlx4_cmd_info *cmd)
2540 {
2541         int cqn = vhcr->in_modifier;
2542         struct res_cq *cq;
2543         int err;
2544
2545         err = get_res(dev, slave, cqn, RES_CQ, &cq);
2546         if (err)
2547                 return err;
2548
2549         if (cq->com.from_state != RES_CQ_HW) {
2550                 err = -EBUSY;
                     goto ex_put;
             }
2551
2552         if (vhcr->op_modifier == 0) {
2553                 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
2554                 goto ex_put;
2555         }
2556
2557         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2558 ex_put:
2559         put_res(dev, slave, cqn, RES_CQ);
2560
2561         return err;
2562 }
2563
2564 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
2565 {
2566         int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
2567         int log_rq_stride = srqc->logstride & 7;
2568         int page_shift = (srqc->log_page_size & 0x3f) + 12;
2569
2570         if (log_srq_size + log_rq_stride + 4 < page_shift)
2571                 return 1;
2572
2573         return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
2574 }
2575
2576 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2577                            struct mlx4_vhcr *vhcr,
2578                            struct mlx4_cmd_mailbox *inbox,
2579                            struct mlx4_cmd_mailbox *outbox,
2580                            struct mlx4_cmd_info *cmd)
2581 {
2582         int err;
2583         int srqn = vhcr->in_modifier;
2584         struct res_mtt *mtt;
2585         struct res_srq *srq;
2586         struct mlx4_srq_context *srqc = inbox->buf;
2587         int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
2588
2589         if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
2590                 return -EINVAL;
2591
2592         err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
2593         if (err)
2594                 return err;
2595         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2596         if (err)
2597                 goto ex_abort;
2598         err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
2599                               mtt);
2600         if (err)
2601                 goto ex_put_mtt;
2602
2603         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2604         if (err)
2605                 goto ex_put_mtt;
2606
2607         atomic_inc(&mtt->ref_count);
2608         srq->mtt = mtt;
2609         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2610         res_end_move(dev, slave, RES_SRQ, srqn);
2611         return 0;
2612
2613 ex_put_mtt:
2614         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2615 ex_abort:
2616         res_abort_move(dev, slave, RES_SRQ, srqn);
2617
2618         return err;
2619 }
2620
2621 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2622                            struct mlx4_vhcr *vhcr,
2623                            struct mlx4_cmd_mailbox *inbox,
2624                            struct mlx4_cmd_mailbox *outbox,
2625                            struct mlx4_cmd_info *cmd)
2626 {
2627         int err;
2628         int srqn = vhcr->in_modifier;
2629         struct res_srq *srq;
2630
2631         err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
2632         if (err)
2633                 return err;
2634         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2635         if (err)
2636                 goto ex_abort;
2637         atomic_dec(&srq->mtt->ref_count);
2638         if (srq->cq)
2639                 atomic_dec(&srq->cq->ref_count);
2640         res_end_move(dev, slave, RES_SRQ, srqn);
2641
2642         return 0;
2643
2644 ex_abort:
2645         res_abort_move(dev, slave, RES_SRQ, srqn);
2646
2647         return err;
2648 }
2649
2650 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2651                            struct mlx4_vhcr *vhcr,
2652                            struct mlx4_cmd_mailbox *inbox,
2653                            struct mlx4_cmd_mailbox *outbox,
2654                            struct mlx4_cmd_info *cmd)
2655 {
2656         int err;
2657         int srqn = vhcr->in_modifier;
2658         struct res_srq *srq;
2659
2660         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2661         if (err)
2662                 return err;
2663         if (srq->com.from_state != RES_SRQ_HW) {
2664                 err = -EBUSY;
2665                 goto out;
2666         }
2667         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2668 out:
2669         put_res(dev, slave, srqn, RES_SRQ);
2670         return err;
2671 }
2672
2673 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2674                          struct mlx4_vhcr *vhcr,
2675                          struct mlx4_cmd_mailbox *inbox,
2676                          struct mlx4_cmd_mailbox *outbox,
2677                          struct mlx4_cmd_info *cmd)
2678 {
2679         int err;
2680         int srqn = vhcr->in_modifier;
2681         struct res_srq *srq;
2682
2683         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2684         if (err)
2685                 return err;
2686
2687         if (srq->com.from_state != RES_SRQ_HW) {
2688                 err = -EBUSY;
2689                 goto out;
2690         }
2691
2692         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2693 out:
2694         put_res(dev, slave, srqn, RES_SRQ);
2695         return err;
2696 }
2697
2698 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
2699                         struct mlx4_vhcr *vhcr,
2700                         struct mlx4_cmd_mailbox *inbox,
2701                         struct mlx4_cmd_mailbox *outbox,
2702                         struct mlx4_cmd_info *cmd)
2703 {
2704         int err;
2705         int qpn = vhcr->in_modifier & 0x7fffff;
2706         struct res_qp *qp;
2707
2708         err = get_res(dev, slave, qpn, RES_QP, &qp);
2709         if (err)
2710                 return err;
2711         if (qp->com.from_state != RES_QP_HW) {
2712                 err = -EBUSY;
2713                 goto out;
2714         }
2715
2716         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2717 out:
2718         put_res(dev, slave, qpn, RES_QP);
2719         return err;
2720 }
2721
2722 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2723                               struct mlx4_vhcr *vhcr,
2724                               struct mlx4_cmd_mailbox *inbox,
2725                               struct mlx4_cmd_mailbox *outbox,
2726                               struct mlx4_cmd_info *cmd)
2727 {
2728         struct mlx4_qp_context *context = inbox->buf + 8;
2729         adjust_proxy_tun_qkey(dev, vhcr, context);
2730         update_pkey_index(dev, slave, inbox);
2731         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2732 }
2733
2734 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
2735                              struct mlx4_vhcr *vhcr,
2736                              struct mlx4_cmd_mailbox *inbox,
2737                              struct mlx4_cmd_mailbox *outbox,
2738                              struct mlx4_cmd_info *cmd)
2739 {
2740         int err;
2741         struct mlx4_qp_context *qpc = inbox->buf + 8;
2742
2743         err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
2744         if (err)
2745                 return err;
2746
2747         update_pkey_index(dev, slave, inbox);
2748         update_gid(dev, inbox, (u8)slave);
2749         adjust_proxy_tun_qkey(dev, vhcr, qpc);
2750
2751         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2752 }
2753
2754 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2755                             struct mlx4_vhcr *vhcr,
2756                             struct mlx4_cmd_mailbox *inbox,
2757                             struct mlx4_cmd_mailbox *outbox,
2758                             struct mlx4_cmd_info *cmd)
2759 {
2760         int err;
2761         struct mlx4_qp_context *context = inbox->buf + 8;
2762
2763         err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
2764         if (err)
2765                 return err;
2766
2767         update_pkey_index(dev, slave, inbox);
2768         update_gid(dev, inbox, (u8)slave);
2769         adjust_proxy_tun_qkey(dev, vhcr, context);
2770         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2771 }
2772
2773 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2774                             struct mlx4_vhcr *vhcr,
2775                             struct mlx4_cmd_mailbox *inbox,
2776                             struct mlx4_cmd_mailbox *outbox,
2777                             struct mlx4_cmd_info *cmd)
2778 {
2779         int err;
2780         struct mlx4_qp_context *context = inbox->buf + 8;
2781
2782         err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
2783         if (err)
2784                 return err;
2785
2786         update_pkey_index(dev, slave, inbox);
2787         update_gid(dev, inbox, (u8)slave);
2788         adjust_proxy_tun_qkey(dev, vhcr, context);
2789         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2790 }
2791
2793 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2794                               struct mlx4_vhcr *vhcr,
2795                               struct mlx4_cmd_mailbox *inbox,
2796                               struct mlx4_cmd_mailbox *outbox,
2797                               struct mlx4_cmd_info *cmd)
2798 {
2799         struct mlx4_qp_context *context = inbox->buf + 8;
2800         adjust_proxy_tun_qkey(dev, vhcr, context);
2801         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2802 }
2803
2804 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
2805                             struct mlx4_vhcr *vhcr,
2806                             struct mlx4_cmd_mailbox *inbox,
2807                             struct mlx4_cmd_mailbox *outbox,
2808                             struct mlx4_cmd_info *cmd)
2809 {
2810         int err;
2811         struct mlx4_qp_context *context = inbox->buf + 8;
2812
2813         err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
2814         if (err)
2815                 return err;
2816
2817         adjust_proxy_tun_qkey(dev, vhcr, context);
2818         update_gid(dev, inbox, (u8)slave);
2819         update_pkey_index(dev, slave, inbox);
2820         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2821 }
2822
2823 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2824                             struct mlx4_vhcr *vhcr,
2825                             struct mlx4_cmd_mailbox *inbox,
2826                             struct mlx4_cmd_mailbox *outbox,
2827                             struct mlx4_cmd_info *cmd)
2828 {
2829         int err;
2830         struct mlx4_qp_context *context = inbox->buf + 8;
2831
2832         err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
2833         if (err)
2834                 return err;
2835
2836         adjust_proxy_tun_qkey(dev, vhcr, context);
2837         update_gid(dev, inbox, (u8)slave);
2838         update_pkey_index(dev, slave, inbox);
2839         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2840 }
2841
2842 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
2843                          struct mlx4_vhcr *vhcr,
2844                          struct mlx4_cmd_mailbox *inbox,
2845                          struct mlx4_cmd_mailbox *outbox,
2846                          struct mlx4_cmd_info *cmd)
2847 {
2848         int err;
2849         int qpn = vhcr->in_modifier & 0x7fffff;
2850         struct res_qp *qp;
2851
2852         err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
2853         if (err)
2854                 return err;
2855         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2856         if (err)
2857                 goto ex_abort;
2858
2859         atomic_dec(&qp->mtt->ref_count);
2860         atomic_dec(&qp->rcq->ref_count);
2861         atomic_dec(&qp->scq->ref_count);
2862         if (qp->srq)
2863                 atomic_dec(&qp->srq->ref_count);
2864         res_end_move(dev, slave, RES_QP, qpn);
2865         return 0;
2866
2867 ex_abort:
2868         res_abort_move(dev, slave, RES_QP, qpn);
2869
2870         return err;
2871 }
2872
2873 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
2874                                 struct res_qp *rqp, u8 *gid)
2875 {
2876         struct res_gid *res;
2877
2878         list_for_each_entry(res, &rqp->mcg_list, list) {
2879                 if (!memcmp(res->gid, gid, 16))
2880                         return res;
2881         }
2882         return NULL;
2883 }
2884
2885 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2886                        u8 *gid, enum mlx4_protocol prot,
2887                        enum mlx4_steer_type steer)
2888 {
2889         struct res_gid *res;
2890         int err;
2891
2892         res = kzalloc(sizeof *res, GFP_KERNEL);
2893         if (!res)
2894                 return -ENOMEM;
2895
2896         spin_lock_irq(&rqp->mcg_spl);
2897         if (find_gid(dev, slave, rqp, gid)) {
2898                 kfree(res);
2899                 err = -EEXIST;
2900         } else {
2901                 memcpy(res->gid, gid, 16);
2902                 res->prot = prot;
2903                 res->steer = steer;
2904                 list_add_tail(&res->list, &rqp->mcg_list);
2905                 err = 0;
2906         }
2907         spin_unlock_irq(&rqp->mcg_spl);
2908
2909         return err;
2910 }
2911
2912 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2913                        u8 *gid, enum mlx4_protocol prot,
2914                        enum mlx4_steer_type steer)
2915 {
2916         struct res_gid *res;
2917         int err;
2918
2919         spin_lock_irq(&rqp->mcg_spl);
2920         res = find_gid(dev, slave, rqp, gid);
2921         if (!res || res->prot != prot || res->steer != steer) {
2922                 err = -EINVAL;
2923         } else {
2924                 list_del(&res->list);
2925                 kfree(res);
2926                 err = 0;
2927         }
2928         spin_unlock_irq(&rqp->mcg_spl);
2929
2930         return err;
2931 }
2932
2933 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
2934                                struct mlx4_vhcr *vhcr,
2935                                struct mlx4_cmd_mailbox *inbox,
2936                                struct mlx4_cmd_mailbox *outbox,
2937                                struct mlx4_cmd_info *cmd)
2938 {
2939         struct mlx4_qp qp; /* dummy for calling attach/detach */
2940         u8 *gid = inbox->buf;
2941         enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
2942         int err;
2943         int qpn;
2944         struct res_qp *rqp;
2945         int attach = vhcr->op_modifier;
2946         int block_loopback = vhcr->in_modifier >> 31;
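             /*
              * Bit 1 of gid byte 7 encodes the steering type
              * (unicast/multicast) for Ethernet attachments, mirroring the
              * encoding used by the gid builders elsewhere in the driver.
              */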
2947         u8 steer_type_mask = 2;
2948         enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
2949
2950         qpn = vhcr->in_modifier & 0xffffff;
2951         err = get_res(dev, slave, qpn, RES_QP, &rqp);
2952         if (err)
2953                 return err;
2954
2955         qp.qpn = qpn;
2956         if (attach) {
2957                 err = add_mcg_res(dev, slave, rqp, gid, prot, type);
2958                 if (err)
2959                         goto ex_put;
2960
2961                 err = mlx4_qp_attach_common(dev, &qp, gid,
2962                                             block_loopback, prot, type);
2963                 if (err)
2964                         goto ex_rem;
2965         } else {
2966                 err = rem_mcg_res(dev, slave, rqp, gid, prot, type);
2967                 if (err)
2968                         goto ex_put;
2969                 err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
2970         }
2971
2972         put_res(dev, slave, qpn, RES_QP);
2973         return 0;
2974
2975 ex_rem:
2976         /* ignore error return below, already in error */
2977         (void) rem_mcg_res(dev, slave, rqp, gid, prot, type);
2978 ex_put:
2979         put_res(dev, slave, qpn, RES_QP);
2980
2981         return err;
2982 }
2983
2984 /*
2985  * MAC validation for Flow Steering rules.
2986  * VF can attach rules only with a mac address which is assigned to it.
2987  */
2988 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
2989                                    struct list_head *rlist)
2990 {
2991         struct mac_res *res, *tmp;
2992         __be64 be_mac;
2993
2994         /* make sure it isn't a multicast or broadcast MAC */
2995         if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
2996             !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
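                     /*
                      * res->mac holds the 48-bit address in the low bytes of
                      * a u64; shifting left by 16 before the endianness swap
                      * lines the six MAC bytes up at the start of be_mac for
                      * the memcmp() against the rule's dst_mac.
                      */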
2997                 list_for_each_entry_safe(res, tmp, rlist, list) {
2998                         be_mac = cpu_to_be64(res->mac << 16);
2999                         if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
3000                                 return 0;
3001                 }
3002                 pr_err("MAC %pM doesn't belong to VF %d, steering rule rejected\n",
3003                        eth_header->eth.dst_mac, slave);
3004                 return -EINVAL;
3005         }
3006         return 0;
3007 }
3008
3009 /*
3010  * In case of missing eth header, append eth header with a MAC address
3011  * assigned to the VF.
3012  */
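     /*
      * The rule in the inbox is a control segment followed by per-protocol
      * specs, so the existing L3/L4 specs are shifted down to make room and
      * an Ethernet spec matching one of the VF's MACs (masked with
      * MLX4_MAC_MASK) is written in front of them.
      */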
3013 static int add_eth_header(struct mlx4_dev *dev, int slave,
3014                           struct mlx4_cmd_mailbox *inbox,
3015                           struct list_head *rlist, int header_id)
3016 {
3017         struct mac_res *res, *tmp;
3018         u8 port;
3019         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3020         struct mlx4_net_trans_rule_hw_eth *eth_header;
3021         struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3022         struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3023         __be64 be_mac = 0;
3024         __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3025
3026         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3027         port = be32_to_cpu(ctrl->vf_vep_port) & 0xff;
3028         eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3029
3030         /* Clear a space in the inbox for eth header */
3031         switch (header_id) {
3032         case MLX4_NET_TRANS_RULE_ID_IPV4:
3033                 ip_header =
3034                         (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3035                 memmove(ip_header, eth_header,
3036                         sizeof(*ip_header) + sizeof(*l4_header));
3037                 break;
3038         case MLX4_NET_TRANS_RULE_ID_TCP:
3039         case MLX4_NET_TRANS_RULE_ID_UDP:
3040                 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
3041                             (eth_header + 1);
3042                 memmove(l4_header, eth_header, sizeof(*l4_header));
3043                 break;
3044         default:
3045                 return -EINVAL;
3046         }
3047         list_for_each_entry_safe(res, tmp, rlist, list) {
3048                 if (port == res->port) {
3049                         be_mac = cpu_to_be64(res->mac << 16);
3050                         break;
3051                 }
3052         }
3053         if (!be_mac) {
3054                 pr_err("Failed adding eth header to FS rule, can't find matching MAC for port %d\n",
3055                        port);
3056                 return -EINVAL;
3057         }
3058
3059         memset(eth_header, 0, sizeof(*eth_header));
3060         eth_header->size = sizeof(*eth_header) >> 2;
3061         eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
3062         memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
3063         memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
3064
3065         return 0;
3067 }
3068
3069 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3070                                          struct mlx4_vhcr *vhcr,
3071                                          struct mlx4_cmd_mailbox *inbox,
3072                                          struct mlx4_cmd_mailbox *outbox,
3073                                          struct mlx4_cmd_info *cmd)
3074 {
3076         struct mlx4_priv *priv = mlx4_priv(dev);
3077         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3078         struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
3079         int err;
3080         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3081         struct _rule_hw  *rule_header;
3082         int header_id;
3083
3084         if (dev->caps.steering_mode !=
3085             MLX4_STEERING_MODE_DEVICE_MANAGED)
3086                 return -EOPNOTSUPP;
3087
3088         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3089         rule_header = (struct _rule_hw *)(ctrl + 1);
3090         header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
3091
3092         switch (header_id) {
3093         case MLX4_NET_TRANS_RULE_ID_ETH:
3094                 if (validate_eth_header_mac(slave, rule_header, rlist))
3095                         return -EINVAL;
3096                 break;
3097         case MLX4_NET_TRANS_RULE_ID_IB:
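                     /*
                      * IB rules carry no Ethernet header, so there is no MAC
                      * to validate; pass them through so that IB flow
                      * steering also works on VFs.
                      */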
3098                 break;
3099         case MLX4_NET_TRANS_RULE_ID_IPV4:
3100         case MLX4_NET_TRANS_RULE_ID_TCP:
3101         case MLX4_NET_TRANS_RULE_ID_UDP:
3102                 pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
3103                 if (add_eth_header(dev, slave, inbox, rlist, header_id))
3104                         return -EINVAL;
3105                 vhcr->in_modifier +=
3106                         sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
3107                 break;
3108         default:
3109                 pr_err("Corrupted mailbox.\n");
3110                 return -EINVAL;
3111         }
3112
3113         err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
3114                            vhcr->in_modifier, 0,
3115                            MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
3116                            MLX4_CMD_NATIVE);
3117         if (err)
3118                 return err;
3119
3120         err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, 0);
3121         if (err) {
3122                 mlx4_err(dev, "Failed to add flow steering resources\n");
3123                 /* detach rule */
3124                 mlx4_cmd(dev, vhcr->out_param, 0, 0,
3125                          MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
3126                          MLX4_CMD_NATIVE);
3127         }
3128         return err;
3129 }
3130
3131 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
3132                                          struct mlx4_vhcr *vhcr,
3133                                          struct mlx4_cmd_mailbox *inbox,
3134                                          struct mlx4_cmd_mailbox *outbox,
3135                                          struct mlx4_cmd_info *cmd)
3136 {
3137         int err;
3138
3139         if (dev->caps.steering_mode !=
3140             MLX4_STEERING_MODE_DEVICE_MANAGED)
3141                 return -EOPNOTSUPP;
3142
3143         err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
3144         if (err) {
3145                 mlx4_err(dev, "Failed to remove flow steering resources\n");
3146                 return err;
3147         }
3148
3149         err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
3150                        MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3151                        MLX4_CMD_NATIVE);
3152         return err;
3153 }
3154
3155 enum {
3156         BUSY_MAX_RETRIES = 10
3157 };
3158
3159 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
3160                                struct mlx4_vhcr *vhcr,
3161                                struct mlx4_cmd_mailbox *inbox,
3162                                struct mlx4_cmd_mailbox *outbox,
3163                                struct mlx4_cmd_info *cmd)
3164 {
3165         int err;
3166         int index = vhcr->in_modifier & 0xffff;
3167
3168         err = get_res(dev, slave, index, RES_COUNTER, NULL);
3169         if (err)
3170                 return err;
3171
3172         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3173         put_res(dev, slave, index, RES_COUNTER);
3174         return err;
3175 }
3176
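     /*
      * Detach every multicast group a dying slave's QP is still attached
      * to, issuing the detach calls the slave itself never made.
      */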
3177 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
3178 {
3179         struct res_gid *rgid;
3180         struct res_gid *tmp;
3181         struct mlx4_qp qp; /* dummy for calling attach/detach */
3182
3183         list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
3184                 qp.qpn = rqp->local_qpn;
3185                 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
3186                                              rgid->steer);
3187                 list_del(&rgid->list);
3188                 kfree(rgid);
3189         }
3190 }
3191
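     /*
      * First step of slave cleanup: atomically mark every resource the
      * slave owns as busy and "removing" so no new state transitions can
      * start.  Resources caught mid-command stay busy and are counted;
      * move_all_busy() retries them for up to five seconds before giving
      * up.
      */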
3192 static int _move_all_busy(struct mlx4_dev *dev, int slave,
3193                           enum mlx4_resource type, int print)
3194 {
3195         struct mlx4_priv *priv = mlx4_priv(dev);
3196         struct mlx4_resource_tracker *tracker =
3197                 &priv->mfunc.master.res_tracker;
3198         struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
3199         struct res_common *r;
3200         struct res_common *tmp;
3201         int busy;
3202
3203         busy = 0;
3204         spin_lock_irq(mlx4_tlock(dev));
3205         list_for_each_entry_safe(r, tmp, rlist, list) {
3206                 if (r->owner == slave) {
3207                         if (!r->removing) {
3208                                 if (r->state == RES_ANY_BUSY) {
3209                                         if (print)
3210                                                 mlx4_dbg(dev,
3211                                                          "%s id 0x%llx is busy\n",
3212                                                           ResourceType(type),
3213                                                           r->res_id);
3214                                         ++busy;
3215                                 } else {
3216                                         r->from_state = r->state;
3217                                         r->state = RES_ANY_BUSY;
3218                                         r->removing = 1;
3219                                 }
3220                         }
3221                 }
3222         }
3223         spin_unlock_irq(mlx4_tlock(dev));
3224
3225         return busy;
3226 }
3227
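/*
 * Repeatedly try to move all of the slave's resources of @type to the
 * busy state, giving up after five seconds.  A final pass with
 * printing enabled logs whatever is still busy, and the count of such
 * stragglers is returned (0 on success).
 */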
3228 static int move_all_busy(struct mlx4_dev *dev, int slave,
3229                          enum mlx4_resource type)
3230 {
3231         unsigned long begin;
3232         int busy;
3233
3234         begin = jiffies;
3235         do {
3236                 busy = _move_all_busy(dev, slave, type, 0);
3237                 if (time_after(jiffies, begin + 5 * HZ))
3238                         break;
3239                 if (busy)
3240                         cond_resched();
3241         } while (busy);
3242
3243         if (busy)
3244                 busy = _move_all_busy(dev, slave, type, 1);
3245
3246         return busy;
3247 }
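
/*
 * Tear down all QPs owned by @slave.  Each QP is first detached from
 * its multicast groups, then unwound from its last known state:
 * RES_QP_HW is moved to reset via 2RST_QP (dropping the CQ/SRQ/MTT
 * reference counts it held), RES_QP_MAPPED frees the ICM, and
 * RES_QP_RESERVED removes the QP from the tracker and frees it.
 */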
3248 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
3249 {
3250         struct mlx4_priv *priv = mlx4_priv(dev);
3251         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3252         struct list_head *qp_list =
3253                 &tracker->slave_list[slave].res_list[RES_QP];
3254         struct res_qp *qp;
3255         struct res_qp *tmp;
3256         int state;
3257         u64 in_param;
3258         int qpn;
3259         int err;
3260
3261         err = move_all_busy(dev, slave, RES_QP);
3262         if (err)
3263                 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
3264                           slave);
3265
3266         spin_lock_irq(mlx4_tlock(dev));
3267         list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
3268                 spin_unlock_irq(mlx4_tlock(dev));
3269                 if (qp->com.owner == slave) {
3270                         qpn = qp->com.res_id;
3271                         detach_qp(dev, slave, qp);
3272                         state = qp->com.from_state;
3273                         while (state != 0) {
3274                                 switch (state) {
3275                                 case RES_QP_RESERVED:
3276                                         spin_lock_irq(mlx4_tlock(dev));
3277                                         rb_erase(&qp->com.node,
3278                                                  &tracker->res_tree[RES_QP]);
3279                                         list_del(&qp->com.list);
3280                                         spin_unlock_irq(mlx4_tlock(dev));
3281                                         kfree(qp);
3282                                         state = 0;
3283                                         break;
3284                                 case RES_QP_MAPPED:
3285                                         if (!valid_reserved(dev, slave, qpn))
3286                                                 __mlx4_qp_free_icm(dev, qpn);
3287                                         state = RES_QP_RESERVED;
3288                                         break;
3289                                 case RES_QP_HW:
3290                                         in_param = slave;
3291                                         err = mlx4_cmd(dev, in_param,
3292                                                        qp->local_qpn, 2,
3293                                                        MLX4_CMD_2RST_QP,
3294                                                        MLX4_CMD_TIME_CLASS_A,
3295                                                        MLX4_CMD_NATIVE);
3296                                         if (err)
3297                                                 mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
3298                                                          slave, qp->local_qpn);
3301                                         atomic_dec(&qp->rcq->ref_count);
3302                                         atomic_dec(&qp->scq->ref_count);
3303                                         atomic_dec(&qp->mtt->ref_count);
3304                                         if (qp->srq)
3305                                                 atomic_dec(&qp->srq->ref_count);
3306                                         state = RES_QP_MAPPED;
3307                                         break;
3308                                 default:
3309                                         state = 0;
3310                                 }
3311                         }
3312                 }
3313                 spin_lock_irq(mlx4_tlock(dev));
3314         }
3315         spin_unlock_irq(mlx4_tlock(dev));
3316 }
3317
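/*
 * Tear down all SRQs owned by @slave: RES_SRQ_HW is returned to SW
 * ownership via HW2SW_SRQ (dropping the MTT and CQ reference counts),
 * then RES_SRQ_ALLOCATED frees the ICM and removes the SRQ from the
 * tracker.
 */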
3318 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
3319 {
3320         struct mlx4_priv *priv = mlx4_priv(dev);
3321         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3322         struct list_head *srq_list =
3323                 &tracker->slave_list[slave].res_list[RES_SRQ];
3324         struct res_srq *srq;
3325         struct res_srq *tmp;
3326         int state;
3327         u64 in_param;
3329         int srqn;
3330         int err;
3331
3332         err = move_all_busy(dev, slave, RES_SRQ);
3333         if (err)
3334                 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to busy for slave %d\n",
3335                           slave);
3336
3337         spin_lock_irq(mlx4_tlock(dev));
3338         list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
3339                 spin_unlock_irq(mlx4_tlock(dev));
3340                 if (srq->com.owner == slave) {
3341                         srqn = srq->com.res_id;
3342                         state = srq->com.from_state;
3343                         while (state != 0) {
3344                                 switch (state) {
3345                                 case RES_SRQ_ALLOCATED:
3346                                         __mlx4_srq_free_icm(dev, srqn);
3347                                         spin_lock_irq(mlx4_tlock(dev));
3348                                         rb_erase(&srq->com.node,
3349                                                  &tracker->res_tree[RES_SRQ]);
3350                                         list_del(&srq->com.list);
3351                                         spin_unlock_irq(mlx4_tlock(dev));
3352                                         kfree(srq);
3353                                         state = 0;
3354                                         break;
3355
3356                                 case RES_SRQ_HW:
3357                                         in_param = slave;
3358                                         err = mlx4_cmd(dev, in_param, srqn, 1,
3359                                                        MLX4_CMD_HW2SW_SRQ,
3360                                                        MLX4_CMD_TIME_CLASS_A,
3361                                                        MLX4_CMD_NATIVE);
3362                                         if (err)
3363                                                 mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
3364                                                          slave, srqn);
3367
3368                                         atomic_dec(&srq->mtt->ref_count);
3369                                         if (srq->cq)
3370                                                 atomic_dec(&srq->cq->ref_count);
3371                                         state = RES_SRQ_ALLOCATED;
3372                                         break;
3373
3374                                 default:
3375                                         state = 0;
3376                                 }
3377                         }
3378                 }
3379                 spin_lock_irq(mlx4_tlock(dev));
3380         }
3381         spin_unlock_irq(mlx4_tlock(dev));
3382 }
3383
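/*
 * Tear down all CQs owned by @slave that are no longer referenced by
 * QPs or SRQs: RES_CQ_HW is returned to SW ownership via HW2SW_CQ
 * (dropping the MTT reference count), then RES_CQ_ALLOCATED frees the
 * ICM and removes the CQ from the tracker.
 */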
3384 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
3385 {
3386         struct mlx4_priv *priv = mlx4_priv(dev);
3387         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3388         struct list_head *cq_list =
3389                 &tracker->slave_list[slave].res_list[RES_CQ];
3390         struct res_cq *cq;
3391         struct res_cq *tmp;
3392         int state;
3393         u64 in_param;
3395         int cqn;
3396         int err;
3397
3398         err = move_all_busy(dev, slave, RES_CQ);
3399         if (err)
3400                 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to busy for slave %d\n",
3401                           slave);
3402
3403         spin_lock_irq(mlx4_tlock(dev));
3404         list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
3405                 spin_unlock_irq(mlx4_tlock(dev));
3406                 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
3407                         cqn = cq->com.res_id;
3408                         state = cq->com.from_state;
3409                         while (state != 0) {
3410                                 switch (state) {
3411                                 case RES_CQ_ALLOCATED:
3412                                         __mlx4_cq_free_icm(dev, cqn);
3413                                         spin_lock_irq(mlx4_tlock(dev));
3414                                         rb_erase(&cq->com.node,
3415                                                  &tracker->res_tree[RES_CQ]);
3416                                         list_del(&cq->com.list);
3417                                         spin_unlock_irq(mlx4_tlock(dev));
3418                                         kfree(cq);
3419                                         state = 0;
3420                                         break;
3421
3422                                 case RES_CQ_HW:
3423                                         in_param = slave;
3424                                         err = mlx4_cmd(dev, in_param, cqn, 1,
3425                                                        MLX4_CMD_HW2SW_CQ,
3426                                                        MLX4_CMD_TIME_CLASS_A,
3427                                                        MLX4_CMD_NATIVE);
3428                                         if (err)
3429                                                 mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
3430                                                          slave, cqn);
3433                                         atomic_dec(&cq->mtt->ref_count);
3434                                         state = RES_CQ_ALLOCATED;
3435                                         break;
3436
3437                                 default:
3438                                         state = 0;
3439                                 }
3440                         }
3441                 }
3442                 spin_lock_irq(mlx4_tlock(dev));
3443         }
3444         spin_unlock_irq(mlx4_tlock(dev));
3445 }
3446
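/*
 * Tear down all MPTs (memory regions) owned by @slave: RES_MPT_HW is
 * returned to SW ownership via HW2SW_MPT, RES_MPT_MAPPED frees the
 * ICM, and RES_MPT_RESERVED releases the MR key and removes the entry
 * from the tracker.
 */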
3447 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
3448 {
3449         struct mlx4_priv *priv = mlx4_priv(dev);
3450         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3451         struct list_head *mpt_list =
3452                 &tracker->slave_list[slave].res_list[RES_MPT];
3453         struct res_mpt *mpt;
3454         struct res_mpt *tmp;
3455         int state;
3456         u64 in_param;
3458         int mptn;
3459         int err;
3460
3461         err = move_all_busy(dev, slave, RES_MPT);
3462         if (err)
3463                 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to busy for slave %d\n",
3464                           slave);
3465
3466         spin_lock_irq(mlx4_tlock(dev));
3467         list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
3468                 spin_unlock_irq(mlx4_tlock(dev));
3469                 if (mpt->com.owner == slave) {
3470                         mptn = mpt->com.res_id;
3471                         state = mpt->com.from_state;
3472                         while (state != 0) {
3473                                 switch (state) {
3474                                 case RES_MPT_RESERVED:
3475                                         __mlx4_mr_release(dev, mpt->key);
3476                                         spin_lock_irq(mlx4_tlock(dev));
3477                                         rb_erase(&mpt->com.node,
3478                                                  &tracker->res_tree[RES_MPT]);
3479                                         list_del(&mpt->com.list);
3480                                         spin_unlock_irq(mlx4_tlock(dev));
3481                                         kfree(mpt);
3482                                         state = 0;
3483                                         break;
3484
3485                                 case RES_MPT_MAPPED:
3486                                         __mlx4_mr_free_icm(dev, mpt->key);
3487                                         state = RES_MPT_RESERVED;
3488                                         break;
3489
3490                                 case RES_MPT_HW:
3491                                         in_param = slave;
3492                                         err = mlx4_cmd(dev, in_param, mptn, 0,
3493                                                        MLX4_CMD_HW2SW_MPT,
3494                                                        MLX4_CMD_TIME_CLASS_A,
3495                                                        MLX4_CMD_NATIVE);
3496                                         if (err)
3497                                                 mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
3498                                                          slave, mptn);
3501                                         if (mpt->mtt)
3502                                                 atomic_dec(&mpt->mtt->ref_count);
3503                                         state = RES_MPT_MAPPED;
3504                                         break;
3505                                 default:
3506                                         state = 0;
3507                                 }
3508                         }
3509                 }
3510                 spin_lock_irq(mlx4_tlock(dev));
3511         }
3512         spin_unlock_irq(mlx4_tlock(dev));
3513 }
3514
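/*
 * Free all MTT ranges owned by @slave.  In
 * mlx4_delete_all_resources_for_slave() this runs after the QP, SRQ,
 * CQ and MR cleanup, so the MTT reference counts have already been
 * dropped.
 */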
3515 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
3516 {
3517         struct mlx4_priv *priv = mlx4_priv(dev);
3518         struct mlx4_resource_tracker *tracker =
3519                 &priv->mfunc.master.res_tracker;
3520         struct list_head *mtt_list =
3521                 &tracker->slave_list[slave].res_list[RES_MTT];
3522         struct res_mtt *mtt;
3523         struct res_mtt *tmp;
3524         int state;
3526         int base;
3527         int err;
3528
3529         err = move_all_busy(dev, slave, RES_MTT);
3530         if (err)
3531                 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to busy for slave %d\n",
3532                           slave);
3533
3534         spin_lock_irq(mlx4_tlock(dev));
3535         list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
3536                 spin_unlock_irq(mlx4_tlock(dev));
3537                 if (mtt->com.owner == slave) {
3538                         base = mtt->com.res_id;
3539                         state = mtt->com.from_state;
3540                         while (state != 0) {
3541                                 switch (state) {
3542                                 case RES_MTT_ALLOCATED:
3543                                         __mlx4_free_mtt_range(dev, base,
3544                                                               mtt->order);
3545                                         spin_lock_irq(mlx4_tlock(dev));
3546                                         rb_erase(&mtt->com.node,
3547                                                  &tracker->res_tree[RES_MTT]);
3548                                         list_del(&mtt->com.list);
3549                                         spin_unlock_irq(mlx4_tlock(dev));
3550                                         kfree(mtt);
3551                                         state = 0;
3552                                         break;
3553
3554                                 default:
3555                                         state = 0;
3556                                 }
3557                         }
3558                 }
3559                 spin_lock_irq(mlx4_tlock(dev));
3560         }
3561         spin_unlock_irq(mlx4_tlock(dev));
3562 }
3563
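/*
 * Detach all flow steering rules owned by @slave from firmware and
 * remove them from the tracker.
 */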
3564 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
3565 {
3566         struct mlx4_priv *priv = mlx4_priv(dev);
3567         struct mlx4_resource_tracker *tracker =
3568                 &priv->mfunc.master.res_tracker;
3569         struct list_head *fs_rule_list =
3570                 &tracker->slave_list[slave].res_list[RES_FS_RULE];
3571         struct res_fs_rule *fs_rule;
3572         struct res_fs_rule *tmp;
3573         int state;
3574         u64 base;
3575         int err;
3576
3577         err = move_all_busy(dev, slave, RES_FS_RULE);
3578         if (err)
3579                 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
3580                           slave);
3581
3582         spin_lock_irq(mlx4_tlock(dev));
3583         list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
3584                 spin_unlock_irq(mlx4_tlock(dev));
3585                 if (fs_rule->com.owner == slave) {
3586                         base = fs_rule->com.res_id;
3587                         state = fs_rule->com.from_state;
3588                         while (state != 0) {
3589                                 switch (state) {
3590                                 case RES_FS_RULE_ALLOCATED:
3591                                         /* detach rule */
3592                                         err = mlx4_cmd(dev, base, 0, 0,
3593                                                        MLX4_QP_FLOW_STEERING_DETACH,
3594                                                        MLX4_CMD_TIME_CLASS_A,
3595                                                        MLX4_CMD_NATIVE);
3596
3597                                         spin_lock_irq(mlx4_tlock(dev));
3598                                         rb_erase(&fs_rule->com.node,
3599                                                  &tracker->res_tree[RES_FS_RULE]);
3600                                         list_del(&fs_rule->com.list);
3601                                         spin_unlock_irq(mlx4_tlock(dev));
3602                                         kfree(fs_rule);
3603                                         state = 0;
3604                                         break;
3605
3606                                 default:
3607                                         state = 0;
3608                                 }
3609                         }
3610                 }
3611                 spin_lock_irq(mlx4_tlock(dev));
3612         }
3613         spin_unlock_irq(mlx4_tlock(dev));
3614 }
3615
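/*
 * Tear down all EQs owned by @slave: RES_EQ_HW is returned to SW
 * ownership via HW2SW_EQ (retrying if the command mailbox cannot be
 * allocated), then RES_EQ_RESERVED removes the EQ from the tracker.
 */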
3616 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
3617 {
3618         struct mlx4_priv *priv = mlx4_priv(dev);
3619         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3620         struct list_head *eq_list =
3621                 &tracker->slave_list[slave].res_list[RES_EQ];
3622         struct res_eq *eq;
3623         struct res_eq *tmp;
3624         int err;
3625         int state;
3627         int eqn;
3628         struct mlx4_cmd_mailbox *mailbox;
3629
3630         err = move_all_busy(dev, slave, RES_EQ);
3631         if (err)
3632                 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to busy for slave %d\n",
3633                           slave);
3634
3635         spin_lock_irq(mlx4_tlock(dev));
3636         list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
3637                 spin_unlock_irq(mlx4_tlock(dev));
3638                 if (eq->com.owner == slave) {
3639                         eqn = eq->com.res_id;
3640                         state = eq->com.from_state;
3641                         while (state != 0) {
3642                                 switch (state) {
3643                                 case RES_EQ_RESERVED:
3644                                         spin_lock_irq(mlx4_tlock(dev));
3645                                         rb_erase(&eq->com.node,
3646                                                  &tracker->res_tree[RES_EQ]);
3647                                         list_del(&eq->com.list);
3648                                         spin_unlock_irq(mlx4_tlock(dev));
3649                                         kfree(eq);
3650                                         state = 0;
3651                                         break;
3652
3653                                 case RES_EQ_HW:
3654                                         mailbox = mlx4_alloc_cmd_mailbox(dev);
3655                                         if (IS_ERR(mailbox)) {
3656                                                 cond_resched();
3657                                                 continue;
3658                                         }
3659                                         err = mlx4_cmd_box(dev, slave, 0,
3660                                                            eqn & 0xff, 0,
3661                                                            MLX4_CMD_HW2SW_EQ,
3662                                                            MLX4_CMD_TIME_CLASS_A,
3663                                                            MLX4_CMD_NATIVE);
3664                                         if (err)
3665                                                 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eq %d to SW ownership\n",
3666                                                          slave, eqn);
3668                                         mlx4_free_cmd_mailbox(dev, mailbox);
3669                                         atomic_dec(&eq->mtt->ref_count);
3670                                         state = RES_EQ_RESERVED;
3671                                         break;
3672
3673                                 default:
3674                                         state = 0;
3675                                 }
3676                         }
3677                 }
3678                 spin_lock_irq(mlx4_tlock(dev));
3679         }
3680         spin_unlock_irq(mlx4_tlock(dev));
3681 }
3682
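/*
 * Free all counters owned by @slave and drop them from the tracker.
 * Counters have no HW teardown state, so this is a single pass under
 * the tracker lock.
 */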
3683 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
3684 {
3685         struct mlx4_priv *priv = mlx4_priv(dev);
3686         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3687         struct list_head *counter_list =
3688                 &tracker->slave_list[slave].res_list[RES_COUNTER];
3689         struct res_counter *counter;
3690         struct res_counter *tmp;
3691         int err;
3692         int index;
3693
3694         err = move_all_busy(dev, slave, RES_COUNTER);
3695         if (err)
3696                 mlx4_warn(dev, "rem_slave_counters: Could not move all counters to busy for slave %d\n",
3697                           slave);
3698
3699         spin_lock_irq(mlx4_tlock(dev));
3700         list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
3701                 if (counter->com.owner == slave) {
3702                         index = counter->com.res_id;
3703                         rb_erase(&counter->com.node,
3704                                  &tracker->res_tree[RES_COUNTER]);
3705                         list_del(&counter->com.list);
3706                         kfree(counter);
3707                         __mlx4_counter_free(dev, index);
3708                 }
3709         }
3710         spin_unlock_irq(mlx4_tlock(dev));
3711 }
3712
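/*
 * Free all XRC domains owned by @slave and drop them from the
 * tracker, in a single pass under the tracker lock.
 */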
3713 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
3714 {
3715         struct mlx4_priv *priv = mlx4_priv(dev);
3716         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3717         struct list_head *xrcdn_list =
3718                 &tracker->slave_list[slave].res_list[RES_XRCD];
3719         struct res_xrcdn *xrcd;
3720         struct res_xrcdn *tmp;
3721         int err;
3722         int xrcdn;
3723
3724         err = move_all_busy(dev, slave, RES_XRCD);
3725         if (err)
3726                 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to busy for slave %d\n",
3727                           slave);
3728
3729         spin_lock_irq(mlx4_tlock(dev));
3730         list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
3731                 if (xrcd->com.owner == slave) {
3732                         xrcdn = xrcd->com.res_id;
3733                         rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
3734                         list_del(&xrcd->com.list);
3735                         kfree(xrcd);
3736                         __mlx4_xrcd_free(dev, xrcdn);
3737                 }
3738         }
3739         spin_unlock_irq(mlx4_tlock(dev));
3740 }
3741
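/*
 * Release every resource the given slave still owns, e.g. after the
 * slave (VF) is reset or shut down.  The per-slave tracker mutex is
 * held across the whole sequence, and resources that hold references
 * on others (QPs, SRQs, CQs, MRs) are released before the MTTs they
 * point at.
 */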
3742 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
3743 {
3744         struct mlx4_priv *priv = mlx4_priv(dev);
3745
3746         mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3747         /* VLANs are not tracked yet; only MAC resources are released here */
3748         rem_slave_macs(dev, slave);
3749         rem_slave_qps(dev, slave);
3750         rem_slave_srqs(dev, slave);
3751         rem_slave_cqs(dev, slave);
3752         rem_slave_mrs(dev, slave);
3753         rem_slave_eqs(dev, slave);
3754         rem_slave_mtts(dev, slave);
3755         rem_slave_counters(dev, slave);
3756         rem_slave_xrcdns(dev, slave);
3757         rem_slave_fs_rule(dev, slave);
3758         mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3759 }