mlx4: fix QP tree trashing
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4.h"
#include "fw.h"

#define MLX4_MAC_VALID          (1ull << 63)
#define MLX4_MAC_MASK           0x7fffffffffffffffULL
#define ETH_ALEN                6

struct mac_res {
        struct list_head list;
        u64 mac;
        u8 port;
};

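/*
 * Fields common to every tracked resource: the owning slave, the
 * current state, and the from/to states recorded while a transition
 * is in flight (the entry is parked in the *_BUSY state meanwhile).
 */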
struct res_common {
        struct list_head        list;
        u32                     res_id;
        int                     owner;
        int                     state;
        int                     from_state;
        int                     to_state;
        int                     removing;
};

enum {
        RES_ANY_BUSY = 1
};

struct res_gid {
        struct list_head        list;
        u8                      gid[16];
        enum mlx4_protocol      prot;
};

enum res_qp_states {
        RES_QP_BUSY = RES_ANY_BUSY,

        /* QP number was allocated */
        RES_QP_RESERVED,

        /* ICM memory for QP context was mapped */
        RES_QP_MAPPED,

        /* QP is in hw ownership */
        RES_QP_HW
};

static inline const char *qp_states_str(enum res_qp_states state)
{
        switch (state) {
        case RES_QP_BUSY: return "RES_QP_BUSY";
        case RES_QP_RESERVED: return "RES_QP_RESERVED";
        case RES_QP_MAPPED: return "RES_QP_MAPPED";
        case RES_QP_HW: return "RES_QP_HW";
        default: return "Unknown";
        }
}

struct res_qp {
        struct res_common       com;
        struct res_mtt         *mtt;
        struct res_cq          *rcq;
        struct res_cq          *scq;
        struct res_srq         *srq;
        struct list_head        mcg_list;
        spinlock_t              mcg_spl;
        int                     local_qpn;
};

enum res_mtt_states {
        RES_MTT_BUSY = RES_ANY_BUSY,
        RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
        switch (state) {
        case RES_MTT_BUSY: return "RES_MTT_BUSY";
        case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
        default: return "Unknown";
        }
}

struct res_mtt {
        struct res_common       com;
        int                     order;
        atomic_t                ref_count;
};

enum res_mpt_states {
        RES_MPT_BUSY = RES_ANY_BUSY,
        RES_MPT_RESERVED,
        RES_MPT_MAPPED,
        RES_MPT_HW,
};

struct res_mpt {
        struct res_common       com;
        struct res_mtt         *mtt;
        int                     key;
};

enum res_eq_states {
        RES_EQ_BUSY = RES_ANY_BUSY,
        RES_EQ_RESERVED,
        RES_EQ_HW,
};

struct res_eq {
        struct res_common       com;
        struct res_mtt         *mtt;
};

enum res_cq_states {
        RES_CQ_BUSY = RES_ANY_BUSY,
        RES_CQ_ALLOCATED,
        RES_CQ_HW,
};

struct res_cq {
        struct res_common       com;
        struct res_mtt         *mtt;
        atomic_t                ref_count;
};

enum res_srq_states {
        RES_SRQ_BUSY = RES_ANY_BUSY,
        RES_SRQ_ALLOCATED,
        RES_SRQ_HW,
};

static inline const char *srq_states_str(enum res_srq_states state)
{
        switch (state) {
        case RES_SRQ_BUSY: return "RES_SRQ_BUSY";
        case RES_SRQ_ALLOCATED: return "RES_SRQ_ALLOCATED";
        case RES_SRQ_HW: return "RES_SRQ_HW";
        default: return "Unknown";
        }
}

struct res_srq {
        struct res_common       com;
        struct res_mtt         *mtt;
        struct res_cq          *cq;
        atomic_t                ref_count;
};

enum res_counter_states {
        RES_COUNTER_BUSY = RES_ANY_BUSY,
        RES_COUNTER_ALLOCATED,
};

static inline const char *counter_states_str(enum res_counter_states state)
{
        switch (state) {
        case RES_COUNTER_BUSY: return "RES_COUNTER_BUSY";
        case RES_COUNTER_ALLOCATED: return "RES_COUNTER_ALLOCATED";
        default: return "Unknown";
        }
}

struct res_counter {
        struct res_common       com;
        int                     port;
};

/* For debug purposes */
static const char *ResourceType(enum mlx4_resource rt)
{
        switch (rt) {
        case RES_QP: return "RES_QP";
        case RES_CQ: return "RES_CQ";
        case RES_SRQ: return "RES_SRQ";
        case RES_MPT: return "RES_MPT";
        case RES_MTT: return "RES_MTT";
        case RES_MAC: return "RES_MAC";
        case RES_EQ: return "RES_EQ";
        case RES_COUNTER: return "RES_COUNTER";
        default: return "Unknown resource type !!!";
        }
}

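/*
 * Allocate one resource-list set and mutex per slave, and one radix
 * tree per resource type.  Each tree is keyed by resource id and is
 * shared by all slaves; ownership is recorded in the entry itself.
 */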
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;
        int t;

        priv->mfunc.master.res_tracker.slave_list =
                kzalloc(dev->num_slaves * sizeof(struct slave_list),
                        GFP_KERNEL);
        if (!priv->mfunc.master.res_tracker.slave_list)
                return -ENOMEM;

        for (i = 0; i < dev->num_slaves; i++) {
                for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
                        INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
                                       slave_list[i].res_list[t]);
                mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
        }

        mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
                 dev->num_slaves);
        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
                INIT_RADIX_TREE(&priv->mfunc.master.res_tracker.res_tree[i],
                                GFP_ATOMIC|__GFP_NOWARN);

        spin_lock_init(&priv->mfunc.master.res_tracker.lock);
        return 0;
}

void mlx4_free_resource_tracker(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;

        if (priv->mfunc.master.res_tracker.slave_list) {
                for (i = 0; i < dev->num_slaves; i++)
                        mlx4_delete_all_resources_for_slave(dev, i);

                kfree(priv->mfunc.master.res_tracker.slave_list);
        }
}

static void update_ud_gid(struct mlx4_dev *dev,
                          struct mlx4_qp_context *qp_ctx, u8 slave)
{
        u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;

        if (MLX4_QP_ST_UD == ts)
                qp_ctx->pri_path.mgid_index = 0x80 | slave;

        mlx4_dbg(dev, "slave %d, new gid index: 0x%x\n",
                 slave, qp_ctx->pri_path.mgid_index);
}

static int mpt_mask(struct mlx4_dev *dev)
{
        return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, int res_id,
                      enum mlx4_resource type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        return radix_tree_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
                                 res_id);
}

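/*
 * Look up a resource, verify that the caller owns it, and mark it
 * busy so it can neither change state nor be removed until the
 * matching put_res().  The entry is returned through @res if non-NULL.
 */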
static int get_res(struct mlx4_dev *dev, int slave, int res_id,
                   enum mlx4_resource type,
                   void *res)
{
        struct res_common *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (!r) {
                err = -ENOENT;
                goto exit;
        }

        if (r->state == RES_ANY_BUSY) {
                err = -EBUSY;
                goto exit;
        }

        if (r->owner != slave) {
                err = -EPERM;
                goto exit;
        }

        r->from_state = r->state;
        r->state = RES_ANY_BUSY;
        mlx4_dbg(dev, "res %s id 0x%x to busy\n",
                 ResourceType(type), r->res_id);

        if (res)
                *((struct res_common **)res) = r;

exit:
        spin_unlock_irq(mlx4_tlock(dev));
        return err;
}

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
                                    enum mlx4_resource type,
                                    int res_id, int *slave)
{
        struct res_common *r;
        int err = -ENOENT;
        int id = res_id;

        if (type == RES_QP)
                id &= 0x7fffff;

        spin_lock(mlx4_tlock(dev));
        r = find_res(dev, id, type);
        if (r) {
                *slave = r->owner;
                err = 0;
        }
        spin_unlock(mlx4_tlock(dev));

        return err;
}

static void put_res(struct mlx4_dev *dev, int slave, int res_id,
                    enum mlx4_resource type)
{
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (r)
                r->state = r->from_state;
        spin_unlock_irq(mlx4_tlock(dev));
}

static struct res_common *alloc_qp_tr(int id)
{
        struct res_qp *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_QP_RESERVED;
        ret->local_qpn = id;
        INIT_LIST_HEAD(&ret->mcg_list);
        spin_lock_init(&ret->mcg_spl);

        return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
        struct res_mtt *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->order = order;
        ret->com.state = RES_MTT_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
        struct res_mpt *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_MPT_RESERVED;
        ret->key = key;

        return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
        struct res_eq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_EQ_RESERVED;

        return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
        struct res_cq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_CQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
        struct res_srq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_SRQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
        struct res_counter *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_COUNTER_ALLOCATED;

        return &ret->com;
}

static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
                                   int extra)
{
        struct res_common *ret;

        switch (type) {
        case RES_QP:
                ret = alloc_qp_tr(id);
                break;
        case RES_MPT:
                ret = alloc_mpt_tr(id, extra);
                break;
        case RES_MTT:
                ret = alloc_mtt_tr(id, extra);
                break;
        case RES_EQ:
                ret = alloc_eq_tr(id);
                break;
        case RES_CQ:
                ret = alloc_cq_tr(id);
                break;
        case RES_SRQ:
                ret = alloc_srq_tr(id);
                break;
        case RES_MAC:
                printk(KERN_ERR "implementation missing\n");
                return NULL;
        case RES_COUNTER:
                ret = alloc_counter_tr(id);
                break;
        default:
                return NULL;
        }
        if (ret)
                ret->owner = slave;

        return ret;
}

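/*
 * Insert @count consecutive resource entries, starting at @base, into
 * the radix tree and onto the owning slave's list.  On any failure
 * the entries inserted so far are unwound, leaving the tree untouched.
 */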
static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
                         enum mlx4_resource type, int extra)
{
        int i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct res_common **res_arr;
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct radix_tree_root *root = &tracker->res_tree[type];

        res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
        if (!res_arr)
                return -ENOMEM;

        for (i = 0; i < count; ++i) {
                res_arr[i] = alloc_tr(base + i, type, slave, extra);
                if (!res_arr[i]) {
                        for (--i; i >= 0; --i)
                                kfree(res_arr[i]);

                        kfree(res_arr);
                        return -ENOMEM;
                }
        }

        spin_lock_irq(mlx4_tlock(dev));
        for (i = 0; i < count; ++i) {
                if (find_res(dev, base + i, type)) {
                        err = -EEXIST;
                        goto undo;
                }
                err = radix_tree_insert(root, base + i, res_arr[i]);
                if (err)
                        goto undo;
                list_add_tail(&res_arr[i]->list,
                              &tracker->slave_list[slave].res_list[type]);
        }
        spin_unlock_irq(mlx4_tlock(dev));
        kfree(res_arr);

        return 0;

undo:
        /* Unwind only what was actually inserted: delete the keys
         * base..base + i - 1 (the old loop deleted keys i..base,
         * trashing unrelated tree entries) and unlink the entries
         * before they are freed below.
         */
        for (--i; i >= 0; --i) {
                radix_tree_delete(&tracker->res_tree[type], base + i);
                list_del(&res_arr[i]->list);
        }

        spin_unlock_irq(mlx4_tlock(dev));

        for (i = 0; i < count; ++i)
                kfree(res_arr[i]);

        kfree(res_arr);

        return err;
}

static int remove_qp_ok(struct res_qp *res)
{
        if (res->com.state == RES_QP_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_QP_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
        if (res->com.state == RES_MTT_BUSY ||
            atomic_read(&res->ref_count)) {
                printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
                       __func__, __LINE__,
                       mtt_states_str(res->com.state),
                       atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_MTT_ALLOCATED)
                return -EPERM;
        else if (res->order != order)
                return -EINVAL;

        return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
        if (res->com.state == RES_MPT_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_MPT_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
        if (res->com.state == RES_EQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_EQ_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
        if (res->com.state == RES_COUNTER_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_COUNTER_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
        if (res->com.state == RES_CQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_CQ_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
        if (res->com.state == RES_SRQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_SRQ_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
        switch (type) {
        case RES_QP:
                return remove_qp_ok((struct res_qp *)res);
        case RES_CQ:
                return remove_cq_ok((struct res_cq *)res);
        case RES_SRQ:
                return remove_srq_ok((struct res_srq *)res);
        case RES_MPT:
                return remove_mpt_ok((struct res_mpt *)res);
        case RES_MTT:
                return remove_mtt_ok((struct res_mtt *)res, extra);
        case RES_MAC:
                return -ENOSYS;
        case RES_EQ:
                return remove_eq_ok((struct res_eq *)res);
        case RES_COUNTER:
                return remove_counter_ok((struct res_counter *)res);
        default:
                return -EINVAL;
        }
}

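/*
 * Remove @count consecutive entries owned by @slave.  The whole range
 * is validated first (ownership and a removable state), so the second
 * pass either deletes everything or nothing.
 */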
static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
                         enum mlx4_resource type, int extra)
{
        int i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        for (i = base; i < base + count; ++i) {
                r = radix_tree_lookup(&tracker->res_tree[type], i);
                if (!r) {
                        err = -ENOENT;
                        goto out;
                }
                if (r->owner != slave) {
                        err = -EPERM;
                        goto out;
                }
                err = remove_ok(r, type, extra);
                if (err)
                        goto out;
        }

        for (i = base; i < base + count; ++i) {
                r = radix_tree_lookup(&tracker->res_tree[type], i);
                radix_tree_delete(&tracker->res_tree[type], i);
                list_del(&r->list);
                kfree(r);
        }
        err = 0;

out:
        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

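/*
 * Begin a QP state transition: validate that @state is reachable from
 * the current state, then park the entry in RES_QP_BUSY.  The caller
 * completes with res_end_move() or rolls back with res_abort_move().
 */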
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
                                enum res_qp_states state, struct res_qp **qp,
                                int alloc)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_qp *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = radix_tree_lookup(&tracker->res_tree[RES_QP], qpn);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_QP_BUSY:
                        mlx4_dbg(dev, "%s: failed RES_QP, 0x%x\n",
                                 __func__, r->com.res_id);
                        err = -EBUSY;
                        break;

                case RES_QP_RESERVED:
                        if (r->com.state == RES_QP_MAPPED && !alloc)
                                break;

                        mlx4_dbg(dev, "failed RES_QP, 0x%x\n", r->com.res_id);
                        err = -EINVAL;
                        break;

                case RES_QP_MAPPED:
                        if ((r->com.state == RES_QP_RESERVED && alloc) ||
                            r->com.state == RES_QP_HW)
                                break;
                        else {
                                mlx4_dbg(dev, "failed RES_QP, 0x%x\n",
                                         r->com.res_id);
                                err = -EINVAL;
                        }

                        break;

                case RES_QP_HW:
                        if (r->com.state != RES_QP_MAPPED)
                                err = -EINVAL;
                        break;
                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_QP_BUSY;
                        if (qp)
                                *qp = (struct res_qp *)r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                enum res_mpt_states state, struct res_mpt **mpt)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_mpt *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = radix_tree_lookup(&tracker->res_tree[RES_MPT], index);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_MPT_BUSY:
                        err = -EINVAL;
                        break;

                case RES_MPT_RESERVED:
                        if (r->com.state != RES_MPT_MAPPED)
                                err = -EINVAL;
                        break;

                case RES_MPT_MAPPED:
                        if (r->com.state != RES_MPT_RESERVED &&
                            r->com.state != RES_MPT_HW)
                                err = -EINVAL;
                        break;

                case RES_MPT_HW:
                        if (r->com.state != RES_MPT_MAPPED)
                                err = -EINVAL;
                        break;
                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_MPT_BUSY;
                        if (mpt)
                                *mpt = (struct res_mpt *)r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                enum res_eq_states state, struct res_eq **eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_eq *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = radix_tree_lookup(&tracker->res_tree[RES_EQ], index);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_EQ_BUSY:
                        err = -EINVAL;
                        break;

                case RES_EQ_RESERVED:
                        if (r->com.state != RES_EQ_HW)
                                err = -EINVAL;
                        break;

                case RES_EQ_HW:
                        if (r->com.state != RES_EQ_RESERVED)
                                err = -EINVAL;
                        break;

                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_EQ_BUSY;
                        if (eq)
                                *eq = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
                                enum res_cq_states state, struct res_cq **cq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_cq *r;
        int err;

        spin_lock_irq(mlx4_tlock(dev));
        r = radix_tree_lookup(&tracker->res_tree[RES_CQ], cqn);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_CQ_BUSY:
                        err = -EBUSY;
                        break;

                case RES_CQ_ALLOCATED:
                        if (r->com.state != RES_CQ_HW)
                                err = -EINVAL;
                        else if (atomic_read(&r->ref_count))
                                err = -EBUSY;
                        else
                                err = 0;
                        break;

                case RES_CQ_HW:
                        if (r->com.state != RES_CQ_ALLOCATED)
                                err = -EINVAL;
                        else
                                err = 0;
                        break;

                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_CQ_BUSY;
                        if (cq)
                                *cq = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                 enum res_srq_states state,
                                 struct res_srq **srq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_srq *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = radix_tree_lookup(&tracker->res_tree[RES_SRQ], index);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_SRQ_BUSY:
                        err = -EINVAL;
                        break;

                case RES_SRQ_ALLOCATED:
                        if (r->com.state != RES_SRQ_HW)
                                err = -EINVAL;
                        else if (atomic_read(&r->ref_count))
                                err = -EBUSY;
                        break;

                case RES_SRQ_HW:
                        if (r->com.state != RES_SRQ_ALLOCATED)
                                err = -EINVAL;
                        break;

                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_SRQ_BUSY;
                        if (srq)
                                *srq = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

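/*
 * Finish or roll back a transition started by one of the
 * *_res_start_move_to() helpers: restore the saved from_state on
 * abort, or commit the requested to_state on success.
 */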
static void res_abort_move(struct mlx4_dev *dev, int slave,
                           enum mlx4_resource type, int id)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = radix_tree_lookup(&tracker->res_tree[type], id);
        if (r && (r->owner == slave))
                r->state = r->from_state;
        spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
                         enum mlx4_resource type, int id)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = radix_tree_lookup(&tracker->res_tree[type], id);
        if (r && (r->owner == slave))
                r->state = r->to_state;
        spin_unlock_irq(mlx4_tlock(dev));
}

static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
        return mlx4_is_qp_reserved(dev, qpn);
}

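/*
 * ALLOC_RES for QPs: RES_OP_RESERVE reserves a range of QP numbers
 * and tracks it; RES_OP_MAP_ICM maps ICM for one QP and moves it to
 * RES_QP_MAPPED (reserved QPs get a tracker entry on the fly and
 * their ICM is managed by the master).
 */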
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
{
        int err;
        int count;
        int align;
        int base;
        int qpn;

        switch (op) {
        case RES_OP_RESERVE:
                count = get_param_l(&in_param);
                align = get_param_h(&in_param);
                err = __mlx4_qp_reserve_range(dev, count, align, &base);
                if (err)
                        return err;

                err = add_res_range(dev, slave, base, count, RES_QP, 0);
                if (err) {
                        __mlx4_qp_release_range(dev, base, count);
                        return err;
                }
                set_param_l(out_param, base);
                break;
        case RES_OP_MAP_ICM:
                qpn = get_param_l(&in_param) & 0x7fffff;
                if (valid_reserved(dev, slave, qpn)) {
                        err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
                        if (err)
                                return err;
                }

                err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
                                           NULL, 1);
                if (err)
                        return err;

                if (!valid_reserved(dev, slave, qpn)) {
                        err = __mlx4_qp_alloc_icm(dev, qpn);
                        if (err) {
                                res_abort_move(dev, slave, RES_QP, qpn);
                                return err;
                        }
                }

                res_end_move(dev, slave, RES_QP, qpn);
                break;

        default:
                err = -EINVAL;
                break;
        }
        return err;
}

static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
{
        int err = -EINVAL;
        int base;
        int order;

        if (op != RES_OP_RESERVE_AND_MAP)
                return err;

        order = get_param_l(&in_param);
        base = __mlx4_alloc_mtt_range(dev, order);
        if (base == -1)
                return -ENOMEM;

        err = add_res_range(dev, slave, base, 1, RES_MTT, order);
        if (err)
                __mlx4_free_mtt_range(dev, base, order);
        else
                set_param_l(out_param, base);

        return err;
}

static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
{
        int err = -EINVAL;
        int index;
        int id;
        struct res_mpt *mpt;

        switch (op) {
        case RES_OP_RESERVE:
                index = __mlx4_mr_reserve(dev);
                if (index == -1)
                        break;
                id = index & mpt_mask(dev);

                err = add_res_range(dev, slave, id, 1, RES_MPT, index);
                if (err) {
                        __mlx4_mr_release(dev, index);
                        break;
                }
                set_param_l(out_param, index);
                break;
        case RES_OP_MAP_ICM:
                index = get_param_l(&in_param);
                id = index & mpt_mask(dev);
                err = mr_res_start_move_to(dev, slave, id,
                                           RES_MPT_MAPPED, &mpt);
                if (err)
                        return err;

                err = __mlx4_mr_alloc_icm(dev, mpt->key);
                if (err) {
                        res_abort_move(dev, slave, RES_MPT, id);
                        return err;
                }

                res_end_move(dev, slave, RES_MPT, id);
                break;
        }
        return err;
}

static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
{
        int cqn;
        int err;

        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                err = __mlx4_cq_alloc_icm(dev, &cqn);
                if (err)
                        break;

                err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
                if (err) {
                        __mlx4_cq_free_icm(dev, cqn);
                        break;
                }

                set_param_l(out_param, cqn);
                break;

        default:
                err = -EINVAL;
        }

        return err;
}

static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
{
        int srqn;
        int err;

        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                err = __mlx4_srq_alloc_icm(dev, &srqn);
                if (err)
                        break;

                err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
                if (err) {
                        __mlx4_srq_free_icm(dev, srqn);
                        break;
                }

                set_param_l(out_param, srqn);
                break;

        default:
                err = -EINVAL;
        }

        return err;
}

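/*
 * MACs are tracked on a simple per-slave list keyed by (mac, port)
 * rather than in a radix tree, so they can be released wholesale by
 * rem_slave_macs() when the slave goes away.
 */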
static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct mac_res *res;

        res = kzalloc(sizeof *res, GFP_KERNEL);
        if (!res)
                return -ENOMEM;
        res->mac = mac;
        res->port = (u8) port;
        list_add_tail(&res->list,
                      &tracker->slave_list[slave].res_list[RES_MAC]);
        return 0;
}

static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
                               int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *mac_list =
                &tracker->slave_list[slave].res_list[RES_MAC];
        struct mac_res *res, *tmp;

        list_for_each_entry_safe(res, tmp, mac_list, list) {
                if (res->mac == mac && res->port == (u8) port) {
                        list_del(&res->list);
                        kfree(res);
                        break;
                }
        }
}

static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *mac_list =
                &tracker->slave_list[slave].res_list[RES_MAC];
        struct mac_res *res, *tmp;

        list_for_each_entry_safe(res, tmp, mac_list, list) {
                list_del(&res->list);
                __mlx4_unregister_mac(dev, res->port, res->mac);
                kfree(res);
        }
}

static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
{
        int err = -EINVAL;
        int port;
        u64 mac;

        if (op != RES_OP_RESERVE_AND_MAP)
                return err;

        port = get_param_l(out_param);
        mac = in_param;

        err = __mlx4_register_mac(dev, port, mac);
        if (err >= 0) {
                set_param_l(out_param, err);
                err = 0;
        }

        if (!err) {
                err = mac_add_to_slave(dev, slave, mac, port);
                if (err)
                        __mlx4_unregister_mac(dev, port, mac);
        }
        return err;
}

static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                          u64 in_param, u64 *out_param)
{
        return 0;
}

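/*
 * Dispatch a slave's ALLOC_RES command by resource type
 * (vhcr->in_modifier); op_modifier selects reserve/map semantics.
 */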
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
{
        int err;
        int alop = vhcr->op_modifier;

        switch (vhcr->in_modifier) {
        case RES_QP:
                err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                   vhcr->in_param, &vhcr->out_param);
                break;

        case RES_MTT:
                err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param);
                break;

        case RES_MPT:
                err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param);
                break;

        case RES_CQ:
                err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                   vhcr->in_param, &vhcr->out_param);
                break;

        case RES_SRQ:
                err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param);
                break;

        case RES_MAC:
                err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param);
                break;

        case RES_VLAN:
                err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                     vhcr->in_param, &vhcr->out_param);
                break;

        default:
                err = -EINVAL;
                break;
        }

        return err;
}

static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                       u64 in_param)
{
        int err;
        int count;
        int base;
        int qpn;

        switch (op) {
        case RES_OP_RESERVE:
                base = get_param_l(&in_param) & 0x7fffff;
                count = get_param_h(&in_param);
                err = rem_res_range(dev, slave, base, count, RES_QP, 0);
                if (err)
                        break;
                __mlx4_qp_release_range(dev, base, count);
                break;
        case RES_OP_MAP_ICM:
                qpn = get_param_l(&in_param) & 0x7fffff;
                err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
                                           NULL, 0);
                if (err)
                        return err;

                if (!valid_reserved(dev, slave, qpn))
                        __mlx4_qp_free_icm(dev, qpn);

                res_end_move(dev, slave, RES_QP, qpn);

                if (valid_reserved(dev, slave, qpn))
                        err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
                break;
        default:
                err = -EINVAL;
                break;
        }
        return err;
}

static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
{
        int err = -EINVAL;
        int base;
        int order;

        if (op != RES_OP_RESERVE_AND_MAP)
                return err;

        base = get_param_l(&in_param);
        order = get_param_h(&in_param);
        err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
        if (!err)
                __mlx4_free_mtt_range(dev, base, order);
        return err;
}

static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param)
{
        int err = -EINVAL;
        int index;
        int id;
        struct res_mpt *mpt;

        switch (op) {
        case RES_OP_RESERVE:
                index = get_param_l(&in_param);
                id = index & mpt_mask(dev);
                err = get_res(dev, slave, id, RES_MPT, &mpt);
                if (err)
                        break;
                index = mpt->key;
                put_res(dev, slave, id, RES_MPT);

                err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
                if (err)
                        break;
                __mlx4_mr_release(dev, index);
                break;
        case RES_OP_MAP_ICM:
                index = get_param_l(&in_param);
                id = index & mpt_mask(dev);
                err = mr_res_start_move_to(dev, slave, id,
                                           RES_MPT_RESERVED, &mpt);
                if (err)
                        return err;

                __mlx4_mr_free_icm(dev, mpt->key);
                res_end_move(dev, slave, RES_MPT, id);
                return err;
        default:
                err = -EINVAL;
                break;
        }
        return err;
}

static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                       u64 in_param, u64 *out_param)
{
        int cqn;
        int err;

        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                cqn = get_param_l(&in_param);
                err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
                if (err)
                        break;

                __mlx4_cq_free_icm(dev, cqn);
                break;

        default:
                err = -EINVAL;
                break;
        }

        return err;
}

static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
{
        int srqn;
        int err;

        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                srqn = get_param_l(&in_param);
                err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
                if (err)
                        break;

                __mlx4_srq_free_icm(dev, srqn);
                break;

        default:
                err = -EINVAL;
                break;
        }

        return err;
}

static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
{
        int port;
        int err = 0;

        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                port = get_param_l(out_param);
                mac_del_from_slave(dev, slave, in_param, port);
                __mlx4_unregister_mac(dev, port, in_param);
                break;
        default:
                err = -EINVAL;
                break;
        }

        return err;
}

static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
{
        return 0;
}

int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
                          struct mlx4_vhcr *vhcr,
                          struct mlx4_cmd_mailbox *inbox,
                          struct mlx4_cmd_mailbox *outbox,
                          struct mlx4_cmd_info *cmd)
{
        int err = -EINVAL;
        int alop = vhcr->op_modifier;

        switch (vhcr->in_modifier) {
        case RES_QP:
                err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
                                  vhcr->in_param);
                break;

        case RES_MTT:
                err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
                                   vhcr->in_param, &vhcr->out_param);
                break;

        case RES_MPT:
                err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
                                   vhcr->in_param);
                break;

        case RES_CQ:
                err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
                                  vhcr->in_param, &vhcr->out_param);
                break;

        case RES_SRQ:
                err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
                                   vhcr->in_param, &vhcr->out_param);
                break;

        case RES_MAC:
                err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
                                   vhcr->in_param, &vhcr->out_param);
                break;

        case RES_VLAN:
                err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param);
                break;

        default:
                break;
        }
        return err;
}

/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
        return (be32_to_cpu(mpt->flags) >> 9) & 1;
}

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
{
        return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
}

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
        return be32_to_cpu(mpt->mtt_sz);
}

static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
        return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
{
        return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}

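/*
 * Number of MTT entries a QP context needs: derive the SQ and RQ
 * buffer sizes from their log sizes/strides (the RQ is absent for
 * SRQ/RSS/XRC QPs), add the page offset, and round the page count up
 * to a power of two.
 */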
static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
        int page_shift = (qpc->log_page_size & 0x3f) + 12;
        int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
        int log_sq_stride = qpc->sq_size_stride & 7;
        int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
        int log_rq_stride = qpc->rq_size_stride & 7;
        int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
        int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
        int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
        int sq_size;
        int rq_size;
        int total_pages;
        int total_mem;
        int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

        sq_size = 1 << (log_sq_size + log_sq_stride + 4);
        rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
        total_mem = sq_size + rq_size;
        total_pages =
                roundup_pow_of_two((total_mem + (page_offset << 6)) >>
                                   page_shift);

        return total_pages;
}

static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
                           int size, struct res_mtt *mtt)
{
        int res_start = mtt->com.res_id;
        int res_size = (1 << mtt->order);

        if (start < res_start || start + size > res_start + res_size)
                return -EPERM;
        return 0;
}

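/*
 * SW2HW_MPT: move the MPT to hardware ownership.  For non-physical
 * MRs, validate the referenced MTT range and take a reference on it
 * before passing the command through to the firmware.
 */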
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
{
        int err;
        int index = vhcr->in_modifier;
        struct res_mtt *mtt;
        struct res_mpt *mpt;
        int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
        int phys;
        int id;

        id = index & mpt_mask(dev);
        err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
        if (err)
                return err;

        phys = mr_phys_mpt(inbox->buf);
        if (!phys) {
                err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
                if (err)
                        goto ex_abort;

                err = check_mtt_range(dev, slave, mtt_base,
                                      mr_get_mtt_size(inbox->buf), mtt);
                if (err)
                        goto ex_put;

                mpt->mtt = mtt;
        }

        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
        if (err)
                goto ex_put;

        if (!phys) {
                atomic_inc(&mtt->ref_count);
                put_res(dev, slave, mtt->com.res_id, RES_MTT);
        }

        res_end_move(dev, slave, RES_MPT, id);
        return 0;

ex_put:
        if (!phys)
                put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
        res_abort_move(dev, slave, RES_MPT, id);

        return err;
}

int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
{
        int err;
        int index = vhcr->in_modifier;
        struct res_mpt *mpt;
        int id;

        id = index & mpt_mask(dev);
        err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
        if (err)
                return err;

        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
        if (err)
                goto ex_abort;

        if (mpt->mtt)
                atomic_dec(&mpt->mtt->ref_count);

        res_end_move(dev, slave, RES_MPT, id);
        return 0;

ex_abort:
        res_abort_move(dev, slave, RES_MPT, id);

        return err;
}

1698 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
1699                            struct mlx4_vhcr *vhcr,
1700                            struct mlx4_cmd_mailbox *inbox,
1701                            struct mlx4_cmd_mailbox *outbox,
1702                            struct mlx4_cmd_info *cmd)
1703 {
1704         int err;
1705         int index = vhcr->in_modifier;
1706         struct res_mpt *mpt;
1707         int id;
1708
1709         id = index & mpt_mask(dev);
1710         err = get_res(dev, slave, id, RES_MPT, &mpt);
1711         if (err)
1712                 return err;
1713
1714         if (mpt->com.from_state != RES_MPT_HW) {
1715                 err = -EBUSY;
1716                 goto out;
1717         }
1718
1719         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1720
1721 out:
1722         put_res(dev, slave, id, RES_MPT);
1723         return err;
1724 }
1725
1726 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
1727 {
1728         return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
1729 }
1730
1731 static int qp_get_scqn(struct mlx4_qp_context *qpc)
1732 {
1733         return be32_to_cpu(qpc->cqn_send) & 0xffffff;
1734 }
1735
1736 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
1737 {
1738         return be32_to_cpu(qpc->srqn) & 0x1ffffff;
1739 }
1740
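/*
 * RST2INIT: before handing the QP to the firmware, look up and validate
 * every resource the QP context references (the MTT range, the receive
 * and send CQs, and the SRQ when one is used).  On success each
 * referenced resource gains a reference so it cannot be destroyed while
 * the QP is using it.
 */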
1741 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
1742                              struct mlx4_vhcr *vhcr,
1743                              struct mlx4_cmd_mailbox *inbox,
1744                              struct mlx4_cmd_mailbox *outbox,
1745                              struct mlx4_cmd_info *cmd)
1746 {
1747         int err;
1748         int qpn = vhcr->in_modifier & 0x7fffff;
1749         struct res_mtt *mtt;
1750         struct res_qp *qp;
1751         struct mlx4_qp_context *qpc = inbox->buf + 8;
1752         int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
1753         int mtt_size = qp_get_mtt_size(qpc);
1754         struct res_cq *rcq;
1755         struct res_cq *scq;
1756         int rcqn = qp_get_rcqn(qpc);
1757         int scqn = qp_get_scqn(qpc);
1758         u32 srqn = qp_get_srqn(qpc) & 0xffffff;
1759         int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
1760         struct res_srq *srq;
1761         int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
1762
1763         err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
1764         if (err)
1765                 return err;
1766         qp->local_qpn = local_qpn;
1767
1768         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
1769         if (err)
1770                 goto ex_abort;
1771
1772         err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
1773         if (err)
1774                 goto ex_put_mtt;
1775
1776         err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
1777         if (err)
1778                 goto ex_put_mtt;
1779
1780         if (scqn != rcqn) {
1781                 err = get_res(dev, slave, scqn, RES_CQ, &scq);
1782                 if (err)
1783                         goto ex_put_rcq;
        } else {
                scq = rcq;
        }
1786
1787         if (use_srq) {
1788                 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
1789                 if (err)
1790                         goto ex_put_scq;
1791         }
1792
1793         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1794         if (err)
1795                 goto ex_put_srq;
1796         atomic_inc(&mtt->ref_count);
1797         qp->mtt = mtt;
1798         atomic_inc(&rcq->ref_count);
1799         qp->rcq = rcq;
1800         atomic_inc(&scq->ref_count);
1801         qp->scq = scq;
1802
1803         if (scqn != rcqn)
1804                 put_res(dev, slave, scqn, RES_CQ);
1805
1806         if (use_srq) {
1807                 atomic_inc(&srq->ref_count);
1808                 put_res(dev, slave, srqn, RES_SRQ);
1809                 qp->srq = srq;
1810         }
1811         put_res(dev, slave, rcqn, RES_CQ);
1812         put_res(dev, slave, mtt_base, RES_MTT);
1813         res_end_move(dev, slave, RES_QP, qpn);
1814
1815         return 0;
1816
1817 ex_put_srq:
1818         if (use_srq)
1819                 put_res(dev, slave, srqn, RES_SRQ);
1820 ex_put_scq:
1821         if (scqn != rcqn)
1822                 put_res(dev, slave, scqn, RES_CQ);
1823 ex_put_rcq:
1824         put_res(dev, slave, rcqn, RES_CQ);
1825 ex_put_mtt:
1826         put_res(dev, slave, mtt_base, RES_MTT);
1827 ex_abort:
1828         res_abort_move(dev, slave, RES_QP, qpn);
1829
1830         return err;
1831 }
1832
1833 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
1834 {
1835         return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
1836 }
1837
1838 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
1839 {
1840         int log_eq_size = eqc->log_eq_size & 0x1f;
1841         int page_shift = (eqc->log_page_size & 0x3f) + 12;
1842
1843         if (log_eq_size + 5 < page_shift)
1844                 return 1;
1845
1846         return 1 << (log_eq_size + 5 - page_shift);
1847 }
1848
1849 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
1850 {
1851         return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
1852 }
1853
1854 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
1855 {
1856         int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
1857         int page_shift = (cqc->log_page_size & 0x3f) + 12;
1858
1859         if (log_cq_size + 5 < page_shift)
1860                 return 1;
1861
1862         return 1 << (log_cq_size + 5 - page_shift);
1863 }
1864
1865 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
1866                           struct mlx4_vhcr *vhcr,
1867                           struct mlx4_cmd_mailbox *inbox,
1868                           struct mlx4_cmd_mailbox *outbox,
1869                           struct mlx4_cmd_info *cmd)
1870 {
1871         int err;
1872         int eqn = vhcr->in_modifier;
1873         int res_id = (slave << 8) | eqn;
1874         struct mlx4_eq_context *eqc = inbox->buf;
1875         int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
1876         int mtt_size = eq_get_mtt_size(eqc);
1877         struct res_eq *eq;
1878         struct res_mtt *mtt;
1879
1880         err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
1881         if (err)
1882                 return err;
1883         err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
1884         if (err)
1885                 goto out_add;
1886
1887         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
1888         if (err)
1889                 goto out_move;
1890
1891         err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
1892         if (err)
1893                 goto out_put;
1894
1895         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1896         if (err)
1897                 goto out_put;
1898
1899         atomic_inc(&mtt->ref_count);
1900         eq->mtt = mtt;
1901         put_res(dev, slave, mtt->com.res_id, RES_MTT);
1902         res_end_move(dev, slave, RES_EQ, res_id);
1903         return 0;
1904
1905 out_put:
1906         put_res(dev, slave, mtt->com.res_id, RES_MTT);
1907 out_move:
1908         res_abort_move(dev, slave, RES_EQ, res_id);
1909 out_add:
1910         rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
1911         return err;
1912 }
1913
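/*
 * Find the tracked MTT resource that fully contains [start, start + len)
 * and mark it busy under the tracker lock; the caller releases it with
 * put_res() once the write has completed.
 */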
1914 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
1915                               int len, struct res_mtt **res)
1916 {
1917         struct mlx4_priv *priv = mlx4_priv(dev);
1918         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1919         struct res_mtt *mtt;
1920         int err = -EINVAL;
1921
1922         spin_lock_irq(mlx4_tlock(dev));
1923         list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
1924                             com.list) {
1925                 if (!check_mtt_range(dev, slave, start, len, mtt)) {
1926                         *res = mtt;
1927                         mtt->com.from_state = mtt->com.state;
1928                         mtt->com.state = RES_MTT_BUSY;
1929                         err = 0;
1930                         break;
1931                 }
1932         }
1933         spin_unlock_irq(mlx4_tlock(dev));
1934
1935         return err;
1936 }
1937
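/*
 * WRITE_MTT is emulated in software: the big-endian page list in the
 * inbox is translated to host byte order (with the present bit masked
 * off) and written through __mlx4_write_mtt() while the containing MTT
 * resource is held busy.
 */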
1938 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
1939                            struct mlx4_vhcr *vhcr,
1940                            struct mlx4_cmd_mailbox *inbox,
1941                            struct mlx4_cmd_mailbox *outbox,
1942                            struct mlx4_cmd_info *cmd)
1943 {
1944         struct mlx4_mtt mtt;
1945         __be64 *page_list = inbox->buf;
1946         u64 *pg_list = (u64 *)page_list;
1947         int i;
1948         struct res_mtt *rmtt = NULL;
1949         int start = be64_to_cpu(page_list[0]);
1950         int npages = vhcr->in_modifier;
1951         int err;
1952
1953         err = get_containing_mtt(dev, slave, start, npages, &rmtt);
1954         if (err)
1955                 return err;
1956
1957         /* Call the SW implementation of write_mtt:
1958          * - Prepare a dummy mtt struct
         * - Translate inbox contents to simple addresses in host endianness */
        mtt.offset = 0;  /* TBD: offset handling is broken; left as zero
                            since the offset is not actually used here */
1962         mtt.order = 0;
1963         mtt.page_shift = 0;
1964         for (i = 0; i < npages; ++i)
1965                 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
1966
1967         err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
1968                                ((u64 *)page_list + 2));
1969
1970         if (rmtt)
1971                 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
1972
1973         return err;
1974 }
1975
1976 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
1977                           struct mlx4_vhcr *vhcr,
1978                           struct mlx4_cmd_mailbox *inbox,
1979                           struct mlx4_cmd_mailbox *outbox,
1980                           struct mlx4_cmd_info *cmd)
1981 {
1982         int eqn = vhcr->in_modifier;
1983         int res_id = eqn | (slave << 8);
1984         struct res_eq *eq;
1985         int err;
1986
1987         err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
1988         if (err)
1989                 return err;
1990
1991         err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
1992         if (err)
1993                 goto ex_abort;
1994
1995         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1996         if (err)
1997                 goto ex_put;
1998
1999         atomic_dec(&eq->mtt->ref_count);
2000         put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2001         res_end_move(dev, slave, RES_EQ, res_id);
2002         rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2003
2004         return 0;
2005
2006 ex_put:
2007         put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2008 ex_abort:
2009         res_abort_move(dev, slave, RES_EQ, res_id);
2010
2011         return err;
2012 }
2013
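/*
 * Deliver an asynchronous event to a slave by injecting the EQE into the
 * event EQ the slave registered for this event type, using the GEN_EQE
 * firmware command.  Generation is serialized per slave by
 * gen_eqe_mutex.
 */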
2014 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
2015 {
2016         struct mlx4_priv *priv = mlx4_priv(dev);
2017         struct mlx4_slave_event_eq_info *event_eq;
2018         struct mlx4_cmd_mailbox *mailbox;
2019         u32 in_modifier = 0;
2020         int err;
2021         int res_id;
2022         struct res_eq *req;
2023
2024         if (!priv->mfunc.master.slave_state)
2025                 return -EINVAL;
2026
2027         event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
2028
2029         /* Create the event only if the slave is registered */
2030         if (event_eq->eqn < 0)
2031                 return 0;
2032
2033         mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2034         res_id = (slave << 8) | event_eq->eqn;
2035         err = get_res(dev, slave, res_id, RES_EQ, &req);
2036         if (err)
2037                 goto unlock;
2038
2039         if (req->com.from_state != RES_EQ_HW) {
2040                 err = -EINVAL;
2041                 goto put;
2042         }
2043
2044         mailbox = mlx4_alloc_cmd_mailbox(dev);
2045         if (IS_ERR(mailbox)) {
2046                 err = PTR_ERR(mailbox);
2047                 goto put;
2048         }
2049
2050         if (eqe->type == MLX4_EVENT_TYPE_CMD) {
2051                 ++event_eq->token;
2052                 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
2053         }
2054
        /* copy the EQE payload only; the trailing ownership bytes of the
         * 32-byte EQE are not part of the event data */
        memcpy(mailbox->buf, (u8 *) eqe, 28);
2056
2057         in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
2058
2059         err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
2060                        MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
2061                        MLX4_CMD_NATIVE);
2062
2063         put_res(dev, slave, res_id, RES_EQ);
2064         mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2065         mlx4_free_cmd_mailbox(dev, mailbox);
2066         return err;
2067
2068 put:
2069         put_res(dev, slave, res_id, RES_EQ);
2070
2071 unlock:
2072         mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2073         return err;
2074 }
2075
2076 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
2077                           struct mlx4_vhcr *vhcr,
2078                           struct mlx4_cmd_mailbox *inbox,
2079                           struct mlx4_cmd_mailbox *outbox,
2080                           struct mlx4_cmd_info *cmd)
2081 {
2082         int eqn = vhcr->in_modifier;
2083         int res_id = eqn | (slave << 8);
2084         struct res_eq *eq;
2085         int err;
2086
2087         err = get_res(dev, slave, res_id, RES_EQ, &eq);
2088         if (err)
2089                 return err;
2090
2091         if (eq->com.from_state != RES_EQ_HW) {
2092                 err = -EINVAL;
2093                 goto ex_put;
2094         }
2095
2096         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2097
2098 ex_put:
2099         put_res(dev, slave, res_id, RES_EQ);
2100         return err;
2101 }
2102
2103 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2104                           struct mlx4_vhcr *vhcr,
2105                           struct mlx4_cmd_mailbox *inbox,
2106                           struct mlx4_cmd_mailbox *outbox,
2107                           struct mlx4_cmd_info *cmd)
2108 {
2109         int err;
2110         int cqn = vhcr->in_modifier;
2111         struct mlx4_cq_context *cqc = inbox->buf;
2112         int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2113         struct res_cq *cq;
2114         struct res_mtt *mtt;
2115
2116         err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
2117         if (err)
2118                 return err;
2119         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2120         if (err)
2121                 goto out_move;
2122         err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2123         if (err)
2124                 goto out_put;
2125         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2126         if (err)
2127                 goto out_put;
2128         atomic_inc(&mtt->ref_count);
2129         cq->mtt = mtt;
2130         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2131         res_end_move(dev, slave, RES_CQ, cqn);
2132         return 0;
2133
2134 out_put:
2135         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2136 out_move:
2137         res_abort_move(dev, slave, RES_CQ, cqn);
2138         return err;
2139 }
2140
2141 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2142                           struct mlx4_vhcr *vhcr,
2143                           struct mlx4_cmd_mailbox *inbox,
2144                           struct mlx4_cmd_mailbox *outbox,
2145                           struct mlx4_cmd_info *cmd)
2146 {
2147         int err;
2148         int cqn = vhcr->in_modifier;
2149         struct res_cq *cq;
2150
2151         err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
2152         if (err)
2153                 return err;
2154         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2155         if (err)
2156                 goto out_move;
2157         atomic_dec(&cq->mtt->ref_count);
2158         res_end_move(dev, slave, RES_CQ, cqn);
2159         return 0;
2160
2161 out_move:
2162         res_abort_move(dev, slave, RES_CQ, cqn);
2163         return err;
2164 }
2165
2166 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2167                           struct mlx4_vhcr *vhcr,
2168                           struct mlx4_cmd_mailbox *inbox,
2169                           struct mlx4_cmd_mailbox *outbox,
2170                           struct mlx4_cmd_info *cmd)
2171 {
2172         int cqn = vhcr->in_modifier;
2173         struct res_cq *cq;
2174         int err;
2175
2176         err = get_res(dev, slave, cqn, RES_CQ, &cq);
2177         if (err)
2178                 return err;
2179
        if (cq->com.from_state != RES_CQ_HW) {
                err = -EBUSY;
                goto ex_put;
        }
2182
2183         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2184 ex_put:
2185         put_res(dev, slave, cqn, RES_CQ);
2186
2187         return err;
2188 }
2189
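/*
 * CQ resize: verify that the CQ still references its original MTT,
 * validate the new MTT range given in the mailbox and, once the firmware
 * accepts the command, move the CQ's reference from the old MTT to the
 * new one.
 */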
2190 static int handle_resize(struct mlx4_dev *dev, int slave,
2191                          struct mlx4_vhcr *vhcr,
2192                          struct mlx4_cmd_mailbox *inbox,
2193                          struct mlx4_cmd_mailbox *outbox,
2194                          struct mlx4_cmd_info *cmd,
2195                          struct res_cq *cq)
2196 {
2197         int err;
2198         struct res_mtt *orig_mtt;
2199         struct res_mtt *mtt;
2200         struct mlx4_cq_context *cqc = inbox->buf;
2201         int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2202
2203         err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
2204         if (err)
2205                 return err;
2206
2207         if (orig_mtt != cq->mtt) {
2208                 err = -EINVAL;
2209                 goto ex_put;
2210         }
2211
2212         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2213         if (err)
2214                 goto ex_put;
2215
2216         err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2217         if (err)
2218                 goto ex_put1;
2219         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2220         if (err)
2221                 goto ex_put1;
2222         atomic_dec(&orig_mtt->ref_count);
2223         put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2224         atomic_inc(&mtt->ref_count);
2225         cq->mtt = mtt;
2226         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2227         return 0;
2228
2229 ex_put1:
2230         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2231 ex_put:
2232         put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2233
2234         return err;
2235
2236 }
2237
2238 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2239                            struct mlx4_vhcr *vhcr,
2240                            struct mlx4_cmd_mailbox *inbox,
2241                            struct mlx4_cmd_mailbox *outbox,
2242                            struct mlx4_cmd_info *cmd)
2243 {
2244         int cqn = vhcr->in_modifier;
2245         struct res_cq *cq;
2246         int err;
2247
2248         err = get_res(dev, slave, cqn, RES_CQ, &cq);
2249         if (err)
2250                 return err;
2251
        if (cq->com.from_state != RES_CQ_HW) {
                err = -EBUSY;
                goto ex_put;
        }
2254
2255         if (vhcr->op_modifier == 0) {
2256                 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
2257                 if (err)
2258                         goto ex_put;
2259         }
2260
2261         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2262 ex_put:
2263         put_res(dev, slave, cqn, RES_CQ);
2264
2265         return err;
2266 }
2267
2268 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
2269 {
2270         int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
2271         int log_rq_stride = srqc->logstride & 7;
2272         int page_shift = (srqc->log_page_size & 0x3f) + 12;
2273
2274         if (log_srq_size + log_rq_stride + 4 < page_shift)
2275                 return 1;
2276
2277         return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
2278 }
2279
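/*
 * SW2HW_SRQ: the SRQ number in the context must match the in_modifier,
 * and the MTT range the context references must belong to the slave,
 * before the command is forwarded to the firmware.
 */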
2280 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2281                            struct mlx4_vhcr *vhcr,
2282                            struct mlx4_cmd_mailbox *inbox,
2283                            struct mlx4_cmd_mailbox *outbox,
2284                            struct mlx4_cmd_info *cmd)
2285 {
2286         int err;
2287         int srqn = vhcr->in_modifier;
2288         struct res_mtt *mtt;
2289         struct res_srq *srq;
2290         struct mlx4_srq_context *srqc = inbox->buf;
2291         int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
2292
2293         if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
2294                 return -EINVAL;
2295
2296         err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
2297         if (err)
2298                 return err;
2299         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2300         if (err)
2301                 goto ex_abort;
2302         err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
2303                               mtt);
2304         if (err)
2305                 goto ex_put_mtt;
2306
2307         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2308         if (err)
2309                 goto ex_put_mtt;
2310
2311         atomic_inc(&mtt->ref_count);
2312         srq->mtt = mtt;
2313         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2314         res_end_move(dev, slave, RES_SRQ, srqn);
2315         return 0;
2316
2317 ex_put_mtt:
2318         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2319 ex_abort:
2320         res_abort_move(dev, slave, RES_SRQ, srqn);
2321
2322         return err;
2323 }
2324
2325 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2326                            struct mlx4_vhcr *vhcr,
2327                            struct mlx4_cmd_mailbox *inbox,
2328                            struct mlx4_cmd_mailbox *outbox,
2329                            struct mlx4_cmd_info *cmd)
2330 {
2331         int err;
2332         int srqn = vhcr->in_modifier;
2333         struct res_srq *srq;
2334
2335         err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
2336         if (err)
2337                 return err;
2338         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2339         if (err)
2340                 goto ex_abort;
2341         atomic_dec(&srq->mtt->ref_count);
2342         if (srq->cq)
2343                 atomic_dec(&srq->cq->ref_count);
2344         res_end_move(dev, slave, RES_SRQ, srqn);
2345
2346         return 0;
2347
2348 ex_abort:
2349         res_abort_move(dev, slave, RES_SRQ, srqn);
2350
2351         return err;
2352 }
2353
2354 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2355                            struct mlx4_vhcr *vhcr,
2356                            struct mlx4_cmd_mailbox *inbox,
2357                            struct mlx4_cmd_mailbox *outbox,
2358                            struct mlx4_cmd_info *cmd)
2359 {
2360         int err;
2361         int srqn = vhcr->in_modifier;
2362         struct res_srq *srq;
2363
2364         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2365         if (err)
2366                 return err;
2367         if (srq->com.from_state != RES_SRQ_HW) {
2368                 err = -EBUSY;
2369                 goto out;
2370         }
2371         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2372 out:
2373         put_res(dev, slave, srqn, RES_SRQ);
2374         return err;
2375 }
2376
2377 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2378                          struct mlx4_vhcr *vhcr,
2379                          struct mlx4_cmd_mailbox *inbox,
2380                          struct mlx4_cmd_mailbox *outbox,
2381                          struct mlx4_cmd_info *cmd)
2382 {
2383         int err;
2384         int srqn = vhcr->in_modifier;
2385         struct res_srq *srq;
2386
2387         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2388         if (err)
2389                 return err;
2390
2391         if (srq->com.from_state != RES_SRQ_HW) {
2392                 err = -EBUSY;
2393                 goto out;
2394         }
2395
2396         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2397 out:
2398         put_res(dev, slave, srqn, RES_SRQ);
2399         return err;
2400 }
2401
2402 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
2403                         struct mlx4_vhcr *vhcr,
2404                         struct mlx4_cmd_mailbox *inbox,
2405                         struct mlx4_cmd_mailbox *outbox,
2406                         struct mlx4_cmd_info *cmd)
2407 {
2408         int err;
2409         int qpn = vhcr->in_modifier & 0x7fffff;
2410         struct res_qp *qp;
2411
2412         err = get_res(dev, slave, qpn, RES_QP, &qp);
2413         if (err)
2414                 return err;
2415         if (qp->com.from_state != RES_QP_HW) {
2416                 err = -EBUSY;
2417                 goto out;
2418         }
2419
2420         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2421 out:
2422         put_res(dev, slave, qpn, RES_QP);
2423         return err;
2424 }
2425
2426 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
2427                              struct mlx4_vhcr *vhcr,
2428                              struct mlx4_cmd_mailbox *inbox,
2429                              struct mlx4_cmd_mailbox *outbox,
2430                              struct mlx4_cmd_info *cmd)
2431 {
2432         struct mlx4_qp_context *qpc = inbox->buf + 8;
2433
2434         update_ud_gid(dev, qpc, (u8)slave);
2435
2436         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2437 }
2438
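/*
 * Any-state-to-RST: once the firmware has returned the QP to reset, drop
 * the references taken at RST2INIT time (MTT, CQs, and SRQ if any) and
 * move the tracked QP back to the MAPPED state.
 */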
2439 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
2440                          struct mlx4_vhcr *vhcr,
2441                          struct mlx4_cmd_mailbox *inbox,
2442                          struct mlx4_cmd_mailbox *outbox,
2443                          struct mlx4_cmd_info *cmd)
2444 {
2445         int err;
2446         int qpn = vhcr->in_modifier & 0x7fffff;
2447         struct res_qp *qp;
2448
2449         err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
2450         if (err)
2451                 return err;
2452         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2453         if (err)
2454                 goto ex_abort;
2455
2456         atomic_dec(&qp->mtt->ref_count);
2457         atomic_dec(&qp->rcq->ref_count);
2458         atomic_dec(&qp->scq->ref_count);
2459         if (qp->srq)
2460                 atomic_dec(&qp->srq->ref_count);
2461         res_end_move(dev, slave, RES_QP, qpn);
2462         return 0;
2463
2464 ex_abort:
2465         res_abort_move(dev, slave, RES_QP, qpn);
2466
2467         return err;
2468 }
2469
2470 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
2471                                 struct res_qp *rqp, u8 *gid)
2472 {
2473         struct res_gid *res;
2474
2475         list_for_each_entry(res, &rqp->mcg_list, list) {
2476                 if (!memcmp(res->gid, gid, 16))
2477                         return res;
2478         }
2479         return NULL;
2480 }
2481
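/*
 * Record a multicast group the QP has been attached to, so the
 * attachment can be torn down when the slave shuts down.  Returns
 * -EEXIST if the GID is already recorded for this QP.
 */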
2482 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2483                        u8 *gid, enum mlx4_protocol prot)
2484 {
2485         struct res_gid *res;
2486         int err;
2487
        res = kzalloc(sizeof(*res), GFP_KERNEL);
2489         if (!res)
2490                 return -ENOMEM;
2491
2492         spin_lock_irq(&rqp->mcg_spl);
2493         if (find_gid(dev, slave, rqp, gid)) {
2494                 kfree(res);
2495                 err = -EEXIST;
2496         } else {
2497                 memcpy(res->gid, gid, 16);
2498                 res->prot = prot;
2499                 list_add_tail(&res->list, &rqp->mcg_list);
2500                 err = 0;
2501         }
2502         spin_unlock_irq(&rqp->mcg_spl);
2503
2504         return err;
2505 }
2506
2507 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2508                        u8 *gid, enum mlx4_protocol prot)
2509 {
2510         struct res_gid *res;
2511         int err;
2512
2513         spin_lock_irq(&rqp->mcg_spl);
2514         res = find_gid(dev, slave, rqp, gid);
        if (!res || res->prot != prot) {
                err = -EINVAL;
        } else {
2518                 list_del(&res->list);
2519                 kfree(res);
2520                 err = 0;
2521         }
2522         spin_unlock_irq(&rqp->mcg_spl);
2523
2524         return err;
2525 }
2526
2527 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
2528                                struct mlx4_vhcr *vhcr,
2529                                struct mlx4_cmd_mailbox *inbox,
2530                                struct mlx4_cmd_mailbox *outbox,
2531                                struct mlx4_cmd_info *cmd)
2532 {
2533         struct mlx4_qp qp; /* dummy for calling attach/detach */
2534         u8 *gid = inbox->buf;
2535         enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
        int err;
2537         int qpn;
2538         struct res_qp *rqp;
2539         int attach = vhcr->op_modifier;
2540         int block_loopback = vhcr->in_modifier >> 31;
2541         u8 steer_type_mask = 2;
2542         enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
2543
2544         qpn = vhcr->in_modifier & 0xffffff;
2545         err = get_res(dev, slave, qpn, RES_QP, &rqp);
2546         if (err)
2547                 return err;
2548
2549         qp.qpn = qpn;
2550         if (attach) {
2551                 err = add_mcg_res(dev, slave, rqp, gid, prot);
2552                 if (err)
2553                         goto ex_put;
2554
2555                 err = mlx4_qp_attach_common(dev, &qp, gid,
2556                                             block_loopback, prot, type);
2557                 if (err)
2558                         goto ex_rem;
2559         } else {
2560                 err = rem_mcg_res(dev, slave, rqp, gid, prot);
2561                 if (err)
2562                         goto ex_put;
2563                 err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
2564         }
2565
2566         put_res(dev, slave, qpn, RES_QP);
2567         return 0;
2568
2569 ex_rem:
2570         /* ignore error return below, already in error */
        rem_mcg_res(dev, slave, rqp, gid, prot);
2572 ex_put:
2573         put_res(dev, slave, qpn, RES_QP);
2574
2575         return err;
2576 }
2577
2578 enum {
2579         BUSY_MAX_RETRIES = 10
2580 };
2581
2582 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
2583                                struct mlx4_vhcr *vhcr,
2584                                struct mlx4_cmd_mailbox *inbox,
2585                                struct mlx4_cmd_mailbox *outbox,
2586                                struct mlx4_cmd_info *cmd)
2587 {
2588         int err;
2589         int index = vhcr->in_modifier & 0xffff;
2590
2591         err = get_res(dev, slave, index, RES_COUNTER, NULL);
2592         if (err)
2593                 return err;
2594
2595         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2596         put_res(dev, slave, index, RES_COUNTER);
2597         return err;
2598 }
2599
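/*
 * Detach the QP from every multicast group recorded in its mcg_list and
 * free the bookkeeping entries; used while cleaning up a slave.
 */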
2600 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
2601 {
2602         struct res_gid *rgid;
2603         struct res_gid *tmp;
2605         struct mlx4_qp qp; /* dummy for calling attach/detach */
2606
2607         list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
2608                 qp.qpn = rqp->local_qpn;
                /* best effort: errors are ignored, the slave is going away */
                mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
                                      MLX4_MC_STEER);
2611                 list_del(&rgid->list);
2612                 kfree(rgid);
2613         }
2614 }
2615
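/*
 * Mark all of the slave's resources of the given type as BUSY/removing
 * so that no new operations can start on them.  Returns the number of
 * resources that were already busy and could not be claimed;
 * move_all_busy() below keeps retrying for up to five seconds.
 */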
2616 static int _move_all_busy(struct mlx4_dev *dev, int slave,
2617                           enum mlx4_resource type, int print)
2618 {
2619         struct mlx4_priv *priv = mlx4_priv(dev);
2620         struct mlx4_resource_tracker *tracker =
2621                 &priv->mfunc.master.res_tracker;
2622         struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
2623         struct res_common *r;
2624         struct res_common *tmp;
2625         int busy;
2626
2627         busy = 0;
2628         spin_lock_irq(mlx4_tlock(dev));
2629         list_for_each_entry_safe(r, tmp, rlist, list) {
2630                 if (r->owner == slave) {
2631                         if (!r->removing) {
2632                                 if (r->state == RES_ANY_BUSY) {
2633                                         if (print)
2634                                                 mlx4_dbg(dev,
2635                                                          "%s id 0x%x is busy\n",
2636                                                           ResourceType(type),
2637                                                           r->res_id);
2638                                         ++busy;
2639                                 } else {
2640                                         r->from_state = r->state;
2641                                         r->state = RES_ANY_BUSY;
2642                                         r->removing = 1;
2643                                 }
2644                         }
2645                 }
2646         }
2647         spin_unlock_irq(mlx4_tlock(dev));
2648
2649         return busy;
2650 }
2651
2652 static int move_all_busy(struct mlx4_dev *dev, int slave,
2653                          enum mlx4_resource type)
2654 {
2655         unsigned long begin;
2656         int busy;
2657
2658         begin = jiffies;
2659         do {
2660                 busy = _move_all_busy(dev, slave, type, 0);
2661                 if (time_after(jiffies, begin + 5 * HZ))
2662                         break;
2663                 if (busy)
2664                         cond_resched();
2665         } while (busy);
2666
2667         if (busy)
2668                 busy = _move_all_busy(dev, slave, type, 1);
2669
2670         return busy;
2671 }
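
/*
 * Unwind every QP the slave still owns: HW-owned QPs are kicked back to
 * reset with 2RST_QP, mapped QPs have their ICM freed, and reserved QPs
 * are finally removed from the tracker and freed.
 */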
2672 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
2673 {
2674         struct mlx4_priv *priv = mlx4_priv(dev);
2675         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2676         struct list_head *qp_list =
2677                 &tracker->slave_list[slave].res_list[RES_QP];
2678         struct res_qp *qp;
2679         struct res_qp *tmp;
2680         int state;
2681         u64 in_param;
2682         int qpn;
2683         int err;
2684
2685         err = move_all_busy(dev, slave, RES_QP);
2686         if (err)
                mlx4_warn(dev, "rem_slave_qps: Could not move all qps to "
                          "busy for slave %d\n", slave);
2689
2690         spin_lock_irq(mlx4_tlock(dev));
2691         list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
2692                 spin_unlock_irq(mlx4_tlock(dev));
2693                 if (qp->com.owner == slave) {
2694                         qpn = qp->com.res_id;
2695                         detach_qp(dev, slave, qp);
2696                         state = qp->com.from_state;
2697                         while (state != 0) {
2698                                 switch (state) {
2699                                 case RES_QP_RESERVED:
2700                                         spin_lock_irq(mlx4_tlock(dev));
2701                                         radix_tree_delete(&tracker->res_tree[RES_QP],
2702                                                           qp->com.res_id);
2703                                         list_del(&qp->com.list);
2704                                         spin_unlock_irq(mlx4_tlock(dev));
2705                                         kfree(qp);
2706                                         state = 0;
2707                                         break;
2708                                 case RES_QP_MAPPED:
2709                                         if (!valid_reserved(dev, slave, qpn))
2710                                                 __mlx4_qp_free_icm(dev, qpn);
2711                                         state = RES_QP_RESERVED;
2712                                         break;
2713                                 case RES_QP_HW:
2714                                         in_param = slave;
2715                                         err = mlx4_cmd(dev, in_param,
2716                                                        qp->local_qpn, 2,
2717                                                        MLX4_CMD_2RST_QP,
2718                                                        MLX4_CMD_TIME_CLASS_A,
2719                                                        MLX4_CMD_NATIVE);
2720                                         if (err)
2721                                                 mlx4_dbg(dev, "rem_slave_qps: failed"
2722                                                          " to move slave %d qpn %d to"
2723                                                          " reset\n", slave,
2724                                                          qp->local_qpn);
2725                                         atomic_dec(&qp->rcq->ref_count);
2726                                         atomic_dec(&qp->scq->ref_count);
2727                                         atomic_dec(&qp->mtt->ref_count);
2728                                         if (qp->srq)
2729                                                 atomic_dec(&qp->srq->ref_count);
2730                                         state = RES_QP_MAPPED;
2731                                         break;
2732                                 default:
2733                                         state = 0;
2734                                 }
2735                         }
2736                 }
2737                 spin_lock_irq(mlx4_tlock(dev));
2738         }
2739         spin_unlock_irq(mlx4_tlock(dev));
2740 }
2741
2742 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
2743 {
2744         struct mlx4_priv *priv = mlx4_priv(dev);
2745         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2746         struct list_head *srq_list =
2747                 &tracker->slave_list[slave].res_list[RES_SRQ];
2748         struct res_srq *srq;
2749         struct res_srq *tmp;
2750         int state;
2751         u64 in_param;
2753         int srqn;
2754         int err;
2755
2756         err = move_all_busy(dev, slave, RES_SRQ);
2757         if (err)
2758                 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
2759                           "busy for slave %d\n", slave);
2760
2761         spin_lock_irq(mlx4_tlock(dev));
2762         list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
2763                 spin_unlock_irq(mlx4_tlock(dev));
2764                 if (srq->com.owner == slave) {
2765                         srqn = srq->com.res_id;
2766                         state = srq->com.from_state;
2767                         while (state != 0) {
2768                                 switch (state) {
2769                                 case RES_SRQ_ALLOCATED:
2770                                         __mlx4_srq_free_icm(dev, srqn);
2771                                         spin_lock_irq(mlx4_tlock(dev));
2772                                         radix_tree_delete(&tracker->res_tree[RES_SRQ],
2773                                                           srqn);
2774                                         list_del(&srq->com.list);
2775                                         spin_unlock_irq(mlx4_tlock(dev));
2776                                         kfree(srq);
2777                                         state = 0;
2778                                         break;
2779
2780                                 case RES_SRQ_HW:
2781                                         in_param = slave;
2782                                         err = mlx4_cmd(dev, in_param, srqn, 1,
2783                                                        MLX4_CMD_HW2SW_SRQ,
2784                                                        MLX4_CMD_TIME_CLASS_A,
2785                                                        MLX4_CMD_NATIVE);
2786                                         if (err)
2787                                                 mlx4_dbg(dev, "rem_slave_srqs: failed"
2788                                                          " to move slave %d srq %d to"
2789                                                          " SW ownership\n",
2790                                                          slave, srqn);
2791
2792                                         atomic_dec(&srq->mtt->ref_count);
2793                                         if (srq->cq)
2794                                                 atomic_dec(&srq->cq->ref_count);
2795                                         state = RES_SRQ_ALLOCATED;
2796                                         break;
2797
2798                                 default:
2799                                         state = 0;
2800                                 }
2801                         }
2802                 }
2803                 spin_lock_irq(mlx4_tlock(dev));
2804         }
2805         spin_unlock_irq(mlx4_tlock(dev));
2806 }
2807
2808 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
2809 {
2810         struct mlx4_priv *priv = mlx4_priv(dev);
2811         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2812         struct list_head *cq_list =
2813                 &tracker->slave_list[slave].res_list[RES_CQ];
2814         struct res_cq *cq;
2815         struct res_cq *tmp;
2816         int state;
2817         u64 in_param;
2819         int cqn;
2820         int err;
2821
2822         err = move_all_busy(dev, slave, RES_CQ);
2823         if (err)
2824                 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
2825                           "busy for slave %d\n", slave);
2826
2827         spin_lock_irq(mlx4_tlock(dev));
2828         list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
2829                 spin_unlock_irq(mlx4_tlock(dev));
2830                 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
2831                         cqn = cq->com.res_id;
2832                         state = cq->com.from_state;
2833                         while (state != 0) {
2834                                 switch (state) {
2835                                 case RES_CQ_ALLOCATED:
2836                                         __mlx4_cq_free_icm(dev, cqn);
2837                                         spin_lock_irq(mlx4_tlock(dev));
2838                                         radix_tree_delete(&tracker->res_tree[RES_CQ],
2839                                                           cqn);
2840                                         list_del(&cq->com.list);
2841                                         spin_unlock_irq(mlx4_tlock(dev));
2842                                         kfree(cq);
2843                                         state = 0;
2844                                         break;
2845
2846                                 case RES_CQ_HW:
2847                                         in_param = slave;
2848                                         err = mlx4_cmd(dev, in_param, cqn, 1,
2849                                                        MLX4_CMD_HW2SW_CQ,
2850                                                        MLX4_CMD_TIME_CLASS_A,
2851                                                        MLX4_CMD_NATIVE);
2852                                         if (err)
2853                                                 mlx4_dbg(dev, "rem_slave_cqs: failed"
2854                                                          " to move slave %d cq %d to"
2855                                                          " SW ownership\n",
2856                                                          slave, cqn);
2857                                         atomic_dec(&cq->mtt->ref_count);
2858                                         state = RES_CQ_ALLOCATED;
2859                                         break;
2860
2861                                 default:
2862                                         state = 0;
2863                                 }
2864                         }
2865                 }
2866                 spin_lock_irq(mlx4_tlock(dev));
2867         }
2868         spin_unlock_irq(mlx4_tlock(dev));
2869 }
2870
2871 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
2872 {
2873         struct mlx4_priv *priv = mlx4_priv(dev);
2874         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2875         struct list_head *mpt_list =
2876                 &tracker->slave_list[slave].res_list[RES_MPT];
2877         struct res_mpt *mpt;
2878         struct res_mpt *tmp;
2879         int state;
2880         u64 in_param;
2882         int mptn;
2883         int err;
2884
2885         err = move_all_busy(dev, slave, RES_MPT);
2886         if (err)
2887                 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
2888                           "busy for slave %d\n", slave);
2889
2890         spin_lock_irq(mlx4_tlock(dev));
2891         list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
2892                 spin_unlock_irq(mlx4_tlock(dev));
2893                 if (mpt->com.owner == slave) {
2894                         mptn = mpt->com.res_id;
2895                         state = mpt->com.from_state;
2896                         while (state != 0) {
2897                                 switch (state) {
2898                                 case RES_MPT_RESERVED:
2899                                         __mlx4_mr_release(dev, mpt->key);
2900                                         spin_lock_irq(mlx4_tlock(dev));
2901                                         radix_tree_delete(&tracker->res_tree[RES_MPT],
2902                                                           mptn);
2903                                         list_del(&mpt->com.list);
2904                                         spin_unlock_irq(mlx4_tlock(dev));
2905                                         kfree(mpt);
2906                                         state = 0;
2907                                         break;
2908
2909                                 case RES_MPT_MAPPED:
2910                                         __mlx4_mr_free_icm(dev, mpt->key);
2911                                         state = RES_MPT_RESERVED;
2912                                         break;
2913
2914                                 case RES_MPT_HW:
2915                                         in_param = slave;
2916                                         err = mlx4_cmd(dev, in_param, mptn, 0,
2917                                                      MLX4_CMD_HW2SW_MPT,
2918                                                      MLX4_CMD_TIME_CLASS_A,
2919                                                      MLX4_CMD_NATIVE);
2920                                         if (err)
2921                                                 mlx4_dbg(dev, "rem_slave_mrs: failed"
2922                                                          " to move slave %d mpt %d to"
2923                                                          " SW ownership\n",
2924                                                          slave, mptn);
2925                                         if (mpt->mtt)
2926                                                 atomic_dec(&mpt->mtt->ref_count);
2927                                         state = RES_MPT_MAPPED;
2928                                         break;
2929                                 default:
2930                                         state = 0;
2931                                 }
2932                         }
2933                 }
2934                 spin_lock_irq(mlx4_tlock(dev));
2935         }
2936         spin_unlock_irq(mlx4_tlock(dev));
2937 }
2938
2939 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
2940 {
2941         struct mlx4_priv *priv = mlx4_priv(dev);
2942         struct mlx4_resource_tracker *tracker =
2943                 &priv->mfunc.master.res_tracker;
2944         struct list_head *mtt_list =
2945                 &tracker->slave_list[slave].res_list[RES_MTT];
2946         struct res_mtt *mtt;
2947         struct res_mtt *tmp;
2948         int state;
2950         int base;
2951         int err;
2952
2953         err = move_all_busy(dev, slave, RES_MTT);
2954         if (err)
2955                 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
2956                           "busy for slave %d\n", slave);
2957
2958         spin_lock_irq(mlx4_tlock(dev));
2959         list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
2960                 spin_unlock_irq(mlx4_tlock(dev));
2961                 if (mtt->com.owner == slave) {
2962                         base = mtt->com.res_id;
2963                         state = mtt->com.from_state;
2964                         while (state != 0) {
2965                                 switch (state) {
2966                                 case RES_MTT_ALLOCATED:
2967                                         __mlx4_free_mtt_range(dev, base,
2968                                                               mtt->order);
2969                                         spin_lock_irq(mlx4_tlock(dev));
2970                                         radix_tree_delete(&tracker->res_tree[RES_MTT],
2971                                                           base);
2972                                         list_del(&mtt->com.list);
2973                                         spin_unlock_irq(mlx4_tlock(dev));
2974                                         kfree(mtt);
2975                                         state = 0;
2976                                         break;
2977
2978                                 default:
2979                                         state = 0;
2980                                 }
2981                         }
2982                 }
2983                 spin_lock_irq(mlx4_tlock(dev));
2984         }
2985         spin_unlock_irq(mlx4_tlock(dev));
2986 }
2987
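/*
 * Like the other rem_slave_*() helpers, but HW2SW_EQ is issued through
 * mlx4_cmd_box() with a command mailbox; if the mailbox cannot be
 * allocated, the entry is retried after rescheduling.
 */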
2988 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
2989 {
2990         struct mlx4_priv *priv = mlx4_priv(dev);
2991         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2992         struct list_head *eq_list =
2993                 &tracker->slave_list[slave].res_list[RES_EQ];
2994         struct res_eq *eq;
2995         struct res_eq *tmp;
2996         int err;
2997         int state;
2999         int eqn;
3000         struct mlx4_cmd_mailbox *mailbox;
3001
3002         err = move_all_busy(dev, slave, RES_EQ);
3003         if (err)
3004                 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
3005                           "busy for slave %d\n", slave);
3006
3007         spin_lock_irq(mlx4_tlock(dev));
3008         list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
3009                 spin_unlock_irq(mlx4_tlock(dev));
3010                 if (eq->com.owner == slave) {
3011                         eqn = eq->com.res_id;
3012                         state = eq->com.from_state;
3013                         while (state != 0) {
3014                                 switch (state) {
3015                                 case RES_EQ_RESERVED:
3016                                         spin_lock_irq(mlx4_tlock(dev));
3017                                         radix_tree_delete(&tracker->res_tree[RES_EQ],
3018                                                           eqn);
3019                                         list_del(&eq->com.list);
3020                                         spin_unlock_irq(mlx4_tlock(dev));
3021                                         kfree(eq);
3022                                         state = 0;
3023                                         break;
3024
3025                                 case RES_EQ_HW:
3026                                         mailbox = mlx4_alloc_cmd_mailbox(dev);
3027                                         if (IS_ERR(mailbox)) {
3028                                                 cond_resched();
3029                                                 continue;
3030                                         }
3031                                         err = mlx4_cmd_box(dev, slave, 0,
3032                                                            eqn & 0xff, 0,
3033                                                            MLX4_CMD_HW2SW_EQ,
3034                                                            MLX4_CMD_TIME_CLASS_A,
3035                                                            MLX4_CMD_NATIVE);
                                        if (err)
                                                mlx4_dbg(dev, "rem_slave_eqs: failed"
                                                         " to move slave %d eq %d to"
                                                         " SW ownership\n",
                                                         slave, eqn);
                                        mlx4_free_cmd_mailbox(dev, mailbox);
                                        atomic_dec(&eq->mtt->ref_count);
                                        state = RES_EQ_RESERVED;
3044                                         break;
3045
3046                                 default:
3047                                         state = 0;
3048                                 }
3049                         }
3050                 }
3051                 spin_lock_irq(mlx4_tlock(dev));
3052         }
3053         spin_unlock_irq(mlx4_tlock(dev));
3054 }
3055
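/*
 * Release everything a slave still owns, under the slave's tracker
 * mutex.  QPs are torn down first so that the references they hold on
 * CQs, SRQs and MTTs are dropped before those resources are freed; MTTs
 * go last for the same reason.
 */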
3056 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
3057 {
3058         struct mlx4_priv *priv = mlx4_priv(dev);
3059
3060         mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
        /* TODO: VLAN resources are not cleaned up here yet */
3062         rem_slave_macs(dev, slave);
3063         rem_slave_qps(dev, slave);
3064         rem_slave_srqs(dev, slave);
3065         rem_slave_cqs(dev, slave);
3066         rem_slave_mrs(dev, slave);
3067         rem_slave_eqs(dev, slave);
3068         rem_slave_mtts(dev, slave);
3069         mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3070 }