mlx4: Ethernet port management modifications
[cascardo/linux.git] / drivers / net / ethernet / mellanox / mlx4 / resource_tracker.c
1 /*
2  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
4  * All rights reserved.
5  * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  */
35
36 #include <linux/sched.h>
37 #include <linux/pci.h>
38 #include <linux/errno.h>
39 #include <linux/kernel.h>
40 #include <linux/io.h>
41 #include <linux/mlx4/cmd.h>
42 #include <linux/mlx4/qp.h>
43
44 #include "mlx4.h"
45 #include "fw.h"
46
/* Bit 63 of a tracked MAC value marks the entry as in use; the low 63
 * bits hold the address itself (see MLX4_MAC_MASK). */
#define MLX4_MAC_VALID          (1ull << 63)
#define MLX4_MAC_MASK           0x7fffffffffffffffULL
/* NOTE(review): ETH_ALEN is normally provided by <linux/if_ether.h>;
 * this local redefinition risks a redefinition warning - confirm it can
 * be replaced by including that header. */
#define ETH_ALEN                6
50
/* A MAC address registered by a slave on a given physical port. */
struct mac_res {
	struct list_head list;	/* entry on a per-slave MAC list */
	u64 mac;
	u8 port;
};
56
/*
 * Bookkeeping shared by every tracked resource type; embedded as the
 * first member of each res_* struct so entries can be cast back and
 * forth between the generic and the specific view.
 */
struct res_common {
	struct list_head	list;		/* entry on owner slave's per-type list */
	u32			res_id;		/* key in the per-type radix tree */
	int			owner;		/* slave function that owns the resource */
	int			state;		/* current RES_* state (type-specific enum) */
	int			from_state;	/* state saved when marked busy */
	int			to_state;	/* state to commit on res_end_move() */
	int			removing;
};
66
/* Every per-type state enum starts from RES_ANY_BUSY, so a state value
 * of 1 means "temporarily locked" regardless of resource type. */
enum {
	RES_ANY_BUSY = 1
};
70
/* A multicast group a QP has been attached to (kept on res_qp.mcg_list). */
struct res_gid {
	struct list_head	list;
	u8			gid[16];	/* multicast GID */
	enum mlx4_protocol	prot;		/* protocol used at attach time */
};
76
/* Lifecycle of a tracked QP: reserved -> ICM mapped -> owned by HW. */
enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};
89
90 static inline const char *qp_states_str(enum res_qp_states state)
91 {
92         switch (state) {
93         case RES_QP_BUSY: return "RES_QP_BUSY";
94         case RES_QP_RESERVED: return "RES_QP_RESERVED";
95         case RES_QP_MAPPED: return "RES_QP_MAPPED";
96         case RES_QP_HW: return "RES_QP_HW";
97         default: return "Unknown";
98         }
99 }
100
/* Tracker entry for a QP, with back-pointers to the resources it uses. */
struct res_qp {
	struct res_common	com;
	struct res_mtt	       *mtt;		/* MTT backing the QP buffers */
	struct res_cq	       *rcq;		/* receive CQ */
	struct res_cq	       *scq;		/* send CQ */
	struct res_srq	       *srq;		/* attached SRQ, if any */
	struct list_head	mcg_list;	/* res_gid entries, guarded by mcg_spl */
	spinlock_t		mcg_spl;
	int			local_qpn;
};
111
/* MTT tracker states: a range is either free-floating or allocated. */
enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};
116
117 static inline const char *mtt_states_str(enum res_mtt_states state)
118 {
119         switch (state) {
120         case RES_MTT_BUSY: return "RES_MTT_BUSY";
121         case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
122         default: return "Unknown";
123         }
124 }
125
/* Tracker entry for an MTT range of 2^order entries. */
struct res_mtt {
	struct res_common	com;
	int			order;		/* log2 of the range size */
	atomic_t		ref_count;	/* users (MPTs/QPs/CQs/SRQs/EQs) of this range */
};
131
/* Lifecycle of a tracked MPT: reserved -> ICM mapped -> owned by HW. */
enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};
138
/* Tracker entry for a memory protection table entry (memory region). */
struct res_mpt {
	struct res_common	com;
	struct res_mtt	       *mtt;	/* MTT range the MPT points at */
	int			key;
};
144
/* Lifecycle of a tracked EQ: reserved -> owned by HW. */
enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};
150
/* Tracker entry for an event queue. */
struct res_eq {
	struct res_common	com;
	struct res_mtt	       *mtt;	/* MTT range backing the EQ buffer */
};
155
/* Lifecycle of a tracked CQ: allocated -> owned by HW. */
enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};
161
/* Tracker entry for a completion queue. */
struct res_cq {
	struct res_common	com;
	struct res_mtt	       *mtt;		/* MTT range backing the CQ buffer */
	atomic_t		ref_count;	/* QPs/SRQs still using this CQ */
};
167
/* Lifecycle of a tracked SRQ: allocated -> owned by HW. */
enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};
173
174 static inline const char *srq_states_str(enum res_srq_states state)
175 {
176         switch (state) {
177         case RES_SRQ_BUSY: return "RES_SRQ_BUSY";
178         case RES_SRQ_ALLOCATED: return "RES_SRQ_ALLOCATED";
179         case RES_SRQ_HW: return "RES_SRQ_HW";
180         default: return "Unknown";
181         }
182 }
183
/* Tracker entry for a shared receive queue. */
struct res_srq {
	struct res_common	com;
	struct res_mtt	       *mtt;		/* MTT range backing the SRQ buffer */
	struct res_cq	       *cq;		/* CQ the SRQ is associated with */
	atomic_t		ref_count;	/* QPs still attached to this SRQ */
};
190
/* Counter tracker states: a counter is either busy or allocated. */
enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};
195
196 static inline const char *counter_states_str(enum res_counter_states state)
197 {
198         switch (state) {
199         case RES_COUNTER_BUSY: return "RES_COUNTER_BUSY";
200         case RES_COUNTER_ALLOCATED: return "RES_COUNTER_ALLOCATED";
201         default: return "Unknown";
202         }
203 }
204
/* Tracker entry for a flow/port counter. */
struct res_counter {
	struct res_common	com;
	int			port;	/* port the counter is bound to */
};
209
210 /* For Debug uses */
211 static const char *ResourceType(enum mlx4_resource rt)
212 {
213         switch (rt) {
214         case RES_QP: return "RES_QP";
215         case RES_CQ: return "RES_CQ";
216         case RES_SRQ: return "RES_SRQ";
217         case RES_MPT: return "RES_MPT";
218         case RES_MTT: return "RES_MTT";
219         case RES_MAC: return  "RES_MAC";
220         case RES_EQ: return "RES_EQ";
221         case RES_COUNTER: return "RES_COUNTER";
222         default: return "Unknown resource type !!!";
223         };
224 }
225
/*
 * Allocate and initialize the master's resource tracker: a per-slave
 * array of list heads (one list per resource type, each guarded by the
 * slave's mutex) plus one radix tree per resource type, all serialized
 * by the tracker spinlock.
 *
 * Returns 0 on success, -ENOMEM if the slave array cannot be allocated.
 */
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;
	int t;

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0 ; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	/* NOTE(review): %ld assumes dev->num_slaves is a long - confirm
	 * against the field's declaration; mismatched printf specifiers
	 * are undefined behavior. */
	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	/* GFP_ATOMIC: tree nodes may be inserted under the tracker spinlock */
	for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		INIT_RADIX_TREE(&priv->mfunc.master.res_tracker.res_tree[i],
				GFP_ATOMIC|__GFP_NOWARN);

	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0 ;
}
254
255 void mlx4_free_resource_tracker(struct mlx4_dev *dev)
256 {
257         struct mlx4_priv *priv = mlx4_priv(dev);
258         int i;
259
260         if (priv->mfunc.master.res_tracker.slave_list) {
261                 for (i = 0 ; i < dev->num_slaves; i++)
262                         mlx4_delete_all_resources_for_slave(dev, i);
263
264                 kfree(priv->mfunc.master.res_tracker.slave_list);
265         }
266 }
267
/*
 * For a UD QP owned by @slave, rewrite the primary path's mgid_index to
 * 0x80 | slave.  Other transport types are left untouched.
 */
static void update_ud_gid(struct mlx4_dev *dev,
			  struct mlx4_qp_context *qp_ctx, u8 slave)
{
	/* transport/service type is carried in bits 16..23 of flags */
	u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;

	if (MLX4_QP_ST_UD == ts)
		qp_ctx->pri_path.mgid_index = 0x80 | slave;

	mlx4_dbg(dev, "slave %d, new gid index: 0x%x ",
		slave, qp_ctx->pri_path.mgid_index);
}
279
280 static int mpt_mask(struct mlx4_dev *dev)
281 {
282         return dev->caps.num_mpts - 1;
283 }
284
285 static void *find_res(struct mlx4_dev *dev, int res_id,
286                       enum mlx4_resource type)
287 {
288         struct mlx4_priv *priv = mlx4_priv(dev);
289
290         return radix_tree_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
291                                  res_id);
292 }
293
294 static int get_res(struct mlx4_dev *dev, int slave, int res_id,
295                    enum mlx4_resource type,
296                    void *res)
297 {
298         struct res_common *r;
299         int err = 0;
300
301         spin_lock_irq(mlx4_tlock(dev));
302         r = find_res(dev, res_id, type);
303         if (!r) {
304                 err = -ENONET;
305                 goto exit;
306         }
307
308         if (r->state == RES_ANY_BUSY) {
309                 err = -EBUSY;
310                 goto exit;
311         }
312
313         if (r->owner != slave) {
314                 err = -EPERM;
315                 goto exit;
316         }
317
318         r->from_state = r->state;
319         r->state = RES_ANY_BUSY;
320         mlx4_dbg(dev, "res %s id 0x%x to busy\n",
321                  ResourceType(type), r->res_id);
322
323         if (res)
324                 *((struct res_common **)res) = r;
325
326 exit:
327         spin_unlock_irq(mlx4_tlock(dev));
328         return err;
329 }
330
331 int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
332                                     enum mlx4_resource type,
333                                     int res_id, int *slave)
334 {
335
336         struct res_common *r;
337         int err = -ENOENT;
338         int id = res_id;
339
340         if (type == RES_QP)
341                 id &= 0x7fffff;
342         spin_lock_irq(mlx4_tlock(dev));
343
344         r = find_res(dev, id, type);
345         if (r) {
346                 *slave = r->owner;
347                 err = 0;
348         }
349         spin_unlock_irq(mlx4_tlock(dev));
350
351         return err;
352 }
353
354 static void put_res(struct mlx4_dev *dev, int slave, int res_id,
355                     enum mlx4_resource type)
356 {
357         struct res_common *r;
358
359         spin_lock_irq(mlx4_tlock(dev));
360         r = find_res(dev, res_id, type);
361         if (r)
362                 r->state = r->from_state;
363         spin_unlock_irq(mlx4_tlock(dev));
364 }
365
366 static struct res_common *alloc_qp_tr(int id)
367 {
368         struct res_qp *ret;
369
370         ret = kzalloc(sizeof *ret, GFP_KERNEL);
371         if (!ret)
372                 return NULL;
373
374         ret->com.res_id = id;
375         ret->com.state = RES_QP_RESERVED;
376         INIT_LIST_HEAD(&ret->mcg_list);
377         spin_lock_init(&ret->mcg_spl);
378
379         return &ret->com;
380 }
381
382 static struct res_common *alloc_mtt_tr(int id, int order)
383 {
384         struct res_mtt *ret;
385
386         ret = kzalloc(sizeof *ret, GFP_KERNEL);
387         if (!ret)
388                 return NULL;
389
390         ret->com.res_id = id;
391         ret->order = order;
392         ret->com.state = RES_MTT_ALLOCATED;
393         atomic_set(&ret->ref_count, 0);
394
395         return &ret->com;
396 }
397
398 static struct res_common *alloc_mpt_tr(int id, int key)
399 {
400         struct res_mpt *ret;
401
402         ret = kzalloc(sizeof *ret, GFP_KERNEL);
403         if (!ret)
404                 return NULL;
405
406         ret->com.res_id = id;
407         ret->com.state = RES_MPT_RESERVED;
408         ret->key = key;
409
410         return &ret->com;
411 }
412
413 static struct res_common *alloc_eq_tr(int id)
414 {
415         struct res_eq *ret;
416
417         ret = kzalloc(sizeof *ret, GFP_KERNEL);
418         if (!ret)
419                 return NULL;
420
421         ret->com.res_id = id;
422         ret->com.state = RES_EQ_RESERVED;
423
424         return &ret->com;
425 }
426
427 static struct res_common *alloc_cq_tr(int id)
428 {
429         struct res_cq *ret;
430
431         ret = kzalloc(sizeof *ret, GFP_KERNEL);
432         if (!ret)
433                 return NULL;
434
435         ret->com.res_id = id;
436         ret->com.state = RES_CQ_ALLOCATED;
437         atomic_set(&ret->ref_count, 0);
438
439         return &ret->com;
440 }
441
442 static struct res_common *alloc_srq_tr(int id)
443 {
444         struct res_srq *ret;
445
446         ret = kzalloc(sizeof *ret, GFP_KERNEL);
447         if (!ret)
448                 return NULL;
449
450         ret->com.res_id = id;
451         ret->com.state = RES_SRQ_ALLOCATED;
452         atomic_set(&ret->ref_count, 0);
453
454         return &ret->com;
455 }
456
457 static struct res_common *alloc_counter_tr(int id)
458 {
459         struct res_counter *ret;
460
461         ret = kzalloc(sizeof *ret, GFP_KERNEL);
462         if (!ret)
463                 return NULL;
464
465         ret->com.res_id = id;
466         ret->com.state = RES_COUNTER_ALLOCATED;
467
468         return &ret->com;
469 }
470
471 static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
472                                    int extra)
473 {
474         struct res_common *ret;
475
476         switch (type) {
477         case RES_QP:
478                 ret = alloc_qp_tr(id);
479                 break;
480         case RES_MPT:
481                 ret = alloc_mpt_tr(id, extra);
482                 break;
483         case RES_MTT:
484                 ret = alloc_mtt_tr(id, extra);
485                 break;
486         case RES_EQ:
487                 ret = alloc_eq_tr(id);
488                 break;
489         case RES_CQ:
490                 ret = alloc_cq_tr(id);
491                 break;
492         case RES_SRQ:
493                 ret = alloc_srq_tr(id);
494                 break;
495         case RES_MAC:
496                 printk(KERN_ERR "implementation missing\n");
497                 return NULL;
498         case RES_COUNTER:
499                 ret = alloc_counter_tr(id);
500                 break;
501
502         default:
503                 return NULL;
504         }
505         if (ret)
506                 ret->owner = slave;
507
508         return ret;
509 }
510
511 static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
512                          enum mlx4_resource type, int extra)
513 {
514         int i;
515         int err;
516         struct mlx4_priv *priv = mlx4_priv(dev);
517         struct res_common **res_arr;
518         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
519         struct radix_tree_root *root = &tracker->res_tree[type];
520
521         res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
522         if (!res_arr)
523                 return -ENOMEM;
524
525         for (i = 0; i < count; ++i) {
526                 res_arr[i] = alloc_tr(base + i, type, slave, extra);
527                 if (!res_arr[i]) {
528                         for (--i; i >= 0; --i)
529                                 kfree(res_arr[i]);
530
531                         kfree(res_arr);
532                         return -ENOMEM;
533                 }
534         }
535
536         spin_lock_irq(mlx4_tlock(dev));
537         for (i = 0; i < count; ++i) {
538                 if (find_res(dev, base + i, type)) {
539                         err = -EEXIST;
540                         goto undo;
541                 }
542                 err = radix_tree_insert(root, base + i, res_arr[i]);
543                 if (err)
544                         goto undo;
545                 list_add_tail(&res_arr[i]->list,
546                               &tracker->slave_list[slave].res_list[type]);
547         }
548         spin_unlock_irq(mlx4_tlock(dev));
549         kfree(res_arr);
550
551         return 0;
552
553 undo:
554         for (--i; i >= base; --i)
555                 radix_tree_delete(&tracker->res_tree[type], i);
556
557         spin_unlock_irq(mlx4_tlock(dev));
558
559         for (i = 0; i < count; ++i)
560                 kfree(res_arr[i]);
561
562         kfree(res_arr);
563
564         return err;
565 }
566
567 static int remove_qp_ok(struct res_qp *res)
568 {
569         if (res->com.state == RES_QP_BUSY)
570                 return -EBUSY;
571         else if (res->com.state != RES_QP_RESERVED)
572                 return -EPERM;
573
574         return 0;
575 }
576
577 static int remove_mtt_ok(struct res_mtt *res, int order)
578 {
579         if (res->com.state == RES_MTT_BUSY ||
580             atomic_read(&res->ref_count)) {
581                 printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
582                        __func__, __LINE__,
583                        mtt_states_str(res->com.state),
584                        atomic_read(&res->ref_count));
585                 return -EBUSY;
586         } else if (res->com.state != RES_MTT_ALLOCATED)
587                 return -EPERM;
588         else if (res->order != order)
589                 return -EINVAL;
590
591         return 0;
592 }
593
594 static int remove_mpt_ok(struct res_mpt *res)
595 {
596         if (res->com.state == RES_MPT_BUSY)
597                 return -EBUSY;
598         else if (res->com.state != RES_MPT_RESERVED)
599                 return -EPERM;
600
601         return 0;
602 }
603
604 static int remove_eq_ok(struct res_eq *res)
605 {
606         if (res->com.state == RES_MPT_BUSY)
607                 return -EBUSY;
608         else if (res->com.state != RES_MPT_RESERVED)
609                 return -EPERM;
610
611         return 0;
612 }
613
614 static int remove_counter_ok(struct res_counter *res)
615 {
616         if (res->com.state == RES_COUNTER_BUSY)
617                 return -EBUSY;
618         else if (res->com.state != RES_COUNTER_ALLOCATED)
619                 return -EPERM;
620
621         return 0;
622 }
623
624 static int remove_cq_ok(struct res_cq *res)
625 {
626         if (res->com.state == RES_CQ_BUSY)
627                 return -EBUSY;
628         else if (res->com.state != RES_CQ_ALLOCATED)
629                 return -EPERM;
630
631         return 0;
632 }
633
634 static int remove_srq_ok(struct res_srq *res)
635 {
636         if (res->com.state == RES_SRQ_BUSY)
637                 return -EBUSY;
638         else if (res->com.state != RES_SRQ_ALLOCATED)
639                 return -EPERM;
640
641         return 0;
642 }
643
/*
 * Check whether a tracked resource of @type may be removed, dispatching
 * to the per-type remove_*_ok() helper.  @extra is type-specific (the
 * MTT order).  Returns 0 if removal is allowed, a negative errno
 * otherwise; RES_MAC is handled elsewhere and always refused here.
 */
static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
	switch (type) {
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_MAC:
		return -ENOSYS;
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	default:
		return -EINVAL;
	}
}
667
/*
 * Untrack @count consecutive resources of @type starting at @base, all
 * owned by @slave.  Two passes under the tracker lock: first validate
 * every id (tracked, owned by @slave, removable), then delete from the
 * radix tree, unlink from the slave list and free.  All-or-nothing: if
 * any id fails validation, nothing is removed.
 *
 * Returns 0 on success, -ENOENT/-EPERM or a remove_ok() error otherwise.
 */
static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	/* pass 1: validate every entry before touching anything */
	for (i = base; i < base + count; ++i) {
		r = radix_tree_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	/* pass 2: all entries validated, remove and free them */
	for (i = base; i < base + count; ++i) {
		r = radix_tree_lookup(&tracker->res_tree[type], i);
		radix_tree_delete(&tracker->res_tree[type], i);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
706
/*
 * Atomically begin a QP state transition for @slave's QP @qpn: validate
 * ownership and that the requested transition is legal, then park the
 * entry in RES_QP_BUSY with from_state/to_state recorded so the caller
 * can commit with res_end_move() or roll back with res_abort_move().
 *
 * @alloc distinguishes the direction on the RESERVED<->MAPPED edge:
 * moving to MAPPED requires alloc when coming from RESERVED, and moving
 * back to RESERVED requires !alloc.
 *
 * Returns 0 (storing the entry in *qp if non-NULL) on success, or
 * -ENOENT/-EPERM/-EBUSY/-EINVAL.
 */
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_qp *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			/* BUSY is never a valid target state */
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%x\n",
				 __func__, r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			/* only MAPPED->RESERVED, and only when freeing */
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%x\n", r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			/* RESERVED->MAPPED when allocating, or back from HW */
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%x\n",
					  r->com.res_id);
				err = -EINVAL;
			}

			break;

		case RES_QP_HW:
			/* only MAPPED->HW */
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			/* park busy; commit/abort happens via res_end_move()
			 * or res_abort_move() */
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = (struct res_qp *)r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
771
/*
 * Atomically begin an MPT state transition for @slave's MPT @index.
 * Legal moves: RESERVED<->MAPPED and MAPPED<->HW.  On success the entry
 * is parked in RES_MPT_BUSY with from_state/to_state recorded, for a
 * later res_end_move() or res_abort_move().
 *
 * Returns 0 (storing the entry in *mpt if non-NULL) on success, or
 * -ENOENT/-EPERM/-EINVAL.
 */
static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mpt *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_MPT], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_MPT_BUSY:
			err = -EINVAL;
			break;

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		case RES_MPT_MAPPED:
			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)
				err = -EINVAL;
			break;

		case RES_MPT_HW:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;
			if (mpt)
				*mpt = (struct res_mpt *)r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
824
/*
 * Atomically begin an EQ state transition for @slave's EQ @index.
 * Legal moves: RESERVED<->HW.  On success the entry is parked in
 * RES_EQ_BUSY with from_state/to_state recorded, for a later
 * res_end_move() or res_abort_move().
 *
 * Returns 0 (storing the entry in *eq if non-NULL) on success, or
 * -ENOENT/-EPERM/-EINVAL.
 */
static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_eq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_EQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_EQ_BUSY:
			err = -EINVAL;
			break;

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)
				err = -EINVAL;
			break;

		case RES_EQ_HW:
			if (r->com.state != RES_EQ_RESERVED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;
			if (eq)
				*eq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
872
/*
 * Atomically begin a CQ state transition for @slave's CQ @cqn.
 * Legal moves: ALLOCATED<->HW; moving back to ALLOCATED additionally
 * requires a zero ref_count (no QP/SRQ still using the CQ).  On success
 * the entry is parked in RES_CQ_BUSY with from_state/to_state recorded,
 * for a later res_end_move() or res_abort_move().
 *
 * Note: err has no initializer, but every switch arm (including
 * default) assigns it, so all paths are defined.
 *
 * Returns 0 (storing the entry in *cq if non-NULL) on success, or
 * -ENOENT/-EPERM/-EBUSY/-EINVAL.
 */
static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_cq *r;
	int err;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_CQ], cqn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_CQ_BUSY:
			err = -EBUSY;
			break;

		case RES_CQ_ALLOCATED:
			if (r->com.state != RES_CQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			else
				err = 0;
			break;

		case RES_CQ_HW:
			if (r->com.state != RES_CQ_ALLOCATED)
				err = -EINVAL;
			else
				err = 0;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_CQ_BUSY;
			if (cq)
				*cq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
926
927 static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
928                                  enum res_cq_states state, struct res_srq **srq)
929 {
930         struct mlx4_priv *priv = mlx4_priv(dev);
931         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
932         struct res_srq *r;
933         int err = 0;
934
935         spin_lock_irq(mlx4_tlock(dev));
936         r = radix_tree_lookup(&tracker->res_tree[RES_SRQ], index);
937         if (!r)
938                 err = -ENOENT;
939         else if (r->com.owner != slave)
940                 err = -EPERM;
941         else {
942                 switch (state) {
943                 case RES_SRQ_BUSY:
944                         err = -EINVAL;
945                         break;
946
947                 case RES_SRQ_ALLOCATED:
948                         if (r->com.state != RES_SRQ_HW)
949                                 err = -EINVAL;
950                         else if (atomic_read(&r->ref_count))
951                                 err = -EBUSY;
952                         break;
953
954                 case RES_SRQ_HW:
955                         if (r->com.state != RES_SRQ_ALLOCATED)
956                                 err = -EINVAL;
957                         break;
958
959                 default:
960                         err = -EINVAL;
961                 }
962
963                 if (!err) {
964                         r->com.from_state = r->com.state;
965                         r->com.to_state = state;
966                         r->com.state = RES_SRQ_BUSY;
967                         if (srq)
968                                 *srq = r;
969                 }
970         }
971
972         spin_unlock_irq(mlx4_tlock(dev));
973
974         return err;
975 }
976
977 static void res_abort_move(struct mlx4_dev *dev, int slave,
978                            enum mlx4_resource type, int id)
979 {
980         struct mlx4_priv *priv = mlx4_priv(dev);
981         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
982         struct res_common *r;
983
984         spin_lock_irq(mlx4_tlock(dev));
985         r = radix_tree_lookup(&tracker->res_tree[type], id);
986         if (r && (r->owner == slave))
987                 r->state = r->from_state;
988         spin_unlock_irq(mlx4_tlock(dev));
989 }
990
991 static void res_end_move(struct mlx4_dev *dev, int slave,
992                          enum mlx4_resource type, int id)
993 {
994         struct mlx4_priv *priv = mlx4_priv(dev);
995         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
996         struct res_common *r;
997
998         spin_lock_irq(mlx4_tlock(dev));
999         r = radix_tree_lookup(&tracker->res_tree[type], id);
1000         if (r && (r->owner == slave))
1001                 r->state = r->to_state;
1002         spin_unlock_irq(mlx4_tlock(dev));
1003 }
1004
/* Non-zero if @qpn lies in the device's reserved QP number range.
 * NOTE(review): @slave is currently unused - presumably kept for a
 * future per-slave reservation check.
 */
static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn);
}
1009
/* ALLOC_RES handler for QPs.
 * RES_OP_RESERVE: reserve a range of @count QPNs aligned to @align (both
 * packed in in_param), record it for @slave, return the base through
 * out_param.
 * RES_OP_MAP_ICM: map ICM for one QPN and move it to RES_QP_MAPPED.
 */
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err;
	int count;
	int align;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		count = get_param_l(&in_param);
		align = get_param_h(&in_param);
		err = __mlx4_qp_reserve_range(dev, count, align, &base);
		if (err)
			return err;

		/* track the range; undo the reservation if tracking fails */
		err = add_res_range(dev, slave, base, count, RES_QP, 0);
		if (err) {
			__mlx4_qp_release_range(dev, base, count);
			return err;
		}
		set_param_l(out_param, base);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		/* reserved QPNs are not tracked at reserve time, so add
		 * them to the tracker on first ICM map */
		if (valid_reserved(dev, slave, qpn)) {
			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
			if (err)
				return err;
		}

		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
					   NULL, 1);
		if (err)
			return err;

		/* ICM for reserved QPNs is presumably managed elsewhere -
		 * only allocate for non-reserved ones */
		if (!valid_reserved(dev, slave, qpn)) {
			err = __mlx4_qp_alloc_icm(dev, qpn);
			if (err) {
				res_abort_move(dev, slave, RES_QP, qpn);
				return err;
			}
		}

		res_end_move(dev, slave, RES_QP, qpn);
		break;

	default:
		err = -EINVAL;
		break;
	}
	return err;
}
1064
1065 static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1066                          u64 in_param, u64 *out_param)
1067 {
1068         int err = -EINVAL;
1069         int base;
1070         int order;
1071
1072         if (op != RES_OP_RESERVE_AND_MAP)
1073                 return err;
1074
1075         order = get_param_l(&in_param);
1076         base = __mlx4_alloc_mtt_range(dev, order);
1077         if (base == -1)
1078                 return -ENOMEM;
1079
1080         err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1081         if (err)
1082                 __mlx4_free_mtt_range(dev, base, order);
1083         else
1084                 set_param_l(out_param, base);
1085
1086         return err;
1087 }
1088
1089 static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1090                          u64 in_param, u64 *out_param)
1091 {
1092         int err = -EINVAL;
1093         int index;
1094         int id;
1095         struct res_mpt *mpt;
1096
1097         switch (op) {
1098         case RES_OP_RESERVE:
1099                 index = __mlx4_mr_reserve(dev);
1100                 if (index == -1)
1101                         break;
1102                 id = index & mpt_mask(dev);
1103
1104                 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1105                 if (err) {
1106                         __mlx4_mr_release(dev, index);
1107                         break;
1108                 }
1109                 set_param_l(out_param, index);
1110                 break;
1111         case RES_OP_MAP_ICM:
1112                 index = get_param_l(&in_param);
1113                 id = index & mpt_mask(dev);
1114                 err = mr_res_start_move_to(dev, slave, id,
1115                                            RES_MPT_MAPPED, &mpt);
1116                 if (err)
1117                         return err;
1118
1119                 err = __mlx4_mr_alloc_icm(dev, mpt->key);
1120                 if (err) {
1121                         res_abort_move(dev, slave, RES_MPT, id);
1122                         return err;
1123                 }
1124
1125                 res_end_move(dev, slave, RES_MPT, id);
1126                 break;
1127         }
1128         return err;
1129 }
1130
1131 static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1132                         u64 in_param, u64 *out_param)
1133 {
1134         int cqn;
1135         int err;
1136
1137         switch (op) {
1138         case RES_OP_RESERVE_AND_MAP:
1139                 err = __mlx4_cq_alloc_icm(dev, &cqn);
1140                 if (err)
1141                         break;
1142
1143                 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1144                 if (err) {
1145                         __mlx4_cq_free_icm(dev, cqn);
1146                         break;
1147                 }
1148
1149                 set_param_l(out_param, cqn);
1150                 break;
1151
1152         default:
1153                 err = -EINVAL;
1154         }
1155
1156         return err;
1157 }
1158
1159 static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1160                          u64 in_param, u64 *out_param)
1161 {
1162         int srqn;
1163         int err;
1164
1165         switch (op) {
1166         case RES_OP_RESERVE_AND_MAP:
1167                 err = __mlx4_srq_alloc_icm(dev, &srqn);
1168                 if (err)
1169                         break;
1170
1171                 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1172                 if (err) {
1173                         __mlx4_srq_free_icm(dev, srqn);
1174                         break;
1175                 }
1176
1177                 set_param_l(out_param, srqn);
1178                 break;
1179
1180         default:
1181                 err = -EINVAL;
1182         }
1183
1184         return err;
1185 }
1186
1187 static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
1188 {
1189         struct mlx4_priv *priv = mlx4_priv(dev);
1190         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1191         struct mac_res *res;
1192
1193         res = kzalloc(sizeof *res, GFP_KERNEL);
1194         if (!res)
1195                 return -ENOMEM;
1196         res->mac = mac;
1197         res->port = (u8) port;
1198         list_add_tail(&res->list,
1199                       &tracker->slave_list[slave].res_list[RES_MAC]);
1200         return 0;
1201 }
1202
1203 static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1204                                int port)
1205 {
1206         struct mlx4_priv *priv = mlx4_priv(dev);
1207         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1208         struct list_head *mac_list =
1209                 &tracker->slave_list[slave].res_list[RES_MAC];
1210         struct mac_res *res, *tmp;
1211
1212         list_for_each_entry_safe(res, tmp, mac_list, list) {
1213                 if (res->mac == mac && res->port == (u8) port) {
1214                         list_del(&res->list);
1215                         kfree(res);
1216                         break;
1217                 }
1218         }
1219 }
1220
1221 static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1222 {
1223         struct mlx4_priv *priv = mlx4_priv(dev);
1224         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1225         struct list_head *mac_list =
1226                 &tracker->slave_list[slave].res_list[RES_MAC];
1227         struct mac_res *res, *tmp;
1228
1229         list_for_each_entry_safe(res, tmp, mac_list, list) {
1230                 list_del(&res->list);
1231                 __mlx4_unregister_mac(dev, res->port, res->mac);
1232                 kfree(res);
1233         }
1234 }
1235
1236 static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1237                          u64 in_param, u64 *out_param)
1238 {
1239         int err = -EINVAL;
1240         int port;
1241         u64 mac;
1242
1243         if (op != RES_OP_RESERVE_AND_MAP)
1244                 return err;
1245
1246         port = get_param_l(out_param);
1247         mac = in_param;
1248
1249         err = __mlx4_register_mac(dev, port, mac);
1250         if (err >= 0) {
1251                 set_param_l(out_param, err);
1252                 err = 0;
1253         }
1254
1255         if (!err) {
1256                 err = mac_add_to_slave(dev, slave, mac, port);
1257                 if (err)
1258                         __mlx4_unregister_mac(dev, port, mac);
1259         }
1260         return err;
1261 }
1262
/* ALLOC_RES handler for VLANs.
 * NOTE(review): stub - VLAN allocations are not tracked yet; the
 * request always succeeds without reserving anything.
 */
static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	return 0;
}
1268
/* Demultiplex a virtualized ALLOC_RES command to the per-resource-type
 * allocation handler, keyed on vhcr->in_modifier.  The op modifier
 * selects the sub-operation (reserve / map ICM / both) and is passed to
 * each handler twice, as @op and as @cmd (alop is the same value).
 */
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
	case RES_QP:
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MTT:
		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_CQ:
		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_VLAN:
		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
1321
/* FREE_RES handler for QPs - inverse of qp_alloc_res().
 * RES_OP_RESERVE: release a tracked QPN range.
 * RES_OP_MAP_ICM: move one QP back to RES_QP_RESERVED; free its ICM
 * unless the QPN is device-reserved, in which case the tracker entry
 * itself is removed instead (it was added at map time).
 */
static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param)
{
	int err;
	int count;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		base = get_param_l(&in_param) & 0x7fffff;
		count = get_param_h(&in_param);
		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
		if (err)
			break;
		__mlx4_qp_release_range(dev, base, count);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
					   NULL, 0);
		if (err)
			return err;

		/* device-reserved QPNs never had ICM allocated here */
		if (!valid_reserved(dev, slave, qpn))
			__mlx4_qp_free_icm(dev, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

		/* drop the tracking entry added by qp_alloc_res() at
		 * map time for reserved QPNs */
		if (valid_reserved(dev, slave, qpn))
			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}
1360
1361 static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1362                         u64 in_param, u64 *out_param)
1363 {
1364         int err = -EINVAL;
1365         int base;
1366         int order;
1367
1368         if (op != RES_OP_RESERVE_AND_MAP)
1369                 return err;
1370
1371         base = get_param_l(&in_param);
1372         order = get_param_h(&in_param);
1373         err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
1374         if (!err)
1375                 __mlx4_free_mtt_range(dev, base, order);
1376         return err;
1377 }
1378
1379 static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1380                         u64 in_param)
1381 {
1382         int err = -EINVAL;
1383         int index;
1384         int id;
1385         struct res_mpt *mpt;
1386
1387         switch (op) {
1388         case RES_OP_RESERVE:
1389                 index = get_param_l(&in_param);
1390                 id = index & mpt_mask(dev);
1391                 err = get_res(dev, slave, id, RES_MPT, &mpt);
1392                 if (err)
1393                         break;
1394                 index = mpt->key;
1395                 put_res(dev, slave, id, RES_MPT);
1396
1397                 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
1398                 if (err)
1399                         break;
1400                 __mlx4_mr_release(dev, index);
1401                 break;
1402         case RES_OP_MAP_ICM:
1403                         index = get_param_l(&in_param);
1404                         id = index & mpt_mask(dev);
1405                         err = mr_res_start_move_to(dev, slave, id,
1406                                                    RES_MPT_RESERVED, &mpt);
1407                         if (err)
1408                                 return err;
1409
1410                         __mlx4_mr_free_icm(dev, mpt->key);
1411                         res_end_move(dev, slave, RES_MPT, id);
1412                         return err;
1413                 break;
1414         default:
1415                 err = -EINVAL;
1416                 break;
1417         }
1418         return err;
1419 }
1420
1421 static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1422                        u64 in_param, u64 *out_param)
1423 {
1424         int cqn;
1425         int err;
1426
1427         switch (op) {
1428         case RES_OP_RESERVE_AND_MAP:
1429                 cqn = get_param_l(&in_param);
1430                 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1431                 if (err)
1432                         break;
1433
1434                 __mlx4_cq_free_icm(dev, cqn);
1435                 break;
1436
1437         default:
1438                 err = -EINVAL;
1439                 break;
1440         }
1441
1442         return err;
1443 }
1444
1445 static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1446                         u64 in_param, u64 *out_param)
1447 {
1448         int srqn;
1449         int err;
1450
1451         switch (op) {
1452         case RES_OP_RESERVE_AND_MAP:
1453                 srqn = get_param_l(&in_param);
1454                 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1455                 if (err)
1456                         break;
1457
1458                 __mlx4_srq_free_icm(dev, srqn);
1459                 break;
1460
1461         default:
1462                 err = -EINVAL;
1463                 break;
1464         }
1465
1466         return err;
1467 }
1468
1469 static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1470                             u64 in_param, u64 *out_param)
1471 {
1472         int port;
1473         int err = 0;
1474
1475         switch (op) {
1476         case RES_OP_RESERVE_AND_MAP:
1477                 port = get_param_l(out_param);
1478                 mac_del_from_slave(dev, slave, in_param, port);
1479                 __mlx4_unregister_mac(dev, port, in_param);
1480                 break;
1481         default:
1482                 err = -EINVAL;
1483                 break;
1484         }
1485
1486         return err;
1487
1488 }
1489
/* FREE_RES handler for VLANs.
 * NOTE(review): stub - matches the vlan_alloc_res() stub, nothing to
 * release.
 */
static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			    u64 in_param, u64 *out_param)
{
	return 0;
}
1495
/* Demultiplex a virtualized FREE_RES command to the per-resource-type
 * release handler, keyed on vhcr->in_modifier.  Unknown resource types
 * fall through to the initial -EINVAL.
 */
int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err = -EINVAL;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
	case RES_QP:
		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param);
		break;

	case RES_MTT:
		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param);
		break;

	case RES_CQ:
		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_VLAN:
		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	default:
		break;
	}
	return err;
}
1546
/* ugly but other choices are uglier */
/* Non-zero when the MPT entry describes a physical MR (flags bit 9);
 * such MRs reference no MTT (see mlx4_SW2HW_MPT_wrapper). */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
	return (be32_to_cpu(mpt->flags) >> 9) & 1;
}
1552
/* Byte offset of the MR's first MTT segment (low 3 bits masked off). */
static int mr_get_mtt_seg(struct mlx4_mpt_entry *mpt)
{
	return (int)be64_to_cpu(mpt->mtt_seg) & 0xfffffff8;
}
1557
/* Number of MTT entries spanned by the MR. */
static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->mtt_sz);
}
1562
/* PD number the MR is bound to (low 24 bits of pd_flags). */
static int mr_get_pdn(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & 0xffffff;
}
1567
/* Byte offset of the QP's first MTT segment (low 3 bits masked off). */
static int qp_get_mtt_seg(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}
1572
/* Byte offset of the SRQ's first MTT segment (low 3 bits masked off). */
static int srq_get_mtt_seg(struct mlx4_srq_context *srqc)
{
	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}
1577
1578 static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
1579 {
1580         int page_shift = (qpc->log_page_size & 0x3f) + 12;
1581         int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
1582         int log_sq_sride = qpc->sq_size_stride & 7;
1583         int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
1584         int log_rq_stride = qpc->rq_size_stride & 7;
1585         int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
1586         int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
1587         int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
1588         int sq_size;
1589         int rq_size;
1590         int total_pages;
1591         int total_mem;
1592         int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
1593
1594         sq_size = 1 << (log_sq_size + log_sq_sride + 4);
1595         rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
1596         total_mem = sq_size + rq_size;
1597         total_pages =
1598                 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
1599                                    page_shift);
1600
1601         return total_pages;
1602 }
1603
/* PD number the QP context is bound to (low 24 bits). */
static int qp_get_pdn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->pd) & 0xffffff;
}
1608
/* Recover the slave index encoded in the upper bits of a PD number.
 * NOTE(review): assumes PDs are partitioned per slave by storing
 * (slave + 1) above NOT_MASKED_PD_BITS - confirm against the PD
 * allocation code.
 */
static int pdn2slave(int pdn)
{
	return (pdn >> NOT_MASKED_PD_BITS) - 1;
}
1613
1614 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
1615                            int size, struct res_mtt *mtt)
1616 {
1617         int res_start = mtt->com.res_id * dev->caps.mtts_per_seg;
1618         int res_size = (1 << mtt->order) * dev->caps.mtts_per_seg;
1619
1620         if (start < res_start || start + size > res_start + res_size)
1621                 return -EPERM;
1622         return 0;
1623 }
1624
/* SW2HW_MPT command wrapper: verify that @slave owns the MPT, its PD
 * and (for non-physical MRs) the MTT range referenced by the mailbox
 * context, then forward the command to firmware.  On success the MPT
 * reaches RES_MPT_HW and pins its MTT via the reference count.
 */
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt;
	/* first MTT entry index of the region's segment */
	int mtt_base = (mr_get_mtt_seg(inbox->buf) / dev->caps.mtt_entry_sz) *
		dev->caps.mtts_per_seg;
	int phys;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
	if (err)
		return err;

	/* physical MRs reference no MTT, so there is none to validate */
	phys = mr_phys_mpt(inbox->buf);
	if (!phys) {
		err = get_res(dev, slave, mtt_base / dev->caps.mtts_per_seg,
			      RES_MTT, &mtt);
		if (err)
			goto ex_abort;

		err = check_mtt_range(dev, slave, mtt_base,
				      mr_get_mtt_size(inbox->buf), mtt);
		if (err)
			goto ex_put;

		mpt->mtt = mtt;
	}

	/* the PD number encodes its owning slave; reject cross-slave use */
	if (pdn2slave(mr_get_pdn(inbox->buf)) != slave) {
		err = -EPERM;
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	if (!phys) {
		/* keep the MTT alive while the MPT references it */
		atomic_inc(&mtt->ref_count);
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
	}

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_put:
	if (!phys)
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}
1685
/* HW2SW_MPT command wrapper: let firmware tear the MPT down, then move
 * it back to RES_MPT_MAPPED and drop its reference on the MTT (taken in
 * mlx4_SW2HW_MPT_wrapper).
 */
int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	/* physical MRs have no MTT, so mpt->mtt may be NULL */
	if (mpt->mtt)
		atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}
1717
1718 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
1719                            struct mlx4_vhcr *vhcr,
1720                            struct mlx4_cmd_mailbox *inbox,
1721                            struct mlx4_cmd_mailbox *outbox,
1722                            struct mlx4_cmd_info *cmd)
1723 {
1724         int err;
1725         int index = vhcr->in_modifier;
1726         struct res_mpt *mpt;
1727         int id;
1728
1729         id = index & mpt_mask(dev);
1730         err = get_res(dev, slave, id, RES_MPT, &mpt);
1731         if (err)
1732                 return err;
1733
1734         if (mpt->com.from_state != RES_MPT_HW) {
1735                 err = -EBUSY;
1736                 goto out;
1737         }
1738
1739         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1740
1741 out:
1742         put_res(dev, slave, id, RES_MPT);
1743         return err;
1744 }
1745
/* Receive CQ number referenced by the QP context (low 24 bits). */
static int qp_get_rcqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
}
1750
/* Send CQ number referenced by the QP context (low 24 bits). */
static int qp_get_scqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
}
1755
/* SRQ number (low 24 bits) plus the "QP uses an SRQ" flag in bit 24;
 * callers split the two (see mlx4_RST2INIT_QP_wrapper). */
static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}
1760
/* RST2INIT_QP command wrapper: before letting firmware bring the QP to
 * HW, verify that @slave owns every resource the context references
 * (MTT range, PD, receive/send CQs and the optional SRQ) and take a
 * reference on each so none can be freed while the QP exists.
 */
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_mtt *mtt;
	struct res_qp *qp;
	/* the QP context starts 8 bytes into the mailbox */
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	/* first MTT entry index of the QP's segment */
	int mtt_base = (qp_get_mtt_seg(qpc) / dev->caps.mtt_entry_sz) *
		dev->caps.mtts_per_seg;
	int mtt_size = qp_get_mtt_size(qpc);
	struct res_cq *rcq;
	struct res_cq *scq;
	int rcqn = qp_get_rcqn(qpc);
	int scqn = qp_get_scqn(qpc);
	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
	struct res_srq *srq;
	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
	if (err)
		return err;
	qp->local_qpn = local_qpn;

	err = get_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, RES_MTT,
		      &mtt);
	if (err)
		goto ex_abort;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto ex_put_mtt;

	/* the PD must belong to the calling slave */
	if (pdn2slave(qp_get_pdn(qpc)) != slave) {
		err = -EPERM;
		goto ex_put_mtt;
	}

	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
	if (err)
		goto ex_put_mtt;

	/* send and receive CQ may be the same resource */
	if (scqn != rcqn) {
		err = get_res(dev, slave, scqn, RES_CQ, &scq);
		if (err)
			goto ex_put_rcq;
	} else
		scq = rcq;

	if (use_srq) {
		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
		if (err)
			goto ex_put_scq;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_srq;
	/* command succeeded - pin every referenced resource */
	atomic_inc(&mtt->ref_count);
	qp->mtt = mtt;
	atomic_inc(&rcq->ref_count);
	qp->rcq = rcq;
	atomic_inc(&scq->ref_count);
	qp->scq = scq;

	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);

	if (use_srq) {
		atomic_inc(&srq->ref_count);
		put_res(dev, slave, srqn, RES_SRQ);
		qp->srq = srq;
	}
	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base  / dev->caps.mtts_per_seg, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);

	return 0;

ex_put_srq:
	if (use_srq)
		put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
	put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
	put_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}
1859
/* Byte offset of the EQ's first MTT segment (low 3 bits masked off). */
static int eq_get_mtt_seg(struct mlx4_eq_context *eqc)
{
	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
}
1864
1865 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
1866 {
1867         int log_eq_size = eqc->log_eq_size & 0x1f;
1868         int page_shift = (eqc->log_page_size & 0x3f) + 12;
1869
1870         if (log_eq_size + 5 < page_shift)
1871                 return 1;
1872
1873         return 1 << (log_eq_size + 5 - page_shift);
1874 }
1875
/* Byte offset of the CQ's first MTT segment (low 3 bits masked off). */
static int cq_get_mtt_seg(struct mlx4_cq_context *cqc)
{
	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}
1880
1881 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
1882 {
1883         int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
1884         int page_shift = (cqc->log_page_size & 0x3f) + 12;
1885
1886         if (log_cq_size + 5 < page_shift)
1887                 return 1;
1888
1889         return 1 << (log_cq_size + 5 - page_shift);
1890 }
1891
/* SW2HW_EQ command wrapper: track the slave's EQ (res_id packs the
 * slave in the high byte and the EQ number in the low byte), validate
 * the MTT range the EQ context points at, then pass the command through
 * to firmware.  The EQ pins its MTT via the reference count.
 */
int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int eqn = vhcr->in_modifier;
	int res_id = (slave << 8) | eqn;
	struct mlx4_eq_context *eqc = inbox->buf;
	/* first MTT entry index of the EQ's segment */
	int mtt_base = (eq_get_mtt_seg(eqc) / dev->caps.mtt_entry_sz) *
		dev->caps.mtts_per_seg;
	int mtt_size = eq_get_mtt_size(eqc);
	struct res_eq *eq;
	struct res_mtt *mtt;

	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	if (err)
		return err;
	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
	if (err)
		goto out_add;

	err = get_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, RES_MTT,
		      &mtt);
	if (err)
		goto out_move;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto out_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;

	/* keep the MTT alive while the EQ references it */
	atomic_inc(&mtt->ref_count);
	eq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	return err;
}
1942
/* Find the slave-owned MTT allocation that fully contains the range
 * [start, start + len) and mark it busy under the tracker lock so it
 * cannot be freed while the caller uses it.  The caller must release
 * it with put_res().  Returns 0 with *res set, or -EINVAL if no
 * containing allocation exists. */
static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
			      int len, struct res_mtt **res)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mtt *mtt;
	int err = -EINVAL;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
			    com.list) {
		if (!check_mtt_range(dev, slave, start, len, mtt)) {
			*res = mtt;
			/* Mark busy in place (same effect as get_res()). */
			mtt->com.from_state = mtt->com.state;
			mtt->com.state = RES_MTT_BUSY;
			err = 0;
			break;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
1966
/* Wrapper for WRITE_MTT issued by a slave.  Validates that the target
 * MTT range is owned by the slave, then performs the write through the
 * SW path (__mlx4_write_mtt) rather than forwarding to firmware. */
int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_mtt mtt;
	__be64 *page_list = inbox->buf;
	/* Aliases the inbox buffer; entries are converted to host order
	 * in place below. */
	u64 *pg_list = (u64 *)page_list;
	int i;
	struct res_mtt *rmtt = NULL;
	/* NOTE(review): the 64-bit start index is truncated to int here
	 * and by get_containing_mtt()/check_mtt_range() - confirm MTT
	 * offsets always fit in an int. */
	int start = be64_to_cpu(page_list[0]);
	int npages = vhcr->in_modifier;
	int err;

	/* Mark the containing MTT allocation busy for the duration. */
	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
	if (err)
		return err;

	/* Call the SW implementation of write_mtt:
	 * - Prepare a dummy mtt struct
	 * - Translate inbox contents to simple addresses in host endianness.
	 *   Page entries begin at index 2; bit 0 of each entry (presumably
	 *   the hardware "present" bit - verify) is cleared. */
	mtt.first_seg = 0;
	mtt.order = 0;
	mtt.page_shift = 0;
	for (i = 0; i < npages; ++i)
		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);

	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
			       ((u64 *)page_list + 2));

	if (rmtt)
		put_res(dev, slave, rmtt->com.res_id, RES_MTT);

	return err;
}
2003
/* Wrapper for HW2SW_EQ: forwards the command to firmware and, on
 * success, drops the EQ's MTT reference and removes the EQ from the
 * resource tracker (the reverse of mlx4_SW2HW_EQ_wrapper). */
int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	/* Same slave-qualified id used when the EQ was registered. */
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
	if (err)
		return err;

	/* Hold the MTT busy while the firmware tears the EQ down. */
	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
	if (err)
		goto ex_abort;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	/* Release the pin taken in SW2HW_EQ. */
	atomic_dec(&eq->mtt->ref_count);
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);

	return 0;

ex_put:
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_EQ, res_id);

	return err;
}
2041
/* Deliver an asynchronous event to a slave by injecting the EQE into
 * the slave's registered event queue via the GEN_EQE firmware command.
 * Returns 0 (without generating anything) if the slave has not
 * registered for this event type. */
int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq;
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_modifier = 0;
	int err;
	int res_id;
	struct res_eq *req;

	if (!priv->mfunc.master.slave_state)
		return -EINVAL;

	event_eq = &priv->mfunc.master.slave_state[slave].event_eq;

	/* Create the event only if the slave is registered */
	if ((event_eq->event_type & (1 << eqe->type)) == 0)
		return 0;

	/* Serialize event generation per slave (protects the token). */
	mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	res_id = (slave << 8) | event_eq->eqn;
	err = get_res(dev, slave, res_id, RES_EQ, &req);
	if (err)
		goto unlock;

	/* The target EQ must currently be owned by hardware. */
	if (req->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto put;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto put;
	}

	/* Command-completion events carry a fresh token so the slave can
	 * match the completion to its request. */
	if (eqe->type == MLX4_EVENT_TYPE_CMD) {
		++event_eq->token;
		eqe->event.cmd.token = cpu_to_be16(event_eq->token);
	}

	/* Only the first 28 bytes of the EQE are copied.
	 * NOTE(review): presumably the final dword is ownership/reserved
	 * and filled in by firmware - confirm against the EQE layout. */
	memcpy(mailbox->buf, (u8 *) eqe, 28);

	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);

	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_NATIVE);

	put_res(dev, slave, res_id, RES_EQ);
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;

put:
	put_res(dev, slave, res_id, RES_EQ);

unlock:
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	return err;
}
2103
2104 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
2105                           struct mlx4_vhcr *vhcr,
2106                           struct mlx4_cmd_mailbox *inbox,
2107                           struct mlx4_cmd_mailbox *outbox,
2108                           struct mlx4_cmd_info *cmd)
2109 {
2110         int eqn = vhcr->in_modifier;
2111         int res_id = eqn | (slave << 8);
2112         struct res_eq *eq;
2113         int err;
2114
2115         err = get_res(dev, slave, res_id, RES_EQ, &eq);
2116         if (err)
2117                 return err;
2118
2119         if (eq->com.from_state != RES_EQ_HW) {
2120                 err = -EINVAL;
2121                 goto ex_put;
2122         }
2123
2124         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2125
2126 ex_put:
2127         put_res(dev, slave, res_id, RES_EQ);
2128         return err;
2129 }
2130
/* Wrapper for SW2HW_CQ: moves the CQ to the HW-owned state, verifies
 * the MTT range named in the CQ context belongs to the slave, and
 * forwards the command to firmware.  On success the CQ holds a
 * reference on its MTT. */
int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct mlx4_cq_context *cqc = inbox->buf;
	/* First MTT entry of the CQ buffer, aligned down to a segment. */
	int mtt_base = (cq_get_mtt_seg(cqc) / dev->caps.mtt_entry_sz) *
		dev->caps.mtts_per_seg;
	struct res_cq *cq;
	struct res_mtt *mtt;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, RES_MTT,
		      &mtt);
	if (err)
		goto out_move;
	/* The whole CQ buffer must lie inside the slave's MTT allocation. */
	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto out_put;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;
	/* Pin the MTT for the lifetime of the CQ. */
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}
2170
/* Wrapper for HW2SW_CQ: forwards the command and, on success, moves
 * the CQ back to the ALLOCATED state and drops the MTT reference
 * taken in SW2HW_CQ. */
int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_move;
	/* Release the pin taken in SW2HW_CQ. */
	atomic_dec(&cq->mtt->ref_count);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}
2195
2196 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2197                           struct mlx4_vhcr *vhcr,
2198                           struct mlx4_cmd_mailbox *inbox,
2199                           struct mlx4_cmd_mailbox *outbox,
2200                           struct mlx4_cmd_info *cmd)
2201 {
2202         int cqn = vhcr->in_modifier;
2203         struct res_cq *cq;
2204         int err;
2205
2206         err = get_res(dev, slave, cqn, RES_CQ, &cq);
2207         if (err)
2208                 return err;
2209
2210         if (cq->com.from_state != RES_CQ_HW)
2211                 goto ex_put;
2212
2213         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2214 ex_put:
2215         put_res(dev, slave, cqn, RES_CQ);
2216
2217         return err;
2218 }
2219
/* Handle a CQ resize (MODIFY_CQ with op_modifier 0): validate the new
 * MTT range named in the context, forward the command to firmware,
 * and on success transfer the CQ's MTT reference from the old buffer
 * to the new one. */
static int handle_resize(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd,
			 struct res_cq *cq)
{
	int err;
	struct res_mtt *orig_mtt;
	struct res_mtt *mtt;
	struct mlx4_cq_context *cqc = inbox->buf;
	/* First MTT entry of the new CQ buffer, aligned to a segment. */
	int mtt_base = (cq_get_mtt_seg(cqc) / dev->caps.mtt_entry_sz) *
		dev->caps.mtts_per_seg;

	err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
	if (err)
		return err;

	/* Guard against the tracked MTT having been replaced since
	 * cq->mtt was recorded. */
	if (orig_mtt != cq->mtt) {
		err = -EINVAL;
		goto ex_put;
	}

	err = get_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, RES_MTT,
		      &mtt);
	if (err)
		goto ex_put;

	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto ex_put1;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put1;
	/* Firmware accepted the resize: move the pin to the new MTT. */
	atomic_dec(&orig_mtt->ref_count);
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	return 0;

ex_put1:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_put:
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);

	return err;

}
2269
2270 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2271                            struct mlx4_vhcr *vhcr,
2272                            struct mlx4_cmd_mailbox *inbox,
2273                            struct mlx4_cmd_mailbox *outbox,
2274                            struct mlx4_cmd_info *cmd)
2275 {
2276         int cqn = vhcr->in_modifier;
2277         struct res_cq *cq;
2278         int err;
2279
2280         err = get_res(dev, slave, cqn, RES_CQ, &cq);
2281         if (err)
2282                 return err;
2283
2284         if (cq->com.from_state != RES_CQ_HW)
2285                 goto ex_put;
2286
2287         if (vhcr->op_modifier == 0) {
2288                 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
2289                 if (err)
2290                         goto ex_put;
2291         }
2292
2293         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2294 ex_put:
2295         put_res(dev, slave, cqn, RES_CQ);
2296
2297         return err;
2298 }
2299
2300 static int srq_get_pdn(struct mlx4_srq_context *srqc)
2301 {
2302         return be32_to_cpu(srqc->pd) & 0xffffff;
2303 }
2304
2305 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
2306 {
2307         int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
2308         int log_rq_stride = srqc->logstride & 7;
2309         int page_shift = (srqc->log_page_size & 0x3f) + 12;
2310
2311         if (log_srq_size + log_rq_stride + 4 < page_shift)
2312                 return 1;
2313
2314         return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
2315 }
2316
/* Wrapper for SW2HW_SRQ: validates that the SRQ number, MTT range and
 * protection domain in the context all belong to the slave, then
 * forwards the command to firmware.  On success the SRQ holds a
 * reference on its MTT. */
int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_srq *srq;
	struct mlx4_srq_context *srqc = inbox->buf;
	/* First MTT entry of the SRQ buffer, aligned down to a segment. */
	int mtt_base = (srq_get_mtt_seg(srqc) / dev->caps.mtt_entry_sz) *
		dev->caps.mtts_per_seg;

	/* The SRQ number embedded in the context must match the one the
	 * command is addressed to. */
	if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
		return -EINVAL;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base / dev->caps.mtts_per_seg,
		      RES_MTT, &mtt);
	if (err)
		goto ex_abort;
	err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
			      mtt);
	if (err)
		goto ex_put_mtt;

	/* The PD named in the context must belong to this slave. */
	if (pdn2slave(srq_get_pdn(srqc)) != slave) {
		err = -EPERM;
		goto ex_put_mtt;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_mtt;

	/* Pin the MTT for the lifetime of the SRQ. */
	atomic_inc(&mtt->ref_count);
	srq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_SRQ, srqn);
	return 0;

ex_put_mtt:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}
2368
/* Wrapper for HW2SW_SRQ: forwards the command and, on success, moves
 * the SRQ back to the ALLOCATED state, dropping its MTT reference and
 * its CQ reference (if any). */
int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;
	/* Release the references taken when the SRQ went to HW. */
	atomic_dec(&srq->mtt->ref_count);
	if (srq->cq)
		atomic_dec(&srq->cq->ref_count);
	res_end_move(dev, slave, RES_SRQ, srqn);

	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}
2397
2398 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2399                            struct mlx4_vhcr *vhcr,
2400                            struct mlx4_cmd_mailbox *inbox,
2401                            struct mlx4_cmd_mailbox *outbox,
2402                            struct mlx4_cmd_info *cmd)
2403 {
2404         int err;
2405         int srqn = vhcr->in_modifier;
2406         struct res_srq *srq;
2407
2408         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2409         if (err)
2410                 return err;
2411         if (srq->com.from_state != RES_SRQ_HW) {
2412                 err = -EBUSY;
2413                 goto out;
2414         }
2415         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2416 out:
2417         put_res(dev, slave, srqn, RES_SRQ);
2418         return err;
2419 }
2420
2421 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2422                          struct mlx4_vhcr *vhcr,
2423                          struct mlx4_cmd_mailbox *inbox,
2424                          struct mlx4_cmd_mailbox *outbox,
2425                          struct mlx4_cmd_info *cmd)
2426 {
2427         int err;
2428         int srqn = vhcr->in_modifier;
2429         struct res_srq *srq;
2430
2431         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2432         if (err)
2433                 return err;
2434
2435         if (srq->com.from_state != RES_SRQ_HW) {
2436                 err = -EBUSY;
2437                 goto out;
2438         }
2439
2440         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2441 out:
2442         put_res(dev, slave, srqn, RES_SRQ);
2443         return err;
2444 }
2445
2446 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
2447                         struct mlx4_vhcr *vhcr,
2448                         struct mlx4_cmd_mailbox *inbox,
2449                         struct mlx4_cmd_mailbox *outbox,
2450                         struct mlx4_cmd_info *cmd)
2451 {
2452         int err;
2453         int qpn = vhcr->in_modifier & 0x7fffff;
2454         struct res_qp *qp;
2455
2456         err = get_res(dev, slave, qpn, RES_QP, &qp);
2457         if (err)
2458                 return err;
2459         if (qp->com.from_state != RES_QP_HW) {
2460                 err = -EBUSY;
2461                 goto out;
2462         }
2463
2464         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2465 out:
2466         put_res(dev, slave, qpn, RES_QP);
2467         return err;
2468 }
2469
/* Wrapper for INIT2RTR_QP: patch the slave's GID into the UD address
 * information before forwarding through the generic QP wrapper. */
int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	/* The QP context starts 8 bytes into the mailbox; NOTE(review):
	 * presumably the leading 8 bytes are the optparam mask - confirm
	 * against the RST2INIT/INIT2RTR mailbox layout. */
	struct mlx4_qp_context *qpc = inbox->buf + 8;

	update_ud_gid(dev, qpc, (u8)slave);

	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
2482
/* Wrapper for the *2RST_QP transitions: forwards the command and, on
 * success, moves the QP back to MAPPED and drops the references it
 * held on its MTT, CQs and optional SRQ. */
int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	/* Low 24 bits of the modifier carry the QP number. */
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	/* Release the references taken when the QP went to HW. */
	atomic_dec(&qp->mtt->ref_count);
	atomic_dec(&qp->rcq->ref_count);
	atomic_dec(&qp->scq->ref_count);
	if (qp->srq)
		atomic_dec(&qp->srq->ref_count);
	res_end_move(dev, slave, RES_QP, qpn);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}
2513
2514 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
2515                                 struct res_qp *rqp, u8 *gid)
2516 {
2517         struct res_gid *res;
2518
2519         list_for_each_entry(res, &rqp->mcg_list, list) {
2520                 if (!memcmp(res->gid, gid, 16))
2521                         return res;
2522         }
2523         return NULL;
2524 }
2525
2526 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2527                        u8 *gid, enum mlx4_protocol prot)
2528 {
2529         struct res_gid *res;
2530         int err;
2531
2532         res = kzalloc(sizeof *res, GFP_KERNEL);
2533         if (!res)
2534                 return -ENOMEM;
2535
2536         spin_lock_irq(&rqp->mcg_spl);
2537         if (find_gid(dev, slave, rqp, gid)) {
2538                 kfree(res);
2539                 err = -EEXIST;
2540         } else {
2541                 memcpy(res->gid, gid, 16);
2542                 res->prot = prot;
2543                 list_add_tail(&res->list, &rqp->mcg_list);
2544                 err = 0;
2545         }
2546         spin_unlock_irq(&rqp->mcg_spl);
2547
2548         return err;
2549 }
2550
2551 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2552                        u8 *gid, enum mlx4_protocol prot)
2553 {
2554         struct res_gid *res;
2555         int err;
2556
2557         spin_lock_irq(&rqp->mcg_spl);
2558         res = find_gid(dev, slave, rqp, gid);
2559         if (!res || res->prot != prot)
2560                 err = -EINVAL;
2561         else {
2562                 list_del(&res->list);
2563                 kfree(res);
2564                 err = 0;
2565         }
2566         spin_unlock_irq(&rqp->mcg_spl);
2567
2568         return err;
2569 }
2570
/* Wrapper for QP multicast attach/detach on behalf of a slave.  The
 * attachment is recorded in the resource tracker (so it can be undone
 * at slave shutdown, see detach_qp()) before the actual steering
 * change is applied. */
int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp qp; /* dummy for calling attach/detach */
	u8 *gid = inbox->buf;
	enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
	int err, err1;
	int qpn;
	struct res_qp *rqp;
	/* op_modifier: non-zero = attach, zero = detach. */
	int attach = vhcr->op_modifier;
	int block_loopback = vhcr->in_modifier >> 31;
	u8 steer_type_mask = 2;
	/* NOTE(review): steering type is derived from bit 1 of gid[7];
	 * confirm against the mlx4_steer_type encoding. */
	enum mlx4_steer_type type = gid[7] & steer_type_mask;

	qpn = vhcr->in_modifier & 0xffffff;
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err)
		return err;

	qp.qpn = qpn;
	if (attach) {
		/* Track first so a firmware failure can be unwound. */
		err = add_mcg_res(dev, slave, rqp, gid, prot);
		if (err)
			goto ex_put;

		err = mlx4_qp_attach_common(dev, &qp, gid,
					    block_loopback, prot, type);
		if (err)
			goto ex_rem;
	} else {
		err = rem_mcg_res(dev, slave, rqp, gid, prot);
		if (err)
			goto ex_put;
		err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
	}

	put_res(dev, slave, qpn, RES_QP);
	return 0;

ex_rem:
	/* ignore error return below, already in error */
	err1 = rem_mcg_res(dev, slave, rqp, gid, prot);
ex_put:
	put_res(dev, slave, qpn, RES_QP);

	return err;
}
2621
/* Retry bound for busy resources.
 * NOTE(review): not referenced in the visible code - move_all_busy()
 * retries on a 5-second time budget instead; confirm before removing. */
enum {
	BUSY_MAX_RETRIES = 10
};
2625
2626 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
2627                                struct mlx4_vhcr *vhcr,
2628                                struct mlx4_cmd_mailbox *inbox,
2629                                struct mlx4_cmd_mailbox *outbox,
2630                                struct mlx4_cmd_info *cmd)
2631 {
2632         int err;
2633         int index = vhcr->in_modifier & 0xffff;
2634
2635         err = get_res(dev, slave, index, RES_COUNTER, NULL);
2636         if (err)
2637                 return err;
2638
2639         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2640         put_res(dev, slave, index, RES_COUNTER);
2641         return err;
2642 }
2643
2644 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
2645 {
2646         struct res_gid *rgid;
2647         struct res_gid *tmp;
2648         int err;
2649         struct mlx4_qp qp; /* dummy for calling attach/detach */
2650
2651         list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
2652                 qp.qpn = rqp->local_qpn;
2653                 err = mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
2654                                             MLX4_MC_STEER);
2655                 list_del(&rgid->list);
2656                 kfree(rgid);
2657         }
2658 }
2659
/* Single pass over all of @slave's resources of @type: mark each one
 * busy-and-removing so no new operations can start on it.  Resources
 * already busy (in use by an in-flight command) are counted instead
 * of marked; if @print is set they are also logged.  Returns the
 * number of resources that were still busy. */
static int _move_all_busy(struct mlx4_dev *dev, int slave,
			  enum mlx4_resource type, int print)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
	struct res_common *r;
	struct res_common *tmp;
	int busy;

	busy = 0;
	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(r, tmp, rlist, list) {
		if (r->owner == slave) {
			if (!r->removing) {
				if (r->state == RES_ANY_BUSY) {
					/* In use elsewhere; retry later. */
					if (print)
						mlx4_dbg(dev,
							 "%s id 0x%x is busy\n",
							  ResourceType(type),
							  r->res_id);
					++busy;
				} else {
					/* Claim it for removal. */
					r->from_state = r->state;
					r->state = RES_ANY_BUSY;
					r->removing = 1;
				}
			}
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return busy;
}
2695
2696 static int move_all_busy(struct mlx4_dev *dev, int slave,
2697                          enum mlx4_resource type)
2698 {
2699         unsigned long begin;
2700         int busy;
2701
2702         begin = jiffies;
2703         do {
2704                 busy = _move_all_busy(dev, slave, type, 0);
2705                 if (time_after(jiffies, begin + 5 * HZ))
2706                         break;
2707                 if (busy)
2708                         cond_resched();
2709         } while (busy);
2710
2711         if (busy)
2712                 busy = _move_all_busy(dev, slave, type, 1);
2713
2714         return busy;
2715 }
/* Tear down every QP still owned by @slave: detach multicast groups,
 * walk each QP back through its states (HW -> MAPPED -> RESERVED) and
 * finally free the tracker entry.  Called when a slave goes away. */
static void rem_slave_qps(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	int state;
	u64 in_param;
	int qpn;
	int err;

	/* Claim all of the slave's QPs so nothing else touches them. */
	err = move_all_busy(dev, slave, RES_QP);
	if (err)
		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy"
			  "for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		/* The lock is dropped while issuing (sleeping) FW commands.
		 * NOTE(review): presumably safe because move_all_busy() has
		 * marked the entries, preventing concurrent removal - verify. */
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == slave) {
			qpn = qp->com.res_id;
			detach_qp(dev, slave, qp);
			state = qp->com.from_state;
			/* Unwind the QP one state at a time until freed. */
			while (state != 0) {
				switch (state) {
				case RES_QP_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					radix_tree_delete(&tracker->res_tree[RES_QP],
							  qp->com.res_id);
					list_del(&qp->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(qp);
					state = 0;
					break;
				case RES_QP_MAPPED:
					if (!valid_reserved(dev, slave, qpn))
						__mlx4_qp_free_icm(dev, qpn);
					state = RES_QP_RESERVED;
					break;
				case RES_QP_HW:
					/* Force the QP to reset in firmware. */
					in_param = slave;
					err = mlx4_cmd(dev, in_param,
						       qp->local_qpn, 2,
						       MLX4_CMD_2RST_QP,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_qps: failed"
							 " to move slave %d qpn %d to"
							 " reset\n", slave,
							 qp->local_qpn);
					/* Drop the references taken at RST2INIT. */
					atomic_dec(&qp->rcq->ref_count);
					atomic_dec(&qp->scq->ref_count);
					atomic_dec(&qp->mtt->ref_count);
					if (qp->srq)
						atomic_dec(&qp->srq->ref_count);
					state = RES_QP_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
2785
2786 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
2787 {
2788         struct mlx4_priv *priv = mlx4_priv(dev);
2789         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2790         struct list_head *srq_list =
2791                 &tracker->slave_list[slave].res_list[RES_SRQ];
2792         struct res_srq *srq;
2793         struct res_srq *tmp;
2794         int state;
2795         u64 in_param;
2796         LIST_HEAD(tlist);
2797         int srqn;
2798         int err;
2799
2800         err = move_all_busy(dev, slave, RES_SRQ);
2801         if (err)
2802                 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
2803                           "busy for slave %d\n", slave);
2804
2805         spin_lock_irq(mlx4_tlock(dev));
2806         list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
2807                 spin_unlock_irq(mlx4_tlock(dev));
2808                 if (srq->com.owner == slave) {
2809                         srqn = srq->com.res_id;
2810                         state = srq->com.from_state;
2811                         while (state != 0) {
2812                                 switch (state) {
2813                                 case RES_SRQ_ALLOCATED:
2814                                         __mlx4_srq_free_icm(dev, srqn);
2815                                         spin_lock_irq(mlx4_tlock(dev));
2816                                         radix_tree_delete(&tracker->res_tree[RES_SRQ],
2817                                                           srqn);
2818                                         list_del(&srq->com.list);
2819                                         spin_unlock_irq(mlx4_tlock(dev));
2820                                         kfree(srq);
2821                                         state = 0;
2822                                         break;
2823
2824                                 case RES_SRQ_HW:
2825                                         in_param = slave;
2826                                         err = mlx4_cmd(dev, in_param, srqn, 1,
2827                                                        MLX4_CMD_HW2SW_SRQ,
2828                                                        MLX4_CMD_TIME_CLASS_A,
2829                                                        MLX4_CMD_NATIVE);
2830                                         if (err)
2831                                                 mlx4_dbg(dev, "rem_slave_srqs: failed"
2832                                                          " to move slave %d srq %d to"
2833                                                          " SW ownership\n",
2834                                                          slave, srqn);
2835
2836                                         atomic_dec(&srq->mtt->ref_count);
2837                                         if (srq->cq)
2838                                                 atomic_dec(&srq->cq->ref_count);
2839                                         state = RES_SRQ_ALLOCATED;
2840                                         break;
2841
2842                                 default:
2843                                         state = 0;
2844                                 }
2845                         }
2846                 }
2847                 spin_lock_irq(mlx4_tlock(dev));
2848         }
2849         spin_unlock_irq(mlx4_tlock(dev));
2850 }
2851
2852 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
2853 {
2854         struct mlx4_priv *priv = mlx4_priv(dev);
2855         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2856         struct list_head *cq_list =
2857                 &tracker->slave_list[slave].res_list[RES_CQ];
2858         struct res_cq *cq;
2859         struct res_cq *tmp;
2860         int state;
2861         u64 in_param;
2862         LIST_HEAD(tlist);
2863         int cqn;
2864         int err;
2865
2866         err = move_all_busy(dev, slave, RES_CQ);
2867         if (err)
2868                 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
2869                           "busy for slave %d\n", slave);
2870
2871         spin_lock_irq(mlx4_tlock(dev));
2872         list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
2873                 spin_unlock_irq(mlx4_tlock(dev));
2874                 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
2875                         cqn = cq->com.res_id;
2876                         state = cq->com.from_state;
2877                         while (state != 0) {
2878                                 switch (state) {
2879                                 case RES_CQ_ALLOCATED:
2880                                         __mlx4_cq_free_icm(dev, cqn);
2881                                         spin_lock_irq(mlx4_tlock(dev));
2882                                         radix_tree_delete(&tracker->res_tree[RES_CQ],
2883                                                           cqn);
2884                                         list_del(&cq->com.list);
2885                                         spin_unlock_irq(mlx4_tlock(dev));
2886                                         kfree(cq);
2887                                         state = 0;
2888                                         break;
2889
2890                                 case RES_CQ_HW:
2891                                         in_param = slave;
2892                                         err = mlx4_cmd(dev, in_param, cqn, 1,
2893                                                        MLX4_CMD_HW2SW_CQ,
2894                                                        MLX4_CMD_TIME_CLASS_A,
2895                                                        MLX4_CMD_NATIVE);
2896                                         if (err)
2897                                                 mlx4_dbg(dev, "rem_slave_cqs: failed"
2898                                                          " to move slave %d cq %d to"
2899                                                          " SW ownership\n",
2900                                                          slave, cqn);
2901                                         atomic_dec(&cq->mtt->ref_count);
2902                                         state = RES_CQ_ALLOCATED;
2903                                         break;
2904
2905                                 default:
2906                                         state = 0;
2907                                 }
2908                         }
2909                 }
2910                 spin_lock_irq(mlx4_tlock(dev));
2911         }
2912         spin_unlock_irq(mlx4_tlock(dev));
2913 }
2914
2915 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
2916 {
2917         struct mlx4_priv *priv = mlx4_priv(dev);
2918         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2919         struct list_head *mpt_list =
2920                 &tracker->slave_list[slave].res_list[RES_MPT];
2921         struct res_mpt *mpt;
2922         struct res_mpt *tmp;
2923         int state;
2924         u64 in_param;
2925         LIST_HEAD(tlist);
2926         int mptn;
2927         int err;
2928
2929         err = move_all_busy(dev, slave, RES_MPT);
2930         if (err)
2931                 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
2932                           "busy for slave %d\n", slave);
2933
2934         spin_lock_irq(mlx4_tlock(dev));
2935         list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
2936                 spin_unlock_irq(mlx4_tlock(dev));
2937                 if (mpt->com.owner == slave) {
2938                         mptn = mpt->com.res_id;
2939                         state = mpt->com.from_state;
2940                         while (state != 0) {
2941                                 switch (state) {
2942                                 case RES_MPT_RESERVED:
2943                                         __mlx4_mr_release(dev, mpt->key);
2944                                         spin_lock_irq(mlx4_tlock(dev));
2945                                         radix_tree_delete(&tracker->res_tree[RES_MPT],
2946                                                           mptn);
2947                                         list_del(&mpt->com.list);
2948                                         spin_unlock_irq(mlx4_tlock(dev));
2949                                         kfree(mpt);
2950                                         state = 0;
2951                                         break;
2952
2953                                 case RES_MPT_MAPPED:
2954                                         __mlx4_mr_free_icm(dev, mpt->key);
2955                                         state = RES_MPT_RESERVED;
2956                                         break;
2957
2958                                 case RES_MPT_HW:
2959                                         in_param = slave;
2960                                         err = mlx4_cmd(dev, in_param, mptn, 0,
2961                                                      MLX4_CMD_HW2SW_MPT,
2962                                                      MLX4_CMD_TIME_CLASS_A,
2963                                                      MLX4_CMD_NATIVE);
2964                                         if (err)
2965                                                 mlx4_dbg(dev, "rem_slave_mrs: failed"
2966                                                          " to move slave %d mpt %d to"
2967                                                          " SW ownership\n",
2968                                                          slave, mptn);
2969                                         if (mpt->mtt)
2970                                                 atomic_dec(&mpt->mtt->ref_count);
2971                                         state = RES_MPT_MAPPED;
2972                                         break;
2973                                 default:
2974                                         state = 0;
2975                                 }
2976                         }
2977                 }
2978                 spin_lock_irq(mlx4_tlock(dev));
2979         }
2980         spin_unlock_irq(mlx4_tlock(dev));
2981 }
2982
2983 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
2984 {
2985         struct mlx4_priv *priv = mlx4_priv(dev);
2986         struct mlx4_resource_tracker *tracker =
2987                 &priv->mfunc.master.res_tracker;
2988         struct list_head *mtt_list =
2989                 &tracker->slave_list[slave].res_list[RES_MTT];
2990         struct res_mtt *mtt;
2991         struct res_mtt *tmp;
2992         int state;
2993         LIST_HEAD(tlist);
2994         int base;
2995         int err;
2996
2997         err = move_all_busy(dev, slave, RES_MTT);
2998         if (err)
2999                 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
3000                           "busy for slave %d\n", slave);
3001
3002         spin_lock_irq(mlx4_tlock(dev));
3003         list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
3004                 spin_unlock_irq(mlx4_tlock(dev));
3005                 if (mtt->com.owner == slave) {
3006                         base = mtt->com.res_id;
3007                         state = mtt->com.from_state;
3008                         while (state != 0) {
3009                                 switch (state) {
3010                                 case RES_MTT_ALLOCATED:
3011                                         __mlx4_free_mtt_range(dev, base,
3012                                                               mtt->order);
3013                                         spin_lock_irq(mlx4_tlock(dev));
3014                                         radix_tree_delete(&tracker->res_tree[RES_MTT],
3015                                                           base);
3016                                         list_del(&mtt->com.list);
3017                                         spin_unlock_irq(mlx4_tlock(dev));
3018                                         kfree(mtt);
3019                                         state = 0;
3020                                         break;
3021
3022                                 default:
3023                                         state = 0;
3024                                 }
3025                         }
3026                 }
3027                 spin_lock_irq(mlx4_tlock(dev));
3028         }
3029         spin_unlock_irq(mlx4_tlock(dev));
3030 }
3031
3032 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
3033 {
3034         struct mlx4_priv *priv = mlx4_priv(dev);
3035         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3036         struct list_head *eq_list =
3037                 &tracker->slave_list[slave].res_list[RES_EQ];
3038         struct res_eq *eq;
3039         struct res_eq *tmp;
3040         int err;
3041         int state;
3042         LIST_HEAD(tlist);
3043         int eqn;
3044         struct mlx4_cmd_mailbox *mailbox;
3045
3046         err = move_all_busy(dev, slave, RES_EQ);
3047         if (err)
3048                 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
3049                           "busy for slave %d\n", slave);
3050
3051         spin_lock_irq(mlx4_tlock(dev));
3052         list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
3053                 spin_unlock_irq(mlx4_tlock(dev));
3054                 if (eq->com.owner == slave) {
3055                         eqn = eq->com.res_id;
3056                         state = eq->com.from_state;
3057                         while (state != 0) {
3058                                 switch (state) {
3059                                 case RES_EQ_RESERVED:
3060                                         spin_lock_irq(mlx4_tlock(dev));
3061                                         radix_tree_delete(&tracker->res_tree[RES_EQ],
3062                                                           eqn);
3063                                         list_del(&eq->com.list);
3064                                         spin_unlock_irq(mlx4_tlock(dev));
3065                                         kfree(eq);
3066                                         state = 0;
3067                                         break;
3068
3069                                 case RES_EQ_HW:
3070                                         mailbox = mlx4_alloc_cmd_mailbox(dev);
3071                                         if (IS_ERR(mailbox)) {
3072                                                 cond_resched();
3073                                                 continue;
3074                                         }
3075                                         err = mlx4_cmd_box(dev, slave, 0,
3076                                                            eqn & 0xff, 0,
3077                                                            MLX4_CMD_HW2SW_EQ,
3078                                                            MLX4_CMD_TIME_CLASS_A,
3079                                                            MLX4_CMD_NATIVE);
3080                                         mlx4_dbg(dev, "rem_slave_eqs: failed"
3081                                                  " to move slave %d eqs %d to"
3082                                                  " SW ownership\n", slave, eqn);
3083                                         mlx4_free_cmd_mailbox(dev, mailbox);
3084                                         if (!err) {
3085                                                 atomic_dec(&eq->mtt->ref_count);
3086                                                 state = RES_EQ_RESERVED;
3087                                         }
3088                                         break;
3089
3090                                 default:
3091                                         state = 0;
3092                                 }
3093                         }
3094                 }
3095                 spin_lock_irq(mlx4_tlock(dev));
3096         }
3097         spin_unlock_irq(mlx4_tlock(dev));
3098 }
3099
/* Reclaim every resource still owned by @slave, serialised per slave by
 * the slave_list mutex.
 *
 * The teardown order matters: rem_slave_qps() drops references on the
 * CQs/SRQs/MTTs a QP used, rem_slave_srqs()/rem_slave_cqs() drop their
 * MTT (and CQ) references, and rem_slave_cqs() skips CQs whose
 * ref_count is still non-zero — so MTTs are freed last, after all
 * holders released them.
 */
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
	/* NOTE(review): "VLAN" placeholder — VLAN resources are not yet
	 * cleaned up here; confirm whether a rem_slave_vlans() is needed.
	 */
	rem_slave_macs(dev, slave);
	rem_slave_qps(dev, slave);
	rem_slave_srqs(dev, slave);
	rem_slave_cqs(dev, slave);
	rem_slave_mrs(dev, slave);
	rem_slave_eqs(dev, slave);
	rem_slave_mtts(dev, slave);
	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}