net/mlx4: Make mlx4_is_eth a visible inline function
[cascardo/linux.git] / drivers / net / ethernet / mellanox / mlx4 / resource_tracker.c
1 /*
2  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
4  * All rights reserved.
5  * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  */
35
36 #include <linux/sched.h>
37 #include <linux/pci.h>
38 #include <linux/errno.h>
39 #include <linux/kernel.h>
40 #include <linux/io.h>
41 #include <linux/slab.h>
42 #include <linux/mlx4/cmd.h>
43 #include <linux/mlx4/qp.h>
44 #include <linux/if_ether.h>
45 #include <linux/etherdevice.h>
46
47 #include "mlx4.h"
48 #include "fw.h"
49
50 #define MLX4_MAC_VALID          (1ull << 63)
51
52 struct mac_res {
53         struct list_head list;
54         u64 mac;
55         int ref_count;
56         u8 smac_index;
57         u8 port;
58 };
59
60 struct vlan_res {
61         struct list_head list;
62         u16 vlan;
63         int ref_count;
64         int vlan_index;
65         u8 port;
66 };
67
68 struct res_common {
69         struct list_head        list;
70         struct rb_node          node;
71         u64                     res_id;
72         int                     owner;
73         int                     state;
74         int                     from_state;
75         int                     to_state;
76         int                     removing;
77 };
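/* Every tracked object embeds res_common and is linked into two indexes
 * at once: 'node' keys the object by res_id in the per-type red-black
 * tree, while 'list' chains it on the owning slave's per-type list.
 * During a state change, 'from_state' and 'to_state' record the
 * in-flight transition while 'state' is parked at the type's BUSY
 * value; see res_abort_move() and res_end_move() further down for the
 * rollback/commit side.
 */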
78
79 enum {
80         RES_ANY_BUSY = 1
81 };
82
83 struct res_gid {
84         struct list_head        list;
85         u8                      gid[16];
86         enum mlx4_protocol      prot;
87         enum mlx4_steer_type    steer;
88         u64                     reg_id;
89 };
90
91 enum res_qp_states {
92         RES_QP_BUSY = RES_ANY_BUSY,
93
94         /* QP number was allocated */
95         RES_QP_RESERVED,
96
97         /* ICM memory for QP context was mapped */
98         RES_QP_MAPPED,
99
100         /* QP is in hw ownership */
101         RES_QP_HW
102 };
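/* QP lifecycle as tracked here:
 *
 *	RES_QP_RESERVED <-> RES_QP_MAPPED <-> RES_QP_HW
 *
 * Transitions in either direction pass through the transient
 * RES_QP_BUSY value while the corresponding command is in flight;
 * see qp_res_start_move_to() below.
 */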
103
104 struct res_qp {
105         struct res_common       com;
106         struct res_mtt         *mtt;
107         struct res_cq          *rcq;
108         struct res_cq          *scq;
109         struct res_srq         *srq;
110         struct list_head        mcg_list;
111         spinlock_t              mcg_spl;
112         int                     local_qpn;
113         atomic_t                ref_count;
114         u32                     qpc_flags;
115         /* saved qp params before VST enforcement in order to restore on VGT */
116         u8                      sched_queue;
117         __be32                  param3;
118         u8                      vlan_control;
119         u8                      fvl_rx;
120         u8                      pri_path_fl;
121         u8                      vlan_index;
122         u8                      feup;
123 };
124
125 enum res_mtt_states {
126         RES_MTT_BUSY = RES_ANY_BUSY,
127         RES_MTT_ALLOCATED,
128 };
129
130 static inline const char *mtt_states_str(enum res_mtt_states state)
131 {
132         switch (state) {
133         case RES_MTT_BUSY: return "RES_MTT_BUSY";
134         case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
135         default: return "Unknown";
136         }
137 }
138
139 struct res_mtt {
140         struct res_common       com;
141         int                     order;
142         atomic_t                ref_count;
143 };
144
145 enum res_mpt_states {
146         RES_MPT_BUSY = RES_ANY_BUSY,
147         RES_MPT_RESERVED,
148         RES_MPT_MAPPED,
149         RES_MPT_HW,
150 };
151
152 struct res_mpt {
153         struct res_common       com;
154         struct res_mtt         *mtt;
155         int                     key;
156 };
157
158 enum res_eq_states {
159         RES_EQ_BUSY = RES_ANY_BUSY,
160         RES_EQ_RESERVED,
161         RES_EQ_HW,
162 };
163
164 struct res_eq {
165         struct res_common       com;
166         struct res_mtt         *mtt;
167 };
168
169 enum res_cq_states {
170         RES_CQ_BUSY = RES_ANY_BUSY,
171         RES_CQ_ALLOCATED,
172         RES_CQ_HW,
173 };
174
175 struct res_cq {
176         struct res_common       com;
177         struct res_mtt         *mtt;
178         atomic_t                ref_count;
179 };
180
181 enum res_srq_states {
182         RES_SRQ_BUSY = RES_ANY_BUSY,
183         RES_SRQ_ALLOCATED,
184         RES_SRQ_HW,
185 };
186
187 struct res_srq {
188         struct res_common       com;
189         struct res_mtt         *mtt;
190         struct res_cq          *cq;
191         atomic_t                ref_count;
192 };
193
194 enum res_counter_states {
195         RES_COUNTER_BUSY = RES_ANY_BUSY,
196         RES_COUNTER_ALLOCATED,
197 };
198
199 struct res_counter {
200         struct res_common       com;
201         int                     port;
202 };
203
204 enum res_xrcdn_states {
205         RES_XRCD_BUSY = RES_ANY_BUSY,
206         RES_XRCD_ALLOCATED,
207 };
208
209 struct res_xrcdn {
210         struct res_common       com;
211         int                     port;
212 };
213
214 enum res_fs_rule_states {
215         RES_FS_RULE_BUSY = RES_ANY_BUSY,
216         RES_FS_RULE_ALLOCATED,
217 };
218
219 struct res_fs_rule {
220         struct res_common       com;
221         int                     qpn;
222 };
223
224 static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
225 {
226         struct rb_node *node = root->rb_node;
227
228         while (node) {
229                 struct res_common *res = container_of(node, struct res_common,
230                                                       node);
231
232                 if (res_id < res->res_id)
233                         node = node->rb_left;
234                 else if (res_id > res->res_id)
235                         node = node->rb_right;
236                 else
237                         return res;
238         }
239         return NULL;
240 }
241
242 static int res_tracker_insert(struct rb_root *root, struct res_common *res)
243 {
244         struct rb_node **new = &(root->rb_node), *parent = NULL;
245
246         /* Figure out where to put new node */
247         while (*new) {
248                 struct res_common *this = container_of(*new, struct res_common,
249                                                        node);
250
251                 parent = *new;
252                 if (res->res_id < this->res_id)
253                         new = &((*new)->rb_left);
254                 else if (res->res_id > this->res_id)
255                         new = &((*new)->rb_right);
256                 else
257                         return -EEXIST;
258         }
259
260         /* Add new node and rebalance tree. */
261         rb_link_node(&res->node, parent, new);
262         rb_insert_color(&res->node, root);
263
264         return 0;
265 }
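/* Illustrative pairing of the two helpers above (a sketch, not driver
 * code; 'my_root' and the id value are hypothetical):
 *
 *	struct rb_root my_root = RB_ROOT;
 *	struct res_common *res = kzalloc(sizeof(*res), GFP_KERNEL);
 *
 *	res->res_id = 42;
 *	if (res_tracker_insert(&my_root, res))
 *		kfree(res);		(duplicate id: -EEXIST)
 *	else
 *		WARN_ON(res_tracker_lookup(&my_root, 42) != res);
 */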
266
267 enum qp_transition {
268         QP_TRANS_INIT2RTR,
269         QP_TRANS_RTR2RTS,
270         QP_TRANS_RTS2RTS,
271         QP_TRANS_SQERR2RTS,
272         QP_TRANS_SQD2SQD,
273         QP_TRANS_SQD2RTS
274 };
275
276 /* For debug use */
277 static const char *resource_str(enum mlx4_resource rt)
278 {
279         switch (rt) {
280         case RES_QP: return "RES_QP";
281         case RES_CQ: return "RES_CQ";
282         case RES_SRQ: return "RES_SRQ";
283         case RES_MPT: return "RES_MPT";
284         case RES_MTT: return "RES_MTT";
285         case RES_MAC: return  "RES_MAC";
286         case RES_VLAN: return  "RES_VLAN";
287         case RES_EQ: return "RES_EQ";
288         case RES_COUNTER: return "RES_COUNTER";
289         case RES_FS_RULE: return "RES_FS_RULE";
290         case RES_XRCD: return "RES_XRCD";
291         default: return "Unknown resource type !!!";
292         }
293 }
294
295 static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
296 static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
297                                       enum mlx4_resource res_type, int count,
298                                       int port)
299 {
300         struct mlx4_priv *priv = mlx4_priv(dev);
301         struct resource_allocator *res_alloc =
302                 &priv->mfunc.master.res_tracker.res_alloc[res_type];
303         int err = -EINVAL;
304         int allocated, free, reserved, guaranteed, from_free;
305         int from_rsvd;
306
307         if (slave > dev->persist->num_vfs)
308                 return -EINVAL;
309
310         spin_lock(&res_alloc->alloc_lock);
311         allocated = (port > 0) ?
312                 res_alloc->allocated[(port - 1) *
313                 (dev->persist->num_vfs + 1) + slave] :
314                 res_alloc->allocated[slave];
315         free = (port > 0) ? res_alloc->res_port_free[port - 1] :
316                 res_alloc->res_free;
317         reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
318                 res_alloc->res_reserved;
319         guaranteed = res_alloc->guaranteed[slave];
320
321         if (allocated + count > res_alloc->quota[slave]) {
322                 mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
323                           slave, port, resource_str(res_type), count,
324                           allocated, res_alloc->quota[slave]);
325                 goto out;
326         }
327
328         if (allocated + count <= guaranteed) {
329                 err = 0;
330                 from_rsvd = count;
331         } else {
332                 /* portion may need to be obtained from free area */
333                 if (guaranteed - allocated > 0)
334                         from_free = count - (guaranteed - allocated);
335                 else
336                         from_free = count;
337
338                 from_rsvd = count - from_free;
339
340                 if (free - from_free >= reserved)
341                         err = 0;
342                 else
343                         mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
344                                   slave, port, resource_str(res_type), free,
345                                   from_free, reserved);
346         }
347
348         if (!err) {
349                 /* grant the request */
350                 if (port > 0) {
351                         res_alloc->allocated[(port - 1) *
352                         (dev->persist->num_vfs + 1) + slave] += count;
353                         res_alloc->res_port_free[port - 1] -= count;
354                         res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
355                 } else {
356                         res_alloc->allocated[slave] += count;
357                         res_alloc->res_free -= count;
358                         res_alloc->res_reserved -= from_rsvd;
359                 }
360         }
361
362 out:
363         spin_unlock(&res_alloc->alloc_lock);
364         return err;
365 }
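/* Accounting note for the allocator above: per-port resources (MAC,
 * VLAN) use a flattened 2-D 'allocated' array indexed as
 *
 *	allocated[(port - 1) * (num_vfs + 1) + slave]
 *
 * i.e. num_vfs + 1 counters per port (PF plus each VF), while global
 * resources use port == 0 and a plain allocated[slave].  A request is
 * satisfied from the slave's guaranteed (reserved) share first; only
 * the part exceeding the guarantee competes for the shared free pool,
 * and it is refused if granting it would eat into other slaves'
 * guarantees (free - from_free < reserved).
 */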
366
367 static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
368                                     enum mlx4_resource res_type, int count,
369                                     int port)
370 {
371         struct mlx4_priv *priv = mlx4_priv(dev);
372         struct resource_allocator *res_alloc =
373                 &priv->mfunc.master.res_tracker.res_alloc[res_type];
374         int allocated, guaranteed, from_rsvd;
375
376         if (slave > dev->persist->num_vfs)
377                 return;
378
379         spin_lock(&res_alloc->alloc_lock);
380
381         allocated = (port > 0) ?
382                 res_alloc->allocated[(port - 1) *
383                 (dev->persist->num_vfs + 1) + slave] :
384                 res_alloc->allocated[slave];
385         guaranteed = res_alloc->guaranteed[slave];
386
387         if (allocated - count >= guaranteed) {
388                 from_rsvd = 0;
389         } else {
390                 /* portion may need to be returned to reserved area */
391                 if (allocated - guaranteed > 0)
392                         from_rsvd = count - (allocated - guaranteed);
393                 else
394                         from_rsvd = count;
395         }
396
397         if (port > 0) {
398                 res_alloc->allocated[(port - 1) *
399                 (dev->persist->num_vfs + 1) + slave] -= count;
400                 res_alloc->res_port_free[port - 1] += count;
401                 res_alloc->res_port_rsvd[port - 1] += from_rsvd;
402         } else {
403                 res_alloc->allocated[slave] -= count;
404                 res_alloc->res_free += count;
405                 res_alloc->res_reserved += from_rsvd;
406         }
407
408         spin_unlock(&res_alloc->alloc_lock);
409         return;
410 }
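/* Worked example for the release path, assuming guaranteed = 4,
 * allocated = 6 and count = 3: allocated - count = 3 < guaranteed, so
 * from_rsvd = count - (allocated - guaranteed) = 3 - 2 = 1.  All three
 * units go back on the free counter and one of them is re-marked as
 * reserved, mirroring how mlx4_grant_resource() split the original
 * request between the two pools.
 */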
411
412 static inline void initialize_res_quotas(struct mlx4_dev *dev,
413                                          struct resource_allocator *res_alloc,
414                                          enum mlx4_resource res_type,
415                                          int vf, int num_instances)
416 {
417         res_alloc->guaranteed[vf] = num_instances /
418                                     (2 * (dev->persist->num_vfs + 1));
419         res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
420         if (vf == mlx4_master_func_num(dev)) {
421                 res_alloc->res_free = num_instances;
422                 if (res_type == RES_MTT) {
423                         /* reserved mtts will be taken out of the PF allocation */
424                         res_alloc->res_free += dev->caps.reserved_mtts;
425                         res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
426                         res_alloc->quota[vf] += dev->caps.reserved_mtts;
427                 }
428         }
429 }
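/* Example of the arithmetic above with num_instances = 100 and
 * num_vfs = 4: each function is guaranteed 100 / (2 * 5) = 10 units
 * and may allocate at most 100 / 2 + 10 = 60.  The PF additionally
 * absorbs dev->caps.reserved_mtts for RES_MTT, since the reserved MTTs
 * are carved out of its own allocation.
 */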
430
431 void mlx4_init_quotas(struct mlx4_dev *dev)
432 {
433         struct mlx4_priv *priv = mlx4_priv(dev);
434         int pf;
435
436         /* quotas for VFs are initialized in mlx4_slave_cap */
437         if (mlx4_is_slave(dev))
438                 return;
439
440         if (!mlx4_is_mfunc(dev)) {
441                 dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
442                         mlx4_num_reserved_sqps(dev);
443                 dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
444                 dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
445                 dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
446                 dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
447                 return;
448         }
449
450         pf = mlx4_master_func_num(dev);
451         dev->quotas.qp =
452                 priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
453         dev->quotas.cq =
454                 priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
455         dev->quotas.srq =
456                 priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
457         dev->quotas.mtt =
458                 priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
459         dev->quotas.mpt =
460                 priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
461 }
462 int mlx4_init_resource_tracker(struct mlx4_dev *dev)
463 {
464         struct mlx4_priv *priv = mlx4_priv(dev);
465         int i, j;
466         int t;
467
468         priv->mfunc.master.res_tracker.slave_list =
469                 kzalloc(dev->num_slaves * sizeof(struct slave_list),
470                         GFP_KERNEL);
471         if (!priv->mfunc.master.res_tracker.slave_list)
472                 return -ENOMEM;
473
474         for (i = 0 ; i < dev->num_slaves; i++) {
475                 for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
476                         INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
477                                        slave_list[i].res_list[t]);
478                 mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
479         }
480
481         mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
482                  dev->num_slaves);
483         for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
484                 priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
485
486         for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
487                 struct resource_allocator *res_alloc =
488                         &priv->mfunc.master.res_tracker.res_alloc[i];
489                 res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) *
490                                            sizeof(int), GFP_KERNEL);
491                 res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) *
492                                                 sizeof(int), GFP_KERNEL);
493                 if (i == RES_MAC || i == RES_VLAN)
494                         res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
495                                                        (dev->persist->num_vfs
496                                                        + 1) *
497                                                        sizeof(int), GFP_KERNEL);
498                 else
499                         res_alloc->allocated = kzalloc((dev->persist->
500                                                         num_vfs + 1) *
501                                                        sizeof(int), GFP_KERNEL);
502
503                 if (!res_alloc->quota || !res_alloc->guaranteed ||
504                     !res_alloc->allocated)
505                         goto no_mem_err;
506
507                 spin_lock_init(&res_alloc->alloc_lock);
508                 for (t = 0; t < dev->persist->num_vfs + 1; t++) {
509                         struct mlx4_active_ports actv_ports =
510                                 mlx4_get_active_ports(dev, t);
511                         switch (i) {
512                         case RES_QP:
513                                 initialize_res_quotas(dev, res_alloc, RES_QP,
514                                                       t, dev->caps.num_qps -
515                                                       dev->caps.reserved_qps -
516                                                       mlx4_num_reserved_sqps(dev));
517                                 break;
518                         case RES_CQ:
519                                 initialize_res_quotas(dev, res_alloc, RES_CQ,
520                                                       t, dev->caps.num_cqs -
521                                                       dev->caps.reserved_cqs);
522                                 break;
523                         case RES_SRQ:
524                                 initialize_res_quotas(dev, res_alloc, RES_SRQ,
525                                                       t, dev->caps.num_srqs -
526                                                       dev->caps.reserved_srqs);
527                                 break;
528                         case RES_MPT:
529                                 initialize_res_quotas(dev, res_alloc, RES_MPT,
530                                                       t, dev->caps.num_mpts -
531                                                       dev->caps.reserved_mrws);
532                                 break;
533                         case RES_MTT:
534                                 initialize_res_quotas(dev, res_alloc, RES_MTT,
535                                                       t, dev->caps.num_mtts -
536                                                       dev->caps.reserved_mtts);
537                                 break;
538                         case RES_MAC:
539                                 if (t == mlx4_master_func_num(dev)) {
540                                         int max_vfs_pport = 0;
541                                         /* Calculate the max vfs per
542                                          * port for both ports. */
543                                         for (j = 0; j < dev->caps.num_ports;
544                                              j++) {
545                                                 struct mlx4_slaves_pport slaves_pport =
546                                                         mlx4_phys_to_slaves_pport(dev, j + 1);
547                                                 unsigned current_slaves =
548                                                         bitmap_weight(slaves_pport.slaves,
549                                                                       dev->persist->num_vfs + 1) - 1;
550                                                 if (max_vfs_pport < current_slaves)
551                                                         max_vfs_pport =
552                                                                 current_slaves;
553                                         }
554                                         res_alloc->quota[t] =
555                                                 MLX4_MAX_MAC_NUM -
556                                                 2 * max_vfs_pport;
557                                         res_alloc->guaranteed[t] = 2;
558                                         for (j = 0; j < MLX4_MAX_PORTS; j++)
559                                                 res_alloc->res_port_free[j] =
560                                                         MLX4_MAX_MAC_NUM;
561                                 } else {
562                                         res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
563                                         res_alloc->guaranteed[t] = 2;
564                                 }
565                                 break;
566                         case RES_VLAN:
567                                 if (t == mlx4_master_func_num(dev)) {
568                                         res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
569                                         res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
570                                         for (j = 0; j < MLX4_MAX_PORTS; j++)
571                                                 res_alloc->res_port_free[j] =
572                                                         res_alloc->quota[t];
573                                 } else {
574                                         res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
575                                         res_alloc->guaranteed[t] = 0;
576                                 }
577                                 break;
578                         case RES_COUNTER:
579                                 res_alloc->quota[t] = dev->caps.max_counters;
580                                 res_alloc->guaranteed[t] = 0;
581                                 if (t == mlx4_master_func_num(dev))
582                                         res_alloc->res_free = res_alloc->quota[t];
583                                 break;
584                         default:
585                                 break;
586                         }
587                         if (i == RES_MAC || i == RES_VLAN) {
588                                 for (j = 0; j < dev->caps.num_ports; j++)
589                                         if (test_bit(j, actv_ports.ports))
590                                                 res_alloc->res_port_rsvd[j] +=
591                                                         res_alloc->guaranteed[t];
592                         } else {
593                                 res_alloc->res_reserved += res_alloc->guaranteed[t];
594                         }
595                 }
596         }
597         spin_lock_init(&priv->mfunc.master.res_tracker.lock);
598         return 0;
599
600 no_mem_err:
601         for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
602                 kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
603                 priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
604                 kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
605                 priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
606                 kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
607                 priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
608         }
609         return -ENOMEM;
610 }
611
612 void mlx4_free_resource_tracker(struct mlx4_dev *dev,
613                                 enum mlx4_res_tracker_free_type type)
614 {
615         struct mlx4_priv *priv = mlx4_priv(dev);
616         int i;
617
618         if (priv->mfunc.master.res_tracker.slave_list) {
619                 if (type != RES_TR_FREE_STRUCTS_ONLY) {
620                         for (i = 0; i < dev->num_slaves; i++) {
621                                 if (type == RES_TR_FREE_ALL ||
622                                     dev->caps.function != i)
623                                         mlx4_delete_all_resources_for_slave(dev, i);
624                         }
625                         /* free master's vlans */
626                         i = dev->caps.function;
627                         mlx4_reset_roce_gids(dev, i);
628                         mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
629                         rem_slave_vlans(dev, i);
630                         mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
631                 }
632
633                 if (type != RES_TR_FREE_SLAVES_ONLY) {
634                         for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
635                                 kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
636                                 priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
637                                 kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
638                                 priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
639                                 kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
640                                 priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
641                         }
642                         kfree(priv->mfunc.master.res_tracker.slave_list);
643                         priv->mfunc.master.res_tracker.slave_list = NULL;
644                 }
645         }
646 }
647
648 static void update_pkey_index(struct mlx4_dev *dev, int slave,
649                               struct mlx4_cmd_mailbox *inbox)
650 {
651         u8 sched = *(u8 *)(inbox->buf + 64);
652         u8 orig_index = *(u8 *)(inbox->buf + 35);
653         u8 new_index;
654         struct mlx4_priv *priv = mlx4_priv(dev);
655         int port;
656
657         port = (sched >> 6 & 1) + 1;
658
659         new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
660         *(u8 *)(inbox->buf + 35) = new_index;
661 }
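/* The raw offsets above index the command mailbox: inbox->buf + 8 is
 * the start of struct mlx4_qp_context, so buf + 35 lands on
 * pri_path.pkey_index and buf + 64 on pri_path.sched_queue (whose bit
 * 6 selects the physical port).  The per-slave virt2phys_pkey table
 * then rewrites the guest-visible P_Key index into the physical one
 * before the command is forwarded to firmware.
 */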
662
663 static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
664                        u8 slave)
665 {
666         struct mlx4_qp_context  *qp_ctx = inbox->buf + 8;
667         enum mlx4_qp_optpar     optpar = be32_to_cpu(*(__be32 *) inbox->buf);
668         u32                     ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
669         int port;
670
671         if (MLX4_QP_ST_UD == ts) {
672                 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
673                 if (mlx4_is_eth(dev, port))
674                         qp_ctx->pri_path.mgid_index =
675                                 mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
676                 else
677                         qp_ctx->pri_path.mgid_index = slave | 0x80;
678
679         } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
680                 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
681                         port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
682                         if (mlx4_is_eth(dev, port)) {
683                                 qp_ctx->pri_path.mgid_index +=
684                                         mlx4_get_base_gid_ix(dev, slave, port);
685                                 qp_ctx->pri_path.mgid_index &= 0x7f;
686                         } else {
687                                 qp_ctx->pri_path.mgid_index = slave & 0x7F;
688                         }
689                 }
690                 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
691                         port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
692                         if (mlx4_is_eth(dev, port)) {
693                                 qp_ctx->alt_path.mgid_index +=
694                                         mlx4_get_base_gid_ix(dev, slave, port);
695                                 qp_ctx->alt_path.mgid_index &= 0x7f;
696                         } else {
697                                 qp_ctx->alt_path.mgid_index = slave & 0x7F;
698                         }
699                 }
700         }
701 }
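/* GID index virtualization for the paths above: on Ethernet (RoCE)
 * ports a slave's mgid_index is rebased by mlx4_get_base_gid_ix() into
 * that slave's window of the physical per-port GID table (UD also sets
 * bit 0x80, RC/UC/XRC mask the result to 7 bits), while on IB ports
 * the slave number itself is used as the index.
 */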
702
703 static int update_vport_qp_param(struct mlx4_dev *dev,
704                                  struct mlx4_cmd_mailbox *inbox,
705                                  u8 slave, u32 qpn)
706 {
707         struct mlx4_qp_context  *qpc = inbox->buf + 8;
708         struct mlx4_vport_oper_state *vp_oper;
709         struct mlx4_priv *priv;
710         u32 qp_type;
711         int port, err = 0;
712
713         port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
714         priv = mlx4_priv(dev);
715         vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
716         qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
717
718         if (MLX4_VGT != vp_oper->state.default_vlan) {
719                 /* the reserved QPs (special, proxy, tunnel)
720                  * do not operate over vlans
721                  */
722                 if (mlx4_is_qp_reserved(dev, qpn))
723                         return 0;
724
725                 /* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
726                 if (qp_type == MLX4_QP_ST_UD ||
727                     (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
728                         if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
729                                 *(__be32 *)inbox->buf =
730                                         cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
731                                         MLX4_QP_OPTPAR_VLAN_STRIPPING);
732                                 qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
733                         } else {
734                                 struct mlx4_update_qp_params params = {.flags = 0};
735
736                                 err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
737                                 if (err)
738                                         goto out;
739                         }
740                 }
741
742                 if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
743                     dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
744                         qpc->pri_path.vlan_control =
745                                 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
746                                 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
747                                 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
748                                 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
749                                 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
750                                 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
751                 } else if (0 != vp_oper->state.default_vlan) {
752                         qpc->pri_path.vlan_control =
753                                 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
754                                 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
755                                 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
756                 } else { /* priority tagged */
757                         qpc->pri_path.vlan_control =
758                                 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
759                                 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
760                 }
761
762                 qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
763                 qpc->pri_path.vlan_index = vp_oper->vlan_idx;
764                 qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
765                 qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
766                 qpc->pri_path.sched_queue &= 0xC7;
767                 qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
768         }
769         if (vp_oper->state.spoofchk) {
770                 qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
771                 qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
772         }
773 out:
774         return err;
775 }
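/* VST enforcement summary: when the admin has pinned a VF to a VLAN
 * (default_vlan != MLX4_VGT), the PF rewrites the VF's INIT2RTR
 * context here.  It forces VLAN stripping, inserts the operational
 * vlan_index, programs vlan_control according to the VLAN and link
 * state, and overrides the user priority bits in sched_queue, so the
 * VF cannot opt out of its VLAN.  Spoof checking similarly forces the
 * source MAC index in grh_mylmc.
 */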
776
777 static int mpt_mask(struct mlx4_dev *dev)
778 {
779         return dev->caps.num_mpts - 1;
780 }
781
782 static void *find_res(struct mlx4_dev *dev, u64 res_id,
783                       enum mlx4_resource type)
784 {
785         struct mlx4_priv *priv = mlx4_priv(dev);
786
787         return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
788                                   res_id);
789 }
790
791 static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
792                    enum mlx4_resource type,
793                    void *res)
794 {
795         struct res_common *r;
796         int err = 0;
797
798         spin_lock_irq(mlx4_tlock(dev));
799         r = find_res(dev, res_id, type);
800         if (!r) {
801                 err = -ENOENT;
802                 goto exit;
803         }
804
805         if (r->state == RES_ANY_BUSY) {
806                 err = -EBUSY;
807                 goto exit;
808         }
809
810         if (r->owner != slave) {
811                 err = -EPERM;
812                 goto exit;
813         }
814
815         r->from_state = r->state;
816         r->state = RES_ANY_BUSY;
817
818         if (res)
819                 *((struct res_common **)res) = r;
820
821 exit:
822         spin_unlock_irq(mlx4_tlock(dev));
823         return err;
824 }
825
826 int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
827                                     enum mlx4_resource type,
828                                     u64 res_id, int *slave)
829 {
830
831         struct res_common *r;
832         int err = -ENOENT;
833         int id = res_id;
834
835         if (type == RES_QP)
836                 id &= 0x7fffff;
837         spin_lock(mlx4_tlock(dev));
838
839         r = find_res(dev, id, type);
840         if (r) {
841                 *slave = r->owner;
842                 err = 0;
843         }
844         spin_unlock(mlx4_tlock(dev));
845
846         return err;
847 }
848
849 static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
850                     enum mlx4_resource type)
851 {
852         struct res_common *r;
853
854         spin_lock_irq(mlx4_tlock(dev));
855         r = find_res(dev, res_id, type);
856         if (r)
857                 r->state = r->from_state;
858         spin_unlock_irq(mlx4_tlock(dev));
859 }
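/* get_res() and put_res() form a simple busy-marking protocol:
 * get_res() parks the resource in RES_ANY_BUSY (saving the previous
 * state in from_state) so that concurrent commands against the same
 * object fail with -EBUSY instead of racing, and put_res() restores
 * the saved state.  A hypothetical caller ('mtt_base' is illustrative):
 *
 *	struct res_mtt *mtt;
 *	int err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
 *
 *	if (err)
 *		return err;
 *	(... operate on the MTT while others see it as busy ...)
 *	put_res(dev, slave, mtt_base, RES_MTT);
 */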
860
861 static struct res_common *alloc_qp_tr(int id)
862 {
863         struct res_qp *ret;
864
865         ret = kzalloc(sizeof *ret, GFP_KERNEL);
866         if (!ret)
867                 return NULL;
868
869         ret->com.res_id = id;
870         ret->com.state = RES_QP_RESERVED;
871         ret->local_qpn = id;
872         INIT_LIST_HEAD(&ret->mcg_list);
873         spin_lock_init(&ret->mcg_spl);
874         atomic_set(&ret->ref_count, 0);
875
876         return &ret->com;
877 }
878
879 static struct res_common *alloc_mtt_tr(int id, int order)
880 {
881         struct res_mtt *ret;
882
883         ret = kzalloc(sizeof *ret, GFP_KERNEL);
884         if (!ret)
885                 return NULL;
886
887         ret->com.res_id = id;
888         ret->order = order;
889         ret->com.state = RES_MTT_ALLOCATED;
890         atomic_set(&ret->ref_count, 0);
891
892         return &ret->com;
893 }
894
895 static struct res_common *alloc_mpt_tr(int id, int key)
896 {
897         struct res_mpt *ret;
898
899         ret = kzalloc(sizeof *ret, GFP_KERNEL);
900         if (!ret)
901                 return NULL;
902
903         ret->com.res_id = id;
904         ret->com.state = RES_MPT_RESERVED;
905         ret->key = key;
906
907         return &ret->com;
908 }
909
910 static struct res_common *alloc_eq_tr(int id)
911 {
912         struct res_eq *ret;
913
914         ret = kzalloc(sizeof *ret, GFP_KERNEL);
915         if (!ret)
916                 return NULL;
917
918         ret->com.res_id = id;
919         ret->com.state = RES_EQ_RESERVED;
920
921         return &ret->com;
922 }
923
924 static struct res_common *alloc_cq_tr(int id)
925 {
926         struct res_cq *ret;
927
928         ret = kzalloc(sizeof *ret, GFP_KERNEL);
929         if (!ret)
930                 return NULL;
931
932         ret->com.res_id = id;
933         ret->com.state = RES_CQ_ALLOCATED;
934         atomic_set(&ret->ref_count, 0);
935
936         return &ret->com;
937 }
938
939 static struct res_common *alloc_srq_tr(int id)
940 {
941         struct res_srq *ret;
942
943         ret = kzalloc(sizeof *ret, GFP_KERNEL);
944         if (!ret)
945                 return NULL;
946
947         ret->com.res_id = id;
948         ret->com.state = RES_SRQ_ALLOCATED;
949         atomic_set(&ret->ref_count, 0);
950
951         return &ret->com;
952 }
953
954 static struct res_common *alloc_counter_tr(int id)
955 {
956         struct res_counter *ret;
957
958         ret = kzalloc(sizeof *ret, GFP_KERNEL);
959         if (!ret)
960                 return NULL;
961
962         ret->com.res_id = id;
963         ret->com.state = RES_COUNTER_ALLOCATED;
964
965         return &ret->com;
966 }
967
968 static struct res_common *alloc_xrcdn_tr(int id)
969 {
970         struct res_xrcdn *ret;
971
972         ret = kzalloc(sizeof *ret, GFP_KERNEL);
973         if (!ret)
974                 return NULL;
975
976         ret->com.res_id = id;
977         ret->com.state = RES_XRCD_ALLOCATED;
978
979         return &ret->com;
980 }
981
982 static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
983 {
984         struct res_fs_rule *ret;
985
986         ret = kzalloc(sizeof *ret, GFP_KERNEL);
987         if (!ret)
988                 return NULL;
989
990         ret->com.res_id = id;
991         ret->com.state = RES_FS_RULE_ALLOCATED;
992         ret->qpn = qpn;
993         return &ret->com;
994 }
995
996 static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
997                                    int extra)
998 {
999         struct res_common *ret;
1000
1001         switch (type) {
1002         case RES_QP:
1003                 ret = alloc_qp_tr(id);
1004                 break;
1005         case RES_MPT:
1006                 ret = alloc_mpt_tr(id, extra);
1007                 break;
1008         case RES_MTT:
1009                 ret = alloc_mtt_tr(id, extra);
1010                 break;
1011         case RES_EQ:
1012                 ret = alloc_eq_tr(id);
1013                 break;
1014         case RES_CQ:
1015                 ret = alloc_cq_tr(id);
1016                 break;
1017         case RES_SRQ:
1018                 ret = alloc_srq_tr(id);
1019                 break;
1020         case RES_MAC:
1021                 pr_err("implementation missing\n");
1022                 return NULL;
1023         case RES_COUNTER:
1024                 ret = alloc_counter_tr(id);
1025                 break;
1026         case RES_XRCD:
1027                 ret = alloc_xrcdn_tr(id);
1028                 break;
1029         case RES_FS_RULE:
1030                 ret = alloc_fs_rule_tr(id, extra);
1031                 break;
1032         default:
1033                 return NULL;
1034         }
1035         if (ret)
1036                 ret->owner = slave;
1037
1038         return ret;
1039 }
1040
1041 static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1042                          enum mlx4_resource type, int extra)
1043 {
1044         int i;
1045         int err;
1046         struct mlx4_priv *priv = mlx4_priv(dev);
1047         struct res_common **res_arr;
1048         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1049         struct rb_root *root = &tracker->res_tree[type];
1050
1051         res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
1052         if (!res_arr)
1053                 return -ENOMEM;
1054
1055         for (i = 0; i < count; ++i) {
1056                 res_arr[i] = alloc_tr(base + i, type, slave, extra);
1057                 if (!res_arr[i]) {
1058                         for (--i; i >= 0; --i)
1059                                 kfree(res_arr[i]);
1060
1061                         kfree(res_arr);
1062                         return -ENOMEM;
1063                 }
1064         }
1065
1066         spin_lock_irq(mlx4_tlock(dev));
1067         for (i = 0; i < count; ++i) {
1068                 if (find_res(dev, base + i, type)) {
1069                         err = -EEXIST;
1070                         goto undo;
1071                 }
1072                 err = res_tracker_insert(root, res_arr[i]);
1073                 if (err)
1074                         goto undo;
1075                 list_add_tail(&res_arr[i]->list,
1076                               &tracker->slave_list[slave].res_list[type]);
1077         }
1078         spin_unlock_irq(mlx4_tlock(dev));
1079         kfree(res_arr);
1080
1081         return 0;
1082
1083 undo:
1084         for (--i; i >= 0; --i)
1085                 rb_erase(&res_arr[i]->node, root);
1086
1087         spin_unlock_irq(mlx4_tlock(dev));
1088
1089         for (i = 0; i < count; ++i)
1090                 kfree(res_arr[i]);
1091
1092         kfree(res_arr);
1093
1094         return err;
1095 }
1096
1097 static int remove_qp_ok(struct res_qp *res)
1098 {
1099         if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
1100             !list_empty(&res->mcg_list)) {
1101                 pr_err("resource tracker: failed to remove qp, state %d, ref_count %d\n",
1102                        res->com.state, atomic_read(&res->ref_count));
1103                 return -EBUSY;
1104         } else if (res->com.state != RES_QP_RESERVED) {
1105                 return -EPERM;
1106         }
1107
1108         return 0;
1109 }
1110
1111 static int remove_mtt_ok(struct res_mtt *res, int order)
1112 {
1113         if (res->com.state == RES_MTT_BUSY ||
1114             atomic_read(&res->ref_count)) {
1115                 pr_devel("%s-%d: state %s, ref_count %d\n",
1116                          __func__, __LINE__,
1117                          mtt_states_str(res->com.state),
1118                          atomic_read(&res->ref_count));
1119                 return -EBUSY;
1120         } else if (res->com.state != RES_MTT_ALLOCATED)
1121                 return -EPERM;
1122         else if (res->order != order)
1123                 return -EINVAL;
1124
1125         return 0;
1126 }
1127
1128 static int remove_mpt_ok(struct res_mpt *res)
1129 {
1130         if (res->com.state == RES_MPT_BUSY)
1131                 return -EBUSY;
1132         else if (res->com.state != RES_MPT_RESERVED)
1133                 return -EPERM;
1134
1135         return 0;
1136 }
1137
1138 static int remove_eq_ok(struct res_eq *res)
1139 {
1140         if (res->com.state == RES_EQ_BUSY)
1141                 return -EBUSY;
1142         else if (res->com.state != RES_EQ_RESERVED)
1143                 return -EPERM;
1144
1145         return 0;
1146 }
1147
1148 static int remove_counter_ok(struct res_counter *res)
1149 {
1150         if (res->com.state == RES_COUNTER_BUSY)
1151                 return -EBUSY;
1152         else if (res->com.state != RES_COUNTER_ALLOCATED)
1153                 return -EPERM;
1154
1155         return 0;
1156 }
1157
1158 static int remove_xrcdn_ok(struct res_xrcdn *res)
1159 {
1160         if (res->com.state == RES_XRCD_BUSY)
1161                 return -EBUSY;
1162         else if (res->com.state != RES_XRCD_ALLOCATED)
1163                 return -EPERM;
1164
1165         return 0;
1166 }
1167
1168 static int remove_fs_rule_ok(struct res_fs_rule *res)
1169 {
1170         if (res->com.state == RES_FS_RULE_BUSY)
1171                 return -EBUSY;
1172         else if (res->com.state != RES_FS_RULE_ALLOCATED)
1173                 return -EPERM;
1174
1175         return 0;
1176 }
1177
1178 static int remove_cq_ok(struct res_cq *res)
1179 {
1180         if (res->com.state == RES_CQ_BUSY)
1181                 return -EBUSY;
1182         else if (res->com.state != RES_CQ_ALLOCATED)
1183                 return -EPERM;
1184
1185         return 0;
1186 }
1187
1188 static int remove_srq_ok(struct res_srq *res)
1189 {
1190         if (res->com.state == RES_SRQ_BUSY)
1191                 return -EBUSY;
1192         else if (res->com.state != RES_SRQ_ALLOCATED)
1193                 return -EPERM;
1194
1195         return 0;
1196 }
1197
1198 static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
1199 {
1200         switch (type) {
1201         case RES_QP:
1202                 return remove_qp_ok((struct res_qp *)res);
1203         case RES_CQ:
1204                 return remove_cq_ok((struct res_cq *)res);
1205         case RES_SRQ:
1206                 return remove_srq_ok((struct res_srq *)res);
1207         case RES_MPT:
1208                 return remove_mpt_ok((struct res_mpt *)res);
1209         case RES_MTT:
1210                 return remove_mtt_ok((struct res_mtt *)res, extra);
1211         case RES_MAC:
1212                 return -ENOSYS;
1213         case RES_EQ:
1214                 return remove_eq_ok((struct res_eq *)res);
1215         case RES_COUNTER:
1216                 return remove_counter_ok((struct res_counter *)res);
1217         case RES_XRCD:
1218                 return remove_xrcdn_ok((struct res_xrcdn *)res);
1219         case RES_FS_RULE:
1220                 return remove_fs_rule_ok((struct res_fs_rule *)res);
1221         default:
1222                 return -EINVAL;
1223         }
1224 }
1225
1226 static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1227                          enum mlx4_resource type, int extra)
1228 {
1229         u64 i;
1230         int err;
1231         struct mlx4_priv *priv = mlx4_priv(dev);
1232         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1233         struct res_common *r;
1234
1235         spin_lock_irq(mlx4_tlock(dev));
1236         for (i = base; i < base + count; ++i) {
1237                 r = res_tracker_lookup(&tracker->res_tree[type], i);
1238                 if (!r) {
1239                         err = -ENOENT;
1240                         goto out;
1241                 }
1242                 if (r->owner != slave) {
1243                         err = -EPERM;
1244                         goto out;
1245                 }
1246                 err = remove_ok(r, type, extra);
1247                 if (err)
1248                         goto out;
1249         }
1250
1251         for (i = base; i < base + count; ++i) {
1252                 r = res_tracker_lookup(&tracker->res_tree[type], i);
1253                 rb_erase(&r->node, &tracker->res_tree[type]);
1254                 list_del(&r->list);
1255                 kfree(r);
1256         }
1257         err = 0;
1258
1259 out:
1260         spin_unlock_irq(mlx4_tlock(dev));
1261
1262         return err;
1263 }
1264
1265 static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
1266                                 enum res_qp_states state, struct res_qp **qp,
1267                                 int alloc)
1268 {
1269         struct mlx4_priv *priv = mlx4_priv(dev);
1270         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1271         struct res_qp *r;
1272         int err = 0;
1273
1274         spin_lock_irq(mlx4_tlock(dev));
1275         r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
1276         if (!r)
1277                 err = -ENOENT;
1278         else if (r->com.owner != slave)
1279                 err = -EPERM;
1280         else {
1281                 switch (state) {
1282                 case RES_QP_BUSY:
1283                         mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
1284                                  __func__, r->com.res_id);
1285                         err = -EBUSY;
1286                         break;
1287
1288                 case RES_QP_RESERVED:
1289                         if (r->com.state == RES_QP_MAPPED && !alloc)
1290                                 break;
1291
1292                         mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
1293                         err = -EINVAL;
1294                         break;
1295
1296                 case RES_QP_MAPPED:
1297                         if ((r->com.state == RES_QP_RESERVED && alloc) ||
1298                             r->com.state == RES_QP_HW)
1299                                 break;
1300                         else {
1301                                 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
1302                                           r->com.res_id);
1303                                 err = -EINVAL;
1304                         }
1305
1306                         break;
1307
1308                 case RES_QP_HW:
1309                         if (r->com.state != RES_QP_MAPPED)
1310                                 err = -EINVAL;
1311                         break;
1312                 default:
1313                         err = -EINVAL;
1314                 }
1315
1316                 if (!err) {
1317                         r->com.from_state = r->com.state;
1318                         r->com.to_state = state;
1319                         r->com.state = RES_QP_BUSY;
1320                         if (qp)
1321                                 *qp = r;
1322                 }
1323         }
1324
1325         spin_unlock_irq(mlx4_tlock(dev));
1326
1327         return err;
1328 }
1329
1330 static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1331                                 enum res_mpt_states state, struct res_mpt **mpt)
1332 {
1333         struct mlx4_priv *priv = mlx4_priv(dev);
1334         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1335         struct res_mpt *r;
1336         int err = 0;
1337
1338         spin_lock_irq(mlx4_tlock(dev));
1339         r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
1340         if (!r)
1341                 err = -ENOENT;
1342         else if (r->com.owner != slave)
1343                 err = -EPERM;
1344         else {
1345                 switch (state) {
1346                 case RES_MPT_BUSY:
1347                         err = -EINVAL;
1348                         break;
1349
1350                 case RES_MPT_RESERVED:
1351                         if (r->com.state != RES_MPT_MAPPED)
1352                                 err = -EINVAL;
1353                         break;
1354
1355                 case RES_MPT_MAPPED:
1356                         if (r->com.state != RES_MPT_RESERVED &&
1357                             r->com.state != RES_MPT_HW)
1358                                 err = -EINVAL;
1359                         break;
1360
1361                 case RES_MPT_HW:
1362                         if (r->com.state != RES_MPT_MAPPED)
1363                                 err = -EINVAL;
1364                         break;
1365                 default:
1366                         err = -EINVAL;
1367                 }
1368
1369                 if (!err) {
1370                         r->com.from_state = r->com.state;
1371                         r->com.to_state = state;
1372                         r->com.state = RES_MPT_BUSY;
1373                         if (mpt)
1374                                 *mpt = r;
1375                 }
1376         }
1377
1378         spin_unlock_irq(mlx4_tlock(dev));
1379
1380         return err;
1381 }
1382
1383 static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1384                                 enum res_eq_states state, struct res_eq **eq)
1385 {
1386         struct mlx4_priv *priv = mlx4_priv(dev);
1387         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1388         struct res_eq *r;
1389         int err = 0;
1390
1391         spin_lock_irq(mlx4_tlock(dev));
1392         r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
1393         if (!r)
1394                 err = -ENOENT;
1395         else if (r->com.owner != slave)
1396                 err = -EPERM;
1397         else {
1398                 switch (state) {
1399                 case RES_EQ_BUSY:
1400                         err = -EINVAL;
1401                         break;
1402
1403                 case RES_EQ_RESERVED:
1404                         if (r->com.state != RES_EQ_HW)
1405                                 err = -EINVAL;
1406                         break;
1407
1408                 case RES_EQ_HW:
1409                         if (r->com.state != RES_EQ_RESERVED)
1410                                 err = -EINVAL;
1411                         break;
1412
1413                 default:
1414                         err = -EINVAL;
1415                 }
1416
1417                 if (!err) {
1418                         r->com.from_state = r->com.state;
1419                         r->com.to_state = state;
1420                         r->com.state = RES_EQ_BUSY;
1421                         if (eq)
1422                                 *eq = r;
1423                 }
1424         }
1425
1426         spin_unlock_irq(mlx4_tlock(dev));
1427
1428         return err;
1429 }
1430
1431 static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1432                                 enum res_cq_states state, struct res_cq **cq)
1433 {
1434         struct mlx4_priv *priv = mlx4_priv(dev);
1435         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1436         struct res_cq *r;
1437         int err;
1438
1439         spin_lock_irq(mlx4_tlock(dev));
1440         r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
1441         if (!r) {
1442                 err = -ENOENT;
1443         } else if (r->com.owner != slave) {
1444                 err = -EPERM;
1445         } else if (state == RES_CQ_ALLOCATED) {
1446                 if (r->com.state != RES_CQ_HW)
1447                         err = -EINVAL;
1448                 else if (atomic_read(&r->ref_count))
1449                         err = -EBUSY;
1450                 else
1451                         err = 0;
1452         } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
1453                 err = -EINVAL;
1454         } else {
1455                 err = 0;
1456         }
1457
1458         if (!err) {
1459                 r->com.from_state = r->com.state;
1460                 r->com.to_state = state;
1461                 r->com.state = RES_CQ_BUSY;
1462                 if (cq)
1463                         *cq = r;
1464         }
1465
1466         spin_unlock_irq(mlx4_tlock(dev));
1467
1468         return err;
1469 }
1470
1471 static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1472                                  enum res_srq_states state, struct res_srq **srq)
1473 {
1474         struct mlx4_priv *priv = mlx4_priv(dev);
1475         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1476         struct res_srq *r;
1477         int err = 0;
1478
1479         spin_lock_irq(mlx4_tlock(dev));
1480         r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1481         if (!r) {
1482                 err = -ENOENT;
1483         } else if (r->com.owner != slave) {
1484                 err = -EPERM;
1485         } else if (state == RES_SRQ_ALLOCATED) {
1486                 if (r->com.state != RES_SRQ_HW)
1487                         err = -EINVAL;
1488                 else if (atomic_read(&r->ref_count))
1489                         err = -EBUSY;
1490         } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
1491                 err = -EINVAL;
1492         }
1493
1494         if (!err) {
1495                 r->com.from_state = r->com.state;
1496                 r->com.to_state = state;
1497                 r->com.state = RES_SRQ_BUSY;
1498                 if (srq)
1499                         *srq = r;
1500         }
1501
1502         spin_unlock_irq(mlx4_tlock(dev));
1503
1504         return err;
1505 }
1506
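/*
 * res_abort_move()/res_end_move() close out a transition opened by a
 * *_res_start_move_to() helper: abort restores the saved from_state,
 * end commits to_state.  Both are no-ops if the resource has vanished
 * or changed owner in the meantime.
 */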
1507 static void res_abort_move(struct mlx4_dev *dev, int slave,
1508                            enum mlx4_resource type, int id)
1509 {
1510         struct mlx4_priv *priv = mlx4_priv(dev);
1511         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1512         struct res_common *r;
1513
1514         spin_lock_irq(mlx4_tlock(dev));
1515         r = res_tracker_lookup(&tracker->res_tree[type], id);
1516         if (r && (r->owner == slave))
1517                 r->state = r->from_state;
1518         spin_unlock_irq(mlx4_tlock(dev));
1519 }
1520
1521 static void res_end_move(struct mlx4_dev *dev, int slave,
1522                          enum mlx4_resource type, int id)
1523 {
1524         struct mlx4_priv *priv = mlx4_priv(dev);
1525         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1526         struct res_common *r;
1527
1528         spin_lock_irq(mlx4_tlock(dev));
1529         r = res_tracker_lookup(&tracker->res_tree[type], id);
1530         if (r && (r->owner == slave))
1531                 r->state = r->to_state;
1532         spin_unlock_irq(mlx4_tlock(dev));
1533 }
1534
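/*
 * A reserved QP may be tracked only by the master or by the slave that
 * owns the corresponding proxy QP.  fw_reserved() covers the narrower
 * range the firmware keeps for itself; those QPs never have their ICM
 * mapped or freed through the wrappers below.
 */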
1535 static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1536 {
1537         return mlx4_is_qp_reserved(dev, qpn) &&
1538                 (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1539 }
1540
1541 static int fw_reserved(struct mlx4_dev *dev, int qpn)
1542 {
1543         return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
1544 }
1545
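/*
 * For RES_OP_RESERVE the arguments are packed into the VHCR in_param:
 * the low dword carries the QP count in bits 0-23 and the allocation
 * flags in bits 24-31 (masked against dev->caps.alloc_res_qp_mask so a
 * slave cannot turn on unsupported flags), while the high dword holds
 * the alignment.  A slave-side sketch of the encoding (illustrative
 * only, not lifted from the guest driver):
 *
 *	u64 in_param = ((u64)align << 32) | ((u64)flags << 24) |
 *		       (count & 0xffffff);
 */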
1546 static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1547                         u64 in_param, u64 *out_param)
1548 {
1549         int err;
1550         int count;
1551         int align;
1552         int base;
1553         int qpn;
1554         u8 flags;
1555
1556         switch (op) {
1557         case RES_OP_RESERVE:
1558                 count = get_param_l(&in_param) & 0xffffff;
1559                 /* Turn off all unsupported QP allocation flags that the
1560                  * slave tries to set.
1561                  */
1562                 flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
1563                 align = get_param_h(&in_param);
1564                 err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
1565                 if (err)
1566                         return err;
1567
1568                 err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
1569                 if (err) {
1570                         mlx4_release_resource(dev, slave, RES_QP, count, 0);
1571                         return err;
1572                 }
1573
1574                 err = add_res_range(dev, slave, base, count, RES_QP, 0);
1575                 if (err) {
1576                         mlx4_release_resource(dev, slave, RES_QP, count, 0);
1577                         __mlx4_qp_release_range(dev, base, count);
1578                         return err;
1579                 }
1580                 set_param_l(out_param, base);
1581                 break;
1582         case RES_OP_MAP_ICM:
1583                 qpn = get_param_l(&in_param) & 0x7fffff;
1584                 if (valid_reserved(dev, slave, qpn)) {
1585                         err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1586                         if (err)
1587                                 return err;
1588                 }
1589
1590                 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1591                                            NULL, 1);
1592                 if (err)
1593                         return err;
1594
1595                 if (!fw_reserved(dev, qpn)) {
1596                         err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
1597                         if (err) {
1598                                 res_abort_move(dev, slave, RES_QP, qpn);
1599                                 return err;
1600                         }
1601                 }
1602
1603                 res_end_move(dev, slave, RES_QP, qpn);
1604                 break;
1605
1606         default:
1607                 err = -EINVAL;
1608                 break;
1609         }
1610         return err;
1611 }
1612
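/*
 * MTT allocation is order based: in_param holds the order of the
 * requested power-of-two range, so grant/release always covers the
 * same 1 << order entries that __mlx4_alloc_mtt_range() hands out.
 */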
1613 static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1614                          u64 in_param, u64 *out_param)
1615 {
1616         int err = -EINVAL;
1617         int base;
1618         int order;
1619
1620         if (op != RES_OP_RESERVE_AND_MAP)
1621                 return err;
1622
1623         order = get_param_l(&in_param);
1624
1625         err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1626         if (err)
1627                 return err;
1628
1629         base = __mlx4_alloc_mtt_range(dev, order);
1630         if (base == -1) {
1631                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1632                 return -ENOMEM;
1633         }
1634
1635         err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1636         if (err) {
1637                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1638                 __mlx4_free_mtt_range(dev, base, order);
1639         } else {
1640                 set_param_l(out_param, base);
1641         }
1642
1643         return err;
1644 }
1645
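/*
 * MPTs are set up in two steps, mirroring the RES_MPT_RESERVED ->
 * RES_MPT_MAPPED states: RES_OP_RESERVE claims an MPT index, and a
 * later RES_OP_MAP_ICM backs it with ICM memory.  The tracker id is
 * index & mpt_mask(dev), with the full index kept in the resource for
 * the release path.
 */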
1646 static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1647                          u64 in_param, u64 *out_param)
1648 {
1649         int err = -EINVAL;
1650         int index;
1651         int id;
1652         struct res_mpt *mpt;
1653
1654         switch (op) {
1655         case RES_OP_RESERVE:
1656                 err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1657                 if (err)
1658                         break;
1659
1660                 index = __mlx4_mpt_reserve(dev);
1661                 if (index == -1) {
1662                         mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1663                         break;
1664                 }
1665                 id = index & mpt_mask(dev);
1666
1667                 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1668                 if (err) {
1669                         mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1670                         __mlx4_mpt_release(dev, index);
1671                         break;
1672                 }
1673                 set_param_l(out_param, index);
1674                 break;
1675         case RES_OP_MAP_ICM:
1676                 index = get_param_l(&in_param);
1677                 id = index & mpt_mask(dev);
1678                 err = mr_res_start_move_to(dev, slave, id,
1679                                            RES_MPT_MAPPED, &mpt);
1680                 if (err)
1681                         return err;
1682
1683                 err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
1684                 if (err) {
1685                         res_abort_move(dev, slave, RES_MPT, id);
1686                         return err;
1687                 }
1688
1689                 res_end_move(dev, slave, RES_MPT, id);
1690                 break;
1691         }
1692         return err;
1693 }
1694
1695 static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1696                         u64 in_param, u64 *out_param)
1697 {
1698         int cqn;
1699         int err;
1700
1701         switch (op) {
1702         case RES_OP_RESERVE_AND_MAP:
1703                 err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
1704                 if (err)
1705                         break;
1706
1707                 err = __mlx4_cq_alloc_icm(dev, &cqn);
1708                 if (err) {
1709                         mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1710                         break;
1711                 }
1712
1713                 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1714                 if (err) {
1715                         mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1716                         __mlx4_cq_free_icm(dev, cqn);
1717                         break;
1718                 }
1719
1720                 set_param_l(out_param, cqn);
1721                 break;
1722
1723         default:
1724                 err = -EINVAL;
1725         }
1726
1727         return err;
1728 }
1729
1730 static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1731                          u64 in_param, u64 *out_param)
1732 {
1733         int srqn;
1734         int err;
1735
1736         switch (op) {
1737         case RES_OP_RESERVE_AND_MAP:
1738                 err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
1739                 if (err)
1740                         break;
1741
1742                 err = __mlx4_srq_alloc_icm(dev, &srqn);
1743                 if (err) {
1744                         mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1745                         break;
1746                 }
1747
1748                 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1749                 if (err) {
1750                         mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1751                         __mlx4_srq_free_icm(dev, srqn);
1752                         break;
1753                 }
1754
1755                 set_param_l(out_param, srqn);
1756                 break;
1757
1758         default:
1759                 err = -EINVAL;
1760         }
1761
1762         return err;
1763 }
1764
1765 static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
1766                                      u8 smac_index, u64 *mac)
1767 {
1768         struct mlx4_priv *priv = mlx4_priv(dev);
1769         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1770         struct list_head *mac_list =
1771                 &tracker->slave_list[slave].res_list[RES_MAC];
1772         struct mac_res *res, *tmp;
1773
1774         list_for_each_entry_safe(res, tmp, mac_list, list) {
1775                 if (res->smac_index == smac_index && res->port == (u8) port) {
1776                         *mac = res->mac;
1777                         return 0;
1778                 }
1779         }
1780         return -ENOENT;
1781 }
1782
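/*
 * Per-slave MAC bookkeeping: each registered (mac, port) pair gets a
 * mac_res entry on the slave's RES_MAC list.  Re-registering the same
 * MAC only bumps ref_count, so the quota is charged once per distinct
 * MAC.
 */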
1783 static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
1784 {
1785         struct mlx4_priv *priv = mlx4_priv(dev);
1786         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1787         struct list_head *mac_list =
1788                 &tracker->slave_list[slave].res_list[RES_MAC];
1789         struct mac_res *res, *tmp;
1790
1791         list_for_each_entry_safe(res, tmp, mac_list, list) {
1792                 if (res->mac == mac && res->port == (u8) port) {
1793                         /* mac found. update ref count */
1794                         ++res->ref_count;
1795                         return 0;
1796                 }
1797         }
1798
1799         if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
1800                 return -EINVAL;
1801         res = kzalloc(sizeof(*res), GFP_KERNEL);
1802         if (!res) {
1803                 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1804                 return -ENOMEM;
1805         }
1806         res->mac = mac;
1807         res->port = (u8) port;
1808         res->smac_index = smac_index;
1809         res->ref_count = 1;
1810         list_add_tail(&res->list,
1811                       &tracker->slave_list[slave].res_list[RES_MAC]);
1812         return 0;
1813 }
1814
1815 static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1816                                int port)
1817 {
1818         struct mlx4_priv *priv = mlx4_priv(dev);
1819         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1820         struct list_head *mac_list =
1821                 &tracker->slave_list[slave].res_list[RES_MAC];
1822         struct mac_res *res, *tmp;
1823
1824         list_for_each_entry_safe(res, tmp, mac_list, list) {
1825                 if (res->mac == mac && res->port == (u8) port) {
1826                         if (!--res->ref_count) {
1827                                 list_del(&res->list);
1828                                 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1829                                 kfree(res);
1830                         }
1831                         break;
1832                 }
1833         }
1834 }
1835
1836 static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1837 {
1838         struct mlx4_priv *priv = mlx4_priv(dev);
1839         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1840         struct list_head *mac_list =
1841                 &tracker->slave_list[slave].res_list[RES_MAC];
1842         struct mac_res *res, *tmp;
1843         int i;
1844
1845         list_for_each_entry_safe(res, tmp, mac_list, list) {
1846                 list_del(&res->list);
1847                 /* dereference the MAC as many times as the slave referenced it */
1848                 for (i = 0; i < res->ref_count; i++)
1849                         __mlx4_unregister_mac(dev, res->port, res->mac);
1850                 mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
1851                 kfree(res);
1852         }
1853 }
1854
1855 static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1856                          u64 in_param, u64 *out_param, int in_port)
1857 {
1858         int err = -EINVAL;
1859         int port;
1860         u64 mac;
1861         u8 smac_index;
1862
1863         if (op != RES_OP_RESERVE_AND_MAP)
1864                 return err;
1865
1866         port = !in_port ? get_param_l(out_param) : in_port;
1867         port = mlx4_slave_convert_port(dev, slave, port);
1869
1870         if (port < 0)
1871                 return -EINVAL;
1872         mac = in_param;
1873
1874         err = __mlx4_register_mac(dev, port, mac);
1875         if (err >= 0) {
1876                 smac_index = err;
1877                 set_param_l(out_param, err);
1878                 err = 0;
1879         }
1880
1881         if (!err) {
1882                 err = mac_add_to_slave(dev, slave, mac, port, smac_index);
1883                 if (err)
1884                         __mlx4_unregister_mac(dev, port, mac);
1885         }
1886         return err;
1887 }
1888
1889 static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1890                              int port, int vlan_index)
1891 {
1892         struct mlx4_priv *priv = mlx4_priv(dev);
1893         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1894         struct list_head *vlan_list =
1895                 &tracker->slave_list[slave].res_list[RES_VLAN];
1896         struct vlan_res *res, *tmp;
1897
1898         list_for_each_entry_safe(res, tmp, vlan_list, list) {
1899                 if (res->vlan == vlan && res->port == (u8) port) {
1900                         /* vlan found. update ref count */
1901                         ++res->ref_count;
1902                         return 0;
1903                 }
1904         }
1905
1906         if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
1907                 return -EINVAL;
1908         res = kzalloc(sizeof(*res), GFP_KERNEL);
1909         if (!res) {
1910                 mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
1911                 return -ENOMEM;
1912         }
1913         res->vlan = vlan;
1914         res->port = (u8) port;
1915         res->vlan_index = vlan_index;
1916         res->ref_count = 1;
1917         list_add_tail(&res->list,
1918                       &tracker->slave_list[slave].res_list[RES_VLAN]);
1919         return 0;
1920 }
1921
1923 static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1924                                 int port)
1925 {
1926         struct mlx4_priv *priv = mlx4_priv(dev);
1927         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1928         struct list_head *vlan_list =
1929                 &tracker->slave_list[slave].res_list[RES_VLAN];
1930         struct vlan_res *res, *tmp;
1931
1932         list_for_each_entry_safe(res, tmp, vlan_list, list) {
1933                 if (res->vlan == vlan && res->port == (u8) port) {
1934                         if (!--res->ref_count) {
1935                                 list_del(&res->list);
1936                                 mlx4_release_resource(dev, slave, RES_VLAN,
1937                                                       1, port);
1938                                 kfree(res);
1939                         }
1940                         break;
1941                 }
1942         }
1943 }
1944
1945 static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
1946 {
1947         struct mlx4_priv *priv = mlx4_priv(dev);
1948         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1949         struct list_head *vlan_list =
1950                 &tracker->slave_list[slave].res_list[RES_VLAN];
1951         struct vlan_res *res, *tmp;
1952         int i;
1953
1954         list_for_each_entry_safe(res, tmp, vlan_list, list) {
1955                 list_del(&res->list);
1956                 /* dereference the vlan as many times as the slave referenced it */
1957                 for (i = 0; i < res->ref_count; i++)
1958                         __mlx4_unregister_vlan(dev, res->port, res->vlan);
1959                 mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
1960                 kfree(res);
1961         }
1962 }
1963
1964 static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1965                           u64 in_param, u64 *out_param, int in_port)
1966 {
1967         struct mlx4_priv *priv = mlx4_priv(dev);
1968         struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
1969         int err;
1970         u16 vlan;
1971         int vlan_index;
1972         int port;
1973
1974         port = !in_port ? get_param_l(out_param) : in_port;
1975
1976         if (!port || op != RES_OP_RESERVE_AND_MAP)
1977                 return -EINVAL;
1978
1979         port = mlx4_slave_convert_port(dev, slave, port);
1981
1982         if (port < 0)
1983                 return -EINVAL;
1984         /* upstream kernels treated vlan reg/unreg via this old API as a NOP; continue doing so */
1985         if (!in_port && port > 0 && port <= dev->caps.num_ports) {
1986                 slave_state[slave].old_vlan_api = true;
1987                 return 0;
1988         }
1989
1990         vlan = (u16) in_param;
1991
1992         err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
1993         if (!err) {
1994                 set_param_l(out_param, (u32) vlan_index);
1995                 err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
1996                 if (err)
1997                         __mlx4_unregister_vlan(dev, port, vlan);
1998         }
1999         return err;
2000 }
2001
2002 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2003                              u64 in_param, u64 *out_param)
2004 {
2005         u32 index;
2006         int err;
2007
2008         if (op != RES_OP_RESERVE)
2009                 return -EINVAL;
2010
2011         err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
2012         if (err)
2013                 return err;
2014
2015         err = __mlx4_counter_alloc(dev, &index);
2016         if (err) {
2017                 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2018                 return err;
2019         }
2020
2021         err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2022         if (err) {
2023                 __mlx4_counter_free(dev, index);
2024                 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2025         } else {
2026                 set_param_l(out_param, index);
2027         }
2028
2029         return err;
2030 }
2031
2032 static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2033                            u64 in_param, u64 *out_param)
2034 {
2035         u32 xrcdn;
2036         int err;
2037
2038         if (op != RES_OP_RESERVE)
2039                 return -EINVAL;
2040
2041         err = __mlx4_xrcd_alloc(dev, &xrcdn);
2042         if (err)
2043                 return err;
2044
2045         err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2046         if (err)
2047                 __mlx4_xrcd_free(dev, xrcdn);
2048         else
2049                 set_param_l(out_param, xrcdn);
2050
2051         return err;
2052 }
2053
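/*
 * ALLOC_RES dispatch: the low byte of vhcr->in_modifier selects the
 * resource type, bits 8-15 carry the port for MAC/VLAN requests, and
 * vhcr->op_modifier selects the RES_OP_* sub-operation.
 */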
2054 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
2055                            struct mlx4_vhcr *vhcr,
2056                            struct mlx4_cmd_mailbox *inbox,
2057                            struct mlx4_cmd_mailbox *outbox,
2058                            struct mlx4_cmd_info *cmd)
2059 {
2060         int err;
2061         int alop = vhcr->op_modifier;
2062
2063         switch (vhcr->in_modifier & 0xFF) {
2064         case RES_QP:
2065                 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
2066                                    vhcr->in_param, &vhcr->out_param);
2067                 break;
2068
2069         case RES_MTT:
2070                 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2071                                     vhcr->in_param, &vhcr->out_param);
2072                 break;
2073
2074         case RES_MPT:
2075                 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2076                                     vhcr->in_param, &vhcr->out_param);
2077                 break;
2078
2079         case RES_CQ:
2080                 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2081                                    vhcr->in_param, &vhcr->out_param);
2082                 break;
2083
2084         case RES_SRQ:
2085                 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2086                                     vhcr->in_param, &vhcr->out_param);
2087                 break;
2088
2089         case RES_MAC:
2090                 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
2091                                     vhcr->in_param, &vhcr->out_param,
2092                                     (vhcr->in_modifier >> 8) & 0xFF);
2093                 break;
2094
2095         case RES_VLAN:
2096                 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
2097                                      vhcr->in_param, &vhcr->out_param,
2098                                      (vhcr->in_modifier >> 8) & 0xFF);
2099                 break;
2100
2101         case RES_COUNTER:
2102                 err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
2103                                         vhcr->in_param, &vhcr->out_param);
2104                 break;
2105
2106         case RES_XRCD:
2107                 err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
2108                                       vhcr->in_param, &vhcr->out_param);
2109                 break;
2110
2111         default:
2112                 err = -EINVAL;
2113                 break;
2114         }
2115
2116         return err;
2117 }
2118
2119 static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2120                        u64 in_param)
2121 {
2122         int err;
2123         int count;
2124         int base;
2125         int qpn;
2126
2127         switch (op) {
2128         case RES_OP_RESERVE:
2129                 base = get_param_l(&in_param) & 0x7fffff;
2130                 count = get_param_h(&in_param);
2131                 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
2132                 if (err)
2133                         break;
2134                 mlx4_release_resource(dev, slave, RES_QP, count, 0);
2135                 __mlx4_qp_release_range(dev, base, count);
2136                 break;
2137         case RES_OP_MAP_ICM:
2138                 qpn = get_param_l(&in_param) & 0x7fffff;
2139                 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
2140                                            NULL, 0);
2141                 if (err)
2142                         return err;
2143
2144                 if (!fw_reserved(dev, qpn))
2145                         __mlx4_qp_free_icm(dev, qpn);
2146
2147                 res_end_move(dev, slave, RES_QP, qpn);
2148
2149                 if (valid_reserved(dev, slave, qpn))
2150                         err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
2151                 break;
2152         default:
2153                 err = -EINVAL;
2154                 break;
2155         }
2156         return err;
2157 }
2158
2159 static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2160                         u64 in_param, u64 *out_param)
2161 {
2162         int err = -EINVAL;
2163         int base;
2164         int order;
2165
2166         if (op != RES_OP_RESERVE_AND_MAP)
2167                 return err;
2168
2169         base = get_param_l(&in_param);
2170         order = get_param_h(&in_param);
2171         err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
2172         if (!err) {
2173                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
2174                 __mlx4_free_mtt_range(dev, base, order);
2175         }
2176         return err;
2177 }
2178
2179 static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2180                         u64 in_param)
2181 {
2182         int err = -EINVAL;
2183         int index;
2184         int id;
2185         struct res_mpt *mpt;
2186
2187         switch (op) {
2188         case RES_OP_RESERVE:
2189                 index = get_param_l(&in_param);
2190                 id = index & mpt_mask(dev);
2191                 err = get_res(dev, slave, id, RES_MPT, &mpt);
2192                 if (err)
2193                         break;
2194                 index = mpt->key;
2195                 put_res(dev, slave, id, RES_MPT);
2196
2197                 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
2198                 if (err)
2199                         break;
2200                 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
2201                 __mlx4_mpt_release(dev, index);
2202                 break;
2203         case RES_OP_MAP_ICM:
2204                 index = get_param_l(&in_param);
2205                 id = index & mpt_mask(dev);
2206                 err = mr_res_start_move_to(dev, slave, id,
2207                                            RES_MPT_RESERVED, &mpt);
2208                 if (err)
2209                         return err;
2210
2211                 __mlx4_mpt_free_icm(dev, mpt->key);
2212                 res_end_move(dev, slave, RES_MPT, id);
2213                 return err;
2215         default:
2216                 err = -EINVAL;
2217                 break;
2218         }
2219         return err;
2220 }
2221
2222 static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2223                        u64 in_param, u64 *out_param)
2224 {
2225         int cqn;
2226         int err;
2227
2228         switch (op) {
2229         case RES_OP_RESERVE_AND_MAP:
2230                 cqn = get_param_l(&in_param);
2231                 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2232                 if (err)
2233                         break;
2234
2235                 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
2236                 __mlx4_cq_free_icm(dev, cqn);
2237                 break;
2238
2239         default:
2240                 err = -EINVAL;
2241                 break;
2242         }
2243
2244         return err;
2245 }
2246
2247 static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2248                         u64 in_param, u64 *out_param)
2249 {
2250         int srqn;
2251         int err;
2252
2253         switch (op) {
2254         case RES_OP_RESERVE_AND_MAP:
2255                 srqn = get_param_l(&in_param);
2256                 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2257                 if (err)
2258                         break;
2259
2260                 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
2261                 __mlx4_srq_free_icm(dev, srqn);
2262                 break;
2263
2264         default:
2265                 err = -EINVAL;
2266                 break;
2267         }
2268
2269         return err;
2270 }
2271
2272 static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2273                             u64 in_param, u64 *out_param, int in_port)
2274 {
2275         int port;
2276         int err = 0;
2277
2278         switch (op) {
2279         case RES_OP_RESERVE_AND_MAP:
2280                 port = !in_port ? get_param_l(out_param) : in_port;
2281                 port = mlx4_slave_convert_port(dev, slave, port);
2283
2284                 if (port < 0)
2285                         return -EINVAL;
2286                 mac_del_from_slave(dev, slave, in_param, port);
2287                 __mlx4_unregister_mac(dev, port, in_param);
2288                 break;
2289         default:
2290                 err = -EINVAL;
2291                 break;
2292         }
2293
2294         return err;
2296 }
2297
2298 static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2299                             u64 in_param, u64 *out_param, int port)
2300 {
2301         struct mlx4_priv *priv = mlx4_priv(dev);
2302         struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2303         int err = 0;
2304
2305         port = mlx4_slave_convert_port(dev, slave, port);
2307
2308         if (port < 0)
2309                 return -EINVAL;
2310         switch (op) {
2311         case RES_OP_RESERVE_AND_MAP:
2312                 if (slave_state[slave].old_vlan_api)
2313                         return 0;
2314                 if (!port)
2315                         return -EINVAL;
2316                 vlan_del_from_slave(dev, slave, in_param, port);
2317                 __mlx4_unregister_vlan(dev, port, in_param);
2318                 break;
2319         default:
2320                 err = -EINVAL;
2321                 break;
2322         }
2323
2324         return err;
2325 }
2326
2327 static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2328                             u64 in_param, u64 *out_param)
2329 {
2330         int index;
2331         int err;
2332
2333         if (op != RES_OP_RESERVE)
2334                 return -EINVAL;
2335
2336         index = get_param_l(&in_param);
2337         err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2338         if (err)
2339                 return err;
2340
2341         __mlx4_counter_free(dev, index);
2342         mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2343
2344         return err;
2345 }
2346
2347 static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2348                           u64 in_param, u64 *out_param)
2349 {
2350         int xrcdn;
2351         int err;
2352
2353         if (op != RES_OP_RESERVE)
2354                 return -EINVAL;
2355
2356         xrcdn = get_param_l(&in_param);
2357         err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2358         if (err)
2359                 return err;
2360
2361         __mlx4_xrcd_free(dev, xrcdn);
2362
2363         return err;
2364 }
2365
2366 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2367                           struct mlx4_vhcr *vhcr,
2368                           struct mlx4_cmd_mailbox *inbox,
2369                           struct mlx4_cmd_mailbox *outbox,
2370                           struct mlx4_cmd_info *cmd)
2371 {
2372         int err = -EINVAL;
2373         int alop = vhcr->op_modifier;
2374
2375         switch (vhcr->in_modifier & 0xFF) {
2376         case RES_QP:
2377                 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2378                                   vhcr->in_param);
2379                 break;
2380
2381         case RES_MTT:
2382                 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2383                                    vhcr->in_param, &vhcr->out_param);
2384                 break;
2385
2386         case RES_MPT:
2387                 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2388                                    vhcr->in_param);
2389                 break;
2390
2391         case RES_CQ:
2392                 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2393                                   vhcr->in_param, &vhcr->out_param);
2394                 break;
2395
2396         case RES_SRQ:
2397                 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2398                                    vhcr->in_param, &vhcr->out_param);
2399                 break;
2400
2401         case RES_MAC:
2402                 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
2403                                    vhcr->in_param, &vhcr->out_param,
2404                                    (vhcr->in_modifier >> 8) & 0xFF);
2405                 break;
2406
2407         case RES_VLAN:
2408                 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
2409                                     vhcr->in_param, &vhcr->out_param,
2410                                     (vhcr->in_modifier >> 8) & 0xFF);
2411                 break;
2412
2413         case RES_COUNTER:
2414                 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2415                                        vhcr->in_param, &vhcr->out_param);
2416                 break;
2417
2418         case RES_XRCD:
2419                 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2420                                      vhcr->in_param, &vhcr->out_param);
2421                 break;

2422         default:
2423                 break;
2424         }
2425         return err;
2426 }
2427
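/*
 * Helpers for picking fields out of hardware-format (big-endian)
 * MPT/QP/SRQ/EQ/CQ context entries passed in command mailboxes.
 */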
2428 /* ugly but other choices are uglier */
2429 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2430 {
2431         return (be32_to_cpu(mpt->flags) >> 9) & 1;
2432 }
2433
2434 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
2435 {
2436         return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
2437 }
2438
2439 static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2440 {
2441         return be32_to_cpu(mpt->mtt_sz);
2442 }
2443
2444 static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2445 {
2446         return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2447 }
2448
2449 static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2450 {
2451         return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2452 }
2453
2454 static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2455 {
2456         return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2457 }
2458
2459 static int mr_is_region(struct mlx4_mpt_entry *mpt)
2460 {
2461         return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2462 }
2463
2464 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
2465 {
2466         return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2467 }
2468
2469 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
2470 {
2471         return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2472 }
2473
2474 static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2475 {
2476         int page_shift = (qpc->log_page_size & 0x3f) + 12;
2477         int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2478         int log_sq_stride = qpc->sq_size_stride & 7;
2479         int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2480         int log_rq_stride = qpc->rq_size_stride & 7;
2481         int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2482         int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
2483         u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2484         int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
2485         int sq_size;
2486         int rq_size;
2487         int total_pages;
2488         int total_mem;
2489         int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2490
2491         sq_size = 1 << (log_sq_size + log_sq_stride + 4);
2492         rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2493         total_mem = sq_size + rq_size;
2494         total_pages =
2495                 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
2496                                    page_shift);
2497
2498         return total_pages;
2499 }
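
/*
 * Worked example with illustrative numbers (not from any spec): for
 * 4KB pages (log_page_size = 0, so page_shift = 12), log_sq_size = 6
 * and log_sq_stride = 2 give sq_size = 1 << (6 + 2 + 4) = 4096 bytes.
 * With no SRQ/RSS/XRC and the same RQ parameters, total_mem = 8192 and
 * page_offset = 0, so total_pages = roundup_pow_of_two(8192 >> 12) = 2.
 */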
2500
2501 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2502                            int size, struct res_mtt *mtt)
2503 {
2504         int res_start = mtt->com.res_id;
2505         int res_size = (1 << mtt->order);
2506
2507         if (start < res_start || start + size > res_start + res_size)
2508                 return -EPERM;
2509         return 0;
2510 }
2511
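/*
 * SW2HW_MPT moves an MPT into HW ownership on behalf of a slave.  The
 * mailbox is validated first: memory windows, FMRs with bind enabled,
 * and PDs that encode another slave's id are all rejected, and for
 * non-physical MRs the referenced MTT range must belong to the slave.
 */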
2512 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2513                            struct mlx4_vhcr *vhcr,
2514                            struct mlx4_cmd_mailbox *inbox,
2515                            struct mlx4_cmd_mailbox *outbox,
2516                            struct mlx4_cmd_info *cmd)
2517 {
2518         int err;
2519         int index = vhcr->in_modifier;
2520         struct res_mtt *mtt;
2521         struct res_mpt *mpt;
2522         int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2523         int phys;
2524         int id;
2525         u32 pd;
2526         int pd_slave;
2527
2528         id = index & mpt_mask(dev);
2529         err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2530         if (err)
2531                 return err;
2532
2533         /* Disable memory windows for VFs. */
2534         if (!mr_is_region(inbox->buf)) {
2535                 err = -EPERM;
2536                 goto ex_abort;
2537         }
2538
2539         /* The slave-id bits in the PD must be zero or encode this slave (id + 1). */
2540         pd = mr_get_pd(inbox->buf);
2541         pd_slave = (pd >> 17) & 0x7f;
2542         if (pd_slave != 0 && --pd_slave != slave) {
2543                 err = -EPERM;
2544                 goto ex_abort;
2545         }
2546
2547         if (mr_is_fmr(inbox->buf)) {
2548                 /* FMR and Bind Enable are forbidden in slave devices. */
2549                 if (mr_is_bind_enabled(inbox->buf)) {
2550                         err = -EPERM;
2551                         goto ex_abort;
2552                 }
2553                 /* FMR and Memory Windows are also forbidden. */
2554                 if (!mr_is_region(inbox->buf)) {
2555                         err = -EPERM;
2556                         goto ex_abort;
2557                 }
2558         }
2559
2560         phys = mr_phys_mpt(inbox->buf);
2561         if (!phys) {
2562                 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2563                 if (err)
2564                         goto ex_abort;
2565
2566                 err = check_mtt_range(dev, slave, mtt_base,
2567                                       mr_get_mtt_size(inbox->buf), mtt);
2568                 if (err)
2569                         goto ex_put;
2570
2571                 mpt->mtt = mtt;
2572         }
2573
2574         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2575         if (err)
2576                 goto ex_put;
2577
2578         if (!phys) {
2579                 atomic_inc(&mtt->ref_count);
2580                 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2581         }
2582
2583         res_end_move(dev, slave, RES_MPT, id);
2584         return 0;
2585
2586 ex_put:
2587         if (!phys)
2588                 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2589 ex_abort:
2590         res_abort_move(dev, slave, RES_MPT, id);
2591
2592         return err;
2593 }
2594
2595 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2596                            struct mlx4_vhcr *vhcr,
2597                            struct mlx4_cmd_mailbox *inbox,
2598                            struct mlx4_cmd_mailbox *outbox,
2599                            struct mlx4_cmd_info *cmd)
2600 {
2601         int err;
2602         int index = vhcr->in_modifier;
2603         struct res_mpt *mpt;
2604         int id;
2605
2606         id = index & mpt_mask(dev);
2607         err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2608         if (err)
2609                 return err;
2610
2611         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2612         if (err)
2613                 goto ex_abort;
2614
2615         if (mpt->mtt)
2616                 atomic_dec(&mpt->mtt->ref_count);
2617
2618         res_end_move(dev, slave, RES_MPT, id);
2619         return 0;
2620
2621 ex_abort:
2622         res_abort_move(dev, slave, RES_MPT, id);
2623
2624         return err;
2625 }
2626
2627 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2628                            struct mlx4_vhcr *vhcr,
2629                            struct mlx4_cmd_mailbox *inbox,
2630                            struct mlx4_cmd_mailbox *outbox,
2631                            struct mlx4_cmd_info *cmd)
2632 {
2633         int err;
2634         int index = vhcr->in_modifier;
2635         struct res_mpt *mpt;
2636         int id;
2637
2638         id = index & mpt_mask(dev);
2639         err = get_res(dev, slave, id, RES_MPT, &mpt);
2640         if (err)
2641                 return err;
2642
2643         if (mpt->com.from_state == RES_MPT_MAPPED) {
2644                 /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
2645                  * that, the VF must read the MPT. But since the MPT entry memory is not
2646                  * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
2647                  * entry contents. To guarantee that the MPT cannot be changed, the driver
2648                  * must perform HW2SW_MPT before this query and return the MPT entry to HW
2649                  * ownership following the change. The change here allows the VF to
2650                  * perform QUERY_MPT also when the entry is in SW ownership.
2651                  */
2652                 struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
2653                                         &mlx4_priv(dev)->mr_table.dmpt_table,
2654                                         mpt->key, NULL);
2655
2656                 if (!mpt_entry || !outbox->buf) {
2657                         err = -EINVAL;
2658                         goto out;
2659                 }
2660
2661                 memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));
2662
2663                 err = 0;
2664         } else if (mpt->com.from_state == RES_MPT_HW) {
2665                 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2666         } else {
2667                 err = -EBUSY;
2668                 goto out;
2669         }
2670
2672 out:
2673         put_res(dev, slave, id, RES_MPT);
2674         return err;
2675 }
2676
2677 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2678 {
2679         return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2680 }
2681
2682 static int qp_get_scqn(struct mlx4_qp_context *qpc)
2683 {
2684         return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2685 }
2686
2687 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2688 {
2689         return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2690 }
2691
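/*
 * Proxy/tunnel special QPs use paravirtualized qkeys; if
 * mlx4_get_parav_qkey() knows one for this QP number, patch it into
 * the QP context before the command reaches the hardware.
 */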
2692 static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2693                                   struct mlx4_qp_context *context)
2694 {
2695         u32 qpn = vhcr->in_modifier & 0xffffff;
2696         u32 qkey = 0;
2697
2698         if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2699                 return;
2700
2701         /* adjust qkey in qp context */
2702         context->qkey = cpu_to_be32(qkey);
2703 }
2704
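/*
 * RST2INIT is where a slave QP first binds to other tracked resources:
 * the MTT range, the receive/send CQs and (optionally) an SRQ are
 * looked up, range-checked where relevant, and their ref_counts are
 * bumped only after the firmware command succeeds.
 */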
2705 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2706                              struct mlx4_vhcr *vhcr,
2707                              struct mlx4_cmd_mailbox *inbox,
2708                              struct mlx4_cmd_mailbox *outbox,
2709                              struct mlx4_cmd_info *cmd)
2710 {
2711         int err;
2712         int qpn = vhcr->in_modifier & 0x7fffff;
2713         struct res_mtt *mtt;
2714         struct res_qp *qp;
2715         struct mlx4_qp_context *qpc = inbox->buf + 8;
2716         int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2717         int mtt_size = qp_get_mtt_size(qpc);
2718         struct res_cq *rcq;
2719         struct res_cq *scq;
2720         int rcqn = qp_get_rcqn(qpc);
2721         int scqn = qp_get_scqn(qpc);
2722         u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2723         int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2724         struct res_srq *srq;
2725         int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2726
2727         err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2728         if (err)
2729                 return err;
2730         qp->local_qpn = local_qpn;
2731         qp->sched_queue = 0;
2732         qp->param3 = 0;
2733         qp->vlan_control = 0;
2734         qp->fvl_rx = 0;
2735         qp->pri_path_fl = 0;
2736         qp->vlan_index = 0;
2737         qp->feup = 0;
2738         qp->qpc_flags = be32_to_cpu(qpc->flags);
2739
2740         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2741         if (err)
2742                 goto ex_abort;
2743
2744         err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2745         if (err)
2746                 goto ex_put_mtt;
2747
2748         err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2749         if (err)
2750                 goto ex_put_mtt;
2751
2752         if (scqn != rcqn) {
2753                 err = get_res(dev, slave, scqn, RES_CQ, &scq);
2754                 if (err)
2755                         goto ex_put_rcq;
2756         } else
2757                 scq = rcq;
2758
2759         if (use_srq) {
2760                 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2761                 if (err)
2762                         goto ex_put_scq;
2763         }
2764
2765         adjust_proxy_tun_qkey(dev, vhcr, qpc);
2766         update_pkey_index(dev, slave, inbox);
2767         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2768         if (err)
2769                 goto ex_put_srq;
2770         atomic_inc(&mtt->ref_count);
2771         qp->mtt = mtt;
2772         atomic_inc(&rcq->ref_count);
2773         qp->rcq = rcq;
2774         atomic_inc(&scq->ref_count);
2775         qp->scq = scq;
2776
2777         if (scqn != rcqn)
2778                 put_res(dev, slave, scqn, RES_CQ);
2779
2780         if (use_srq) {
2781                 atomic_inc(&srq->ref_count);
2782                 put_res(dev, slave, srqn, RES_SRQ);
2783                 qp->srq = srq;
2784         }
2785         put_res(dev, slave, rcqn, RES_CQ);
2786         put_res(dev, slave, mtt_base, RES_MTT);
2787         res_end_move(dev, slave, RES_QP, qpn);
2788
2789         return 0;
2790
2791 ex_put_srq:
2792         if (use_srq)
2793                 put_res(dev, slave, srqn, RES_SRQ);
2794 ex_put_scq:
2795         if (scqn != rcqn)
2796                 put_res(dev, slave, scqn, RES_CQ);
2797 ex_put_rcq:
2798         put_res(dev, slave, rcqn, RES_CQ);
2799 ex_put_mtt:
2800         put_res(dev, slave, mtt_base, RES_MTT);
2801 ex_abort:
2802         res_abort_move(dev, slave, RES_QP, qpn);
2803
2804         return err;
2805 }
2806
2807 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2808 {
2809         return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2810 }
2811
2812 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2813 {
2814         int log_eq_size = eqc->log_eq_size & 0x1f;
2815         int page_shift = (eqc->log_page_size & 0x3f) + 12;
2816
2817         if (log_eq_size + 5 < page_shift)
2818                 return 1;
2819
2820         return 1 << (log_eq_size + 5 - page_shift);
2821 }
2822
2823 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
2824 {
2825         return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2826 }
2827
2828 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2829 {
2830         int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
2831         int page_shift = (cqc->log_page_size & 0x3f) + 12;
2832
2833         if (log_cq_size + 5 < page_shift)
2834                 return 1;
2835
2836         return 1 << (log_cq_size + 5 - page_shift);
2837 }
2838
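/*
 * EQ numbers are unique only per slave, so the tracker id is
 * (slave << 8) | eqn.  Unlike most resources, the EQ is added to the
 * tracker here and removed again on any failure.
 */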
2839 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2840                           struct mlx4_vhcr *vhcr,
2841                           struct mlx4_cmd_mailbox *inbox,
2842                           struct mlx4_cmd_mailbox *outbox,
2843                           struct mlx4_cmd_info *cmd)
2844 {
2845         int err;
2846         int eqn = vhcr->in_modifier;
2847         int res_id = (slave << 8) | eqn;
2848         struct mlx4_eq_context *eqc = inbox->buf;
2849         int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
2850         int mtt_size = eq_get_mtt_size(eqc);
2851         struct res_eq *eq;
2852         struct res_mtt *mtt;
2853
2854         err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2855         if (err)
2856                 return err;
2857         err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
2858         if (err)
2859                 goto out_add;
2860
2861         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2862         if (err)
2863                 goto out_move;
2864
2865         err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2866         if (err)
2867                 goto out_put;
2868
2869         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2870         if (err)
2871                 goto out_put;
2872
2873         atomic_inc(&mtt->ref_count);
2874         eq->mtt = mtt;
2875         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2876         res_end_move(dev, slave, RES_EQ, res_id);
2877         return 0;
2878
2879 out_put:
2880         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2881 out_move:
2882         res_abort_move(dev, slave, RES_EQ, res_id);
2883 out_add:
2884         rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2885         return err;
2886 }
2887
2888 int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
2889                             struct mlx4_vhcr *vhcr,
2890                             struct mlx4_cmd_mailbox *inbox,
2891                             struct mlx4_cmd_mailbox *outbox,
2892                             struct mlx4_cmd_info *cmd)
2893 {
2894         int err;
2895         u8 get = vhcr->op_modifier;
2896
2897         if (get != 1)
2898                 return -EPERM;
2899
2900         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2901
2902         return err;
2903 }
2904
2905 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2906                               int len, struct res_mtt **res)
2907 {
2908         struct mlx4_priv *priv = mlx4_priv(dev);
2909         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2910         struct res_mtt *mtt;
2911         int err = -EINVAL;
2912
2913         spin_lock_irq(mlx4_tlock(dev));
2914         list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
2915                             com.list) {
2916                 if (!check_mtt_range(dev, slave, start, len, mtt)) {
2917                         *res = mtt;
2918                         mtt->com.from_state = mtt->com.state;
2919                         mtt->com.state = RES_MTT_BUSY;
2920                         err = 0;
2921                         break;
2922                 }
2923         }
2924         spin_unlock_irq(mlx4_tlock(dev));
2925
2926         return err;
2927 }
2928
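/*
 * Sanity-check a slave's QP state-transition mailbox before it is
 * forwarded to the firmware: the FPP bit and rate-limit settings are
 * reserved for the PF, mgid_index must fall within the slave's GID
 * allotment, and MLX (special) proxy QPs require SMI privileges.
 */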
2929 static int verify_qp_parameters(struct mlx4_dev *dev,
2930                                 struct mlx4_vhcr *vhcr,
2931                                 struct mlx4_cmd_mailbox *inbox,
2932                                 enum qp_transition transition, u8 slave)
2933 {
2934         u32                     qp_type;
2935         u32                     qpn;
2936         struct mlx4_qp_context  *qp_ctx;
2937         enum mlx4_qp_optpar     optpar;
2938         int port;
2939         int num_gids;
2940
2941         qp_ctx  = inbox->buf + 8;
2942         qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2943         optpar  = be32_to_cpu(*(__be32 *) inbox->buf);
2944
2945         if (slave != mlx4_master_func_num(dev)) {
2946                 qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
2947                 /* setting QP rate-limit is disallowed for VFs */
2948                 if (qp_ctx->rate_limit_params)
2949                         return -EPERM;
2950         }
2951
2952         switch (qp_type) {
2953         case MLX4_QP_ST_RC:
2954         case MLX4_QP_ST_XRC:
2955         case MLX4_QP_ST_UC:
2956                 switch (transition) {
2957                 case QP_TRANS_INIT2RTR:
2958                 case QP_TRANS_RTR2RTS:
2959                 case QP_TRANS_RTS2RTS:
2960                 case QP_TRANS_SQD2SQD:
2961                 case QP_TRANS_SQD2RTS:
2962                         if (slave != mlx4_master_func_num(dev)) {
2963                                 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
2964                                         port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
2965                                         if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
2966                                                 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
2967                                         else
2968                                                 num_gids = 1;
2969                                         if (qp_ctx->pri_path.mgid_index >= num_gids)
2970                                                 return -EINVAL;
2971                                 }
2972                                 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
2973                                         port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
2974                                         if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
2975                                                 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
2976                                         else
2977                                                 num_gids = 1;
2978                                         if (qp_ctx->alt_path.mgid_index >= num_gids)
2979                                                 return -EINVAL;
2980                                 }
                                }
2981                         break;
2982                 default:
2983                         break;
2984                 }
2985                 break;
2986
2987         case MLX4_QP_ST_MLX:
2988                 qpn = vhcr->in_modifier & 0x7fffff;
2989                 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
2990                 if (transition == QP_TRANS_INIT2RTR &&
2991                     slave != mlx4_master_func_num(dev) &&
2992                     mlx4_is_qp_reserved(dev, qpn) &&
2993                     !mlx4_vf_smi_enabled(dev, slave, port)) {
2994                         /* only enabled VFs may create MLX proxy QPs */
2995                         mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
2996                                  __func__, slave, port);
2997                         return -EPERM;
2998                 }
2999                 break;
3000
3001         default:
3002                 break;
3003         }
3004
3005         return 0;
3006 }
3007
3008 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
3009                            struct mlx4_vhcr *vhcr,
3010                            struct mlx4_cmd_mailbox *inbox,
3011                            struct mlx4_cmd_mailbox *outbox,
3012                            struct mlx4_cmd_info *cmd)
3013 {
3014         struct mlx4_mtt mtt;
3015         __be64 *page_list = inbox->buf;
3016         u64 *pg_list = (u64 *)page_list;
3017         int i;
3018         struct res_mtt *rmtt = NULL;
3019         int start = be64_to_cpu(page_list[0]);
3020         int npages = vhcr->in_modifier;
3021         int err;
3022
3023         err = get_containing_mtt(dev, slave, start, npages, &rmtt);
3024         if (err)
3025                 return err;
3026
3027         /* Call the SW implementation of write_mtt:
3028          * - Prepare a dummy mtt struct
3029          * - Translate inbox contents to simple addresses in host endianness */
3030         mtt.offset = 0;  /* TBD: offset handling is broken, but it is
3031                             unused here, so leave it as zero */
3032         mtt.order = 0;
3033         mtt.page_shift = 0;
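             /* Convert each page address to host endianness and mask off bit 0,
              * which the hardware uses as the MTT entry "present" flag (the SW
              * write_mtt path sets it again when the entries are written).
              */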
3034         for (i = 0; i < npages; ++i)
3035                 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
3036
3037         err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
3038                                ((u64 *)page_list + 2));
3039
3040         if (rmtt)
3041                 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
3042
3043         return err;
3044 }
3045
3046 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3047                           struct mlx4_vhcr *vhcr,
3048                           struct mlx4_cmd_mailbox *inbox,
3049                           struct mlx4_cmd_mailbox *outbox,
3050                           struct mlx4_cmd_info *cmd)
3051 {
3052         int eqn = vhcr->in_modifier;
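             /* EQ numbers are only unique per slave, so track them under a
              * composite id: slave number in the high byte, EQ number below it.
              */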
3053         int res_id = eqn | (slave << 8);
3054         struct res_eq *eq;
3055         int err;
3056
3057         err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
3058         if (err)
3059                 return err;
3060
3061         err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
3062         if (err)
3063                 goto ex_abort;
3064
3065         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3066         if (err)
3067                 goto ex_put;
3068
3069         atomic_dec(&eq->mtt->ref_count);
3070         put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3071         res_end_move(dev, slave, RES_EQ, res_id);
3072         rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3073
3074         return 0;
3075
3076 ex_put:
3077         put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3078 ex_abort:
3079         res_abort_move(dev, slave, RES_EQ, res_id);
3080
3081         return err;
3082 }
3083
3084 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3085 {
3086         struct mlx4_priv *priv = mlx4_priv(dev);
3087         struct mlx4_slave_event_eq_info *event_eq;
3088         struct mlx4_cmd_mailbox *mailbox;
3089         u32 in_modifier = 0;
3090         int err;
3091         int res_id;
3092         struct res_eq *req;
3093
3094         if (!priv->mfunc.master.slave_state)
3095                 return -EINVAL;
3096
3097         /* check for slave valid, slave not PF, and slave active */
3098         if (slave < 0 || slave > dev->persist->num_vfs ||
3099             slave == dev->caps.function ||
3100             !priv->mfunc.master.slave_state[slave].active)
3101                 return 0;
3102
3103         event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
3104
3105         /* Create the event only if the slave is registered */
3106         if (event_eq->eqn < 0)
3107                 return 0;
3108
3109         mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3110         res_id = (slave << 8) | event_eq->eqn;
3111         err = get_res(dev, slave, res_id, RES_EQ, &req);
3112         if (err)
3113                 goto unlock;
3114
3115         if (req->com.from_state != RES_EQ_HW) {
3116                 err = -EINVAL;
3117                 goto put;
3118         }
3119
3120         mailbox = mlx4_alloc_cmd_mailbox(dev);
3121         if (IS_ERR(mailbox)) {
3122                 err = PTR_ERR(mailbox);
3123                 goto put;
3124         }
3125
3126         if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3127                 ++event_eq->token;
3128                 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3129         }
3130
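             /* Forward only the first 28 bytes of the EQE; assuming the usual
              * 32-byte struct mlx4_eqe layout, this leaves out the trailing
              * slave_id/owner bytes, which are not taken from the caller.
              */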
3131         memcpy(mailbox->buf, (u8 *) eqe, 28);
3132
3133         in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
3134
3135         err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3136                        MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3137                        MLX4_CMD_NATIVE);
3138
3139         put_res(dev, slave, res_id, RES_EQ);
3140         mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3141         mlx4_free_cmd_mailbox(dev, mailbox);
3142         return err;
3143
3144 put:
3145         put_res(dev, slave, res_id, RES_EQ);
3146
3147 unlock:
3148         mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3149         return err;
3150 }
3151
3152 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3153                           struct mlx4_vhcr *vhcr,
3154                           struct mlx4_cmd_mailbox *inbox,
3155                           struct mlx4_cmd_mailbox *outbox,
3156                           struct mlx4_cmd_info *cmd)
3157 {
3158         int eqn = vhcr->in_modifier;
3159         int res_id = eqn | (slave << 8);
3160         struct res_eq *eq;
3161         int err;
3162
3163         err = get_res(dev, slave, res_id, RES_EQ, &eq);
3164         if (err)
3165                 return err;
3166
3167         if (eq->com.from_state != RES_EQ_HW) {
3168                 err = -EINVAL;
3169                 goto ex_put;
3170         }
3171
3172         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3173
3174 ex_put:
3175         put_res(dev, slave, res_id, RES_EQ);
3176         return err;
3177 }
3178
3179 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3180                           struct mlx4_vhcr *vhcr,
3181                           struct mlx4_cmd_mailbox *inbox,
3182                           struct mlx4_cmd_mailbox *outbox,
3183                           struct mlx4_cmd_info *cmd)
3184 {
3185         int err;
3186         int cqn = vhcr->in_modifier;
3187         struct mlx4_cq_context *cqc = inbox->buf;
3188         int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3189         struct res_cq *cq;
3190         struct res_mtt *mtt;
3191
3192         err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3193         if (err)
3194                 return err;
3195         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3196         if (err)
3197                 goto out_move;
3198         err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3199         if (err)
3200                 goto out_put;
3201         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3202         if (err)
3203                 goto out_put;
3204         atomic_inc(&mtt->ref_count);
3205         cq->mtt = mtt;
3206         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3207         res_end_move(dev, slave, RES_CQ, cqn);
3208         return 0;
3209
3210 out_put:
3211         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3212 out_move:
3213         res_abort_move(dev, slave, RES_CQ, cqn);
3214         return err;
3215 }
3216
3217 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3218                           struct mlx4_vhcr *vhcr,
3219                           struct mlx4_cmd_mailbox *inbox,
3220                           struct mlx4_cmd_mailbox *outbox,
3221                           struct mlx4_cmd_info *cmd)
3222 {
3223         int err;
3224         int cqn = vhcr->in_modifier;
3225         struct res_cq *cq;
3226
3227         err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3228         if (err)
3229                 return err;
3230         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3231         if (err)
3232                 goto out_move;
3233         atomic_dec(&cq->mtt->ref_count);
3234         res_end_move(dev, slave, RES_CQ, cqn);
3235         return 0;
3236
3237 out_move:
3238         res_abort_move(dev, slave, RES_CQ, cqn);
3239         return err;
3240 }
3241
3242 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3243                           struct mlx4_vhcr *vhcr,
3244                           struct mlx4_cmd_mailbox *inbox,
3245                           struct mlx4_cmd_mailbox *outbox,
3246                           struct mlx4_cmd_info *cmd)
3247 {
3248         int cqn = vhcr->in_modifier;
3249         struct res_cq *cq;
3250         int err;
3251
3252         err = get_res(dev, slave, cqn, RES_CQ, &cq);
3253         if (err)
3254                 return err;
3255
3256         if (cq->com.from_state != RES_CQ_HW) {
3257                 err = -EINVAL;
                     goto ex_put;
             }
3258
3259         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3260 ex_put:
3261         put_res(dev, slave, cqn, RES_CQ);
3262
3263         return err;
3264 }
3265
3266 static int handle_resize(struct mlx4_dev *dev, int slave,
3267                          struct mlx4_vhcr *vhcr,
3268                          struct mlx4_cmd_mailbox *inbox,
3269                          struct mlx4_cmd_mailbox *outbox,
3270                          struct mlx4_cmd_info *cmd,
3271                          struct res_cq *cq)
3272 {
3273         int err;
3274         struct res_mtt *orig_mtt;
3275         struct res_mtt *mtt;
3276         struct mlx4_cq_context *cqc = inbox->buf;
3277         int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3278
3279         err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3280         if (err)
3281                 return err;
3282
3283         if (orig_mtt != cq->mtt) {
3284                 err = -EINVAL;
3285                 goto ex_put;
3286         }
3287
3288         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3289         if (err)
3290                 goto ex_put;
3291
3292         err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3293         if (err)
3294                 goto ex_put1;
3295         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3296         if (err)
3297                 goto ex_put1;
3298         atomic_dec(&orig_mtt->ref_count);
3299         put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3300         atomic_inc(&mtt->ref_count);
3301         cq->mtt = mtt;
3302         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3303         return 0;
3304
3305 ex_put1:
3306         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3307 ex_put:
3308         put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3309
3310         return err;
3312 }
3313
3314 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3315                            struct mlx4_vhcr *vhcr,
3316                            struct mlx4_cmd_mailbox *inbox,
3317                            struct mlx4_cmd_mailbox *outbox,
3318                            struct mlx4_cmd_info *cmd)
3319 {
3320         int cqn = vhcr->in_modifier;
3321         struct res_cq *cq;
3322         int err;
3323
3324         err = get_res(dev, slave, cqn, RES_CQ, &cq);
3325         if (err)
3326                 return err;
3327
3328         if (cq->com.from_state != RES_CQ_HW) {
3329                 err = -EINVAL;
                     goto ex_put;
             }
3330
3331         if (vhcr->op_modifier == 0) {
3332                 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3333                 goto ex_put;
3334         }
3335
3336         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3337 ex_put:
3338         put_res(dev, slave, cqn, RES_CQ);
3339
3340         return err;
3341 }
3342
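     /* Number of MTT entries spanned by the SRQ buffer: (1 << log_srq_size)
      * strides of (1 << (log_rq_stride + 4)) bytes each.  For example,
      * log_srq_size = 10, log_rq_stride = 2 and a 4K page (page_shift = 12)
      * give 1 << (10 + 2 + 4 - 12) = 16 entries; a buffer smaller than one
      * page still needs a single entry.
      */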
3343 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3344 {
3345         int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3346         int log_rq_stride = srqc->logstride & 7;
3347         int page_shift = (srqc->log_page_size & 0x3f) + 12;
3348
3349         if (log_srq_size + log_rq_stride + 4 < page_shift)
3350                 return 1;
3351
3352         return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3353 }
3354
3355 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3356                            struct mlx4_vhcr *vhcr,
3357                            struct mlx4_cmd_mailbox *inbox,
3358                            struct mlx4_cmd_mailbox *outbox,
3359                            struct mlx4_cmd_info *cmd)
3360 {
3361         int err;
3362         int srqn = vhcr->in_modifier;
3363         struct res_mtt *mtt;
3364         struct res_srq *srq;
3365         struct mlx4_srq_context *srqc = inbox->buf;
3366         int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3367
3368         if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3369                 return -EINVAL;
3370
3371         err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3372         if (err)
3373                 return err;
3374         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3375         if (err)
3376                 goto ex_abort;
3377         err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3378                               mtt);
3379         if (err)
3380                 goto ex_put_mtt;
3381
3382         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3383         if (err)
3384                 goto ex_put_mtt;
3385
3386         atomic_inc(&mtt->ref_count);
3387         srq->mtt = mtt;
3388         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3389         res_end_move(dev, slave, RES_SRQ, srqn);
3390         return 0;
3391
3392 ex_put_mtt:
3393         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3394 ex_abort:
3395         res_abort_move(dev, slave, RES_SRQ, srqn);
3396
3397         return err;
3398 }
3399
3400 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3401                            struct mlx4_vhcr *vhcr,
3402                            struct mlx4_cmd_mailbox *inbox,
3403                            struct mlx4_cmd_mailbox *outbox,
3404                            struct mlx4_cmd_info *cmd)
3405 {
3406         int err;
3407         int srqn = vhcr->in_modifier;
3408         struct res_srq *srq;
3409
3410         err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3411         if (err)
3412                 return err;
3413         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3414         if (err)
3415                 goto ex_abort;
3416         atomic_dec(&srq->mtt->ref_count);
3417         if (srq->cq)
3418                 atomic_dec(&srq->cq->ref_count);
3419         res_end_move(dev, slave, RES_SRQ, srqn);
3420
3421         return 0;
3422
3423 ex_abort:
3424         res_abort_move(dev, slave, RES_SRQ, srqn);
3425
3426         return err;
3427 }
3428
3429 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3430                            struct mlx4_vhcr *vhcr,
3431                            struct mlx4_cmd_mailbox *inbox,
3432                            struct mlx4_cmd_mailbox *outbox,
3433                            struct mlx4_cmd_info *cmd)
3434 {
3435         int err;
3436         int srqn = vhcr->in_modifier;
3437         struct res_srq *srq;
3438
3439         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3440         if (err)
3441                 return err;
3442         if (srq->com.from_state != RES_SRQ_HW) {
3443                 err = -EBUSY;
3444                 goto out;
3445         }
3446         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3447 out:
3448         put_res(dev, slave, srqn, RES_SRQ);
3449         return err;
3450 }
3451
3452 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3453                          struct mlx4_vhcr *vhcr,
3454                          struct mlx4_cmd_mailbox *inbox,
3455                          struct mlx4_cmd_mailbox *outbox,
3456                          struct mlx4_cmd_info *cmd)
3457 {
3458         int err;
3459         int srqn = vhcr->in_modifier;
3460         struct res_srq *srq;
3461
3462         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3463         if (err)
3464                 return err;
3465
3466         if (srq->com.from_state != RES_SRQ_HW) {
3467                 err = -EBUSY;
3468                 goto out;
3469         }
3470
3471         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3472 out:
3473         put_res(dev, slave, srqn, RES_SRQ);
3474         return err;
3475 }
3476
3477 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3478                         struct mlx4_vhcr *vhcr,
3479                         struct mlx4_cmd_mailbox *inbox,
3480                         struct mlx4_cmd_mailbox *outbox,
3481                         struct mlx4_cmd_info *cmd)
3482 {
3483         int err;
3484         int qpn = vhcr->in_modifier & 0x7fffff;
3485         struct res_qp *qp;
3486
3487         err = get_res(dev, slave, qpn, RES_QP, &qp);
3488         if (err)
3489                 return err;
3490         if (qp->com.from_state != RES_QP_HW) {
3491                 err = -EBUSY;
3492                 goto out;
3493         }
3494
3495         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3496 out:
3497         put_res(dev, slave, qpn, RES_QP);
3498         return err;
3499 }
3500
3501 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3502                               struct mlx4_vhcr *vhcr,
3503                               struct mlx4_cmd_mailbox *inbox,
3504                               struct mlx4_cmd_mailbox *outbox,
3505                               struct mlx4_cmd_info *cmd)
3506 {
3507         struct mlx4_qp_context *context = inbox->buf + 8;

3508         adjust_proxy_tun_qkey(dev, vhcr, context);
3509         update_pkey_index(dev, slave, inbox);
3510         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3511 }
3512
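     /* Bit 6 of sched_queue selects the (0-based) physical port.  A slave may
      * see a virtualized port number, so rewrite the bit with the real port
      * returned by mlx4_slave_convert_port(); the primary path is updated when
      * its optpar bit is set or the port is Ethernet, the alternate path when
      * its optpar bit is set.
      */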
3513 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3514                                   struct mlx4_qp_context *qpc,
3515                                   struct mlx4_cmd_mailbox *inbox)
3516 {
3517         enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3518         u8 pri_sched_queue;
3519         int port = mlx4_slave_convert_port(
3520                    dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3521
3522         if (port < 0)
3523                 return -EINVAL;
3524
3525         pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3526                           ((port & 1) << 6);
3527
3528         if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH ||
3529             mlx4_is_eth(dev, port + 1)) {
3530                 qpc->pri_path.sched_queue = pri_sched_queue;
3531         }
3532
3533         if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3534                 port = mlx4_slave_convert_port(
3535                                 dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3536                                 + 1) - 1;
3537                 if (port < 0)
3538                         return -EINVAL;
3539                 qpc->alt_path.sched_queue =
3540                         (qpc->alt_path.sched_queue & ~(1 << 6)) |
3541                         (port & 1) << 6;
3542         }
3543         return 0;
3544 }
3545
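     /* For Ethernet (RoCE) QPs, the smac index written into the QP context
      * must resolve to a MAC address that is actually registered to this
      * slave on that port.
      */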
3546 static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3547                                 struct mlx4_qp_context *qpc,
3548                                 struct mlx4_cmd_mailbox *inbox)
3549 {
3550         u64 mac;
3551         int port;
3552         u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3553         u8 sched = *(u8 *)(inbox->buf + 64);
3554         u8 smac_ix;
3555
3556         port = (sched >> 6 & 1) + 1;
3557         if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3558                 smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3559                 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3560                         return -ENOENT;
3561         }
3562         return 0;
3563 }
3564
3565 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3566                              struct mlx4_vhcr *vhcr,
3567                              struct mlx4_cmd_mailbox *inbox,
3568                              struct mlx4_cmd_mailbox *outbox,
3569                              struct mlx4_cmd_info *cmd)
3570 {
3571         int err;
3572         struct mlx4_qp_context *qpc = inbox->buf + 8;
3573         int qpn = vhcr->in_modifier & 0x7fffff;
3574         struct res_qp *qp;
3575         u8 orig_sched_queue;
3576         __be32  orig_param3 = qpc->param3;
3577         u8 orig_vlan_control = qpc->pri_path.vlan_control;
3578         u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3579         u8 orig_pri_path_fl = qpc->pri_path.fl;
3580         u8 orig_vlan_index = qpc->pri_path.vlan_index;
3581         u8 orig_feup = qpc->pri_path.feup;
3582
3583         err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3584         if (err)
3585                 return err;
3586         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
3587         if (err)
3588                 return err;
3589
3590         if (roce_verify_mac(dev, slave, qpc, inbox))
3591                 return -EINVAL;
3592
3593         update_pkey_index(dev, slave, inbox);
3594         update_gid(dev, inbox, (u8)slave);
3595         adjust_proxy_tun_qkey(dev, vhcr, qpc);
3596         orig_sched_queue = qpc->pri_path.sched_queue;
3597         err = update_vport_qp_param(dev, inbox, slave, qpn);
3598         if (err)
3599                 return err;
3600
3601         err = get_res(dev, slave, qpn, RES_QP, &qp);
3602         if (err)
3603                 return err;
3604         if (qp->com.from_state != RES_QP_HW) {
3605                 err = -EBUSY;
3606                 goto out;
3607         }
3608
3609         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3610 out:
3611         /* if no error, save sched queue value passed in by VF. This is
3612          * essentially the QOS value provided by the VF. This will be useful
3613          * if we allow dynamic changes from VST back to VGT
3614          */
3615         if (!err) {
3616                 qp->sched_queue = orig_sched_queue;
3617                 qp->param3      = orig_param3;
3618                 qp->vlan_control = orig_vlan_control;
3619                 qp->fvl_rx      = orig_fvl_rx;
3620                 qp->pri_path_fl = orig_pri_path_fl;
3621                 qp->vlan_index  = orig_vlan_index;
3622                 qp->feup        = orig_feup;
3623         }
3624         put_res(dev, slave, qpn, RES_QP);
3625         return err;
3626 }
3627
3628 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3629                             struct mlx4_vhcr *vhcr,
3630                             struct mlx4_cmd_mailbox *inbox,
3631                             struct mlx4_cmd_mailbox *outbox,
3632                             struct mlx4_cmd_info *cmd)
3633 {
3634         int err;
3635         struct mlx4_qp_context *context = inbox->buf + 8;
3636
3637         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3638         if (err)
3639                 return err;
3640         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
3641         if (err)
3642                 return err;
3643
3644         update_pkey_index(dev, slave, inbox);
3645         update_gid(dev, inbox, (u8)slave);
3646         adjust_proxy_tun_qkey(dev, vhcr, context);
3647         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3648 }
3649
3650 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3651                             struct mlx4_vhcr *vhcr,
3652                             struct mlx4_cmd_mailbox *inbox,
3653                             struct mlx4_cmd_mailbox *outbox,
3654                             struct mlx4_cmd_info *cmd)
3655 {
3656         int err;
3657         struct mlx4_qp_context *context = inbox->buf + 8;
3658
3659         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3660         if (err)
3661                 return err;
3662         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
3663         if (err)
3664                 return err;
3665
3666         update_pkey_index(dev, slave, inbox);
3667         update_gid(dev, inbox, (u8)slave);
3668         adjust_proxy_tun_qkey(dev, vhcr, context);
3669         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3670 }
3671
3673 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3674                               struct mlx4_vhcr *vhcr,
3675                               struct mlx4_cmd_mailbox *inbox,
3676                               struct mlx4_cmd_mailbox *outbox,
3677                               struct mlx4_cmd_info *cmd)
3678 {
3679         struct mlx4_qp_context *context = inbox->buf + 8;
3680         int err = adjust_qp_sched_queue(dev, slave, context, inbox);

3681         if (err)
3682                 return err;
3683         adjust_proxy_tun_qkey(dev, vhcr, context);
3684         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3685 }
3686
3687 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3688                             struct mlx4_vhcr *vhcr,
3689                             struct mlx4_cmd_mailbox *inbox,
3690                             struct mlx4_cmd_mailbox *outbox,
3691                             struct mlx4_cmd_info *cmd)
3692 {
3693         int err;
3694         struct mlx4_qp_context *context = inbox->buf + 8;
3695
3696         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3697         if (err)
3698                 return err;
3699         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
3700         if (err)
3701                 return err;
3702
3703         adjust_proxy_tun_qkey(dev, vhcr, context);
3704         update_gid(dev, inbox, (u8)slave);
3705         update_pkey_index(dev, slave, inbox);
3706         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3707 }
3708
3709 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3710                             struct mlx4_vhcr *vhcr,
3711                             struct mlx4_cmd_mailbox *inbox,
3712                             struct mlx4_cmd_mailbox *outbox,
3713                             struct mlx4_cmd_info *cmd)
3714 {
3715         int err;
3716         struct mlx4_qp_context *context = inbox->buf + 8;
3717
3718         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3719         if (err)
3720                 return err;
3721         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
3722         if (err)
3723                 return err;
3724
3725         adjust_proxy_tun_qkey(dev, vhcr, context);
3726         update_gid(dev, inbox, (u8)slave);
3727         update_pkey_index(dev, slave, inbox);
3728         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3729 }
3730
3731 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3732                          struct mlx4_vhcr *vhcr,
3733                          struct mlx4_cmd_mailbox *inbox,
3734                          struct mlx4_cmd_mailbox *outbox,
3735                          struct mlx4_cmd_info *cmd)
3736 {
3737         int err;
3738         int qpn = vhcr->in_modifier & 0x7fffff;
3739         struct res_qp *qp;
3740
3741         err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3742         if (err)
3743                 return err;
3744         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3745         if (err)
3746                 goto ex_abort;
3747
3748         atomic_dec(&qp->mtt->ref_count);
3749         atomic_dec(&qp->rcq->ref_count);
3750         atomic_dec(&qp->scq->ref_count);
3751         if (qp->srq)
3752                 atomic_dec(&qp->srq->ref_count);
3753         res_end_move(dev, slave, RES_QP, qpn);
3754         return 0;
3755
3756 ex_abort:
3757         res_abort_move(dev, slave, RES_QP, qpn);
3758
3759         return err;
3760 }
3761
3762 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3763                                 struct res_qp *rqp, u8 *gid)
3764 {
3765         struct res_gid *res;
3766
3767         list_for_each_entry(res, &rqp->mcg_list, list) {
3768                 if (!memcmp(res->gid, gid, 16))
3769                         return res;
3770         }
3771         return NULL;
3772 }
3773
3774 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3775                        u8 *gid, enum mlx4_protocol prot,
3776                        enum mlx4_steer_type steer, u64 reg_id)
3777 {
3778         struct res_gid *res;
3779         int err;
3780
3781         res = kzalloc(sizeof(*res), GFP_KERNEL);
3782         if (!res)
3783                 return -ENOMEM;
3784
3785         spin_lock_irq(&rqp->mcg_spl);
3786         if (find_gid(dev, slave, rqp, gid)) {
3787                 kfree(res);
3788                 err = -EEXIST;
3789         } else {
3790                 memcpy(res->gid, gid, 16);
3791                 res->prot = prot;
3792                 res->steer = steer;
3793                 res->reg_id = reg_id;
3794                 list_add_tail(&res->list, &rqp->mcg_list);
3795                 err = 0;
3796         }
3797         spin_unlock_irq(&rqp->mcg_spl);
3798
3799         return err;
3800 }
3801
3802 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3803                        u8 *gid, enum mlx4_protocol prot,
3804                        enum mlx4_steer_type steer, u64 *reg_id)
3805 {
3806         struct res_gid *res;
3807         int err;
3808
3809         spin_lock_irq(&rqp->mcg_spl);
3810         res = find_gid(dev, slave, rqp, gid);
3811         if (!res || res->prot != prot || res->steer != steer) {
3812                 err = -EINVAL;
3813         } else {
3814                 *reg_id = res->reg_id;
3815                 list_del(&res->list);
3816                 kfree(res);
3817                 err = 0;
3818         }
3819         spin_unlock_irq(&rqp->mcg_spl);
3820
3821         return err;
3822 }
3823
3824 static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
3825                      u8 gid[16], int block_loopback, enum mlx4_protocol prot,
3826                      enum mlx4_steer_type type, u64 *reg_id)
3827 {
3828         switch (dev->caps.steering_mode) {
3829         case MLX4_STEERING_MODE_DEVICE_MANAGED: {
3830                 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3831                 if (port < 0)
3832                         return port;
3833                 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
3834                                                 block_loopback, prot,
3835                                                 reg_id);
3836         }
3837         case MLX4_STEERING_MODE_B0:
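                     /* in B0 steering, byte 5 of the multicast gid carries the
                      * port number; rewrite it to the slave's real port
                      */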
3838                 if (prot == MLX4_PROT_ETH) {
3839                         int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3840                         if (port < 0)
3841                                 return port;
3842                         gid[5] = port;
3843                 }
3844                 return mlx4_qp_attach_common(dev, qp, gid,
3845                                             block_loopback, prot, type);
3846         default:
3847                 return -EINVAL;
3848         }
3849 }
3850
3851 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
3852                      u8 gid[16], enum mlx4_protocol prot,
3853                      enum mlx4_steer_type type, u64 reg_id)
3854 {
3855         switch (dev->caps.steering_mode) {
3856         case MLX4_STEERING_MODE_DEVICE_MANAGED:
3857                 return mlx4_flow_detach(dev, reg_id);
3858         case MLX4_STEERING_MODE_B0:
3859                 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
3860         default:
3861                 return -EINVAL;
3862         }
3863 }
3864
3865 static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
3866                             u8 *gid, enum mlx4_protocol prot)
3867 {
3868         int real_port;
3869
3870         if (prot != MLX4_PROT_ETH)
3871                 return 0;
3872
3873         if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
3874             dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
3875                 real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
3876                 if (real_port < 0)
3877                         return -EINVAL;
3878                 gid[5] = real_port;
3879         }
3880
3881         return 0;
3882 }
3883
3884 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3885                                struct mlx4_vhcr *vhcr,
3886                                struct mlx4_cmd_mailbox *inbox,
3887                                struct mlx4_cmd_mailbox *outbox,
3888                                struct mlx4_cmd_info *cmd)
3889 {
3890         struct mlx4_qp qp; /* dummy for calling attach/detach */
3891         u8 *gid = inbox->buf;
3892         enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
3893         int err;
3894         int qpn;
3895         struct res_qp *rqp;
3896         u64 reg_id = 0;
3897         int attach = vhcr->op_modifier;
3898         int block_loopback = vhcr->in_modifier >> 31;
3899         u8 steer_type_mask = 2;
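             /* bit 1 of gid[7] encodes the steering type, cf. mlx4_steer_type */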
3900         enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
3901
3902         qpn = vhcr->in_modifier & 0xffffff;
3903         err = get_res(dev, slave, qpn, RES_QP, &rqp);
3904         if (err)
3905                 return err;
3906
3907         qp.qpn = qpn;
3908         if (attach) {
3909                 err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
3910                                 type, &reg_id);
3911                 if (err) {
3912                         pr_err("Failed to attach rule to qp 0x%x\n", qpn);
3913                         goto ex_put;
3914                 }
3915                 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
3916                 if (err)
3917                         goto ex_detach;
3918         } else {
3919                 err = mlx4_adjust_port(dev, slave, gid, prot);
3920                 if (err)
3921                         goto ex_put;
3922
3923                 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
3924                 if (err)
3925                         goto ex_put;
3926
3927                 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
3928                 if (err)
3929                         pr_err("Failed to detach rule from qp 0x%x, reg_id = 0x%llx\n",
3930                                qpn, reg_id);
3931         }
3932         put_res(dev, slave, qpn, RES_QP);
3933         return err;
3934
3935 ex_detach:
3936         qp_detach(dev, &qp, gid, prot, type, reg_id);
3937 ex_put:
3938         put_res(dev, slave, qpn, RES_QP);
3939         return err;
3940 }
3941
3942 /*
3943  * MAC validation for Flow Steering rules.
3944  * A VF may attach rules only with a MAC address that is assigned to it.
3945  */
3946 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3947                                    struct list_head *rlist)
3948 {
3949         struct mac_res *res, *tmp;
3950         __be64 be_mac;
3951
3952         /* make sure it isn't a multicast or broadcast mac */
3953         if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3954             !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3955                 list_for_each_entry_safe(res, tmp, rlist, list) {
3956                         be_mac = cpu_to_be64(res->mac << 16);
3957                         if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
3958                                 return 0;
3959                 }
3960                 pr_err("MAC %pM doesn't belong to VF %d, steering rule rejected\n",
3961                        eth_header->eth.dst_mac, slave);
3962                 return -EINVAL;
3963         }
3964         return 0;
3965 }
3966
3967 /*
3968  * In case of a missing eth header, prepend an eth header with a MAC address
3969  * assigned to the VF.
3970  */
3971 static int add_eth_header(struct mlx4_dev *dev, int slave,
3972                           struct mlx4_cmd_mailbox *inbox,
3973                           struct list_head *rlist, int header_id)
3974 {
3975         struct mac_res *res, *tmp;
3976         u8 port;
3977         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3978         struct mlx4_net_trans_rule_hw_eth *eth_header;
3979         struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3980         struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3981         __be64 be_mac = 0;
3982         __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3983
3984         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3985         port = ctrl->port;
3986         eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3987
3988         /* Clear a space in the inbox for eth header */
3989         switch (header_id) {
3990         case MLX4_NET_TRANS_RULE_ID_IPV4:
3991                 ip_header =
3992                         (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3993                 memmove(ip_header, eth_header,
3994                         sizeof(*ip_header) + sizeof(*l4_header));
3995                 break;
3996         case MLX4_NET_TRANS_RULE_ID_TCP:
3997         case MLX4_NET_TRANS_RULE_ID_UDP:
3998                 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
3999                             (eth_header + 1);
4000                 memmove(l4_header, eth_header, sizeof(*l4_header));
4001                 break;
4002         default:
4003                 return -EINVAL;
4004         }
4005         list_for_each_entry_safe(res, tmp, rlist, list) {
4006                 if (port == res->port) {
4007                         be_mac = cpu_to_be64(res->mac << 16);
4008                         break;
4009                 }
4010         }
4011         if (!be_mac) {
4012                 pr_err("Failed adding eth header to FS rule, can't find matching MAC for port %d\n",
4013                        port);
4014                 return -EINVAL;
4015         }
4016
4017         memset(eth_header, 0, sizeof(*eth_header));
4018         eth_header->size = sizeof(*eth_header) >> 2;
4019         eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
4020         memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
4021         memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
4022
4023         return 0;
4025 }
4026
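     /* VFs may use UPDATE_QP only to change the MAC index in the primary
      * address path; any other mask bit is rejected with -EPERM below.
      */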
4027 #define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
4028 int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
4029                            struct mlx4_vhcr *vhcr,
4030                            struct mlx4_cmd_mailbox *inbox,
4031                            struct mlx4_cmd_mailbox *outbox,
4032                            struct mlx4_cmd_info *cmd_info)
4033 {
4034         int err;
4035         u32 qpn = vhcr->in_modifier & 0xffffff;
4036         struct res_qp *rqp;
4037         u64 mac;
4038         unsigned port;
4039         u64 pri_addr_path_mask;
4040         struct mlx4_update_qp_context *cmd;
4041         int smac_index;
4042
4043         cmd = (struct mlx4_update_qp_context *)inbox->buf;
4044
4045         pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
4046         if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
4047             (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
4048                 return -EPERM;
4049
4050         /* Just change the smac for the QP */
4051         err = get_res(dev, slave, qpn, RES_QP, &rqp);
4052         if (err) {
4053                 mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
4054                 return err;
4055         }
4056
4057         port = (rqp->sched_queue >> 6 & 1) + 1;
4058
4059         if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
4060                 smac_index = cmd->qp_context.pri_path.grh_mylmc;
4061                 err = mac_find_smac_ix_in_slave(dev, slave, port,
4062                                                 smac_index, &mac);
4063
4064                 if (err) {
4065                         mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
4066                                  qpn, smac_index);
4067                         goto err_mac;
4068                 }
4069         }
4070
4071         err = mlx4_cmd(dev, inbox->dma,
4072                        vhcr->in_modifier, 0,
4073                        MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
4074                        MLX4_CMD_NATIVE);
4075         if (err)
4076                 mlx4_err(dev, "Failed to update qpn 0x%x, command failed\n", qpn);
4079
4080 err_mac:
4081         put_res(dev, slave, qpn, RES_QP);
4082         return err;
4083 }
4084
4085 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4086                                          struct mlx4_vhcr *vhcr,
4087                                          struct mlx4_cmd_mailbox *inbox,
4088                                          struct mlx4_cmd_mailbox *outbox,
4089                                          struct mlx4_cmd_info *cmd)
4090 {
4092         struct mlx4_priv *priv = mlx4_priv(dev);
4093         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4094         struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
4095         int err;
4096         int qpn;
4097         struct res_qp *rqp;
4098         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4099         struct _rule_hw  *rule_header;
4100         int header_id;
4101
4102         if (dev->caps.steering_mode !=
4103             MLX4_STEERING_MODE_DEVICE_MANAGED)
4104                 return -EOPNOTSUPP;
4105
4106         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
             /* validate the converted port in an int before storing it in the
              * u8 ctrl->port field; a negative errno would otherwise be
              * truncated to a large positive value and escape this check
              */
4107         err = mlx4_slave_convert_port(dev, slave, ctrl->port);
4108         if (err <= 0)
4109                 return -EINVAL;
             ctrl->port = err;
4110         qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
4111         err = get_res(dev, slave, qpn, RES_QP, &rqp);
4112         if (err) {
4113                 pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
4114                 return err;
4115         }
4116         rule_header = (struct _rule_hw *)(ctrl + 1);
4117         header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4118
4119         switch (header_id) {
4120         case MLX4_NET_TRANS_RULE_ID_ETH:
4121                 if (validate_eth_header_mac(slave, rule_header, rlist)) {
4122                         err = -EINVAL;
4123                         goto err_put;
4124                 }
4125                 break;
4126         case MLX4_NET_TRANS_RULE_ID_IB:
4127                 break;
4128         case MLX4_NET_TRANS_RULE_ID_IPV4:
4129         case MLX4_NET_TRANS_RULE_ID_TCP:
4130         case MLX4_NET_TRANS_RULE_ID_UDP:
4131                 pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
4132                 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4133                         err = -EINVAL;
4134                         goto err_put;
4135                 }
4136                 vhcr->in_modifier +=
4137                         sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4138                 break;
4139         default:
4140                 pr_err("Corrupted mailbox\n");
4141                 err = -EINVAL;
4142                 goto err_put;
4143         }
4144
4145         err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4146                            vhcr->in_modifier, 0,
4147                            MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4148                            MLX4_CMD_NATIVE);
4149         if (err)
4150                 goto err_put;
4151
4152         err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
4153         if (err) {
4154                 mlx4_err(dev, "Failed to add flow steering resources\n");
4155                 /* detach rule*/
4156                 mlx4_cmd(dev, vhcr->out_param, 0, 0,
4157                          MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4158                          MLX4_CMD_NATIVE);
4159                 goto err_put;
4160         }
4161         atomic_inc(&rqp->ref_count);
4162 err_put:
4163         put_res(dev, slave, qpn, RES_QP);
4164         return err;
4165 }
4166
4167 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4168                                          struct mlx4_vhcr *vhcr,
4169                                          struct mlx4_cmd_mailbox *inbox,
4170                                          struct mlx4_cmd_mailbox *outbox,
4171                                          struct mlx4_cmd_info *cmd)
4172 {
4173         int err;
4174         struct res_qp *rqp;
4175         struct res_fs_rule *rrule;
4176
4177         if (dev->caps.steering_mode !=
4178             MLX4_STEERING_MODE_DEVICE_MANAGED)
4179                 return -EOPNOTSUPP;
4180
4181         err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4182         if (err)
4183                 return err;
4184         /* Release the rule from busy state before removal */
4185         put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4186         err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
4187         if (err)
4188                 return err;
4189
4190         err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4191         if (err) {
4192                 mlx4_err(dev, "Failed to remove flow steering resources\n");
4193                 goto out;
4194         }
4195
4196         err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4197                        MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4198                        MLX4_CMD_NATIVE);
4199         if (!err)
4200                 atomic_dec(&rqp->ref_count);
4201 out:
4202         put_res(dev, slave, rrule->qpn, RES_QP);
4203         return err;
4204 }
4205
4206 enum {
4207         BUSY_MAX_RETRIES = 10
4208 };
4209
4210 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4211                                struct mlx4_vhcr *vhcr,
4212                                struct mlx4_cmd_mailbox *inbox,
4213                                struct mlx4_cmd_mailbox *outbox,
4214                                struct mlx4_cmd_info *cmd)
4215 {
4216         int err;
4217         int index = vhcr->in_modifier & 0xffff;
4218
4219         err = get_res(dev, slave, index, RES_COUNTER, NULL);
4220         if (err)
4221                 return err;
4222
4223         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4224         put_res(dev, slave, index, RES_COUNTER);
4225         return err;
4226 }
4227
4228 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4229 {
4230         struct res_gid *rgid;
4231         struct res_gid *tmp;
4232         struct mlx4_qp qp; /* dummy for calling attach/detach */
4233
4234         list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
4235                 switch (dev->caps.steering_mode) {
4236                 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4237                         mlx4_flow_detach(dev, rgid->reg_id);
4238                         break;
4239                 case MLX4_STEERING_MODE_B0:
4240                         qp.qpn = rqp->local_qpn;
4241                         (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4242                                                      rgid->prot, rgid->steer);
4243                         break;
4244                 }
4245                 list_del(&rgid->list);
4246                 kfree(rgid);
4247         }
4248 }
4249
4250 static int _move_all_busy(struct mlx4_dev *dev, int slave,
4251                           enum mlx4_resource type, int print)
4252 {
4253         struct mlx4_priv *priv = mlx4_priv(dev);
4254         struct mlx4_resource_tracker *tracker =
4255                 &priv->mfunc.master.res_tracker;
4256         struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4257         struct res_common *r;
4258         struct res_common *tmp;
4259         int busy;
4260
4261         busy = 0;
4262         spin_lock_irq(mlx4_tlock(dev));
4263         list_for_each_entry_safe(r, tmp, rlist, list) {
4264                 if (r->owner == slave) {
4265                         if (!r->removing) {
4266                                 if (r->state == RES_ANY_BUSY) {
4267                                         if (print)
4268                                                 mlx4_dbg(dev,
4269                                                          "%s id 0x%llx is busy\n",
4270                                                           resource_str(type),
4271                                                           r->res_id);
4272                                         ++busy;
4273                                 } else {
4274                                         r->from_state = r->state;
4275                                         r->state = RES_ANY_BUSY;
4276                                         r->removing = 1;
4277                                 }
4278                         }
4279                 }
4280         }
4281         spin_unlock_irq(mlx4_tlock(dev));
4282
4283         return busy;
4284 }
4285
4286 static int move_all_busy(struct mlx4_dev *dev, int slave,
4287                          enum mlx4_resource type)
4288 {
4289         unsigned long begin;
4290         int busy;
4291
4292         begin = jiffies;
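             /* retry for up to five seconds, giving resource holders a chance
              * to release anything still marked busy before reporting it
              */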
4293         do {
4294                 busy = _move_all_busy(dev, slave, type, 0);
4295                 if (time_after(jiffies, begin + 5 * HZ))
4296                         break;
4297                 if (busy)
4298                         cond_resched();
4299         } while (busy);
4300
4301         if (busy)
4302                 busy = _move_all_busy(dev, slave, type, 1);
4303
4304         return busy;
4305 }

4306 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4307 {
4308         struct mlx4_priv *priv = mlx4_priv(dev);
4309         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4310         struct list_head *qp_list =
4311                 &tracker->slave_list[slave].res_list[RES_QP];
4312         struct res_qp *qp;
4313         struct res_qp *tmp;
4314         int state;
4315         u64 in_param;
4316         int qpn;
4317         int err;
4318
4319         err = move_all_busy(dev, slave, RES_QP);
4320         if (err)
4321                 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4322                           slave);
4323
4324         spin_lock_irq(mlx4_tlock(dev));
4325         list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4326                 spin_unlock_irq(mlx4_tlock(dev));
4327                 if (qp->com.owner == slave) {
4328                         qpn = qp->com.res_id;
4329                         detach_qp(dev, slave, qp);
4330                         state = qp->com.from_state;
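                             /* unwind the QP backwards through its lifecycle
                              * (HW -> MAPPED -> RESERVED), undoing each stage
                              * in reverse order of acquisition
                              */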
4331                         while (state != 0) {
4332                                 switch (state) {
4333                                 case RES_QP_RESERVED:
4334                                         spin_lock_irq(mlx4_tlock(dev));
4335                                         rb_erase(&qp->com.node,
4336                                                  &tracker->res_tree[RES_QP]);
4337                                         list_del(&qp->com.list);
4338                                         spin_unlock_irq(mlx4_tlock(dev));
4339                                         if (!valid_reserved(dev, slave, qpn)) {
4340                                                 __mlx4_qp_release_range(dev, qpn, 1);
4341                                                 mlx4_release_resource(dev, slave,
4342                                                                       RES_QP, 1, 0);
4343                                         }
4344                                         kfree(qp);
4345                                         state = 0;
4346                                         break;
4347                                 case RES_QP_MAPPED:
4348                                         if (!valid_reserved(dev, slave, qpn))
4349                                                 __mlx4_qp_free_icm(dev, qpn);
4350                                         state = RES_QP_RESERVED;
4351                                         break;
4352                                 case RES_QP_HW:
4353                                         in_param = slave;
4354                                         err = mlx4_cmd(dev, in_param,
4355                                                        qp->local_qpn, 2,
4356                                                        MLX4_CMD_2RST_QP,
4357                                                        MLX4_CMD_TIME_CLASS_A,
4358                                                        MLX4_CMD_NATIVE);
4359                                         if (err)
4360                                                 mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4361                                                          slave, qp->local_qpn);
4362                                         atomic_dec(&qp->rcq->ref_count);
4363                                         atomic_dec(&qp->scq->ref_count);
4364                                         atomic_dec(&qp->mtt->ref_count);
4365                                         if (qp->srq)
4366                                                 atomic_dec(&qp->srq->ref_count);
4367                                         state = RES_QP_MAPPED;
4368                                         break;
4369                                 default:
4370                                         state = 0;
4371                                 }
4372                         }
4373                 }
4374                 spin_lock_irq(mlx4_tlock(dev));
4375         }
4376         spin_unlock_irq(mlx4_tlock(dev));
4377 }
4378
4379 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4380 {
4381         struct mlx4_priv *priv = mlx4_priv(dev);
4382         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4383         struct list_head *srq_list =
4384                 &tracker->slave_list[slave].res_list[RES_SRQ];
4385         struct res_srq *srq;
4386         struct res_srq *tmp;
4387         int state;
4388         u64 in_param;
4390         int srqn;
4391         int err;
4392
4393         err = move_all_busy(dev, slave, RES_SRQ);
4394         if (err)
4395                 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4396                           slave);
4397
4398         spin_lock_irq(mlx4_tlock(dev));
4399         list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4400                 spin_unlock_irq(mlx4_tlock(dev));
4401                 if (srq->com.owner == slave) {
4402                         srqn = srq->com.res_id;
4403                         state = srq->com.from_state;
4404                         while (state != 0) {
4405                                 switch (state) {
4406                                 case RES_SRQ_ALLOCATED:
4407                                         __mlx4_srq_free_icm(dev, srqn);
4408                                         spin_lock_irq(mlx4_tlock(dev));
4409                                         rb_erase(&srq->com.node,
4410                                                  &tracker->res_tree[RES_SRQ]);
4411                                         list_del(&srq->com.list);
4412                                         spin_unlock_irq(mlx4_tlock(dev));
4413                                         mlx4_release_resource(dev, slave,
4414                                                               RES_SRQ, 1, 0);
4415                                         kfree(srq);
4416                                         state = 0;
4417                                         break;
4418
4419                                 case RES_SRQ_HW:
4420                                         in_param = slave;
4421                                         err = mlx4_cmd(dev, in_param, srqn, 1,
4422                                                        MLX4_CMD_HW2SW_SRQ,
4423                                                        MLX4_CMD_TIME_CLASS_A,
4424                                                        MLX4_CMD_NATIVE);
4425                                         if (err)
4426                                                 mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4427                                                          slave, srqn);
4428
4429                                         atomic_dec(&srq->mtt->ref_count);
4430                                         if (srq->cq)
4431                                                 atomic_dec(&srq->cq->ref_count);
4432                                         state = RES_SRQ_ALLOCATED;
4433                                         break;
4434
4435                                 default:
4436                                         state = 0;
4437                                 }
4438                         }
4439                 }
4440                 spin_lock_irq(mlx4_tlock(dev));
4441         }
4442         spin_unlock_irq(mlx4_tlock(dev));
4443 }
4444
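/* Unwind all CQs owned by a slave. A CQ that is still referenced
 * (ref_count != 0) is skipped; the QP and SRQ teardown runs first and
 * is expected to have dropped those references.
 */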
static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *cq_list =
		&tracker->slave_list[slave].res_list[RES_CQ];
	struct res_cq *cq;
	struct res_cq *tmp;
	int state;
	u64 in_param;
	int cqn;
	int err;

	err = move_all_busy(dev, slave, RES_CQ);
	if (err)
		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
			cqn = cq->com.res_id;
			state = cq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_CQ_ALLOCATED:
					__mlx4_cq_free_icm(dev, cqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&cq->com.node,
						 &tracker->res_tree[RES_CQ]);
					list_del(&cq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_CQ, 1, 0);
					kfree(cq);
					state = 0;
					break;

				case RES_CQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, cqn, 1,
						       MLX4_CMD_HW2SW_CQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
							 slave, cqn);
					atomic_dec(&cq->mtt->ref_count);
					state = RES_CQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

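/* Unwind all MPTs (memory regions) owned by a slave, from HW ownership
 * (HW2SW_MPT) through ICM unmapping down to releasing the reserved key
 * and the tracker entry.
 */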
static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mpt_list =
		&tracker->slave_list[slave].res_list[RES_MPT];
	struct res_mpt *mpt;
	struct res_mpt *tmp;
	int state;
	u64 in_param;
	int mptn;
	int err;

	err = move_all_busy(dev, slave, RES_MPT);
	if (err)
		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mpt->com.owner == slave) {
			mptn = mpt->com.res_id;
			state = mpt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MPT_RESERVED:
					__mlx4_mpt_release(dev, mpt->key);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mpt->com.node,
						 &tracker->res_tree[RES_MPT]);
					list_del(&mpt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_MPT, 1, 0);
					kfree(mpt);
					state = 0;
					break;

				case RES_MPT_MAPPED:
					__mlx4_mpt_free_icm(dev, mpt->key);
					state = RES_MPT_RESERVED;
					break;

				case RES_MPT_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, mptn, 0,
						       MLX4_CMD_HW2SW_MPT,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
							 slave, mptn);
					if (mpt->mtt)
						atomic_dec(&mpt->mtt->ref_count);
					state = RES_MPT_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

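/* Unwind all MTT ranges owned by a slave. MTTs have a single tracked
 * state, so this just frees each range (1 << order entries) and its
 * tracker entry.
 */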
static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *mtt_list =
		&tracker->slave_list[slave].res_list[RES_MTT];
	struct res_mtt *mtt;
	struct res_mtt *tmp;
	int state;
	int base;
	int err;

	err = move_all_busy(dev, slave, RES_MTT);
	if (err)
		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mtt->com.owner == slave) {
			base = mtt->com.res_id;
			state = mtt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MTT_ALLOCATED:
					__mlx4_free_mtt_range(dev, base,
							      mtt->order);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mtt->com.node,
						 &tracker->res_tree[RES_MTT]);
					list_del(&mtt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave, RES_MTT,
							      1 << mtt->order, 0);
					kfree(mtt);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

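/* Detach any flow steering rules still installed by a slave and free
 * their tracker entries.
 */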
static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *fs_rule_list =
		&tracker->slave_list[slave].res_list[RES_FS_RULE];
	struct res_fs_rule *fs_rule;
	struct res_fs_rule *tmp;
	int state;
	u64 base;
	int err;

	err = move_all_busy(dev, slave, RES_FS_RULE);
	if (err)
		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (fs_rule->com.owner == slave) {
			base = fs_rule->com.res_id;
			state = fs_rule->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_FS_RULE_ALLOCATED:
					/* detach rule */
					err = mlx4_cmd(dev, base, 0, 0,
						       MLX4_QP_FLOW_STEERING_DETACH,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);

					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&fs_rule->com.node,
						 &tracker->res_tree[RES_FS_RULE]);
					list_del(&fs_rule->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(fs_rule);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

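/* Unwind all EQs owned by a slave: an EQ in HW ownership is moved back
 * to SW with HW2SW_EQ and its MTT reference dropped before the tracker
 * entry is freed.
 */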
static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *eq_list =
		&tracker->slave_list[slave].res_list[RES_EQ];
	struct res_eq *eq;
	struct res_eq *tmp;
	int err;
	int state;
	int eqn;

	err = move_all_busy(dev, slave, RES_EQ);
	if (err)
		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (eq->com.owner == slave) {
			eqn = eq->com.res_id;
			state = eq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_EQ_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&eq->com.node,
						 &tracker->res_tree[RES_EQ]);
					list_del(&eq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(eq);
					state = 0;
					break;

				case RES_EQ_HW:
					err = mlx4_cmd(dev, slave, eqn & 0xff,
						       1, MLX4_CMD_HW2SW_EQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eq %d to SW ownership\n",
							 slave, eqn);
					atomic_dec(&eq->mtt->ref_count);
					state = RES_EQ_RESERVED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

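/* Counters have no HW/SW ownership state to unwind; just delete each
 * tracker entry, free the counter index and return the quota.
 */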
static void rem_slave_counters(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *counter_list =
		&tracker->slave_list[slave].res_list[RES_COUNTER];
	struct res_counter *counter;
	struct res_counter *tmp;
	int err;
	int index;

	err = move_all_busy(dev, slave, RES_COUNTER);
	if (err)
		mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
		if (counter->com.owner == slave) {
			index = counter->com.res_id;
			rb_erase(&counter->com.node,
				 &tracker->res_tree[RES_COUNTER]);
			list_del(&counter->com.list);
			kfree(counter);
			__mlx4_counter_free(dev, index);
			mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

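/* Free all XRC domains owned by a slave and delete their tracker
 * entries.
 */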
static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *xrcdn_list =
		&tracker->slave_list[slave].res_list[RES_XRCD];
	struct res_xrcdn *xrcd;
	struct res_xrcdn *tmp;
	int err;
	int xrcdn;

	err = move_all_busy(dev, slave, RES_XRCD);
	if (err)
		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
		if (xrcd->com.owner == slave) {
			xrcdn = xrcd->com.res_id;
			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
			list_del(&xrcd->com.list);
			kfree(xrcd);
			__mlx4_xrcd_free(dev, xrcdn);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

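/* Tear down every resource the given slave owns, in dependency order:
 * flow rules and QPs first, then the SRQs, CQs and MRs they reference,
 * and MTTs only after all of their users are gone.
 */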
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_reset_roce_gids(dev, slave);
	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
	rem_slave_vlans(dev, slave);
	rem_slave_macs(dev, slave);
	rem_slave_fs_rule(dev, slave);
	rem_slave_qps(dev, slave);
	rem_slave_srqs(dev, slave);
	rem_slave_cqs(dev, slave);
	rem_slave_mrs(dev, slave);
	rem_slave_eqs(dev, slave);
	rem_slave_mtts(dev, slave);
	rem_slave_counters(dev, slave);
	rem_slave_xrcdns(dev, slave);
	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}

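/* Work handler for an "immediate" VF VLAN change (VST): push the new
 * VLAN and QoS settings into every eligible QP of the slave via
 * UPDATE_QP. QPs not yet in HW ownership, reserved (FW) QPs and RSS
 * QPs are skipped, as are QPs on other ports.
 */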
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
{
	struct mlx4_vf_immed_vlan_work *work =
		container_of(_work, struct mlx4_vf_immed_vlan_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_update_qp_context *upd_context;
	struct mlx4_dev *dev = &work->priv->dev;
	struct mlx4_resource_tracker *tracker =
		&work->priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[work->slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	u64 qp_path_mask_vlan_ctrl =
		       ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));

	u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));

	int err;
	int port, errors = 0;
	u8 vlan_control;

	if (mlx4_is_slave(dev)) {
		mlx4_warn(dev, "Trying to update-qp in slave %d\n",
			  work->slave);
		goto out;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto out;
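	/* Choose the VLAN filtering policy for the VF:
	 * - link disabled: block all tagged and untagged traffic,
	 * - VLAN 0: block tagged frames in both directions,
	 * - otherwise (real VST VLAN): block tagged TX and
	 *   untagged/priority-tagged RX.
	 */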
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else if (!work->vlan_id)
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;

	upd_context = mailbox->buf;
	upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == work->slave) {
			if (qp->com.from_state != RES_QP_HW ||
			    !qp->sched_queue ||  /* no INIT2RTR trans yet */
			    mlx4_is_qp_reserved(dev, qp->local_qpn) ||
			    qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			port = (qp->sched_queue >> 6 & 1) + 1;
			if (port != work->port) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
				upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
			else
				upd_context->primary_addr_path_mask =
					cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
			if (work->vlan_id == MLX4_VGT) {
				upd_context->qp_context.param3 = qp->param3;
				upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
				upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
				upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
				upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
				upd_context->qp_context.pri_path.feup = qp->feup;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue;
			} else {
				upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
				upd_context->qp_context.pri_path.vlan_control = vlan_control;
				upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
				upd_context->qp_context.pri_path.fvl_rx =
					qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.fl =
					qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
				upd_context->qp_context.pri_path.feup =
					qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue & 0xC7;
				upd_context->qp_context.pri_path.sched_queue |=
					((work->qos & 0x7) << 3);
			}

			err = mlx4_cmd(dev, mailbox->dma,
				       qp->local_qpn & 0xffffff,
				       0, MLX4_CMD_UPDATE_QP,
				       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
			if (err) {
				mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
					  work->slave, port, qp->local_qpn, err);
				errors++;
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
	mlx4_free_cmd_mailbox(dev, mailbox);

	if (errors)
		mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
			 errors, work->slave, work->port);

	/* unregister previous vlan_id if needed and we had no errors
	 * while updating the QPs
	 */
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
	    NO_INDX != work->orig_vlan_ix)
		__mlx4_unregister_vlan(&work->priv->dev, work->port,
				       work->orig_vlan_id);
out:
	kfree(work);
}