net/mlx4_core: Fix HW2SW_EQ to conform to the firmware spec
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"

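/* Bit 63 marks a 64-bit MAC table entry as valid. */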
#define MLX4_MAC_VALID          (1ull << 63)

struct mac_res {
        struct list_head list;
        u64 mac;
        int ref_count;
        u8 smac_index;
        u8 port;
};

struct vlan_res {
        struct list_head list;
        u16 vlan;
        int ref_count;
        int vlan_index;
        u8 port;
};

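/*
 * Common header embedded in every tracked resource.  res_id keys the
 * per-type rb-tree, owner is the slave (function) that owns the
 * resource, and state/from_state/to_state implement the busy-marking
 * protocol used by get_res()/put_res() and the *_res_start_move_to()
 * helpers below.
 */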
struct res_common {
        struct list_head        list;
        struct rb_node          node;
        u64                     res_id;
        int                     owner;
        int                     state;
        int                     from_state;
        int                     to_state;
        int                     removing;
};

enum {
        RES_ANY_BUSY = 1
};

struct res_gid {
        struct list_head        list;
        u8                      gid[16];
        enum mlx4_protocol      prot;
        enum mlx4_steer_type    steer;
        u64                     reg_id;
};

enum res_qp_states {
        RES_QP_BUSY = RES_ANY_BUSY,

        /* QP number was allocated */
        RES_QP_RESERVED,

        /* ICM memory for QP context was mapped */
        RES_QP_MAPPED,

        /* QP is in hw ownership */
        RES_QP_HW
};

struct res_qp {
        struct res_common       com;
        struct res_mtt         *mtt;
        struct res_cq          *rcq;
        struct res_cq          *scq;
        struct res_srq         *srq;
        struct list_head        mcg_list;
        spinlock_t              mcg_spl;
        int                     local_qpn;
        atomic_t                ref_count;
        u32                     qpc_flags;
        /* saved qp params before VST enforcement in order to restore on VGT */
        u8                      sched_queue;
        __be32                  param3;
        u8                      vlan_control;
        u8                      fvl_rx;
        u8                      pri_path_fl;
        u8                      vlan_index;
        u8                      feup;
};

enum res_mtt_states {
        RES_MTT_BUSY = RES_ANY_BUSY,
        RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
        switch (state) {
        case RES_MTT_BUSY: return "RES_MTT_BUSY";
        case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
        default: return "Unknown";
        }
}

struct res_mtt {
        struct res_common       com;
        int                     order;
        atomic_t                ref_count;
};

enum res_mpt_states {
        RES_MPT_BUSY = RES_ANY_BUSY,
        RES_MPT_RESERVED,
        RES_MPT_MAPPED,
        RES_MPT_HW,
};

struct res_mpt {
        struct res_common       com;
        struct res_mtt         *mtt;
        int                     key;
};

enum res_eq_states {
        RES_EQ_BUSY = RES_ANY_BUSY,
        RES_EQ_RESERVED,
        RES_EQ_HW,
};

struct res_eq {
        struct res_common       com;
        struct res_mtt         *mtt;
};

enum res_cq_states {
        RES_CQ_BUSY = RES_ANY_BUSY,
        RES_CQ_ALLOCATED,
        RES_CQ_HW,
};

struct res_cq {
        struct res_common       com;
        struct res_mtt         *mtt;
        atomic_t                ref_count;
};

enum res_srq_states {
        RES_SRQ_BUSY = RES_ANY_BUSY,
        RES_SRQ_ALLOCATED,
        RES_SRQ_HW,
};

struct res_srq {
        struct res_common       com;
        struct res_mtt         *mtt;
        struct res_cq          *cq;
        atomic_t                ref_count;
};

enum res_counter_states {
        RES_COUNTER_BUSY = RES_ANY_BUSY,
        RES_COUNTER_ALLOCATED,
};

struct res_counter {
        struct res_common       com;
        int                     port;
};

enum res_xrcdn_states {
        RES_XRCD_BUSY = RES_ANY_BUSY,
        RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
        struct res_common       com;
        int                     port;
};

enum res_fs_rule_states {
        RES_FS_RULE_BUSY = RES_ANY_BUSY,
        RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
        struct res_common       com;
        int                     qpn;
};

static int mlx4_is_eth(struct mlx4_dev *dev, int port)
{
        return dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB ? 0 : 1;
}

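/* Look up a resource by id in the per-type rb-tree; the caller must
 * hold the tracker lock (mlx4_tlock).
 */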
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
        struct rb_node *node = root->rb_node;

        while (node) {
                struct res_common *res = container_of(node, struct res_common,
                                                      node);

                if (res_id < res->res_id)
                        node = node->rb_left;
                else if (res_id > res->res_id)
                        node = node->rb_right;
                else
                        return res;
        }
        return NULL;
}

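/* Insert a resource into the per-type rb-tree, keyed by res_id.
 * Returns -EEXIST if an entry with the same id is already present.
 */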
static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
        struct rb_node **new = &(root->rb_node), *parent = NULL;

        /* Figure out where to put new node */
        while (*new) {
                struct res_common *this = container_of(*new, struct res_common,
                                                       node);

                parent = *new;
                if (res->res_id < this->res_id)
                        new = &((*new)->rb_left);
                else if (res->res_id > this->res_id)
                        new = &((*new)->rb_right);
                else
                        return -EEXIST;
        }

        /* Add new node and rebalance tree. */
        rb_link_node(&res->node, parent, new);
        rb_insert_color(&res->node, root);

        return 0;
}

enum qp_transition {
        QP_TRANS_INIT2RTR,
        QP_TRANS_RTR2RTS,
        QP_TRANS_RTS2RTS,
        QP_TRANS_SQERR2RTS,
        QP_TRANS_SQD2SQD,
        QP_TRANS_SQD2RTS
};

/* For debug use */
static const char *resource_str(enum mlx4_resource rt)
{
        switch (rt) {
        case RES_QP: return "RES_QP";
        case RES_CQ: return "RES_CQ";
        case RES_SRQ: return "RES_SRQ";
        case RES_MPT: return "RES_MPT";
        case RES_MTT: return "RES_MTT";
        case RES_MAC: return "RES_MAC";
        case RES_VLAN: return "RES_VLAN";
        case RES_EQ: return "RES_EQ";
        case RES_COUNTER: return "RES_COUNTER";
        case RES_FS_RULE: return "RES_FS_RULE";
        case RES_XRCD: return "RES_XRCD";
        default: return "Unknown resource type !!!";
        }
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave);

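/*
 * Per-slave accounting: each slave has a quota (hard cap) and a
 * guaranteed amount per resource type.  Allocations beyond the
 * guarantee are taken from the shared free pool, which may not be
 * drawn down below the sum of the guarantees still outstanding
 * ("reserved").  MAC and VLAN resources are accounted per port.
 */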
static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
                                      enum mlx4_resource res_type, int count,
                                      int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct resource_allocator *res_alloc =
                &priv->mfunc.master.res_tracker.res_alloc[res_type];
        int err = -EINVAL;
        int allocated, free, reserved, guaranteed, from_free;
        int from_rsvd;

        if (slave > dev->persist->num_vfs)
                return -EINVAL;

        spin_lock(&res_alloc->alloc_lock);
        allocated = (port > 0) ?
                res_alloc->allocated[(port - 1) *
                (dev->persist->num_vfs + 1) + slave] :
                res_alloc->allocated[slave];
        free = (port > 0) ? res_alloc->res_port_free[port - 1] :
                res_alloc->res_free;
        reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
                res_alloc->res_reserved;
        guaranteed = res_alloc->guaranteed[slave];

        if (allocated + count > res_alloc->quota[slave]) {
                mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
                          slave, port, resource_str(res_type), count,
                          allocated, res_alloc->quota[slave]);
                goto out;
        }

        if (allocated + count <= guaranteed) {
                err = 0;
                from_rsvd = count;
        } else {
                /* portion may need to be obtained from free area */
                if (guaranteed - allocated > 0)
                        from_free = count - (guaranteed - allocated);
                else
                        from_free = count;

                from_rsvd = count - from_free;

                if (free - from_free >= reserved)
                        err = 0;
                else
                        mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
                                  slave, port, resource_str(res_type), free,
                                  from_free, reserved);
        }

        if (!err) {
                /* grant the request */
                if (port > 0) {
                        res_alloc->allocated[(port - 1) *
                        (dev->persist->num_vfs + 1) + slave] += count;
                        res_alloc->res_port_free[port - 1] -= count;
                        res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
                } else {
                        res_alloc->allocated[slave] += count;
                        res_alloc->res_free -= count;
                        res_alloc->res_reserved -= from_rsvd;
                }
        }

out:
        spin_unlock(&res_alloc->alloc_lock);
        return err;
}

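/* Return count instances to the allocator, crediting back the
 * reserved area for any portion that was covered by the slave's
 * guarantee.
 */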
static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
                                    enum mlx4_resource res_type, int count,
                                    int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct resource_allocator *res_alloc =
                &priv->mfunc.master.res_tracker.res_alloc[res_type];
        int allocated, guaranteed, from_rsvd;

        if (slave > dev->persist->num_vfs)
                return;

        spin_lock(&res_alloc->alloc_lock);

        allocated = (port > 0) ?
                res_alloc->allocated[(port - 1) *
                (dev->persist->num_vfs + 1) + slave] :
                res_alloc->allocated[slave];
        guaranteed = res_alloc->guaranteed[slave];

        if (allocated - count >= guaranteed) {
                from_rsvd = 0;
        } else {
                /* portion may need to be returned to reserved area */
                if (allocated - guaranteed > 0)
                        from_rsvd = count - (allocated - guaranteed);
                else
                        from_rsvd = count;
        }

        if (port > 0) {
                res_alloc->allocated[(port - 1) *
                (dev->persist->num_vfs + 1) + slave] -= count;
                res_alloc->res_port_free[port - 1] += count;
                res_alloc->res_port_rsvd[port - 1] += from_rsvd;
        } else {
                res_alloc->allocated[slave] -= count;
                res_alloc->res_free += count;
                res_alloc->res_reserved += from_rsvd;
        }

        spin_unlock(&res_alloc->alloc_lock);
}

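/* Default quota policy: each function is guaranteed an equal share of
 * half the instances, and may allocate up to half the total on top of
 * its guarantee.  The PF additionally absorbs the firmware-reserved
 * MTTs into its own allocation.
 */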
static inline void initialize_res_quotas(struct mlx4_dev *dev,
                                         struct resource_allocator *res_alloc,
                                         enum mlx4_resource res_type,
                                         int vf, int num_instances)
{
        res_alloc->guaranteed[vf] = num_instances /
                                    (2 * (dev->persist->num_vfs + 1));
        res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
        if (vf == mlx4_master_func_num(dev)) {
                res_alloc->res_free = num_instances;
                if (res_type == RES_MTT) {
                        /* reserved mtts will be taken out of the PF allocation */
                        res_alloc->res_free += dev->caps.reserved_mtts;
                        res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
                        res_alloc->quota[vf] += dev->caps.reserved_mtts;
                }
        }
}

void mlx4_init_quotas(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int pf;

        /* quotas for VFs are initialized in mlx4_slave_cap */
        if (mlx4_is_slave(dev))
                return;

        if (!mlx4_is_mfunc(dev)) {
                dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
                        mlx4_num_reserved_sqps(dev);
                dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
                dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
                dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
                dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
                return;
        }

        pf = mlx4_master_func_num(dev);
        dev->quotas.qp =
                priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
        dev->quotas.cq =
                priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
        dev->quotas.srq =
                priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
        dev->quotas.mtt =
                priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
        dev->quotas.mpt =
                priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}

int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i, j;
        int t;

        priv->mfunc.master.res_tracker.slave_list =
                kzalloc(dev->num_slaves * sizeof(struct slave_list),
                        GFP_KERNEL);
        if (!priv->mfunc.master.res_tracker.slave_list)
                return -ENOMEM;

        for (i = 0; i < dev->num_slaves; i++) {
                for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
                        INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
                                       slave_list[i].res_list[t]);
                mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
        }

        mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
                 dev->num_slaves);
        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
                priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                struct resource_allocator *res_alloc =
                        &priv->mfunc.master.res_tracker.res_alloc[i];
                res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) *
                                           sizeof(int), GFP_KERNEL);
                res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) *
                                                sizeof(int), GFP_KERNEL);
                if (i == RES_MAC || i == RES_VLAN)
                        res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
                                                       (dev->persist->num_vfs
                                                       + 1) *
                                                       sizeof(int), GFP_KERNEL);
                else
                        res_alloc->allocated = kzalloc((dev->persist->
                                                        num_vfs + 1) *
                                                       sizeof(int), GFP_KERNEL);

                if (!res_alloc->quota || !res_alloc->guaranteed ||
                    !res_alloc->allocated)
                        goto no_mem_err;

                spin_lock_init(&res_alloc->alloc_lock);
                for (t = 0; t < dev->persist->num_vfs + 1; t++) {
                        struct mlx4_active_ports actv_ports =
                                mlx4_get_active_ports(dev, t);
                        switch (i) {
                        case RES_QP:
                                initialize_res_quotas(dev, res_alloc, RES_QP,
                                                      t, dev->caps.num_qps -
                                                      dev->caps.reserved_qps -
                                                      mlx4_num_reserved_sqps(dev));
                                break;
                        case RES_CQ:
                                initialize_res_quotas(dev, res_alloc, RES_CQ,
                                                      t, dev->caps.num_cqs -
                                                      dev->caps.reserved_cqs);
                                break;
                        case RES_SRQ:
                                initialize_res_quotas(dev, res_alloc, RES_SRQ,
                                                      t, dev->caps.num_srqs -
                                                      dev->caps.reserved_srqs);
                                break;
                        case RES_MPT:
                                initialize_res_quotas(dev, res_alloc, RES_MPT,
                                                      t, dev->caps.num_mpts -
                                                      dev->caps.reserved_mrws);
                                break;
                        case RES_MTT:
                                initialize_res_quotas(dev, res_alloc, RES_MTT,
                                                      t, dev->caps.num_mtts -
                                                      dev->caps.reserved_mtts);
                                break;
                        case RES_MAC:
                                if (t == mlx4_master_func_num(dev)) {
                                        int max_vfs_pport = 0;
                                        /* calculate the max VFs per port for
                                         * both ports
                                         */
                                        for (j = 0; j < dev->caps.num_ports;
                                             j++) {
                                                struct mlx4_slaves_pport slaves_pport =
                                                        mlx4_phys_to_slaves_pport(dev, j + 1);
                                                unsigned current_slaves =
                                                        bitmap_weight(slaves_pport.slaves,
                                                                      dev->caps.num_ports) - 1;
                                                if (max_vfs_pport < current_slaves)
                                                        max_vfs_pport =
                                                                current_slaves;
                                        }
                                        res_alloc->quota[t] =
                                                MLX4_MAX_MAC_NUM -
                                                2 * max_vfs_pport;
                                        res_alloc->guaranteed[t] = 2;
                                        for (j = 0; j < MLX4_MAX_PORTS; j++)
                                                res_alloc->res_port_free[j] =
                                                        MLX4_MAX_MAC_NUM;
                                } else {
                                        res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
                                        res_alloc->guaranteed[t] = 2;
                                }
                                break;
                        case RES_VLAN:
                                if (t == mlx4_master_func_num(dev)) {
                                        res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
                                        res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
                                        for (j = 0; j < MLX4_MAX_PORTS; j++)
                                                res_alloc->res_port_free[j] =
                                                        res_alloc->quota[t];
                                } else {
                                        res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
                                        res_alloc->guaranteed[t] = 0;
                                }
                                break;
                        case RES_COUNTER:
                                res_alloc->quota[t] = dev->caps.max_counters;
                                res_alloc->guaranteed[t] = 0;
                                if (t == mlx4_master_func_num(dev))
                                        res_alloc->res_free = res_alloc->quota[t];
                                break;
                        default:
                                break;
                        }
                        if (i == RES_MAC || i == RES_VLAN) {
                                for (j = 0; j < dev->caps.num_ports; j++)
                                        if (test_bit(j, actv_ports.ports))
                                                res_alloc->res_port_rsvd[j] +=
                                                        res_alloc->guaranteed[t];
                        } else {
                                res_alloc->res_reserved += res_alloc->guaranteed[t];
                        }
                }
        }
        spin_lock_init(&priv->mfunc.master.res_tracker.lock);
        return 0;

no_mem_err:
        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
                priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
                priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
                priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
        }
        return -ENOMEM;
}

void mlx4_free_resource_tracker(struct mlx4_dev *dev,
                                enum mlx4_res_tracker_free_type type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;

        if (priv->mfunc.master.res_tracker.slave_list) {
                if (type != RES_TR_FREE_STRUCTS_ONLY) {
                        for (i = 0; i < dev->num_slaves; i++) {
                                if (type == RES_TR_FREE_ALL ||
                                    dev->caps.function != i)
                                        mlx4_delete_all_resources_for_slave(dev, i);
                        }
                        /* free master's vlans */
                        i = dev->caps.function;
                        mlx4_reset_roce_gids(dev, i);
                        mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
                        rem_slave_vlans(dev, i);
                        mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
                }

                if (type != RES_TR_FREE_SLAVES_ONLY) {
                        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
                                priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
                                priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
                                priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
                        }
                        kfree(priv->mfunc.master.res_tracker.slave_list);
                        priv->mfunc.master.res_tracker.slave_list = NULL;
                }
        }
}

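/* Replace the slave's virtual pkey index in the QP context mailbox
 * with the physical index from the master's virt2phys table.  Byte 64
 * of the mailbox is the sched_queue field (its port bit selects the
 * table) and byte 35 holds the pkey index.
 */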
static void update_pkey_index(struct mlx4_dev *dev, int slave,
                              struct mlx4_cmd_mailbox *inbox)
{
        u8 sched = *(u8 *)(inbox->buf + 64);
        u8 orig_index = *(u8 *)(inbox->buf + 35);
        u8 new_index;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int port;

        port = (sched >> 6 & 1) + 1;

        new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
        *(u8 *)(inbox->buf + 35) = new_index;
}

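/* Rewrite the mgid_index fields of a QP context so that each slave
 * addresses only its own window of the GID table: UD QPs get the
 * slave's base GID index, while RC/UC/XRC QPs have their index offset
 * by the base for whichever address path the optpar mask marks as
 * being modified.
 */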
static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
                       u8 slave)
{
        struct mlx4_qp_context  *qp_ctx = inbox->buf + 8;
        enum mlx4_qp_optpar     optpar = be32_to_cpu(*(__be32 *) inbox->buf);
        u32                     ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
        int port;

        if (MLX4_QP_ST_UD == ts) {
                port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
                if (mlx4_is_eth(dev, port))
                        qp_ctx->pri_path.mgid_index =
                                mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
                else
                        qp_ctx->pri_path.mgid_index = slave | 0x80;

        } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
                if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
                        port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
                        if (mlx4_is_eth(dev, port)) {
                                qp_ctx->pri_path.mgid_index +=
                                        mlx4_get_base_gid_ix(dev, slave, port);
                                qp_ctx->pri_path.mgid_index &= 0x7f;
                        } else {
                                qp_ctx->pri_path.mgid_index = slave & 0x7F;
                        }
                }
                if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
                        port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
                        if (mlx4_is_eth(dev, port)) {
                                qp_ctx->alt_path.mgid_index +=
                                        mlx4_get_base_gid_ix(dev, slave, port);
                                qp_ctx->alt_path.mgid_index &= 0x7f;
                        } else {
                                qp_ctx->alt_path.mgid_index = slave & 0x7F;
                        }
                }
        }
}

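/* Enforce the VF's administrative vlan/QoS/spoof-check settings (VST
 * mode) on a QP context the slave is about to hand to the hardware:
 * vlan stripping and insertion are forced on, the admin vlan index
 * and QoS are written into the primary path, and when spoof-checking
 * is enabled the QP is pinned to the VF's MAC index.
 */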
static int update_vport_qp_param(struct mlx4_dev *dev,
                                 struct mlx4_cmd_mailbox *inbox,
                                 u8 slave, u32 qpn)
{
        struct mlx4_qp_context  *qpc = inbox->buf + 8;
        struct mlx4_vport_oper_state *vp_oper;
        struct mlx4_priv *priv;
        u32 qp_type;
        int port;

        port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
        priv = mlx4_priv(dev);
        vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
        qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;

        if (MLX4_VGT != vp_oper->state.default_vlan) {
                /* the reserved QPs (special, proxy, tunnel)
                 * do not operate over vlans
                 */
                if (mlx4_is_qp_reserved(dev, qpn))
                        return 0;

                /* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
                if (qp_type == MLX4_QP_ST_UD ||
                    (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
                        if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
                                *(__be32 *)inbox->buf =
                                        cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
                                        MLX4_QP_OPTPAR_VLAN_STRIPPING);
                                qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
                        } else {
                                struct mlx4_update_qp_params params = {.flags = 0};

                                mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
                        }
                }

                if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
                    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
                } else if (0 != vp_oper->state.default_vlan) {
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
                } else { /* priority tagged */
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
                }

                qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
                qpc->pri_path.vlan_index = vp_oper->vlan_idx;
                qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
                qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
                qpc->pri_path.sched_queue &= 0xC7;
                qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
        }
        if (vp_oper->state.spoofchk) {
                qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
                qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
        }
        return 0;
}

static int mpt_mask(struct mlx4_dev *dev)
{
        return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, u64 res_id,
                      enum mlx4_resource type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
                                  res_id);
}

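/* Mark a resource busy on behalf of a slave and return it; the caller
 * releases it again with put_res(), which restores the saved state.
 * The usual pattern is get_res(); ...use the resource...; put_res().
 */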
static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
                   enum mlx4_resource type,
                   void *res)
{
        struct res_common *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (!r) {
                err = -ENOENT;
                goto exit;
        }

        if (r->state == RES_ANY_BUSY) {
                err = -EBUSY;
                goto exit;
        }

        if (r->owner != slave) {
                err = -EPERM;
                goto exit;
        }

        r->from_state = r->state;
        r->state = RES_ANY_BUSY;

        if (res)
                *((struct res_common **)res) = r;

exit:
        spin_unlock_irq(mlx4_tlock(dev));
        return err;
}

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
                                    enum mlx4_resource type,
                                    u64 res_id, int *slave)
{
        struct res_common *r;
        int err = -ENOENT;
        int id = res_id;

        if (type == RES_QP)
                id &= 0x7fffff;
        spin_lock(mlx4_tlock(dev));

        r = find_res(dev, id, type);
        if (r) {
                *slave = r->owner;
                err = 0;
        }
        spin_unlock(mlx4_tlock(dev));

        return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
                    enum mlx4_resource type)
{
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (r)
                r->state = r->from_state;
        spin_unlock_irq(mlx4_tlock(dev));
}

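/* Constructors for tracker entries: one alloc_*_tr() helper per
 * resource type, each returning the embedded res_common initialized
 * to the type's initial state.
 */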
static struct res_common *alloc_qp_tr(int id)
{
        struct res_qp *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_QP_RESERVED;
        ret->local_qpn = id;
        INIT_LIST_HEAD(&ret->mcg_list);
        spin_lock_init(&ret->mcg_spl);
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
        struct res_mtt *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->order = order;
        ret->com.state = RES_MTT_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
        struct res_mpt *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_MPT_RESERVED;
        ret->key = key;

        return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
        struct res_eq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_EQ_RESERVED;

        return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
        struct res_cq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_CQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
        struct res_srq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_SRQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
        struct res_counter *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_COUNTER_ALLOCATED;

        return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
        struct res_xrcdn *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_XRCD_ALLOCATED;

        return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
        struct res_fs_rule *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_FS_RULE_ALLOCATED;
        ret->qpn = qpn;
        return &ret->com;
}

static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
                                   int extra)
{
        struct res_common *ret;

        switch (type) {
        case RES_QP:
                ret = alloc_qp_tr(id);
                break;
        case RES_MPT:
                ret = alloc_mpt_tr(id, extra);
                break;
        case RES_MTT:
                ret = alloc_mtt_tr(id, extra);
                break;
        case RES_EQ:
                ret = alloc_eq_tr(id);
                break;
        case RES_CQ:
                ret = alloc_cq_tr(id);
                break;
        case RES_SRQ:
                ret = alloc_srq_tr(id);
                break;
        case RES_MAC:
                pr_err("implementation missing\n");
                return NULL;
        case RES_COUNTER:
                ret = alloc_counter_tr(id);
                break;
        case RES_XRCD:
                ret = alloc_xrcdn_tr(id);
                break;
        case RES_FS_RULE:
                ret = alloc_fs_rule_tr(id, extra);
                break;
        default:
                return NULL;
        }
        if (ret)
                ret->owner = slave;

        return ret;
}

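/* Allocate tracker entries for ids [base, base + count) on behalf of
 * a slave and register them in the rb-tree and the slave's per-type
 * list, undoing everything on failure.
 */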
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
{
        int i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct res_common **res_arr;
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct rb_root *root = &tracker->res_tree[type];

        res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
        if (!res_arr)
                return -ENOMEM;

        for (i = 0; i < count; ++i) {
                res_arr[i] = alloc_tr(base + i, type, slave, extra);
                if (!res_arr[i]) {
                        for (--i; i >= 0; --i)
                                kfree(res_arr[i]);

                        kfree(res_arr);
                        return -ENOMEM;
                }
        }

        spin_lock_irq(mlx4_tlock(dev));
        for (i = 0; i < count; ++i) {
                if (find_res(dev, base + i, type)) {
                        err = -EEXIST;
                        goto undo;
                }
                err = res_tracker_insert(root, res_arr[i]);
                if (err)
                        goto undo;
                list_add_tail(&res_arr[i]->list,
                              &tracker->slave_list[slave].res_list[type]);
        }
        spin_unlock_irq(mlx4_tlock(dev));
        kfree(res_arr);

        return 0;

undo:
        /* i is the 0-based array index, not a resource id: walk back to 0
         * and unlink each entry that was already added before freeing it.
         */
        for (--i; i >= 0; --i) {
                rb_erase(&res_arr[i]->node, root);
                list_del(&res_arr[i]->list);
        }

        spin_unlock_irq(mlx4_tlock(dev));

        for (i = 0; i < count; ++i)
                kfree(res_arr[i]);

        kfree(res_arr);

        return err;
}

static int remove_qp_ok(struct res_qp *res)
{
        if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
            !list_empty(&res->mcg_list)) {
                pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
                       res->com.state, atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_QP_RESERVED) {
                return -EPERM;
        }

        return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
        if (res->com.state == RES_MTT_BUSY ||
            atomic_read(&res->ref_count)) {
                pr_devel("%s-%d: state %s, ref_count %d\n",
                         __func__, __LINE__,
                         mtt_states_str(res->com.state),
                         atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_MTT_ALLOCATED)
                return -EPERM;
        else if (res->order != order)
                return -EINVAL;

        return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
        if (res->com.state == RES_MPT_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_MPT_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
        if (res->com.state == RES_EQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_EQ_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
        if (res->com.state == RES_COUNTER_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_COUNTER_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
        if (res->com.state == RES_XRCD_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_XRCD_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
        if (res->com.state == RES_FS_RULE_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_FS_RULE_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
        if (res->com.state == RES_CQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_CQ_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
        if (res->com.state == RES_SRQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_SRQ_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
        switch (type) {
        case RES_QP:
                return remove_qp_ok((struct res_qp *)res);
        case RES_CQ:
                return remove_cq_ok((struct res_cq *)res);
        case RES_SRQ:
                return remove_srq_ok((struct res_srq *)res);
        case RES_MPT:
                return remove_mpt_ok((struct res_mpt *)res);
        case RES_MTT:
                return remove_mtt_ok((struct res_mtt *)res, extra);
        case RES_MAC:
                return -ENOSYS;
        case RES_EQ:
                return remove_eq_ok((struct res_eq *)res);
        case RES_COUNTER:
                return remove_counter_ok((struct res_counter *)res);
        case RES_XRCD:
                return remove_xrcdn_ok((struct res_xrcdn *)res);
        case RES_FS_RULE:
                return remove_fs_rule_ok((struct res_fs_rule *)res);
        default:
                return -EINVAL;
        }
}

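/* Remove tracker entries for ids [base, base + count): first verify
 * under the lock that every id exists, is owned by the slave and is
 * in a removable state, then delete them all in a second pass.
 */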
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
{
        u64 i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        for (i = base; i < base + count; ++i) {
                r = res_tracker_lookup(&tracker->res_tree[type], i);
                if (!r) {
                        err = -ENOENT;
                        goto out;
                }
                if (r->owner != slave) {
                        err = -EPERM;
                        goto out;
                }
                err = remove_ok(r, type, extra);
                if (err)
                        goto out;
        }

        for (i = base; i < base + count; ++i) {
                r = res_tracker_lookup(&tracker->res_tree[type], i);
                rb_erase(&r->node, &tracker->res_tree[type]);
                list_del(&r->list);
                kfree(r);
        }
        err = 0;

out:
        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

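/* The *_res_start_move_to() helpers below validate a state transition
 * for a resource, then mark it busy with the target state recorded in
 * to_state.  The move is committed by res_end_move() or rolled back
 * by res_abort_move().
 */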
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
                                enum res_qp_states state, struct res_qp **qp,
                                int alloc)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_qp *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_QP_BUSY:
                        mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
                                 __func__, r->com.res_id);
                        err = -EBUSY;
                        break;

                case RES_QP_RESERVED:
                        if (r->com.state == RES_QP_MAPPED && !alloc)
                                break;

                        mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
                        err = -EINVAL;
                        break;

                case RES_QP_MAPPED:
                        if ((r->com.state == RES_QP_RESERVED && alloc) ||
                            r->com.state == RES_QP_HW)
                                break;
                        else {
                                mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
                                         r->com.res_id);
                                err = -EINVAL;
                        }

                        break;

                case RES_QP_HW:
                        if (r->com.state != RES_QP_MAPPED)
                                err = -EINVAL;
                        break;
                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_QP_BUSY;
                        if (qp)
                                *qp = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                enum res_mpt_states state, struct res_mpt **mpt)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_mpt *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_MPT_BUSY:
                        err = -EINVAL;
                        break;

                case RES_MPT_RESERVED:
                        if (r->com.state != RES_MPT_MAPPED)
                                err = -EINVAL;
                        break;

                case RES_MPT_MAPPED:
                        if (r->com.state != RES_MPT_RESERVED &&
                            r->com.state != RES_MPT_HW)
                                err = -EINVAL;
                        break;

                case RES_MPT_HW:
                        if (r->com.state != RES_MPT_MAPPED)
                                err = -EINVAL;
                        break;
                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_MPT_BUSY;
                        if (mpt)
                                *mpt = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                enum res_eq_states state, struct res_eq **eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_eq *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_EQ_BUSY:
                        err = -EINVAL;
                        break;

                case RES_EQ_RESERVED:
                        if (r->com.state != RES_EQ_HW)
                                err = -EINVAL;
                        break;

                case RES_EQ_HW:
                        if (r->com.state != RES_EQ_RESERVED)
                                err = -EINVAL;
                        break;

                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_EQ_BUSY;
                        if (eq)
                                *eq = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
                                enum res_cq_states state, struct res_cq **cq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_cq *r;
        int err;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
        if (!r) {
                err = -ENOENT;
        } else if (r->com.owner != slave) {
                err = -EPERM;
        } else if (state == RES_CQ_ALLOCATED) {
                if (r->com.state != RES_CQ_HW)
                        err = -EINVAL;
                else if (atomic_read(&r->ref_count))
                        err = -EBUSY;
                else
                        err = 0;
        } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
                err = -EINVAL;
        } else {
                err = 0;
        }

        if (!err) {
                r->com.from_state = r->com.state;
                r->com.to_state = state;
                r->com.state = RES_CQ_BUSY;
                if (cq)
                        *cq = r;
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                 enum res_srq_states state, struct res_srq **srq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_srq *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
        if (!r) {
                err = -ENOENT;
        } else if (r->com.owner != slave) {
                err = -EPERM;
        } else if (state == RES_SRQ_ALLOCATED) {
                if (r->com.state != RES_SRQ_HW)
                        err = -EINVAL;
                else if (atomic_read(&r->ref_count))
                        err = -EBUSY;
        } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
                err = -EINVAL;
        }

        if (!err) {
                r->com.from_state = r->com.state;
                r->com.to_state = state;
                r->com.state = RES_SRQ_BUSY;
                if (srq)
                        *srq = r;
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

1509 static void res_abort_move(struct mlx4_dev *dev, int slave,
1510                            enum mlx4_resource type, int id)
1511 {
1512         struct mlx4_priv *priv = mlx4_priv(dev);
1513         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1514         struct res_common *r;
1515
1516         spin_lock_irq(mlx4_tlock(dev));
1517         r = res_tracker_lookup(&tracker->res_tree[type], id);
1518         if (r && (r->owner == slave))
1519                 r->state = r->from_state;
1520         spin_unlock_irq(mlx4_tlock(dev));
1521 }
1522
1523 static void res_end_move(struct mlx4_dev *dev, int slave,
1524                          enum mlx4_resource type, int id)
1525 {
1526         struct mlx4_priv *priv = mlx4_priv(dev);
1527         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1528         struct res_common *r;
1529
1530         spin_lock_irq(mlx4_tlock(dev));
1531         r = res_tracker_lookup(&tracker->res_tree[type], id);
1532         if (r && (r->owner == slave))
1533                 r->state = r->to_state;
1534         spin_unlock_irq(mlx4_tlock(dev));
1535 }
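/*
 * Editorial note: the helpers above implement a three-phase transition
 * protocol for tracked resources.  A *_res_start_move_to() call validates
 * the requested transition under the tracker lock, records from_state and
 * to_state, and parks the resource in its *_BUSY state so concurrent
 * movers back off.  The caller then issues the firmware command and
 * finishes with res_end_move() (commit to to_state) or res_abort_move()
 * (roll back to from_state).  A minimal sketch of the calling pattern,
 * mirroring the command wrappers later in this file:
 *
 *	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
 *	if (err)
 *		return err;
 *	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
 *	if (err)
 *		res_abort_move(dev, slave, RES_MPT, id);
 *	else
 *		res_end_move(dev, slave, RES_MPT, id);
 */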
1536
1537 static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1538 {
1539         return mlx4_is_qp_reserved(dev, qpn) &&
1540                 (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1541 }
1542
1543 static int fw_reserved(struct mlx4_dev *dev, int qpn)
1544 {
1545         return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
1546 }
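/*
 * Editorial note: QP numbers below caps.reserved_qps_cnt[MLX4_QP_REGION_FW]
 * have their ICM managed by the firmware itself, which is why the
 * fw_reserved() guards in qp_alloc_res() and qp_free_res() below skip
 * __mlx4_qp_alloc_icm()/__mlx4_qp_free_icm() for such QPs.
 */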
1547
1548 static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1549                         u64 in_param, u64 *out_param)
1550 {
1551         int err;
1552         int count;
1553         int align;
1554         int base;
1555         int qpn;
1556         u8 flags;
1557
1558         switch (op) {
1559         case RES_OP_RESERVE:
1560                 count = get_param_l(&in_param) & 0xffffff;
1561                 /* Turn off all unsupported QP allocation flags that the
1562                  * slave tries to set.
1563                  */
1564                 flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
1565                 align = get_param_h(&in_param);
1566                 err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
1567                 if (err)
1568                         return err;
1569
1570                 err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
1571                 if (err) {
1572                         mlx4_release_resource(dev, slave, RES_QP, count, 0);
1573                         return err;
1574                 }
1575
1576                 err = add_res_range(dev, slave, base, count, RES_QP, 0);
1577                 if (err) {
1578                         mlx4_release_resource(dev, slave, RES_QP, count, 0);
1579                         __mlx4_qp_release_range(dev, base, count);
1580                         return err;
1581                 }
1582                 set_param_l(out_param, base);
1583                 break;
1584         case RES_OP_MAP_ICM:
1585                 qpn = get_param_l(&in_param) & 0x7fffff;
1586                 if (valid_reserved(dev, slave, qpn)) {
1587                         err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1588                         if (err)
1589                                 return err;
1590                 }
1591
1592                 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1593                                            NULL, 1);
1594                 if (err)
1595                         return err;
1596
1597                 if (!fw_reserved(dev, qpn)) {
1598                         err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
1599                         if (err) {
1600                                 res_abort_move(dev, slave, RES_QP, qpn);
1601                                 return err;
1602                         }
1603                 }
1604
1605                 res_end_move(dev, slave, RES_QP, qpn);
1606                 break;
1607
1608         default:
1609                 err = -EINVAL;
1610                 break;
1611         }
1612         return err;
1613 }
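/*
 * Editorial note: for RES_OP_RESERVE the 64-bit in_param is packed as
 * decoded above:
 *
 *	low dword,  bits  0..23: count (number of QPs to reserve)
 *	low dword,  bits 24..31: allocation flags, masked against
 *	                         caps.alloc_res_qp_mask
 *	high dword:              required alignment of the range
 *
 * On success the base QPN of the reserved range is returned to the slave
 * in the low dword of out_param.
 */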
1614
1615 static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1616                          u64 in_param, u64 *out_param)
1617 {
1618         int err = -EINVAL;
1619         int base;
1620         int order;
1621
1622         if (op != RES_OP_RESERVE_AND_MAP)
1623                 return err;
1624
1625         order = get_param_l(&in_param);
1626
1627         err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1628         if (err)
1629                 return err;
1630
1631         base = __mlx4_alloc_mtt_range(dev, order);
1632         if (base == -1) {
1633                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1634                 return -ENOMEM;
1635         }
1636
1637         err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1638         if (err) {
1639                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1640                 __mlx4_free_mtt_range(dev, base, order);
1641         } else {
1642                 set_param_l(out_param, base);
1643         }
1644
1645         return err;
1646 }
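/*
 * Editorial note: MTT ranges are tracked by order.  The slave passes log2
 * of the range size, the quota is charged as (1 << order) entries, and the
 * same order must be presented again on free (see mtt_free_res() below) so
 * the release stays symmetric with the grant.
 */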
1647
1648 static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1649                          u64 in_param, u64 *out_param)
1650 {
1651         int err = -EINVAL;
1652         int index;
1653         int id;
1654         struct res_mpt *mpt;
1655
1656         switch (op) {
1657         case RES_OP_RESERVE:
1658                 err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1659                 if (err)
1660                         break;
1661
1662                 index = __mlx4_mpt_reserve(dev);
1663                 if (index == -1) {
1664                         mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1665                         break;
1666                 }
1667                 id = index & mpt_mask(dev);
1668
1669                 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1670                 if (err) {
1671                         mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1672                         __mlx4_mpt_release(dev, index);
1673                         break;
1674                 }
1675                 set_param_l(out_param, index);
1676                 break;
1677         case RES_OP_MAP_ICM:
1678                 index = get_param_l(&in_param);
1679                 id = index & mpt_mask(dev);
1680                 err = mr_res_start_move_to(dev, slave, id,
1681                                            RES_MPT_MAPPED, &mpt);
1682                 if (err)
1683                         return err;
1684
1685                 err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
1686                 if (err) {
1687                         res_abort_move(dev, slave, RES_MPT, id);
1688                         return err;
1689                 }
1690
1691                 res_end_move(dev, slave, RES_MPT, id);
1692                 break;
1693         }
1694         return err;
1695 }
1696
1697 static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1698                         u64 in_param, u64 *out_param)
1699 {
1700         int cqn;
1701         int err;
1702
1703         switch (op) {
1704         case RES_OP_RESERVE_AND_MAP:
1705                 err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
1706                 if (err)
1707                         break;
1708
1709                 err = __mlx4_cq_alloc_icm(dev, &cqn);
1710                 if (err) {
1711                         mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1712                         break;
1713                 }
1714
1715                 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1716                 if (err) {
1717                         mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1718                         __mlx4_cq_free_icm(dev, cqn);
1719                         break;
1720                 }
1721
1722                 set_param_l(out_param, cqn);
1723                 break;
1724
1725         default:
1726                 err = -EINVAL;
1727         }
1728
1729         return err;
1730 }
1731
1732 static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1733                          u64 in_param, u64 *out_param)
1734 {
1735         int srqn;
1736         int err;
1737
1738         switch (op) {
1739         case RES_OP_RESERVE_AND_MAP:
1740                 err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
1741                 if (err)
1742                         break;
1743
1744                 err = __mlx4_srq_alloc_icm(dev, &srqn);
1745                 if (err) {
1746                         mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1747                         break;
1748                 }
1749
1750                 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1751                 if (err) {
1752                         mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1753                         __mlx4_srq_free_icm(dev, srqn);
1754                         break;
1755                 }
1756
1757                 set_param_l(out_param, srqn);
1758                 break;
1759
1760         default:
1761                 err = -EINVAL;
1762         }
1763
1764         return err;
1765 }
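/*
 * Editorial note: cq_alloc_res() and srq_alloc_res() above follow the
 * rollback pattern used throughout this file: grant quota, allocate the
 * ICM-backed object, then register it with the tracker; on failure each
 * step undoes the earlier ones in reverse order, e.g.:
 *
 *	grant -> __mlx4_cq_alloc_icm() fails -> release grant
 *	grant -> icm ok -> add_res_range() fails -> free icm, release grant
 */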
1766
1767 static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
1768                                      u8 smac_index, u64 *mac)
1769 {
1770         struct mlx4_priv *priv = mlx4_priv(dev);
1771         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1772         struct list_head *mac_list =
1773                 &tracker->slave_list[slave].res_list[RES_MAC];
1774         struct mac_res *res, *tmp;
1775
1776         list_for_each_entry_safe(res, tmp, mac_list, list) {
1777                 if (res->smac_index == smac_index && res->port == (u8) port) {
1778                         *mac = res->mac;
1779                         return 0;
1780                 }
1781         }
1782         return -ENOENT;
1783 }
1784
1785 static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
1786 {
1787         struct mlx4_priv *priv = mlx4_priv(dev);
1788         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1789         struct list_head *mac_list =
1790                 &tracker->slave_list[slave].res_list[RES_MAC];
1791         struct mac_res *res, *tmp;
1792
1793         list_for_each_entry_safe(res, tmp, mac_list, list) {
1794                 if (res->mac == mac && res->port == (u8) port) {
1795                         /* mac found. update ref count */
1796                         ++res->ref_count;
1797                         return 0;
1798                 }
1799         }
1800
1801         if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
1802                 return -EINVAL;
1803         res = kzalloc(sizeof(*res), GFP_KERNEL);
1804         if (!res) {
1805                 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1806                 return -ENOMEM;
1807         }
1808         res->mac = mac;
1809         res->port = (u8) port;
1810         res->smac_index = smac_index;
1811         res->ref_count = 1;
1812         list_add_tail(&res->list,
1813                       &tracker->slave_list[slave].res_list[RES_MAC]);
1814         return 0;
1815 }
1816
1817 static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1818                                int port)
1819 {
1820         struct mlx4_priv *priv = mlx4_priv(dev);
1821         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1822         struct list_head *mac_list =
1823                 &tracker->slave_list[slave].res_list[RES_MAC];
1824         struct mac_res *res, *tmp;
1825
1826         list_for_each_entry_safe(res, tmp, mac_list, list) {
1827                 if (res->mac == mac && res->port == (u8) port) {
1828                         if (!--res->ref_count) {
1829                                 list_del(&res->list);
1830                                 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1831                                 kfree(res);
1832                         }
1833                         break;
1834                 }
1835         }
1836 }
1837
1838 static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1839 {
1840         struct mlx4_priv *priv = mlx4_priv(dev);
1841         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1842         struct list_head *mac_list =
1843                 &tracker->slave_list[slave].res_list[RES_MAC];
1844         struct mac_res *res, *tmp;
1845         int i;
1846
1847         list_for_each_entry_safe(res, tmp, mac_list, list) {
1848                 list_del(&res->list);
1849                 /* drop the MAC once for each reference the slave held */
1850                 for (i = 0; i < res->ref_count; i++)
1851                         __mlx4_unregister_mac(dev, res->port, res->mac);
1852                 mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
1853                 kfree(res);
1854         }
1855 }
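/*
 * Editorial note: MAC entries are reference counted per (mac, port) pair.
 * Registering an already-tracked MAC only bumps ref_count, so teardown
 * (mac_del_from_slave() and rem_slave_macs() above) must drop one device
 * reference per count to keep the global usage of the MAC balanced.
 */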
1856
1857 static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1858                          u64 in_param, u64 *out_param, int in_port)
1859 {
1860         int err = -EINVAL;
1861         int port;
1862         u64 mac;
1863         u8 smac_index;
1864
1865         if (op != RES_OP_RESERVE_AND_MAP)
1866                 return err;
1867
1868         port = !in_port ? get_param_l(out_param) : in_port;
1869         port = mlx4_slave_convert_port(
1870                         dev, slave, port);
1871
1872         if (port < 0)
1873                 return -EINVAL;
1874         mac = in_param;
1875
1876         err = __mlx4_register_mac(dev, port, mac);
1877         if (err >= 0) {
1878                 smac_index = err;
1879                 set_param_l(out_param, err);
1880                 err = 0;
1881         }
1882
1883         if (!err) {
1884                 err = mac_add_to_slave(dev, slave, mac, port, smac_index);
1885                 if (err)
1886                         __mlx4_unregister_mac(dev, port, mac);
1887         }
1888         return err;
1889 }
1890
1891 static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1892                              int port, int vlan_index)
1893 {
1894         struct mlx4_priv *priv = mlx4_priv(dev);
1895         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1896         struct list_head *vlan_list =
1897                 &tracker->slave_list[slave].res_list[RES_VLAN];
1898         struct vlan_res *res, *tmp;
1899
1900         list_for_each_entry_safe(res, tmp, vlan_list, list) {
1901                 if (res->vlan == vlan && res->port == (u8) port) {
1902                         /* vlan found. update ref count */
1903                         ++res->ref_count;
1904                         return 0;
1905                 }
1906         }
1907
1908         if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
1909                 return -EINVAL;
1910         res = kzalloc(sizeof(*res), GFP_KERNEL);
1911         if (!res) {
1912                 mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
1913                 return -ENOMEM;
1914         }
1915         res->vlan = vlan;
1916         res->port = (u8) port;
1917         res->vlan_index = vlan_index;
1918         res->ref_count = 1;
1919         list_add_tail(&res->list,
1920                       &tracker->slave_list[slave].res_list[RES_VLAN]);
1921         return 0;
1922 }
1923
1924
1925 static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1926                                 int port)
1927 {
1928         struct mlx4_priv *priv = mlx4_priv(dev);
1929         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1930         struct list_head *vlan_list =
1931                 &tracker->slave_list[slave].res_list[RES_VLAN];
1932         struct vlan_res *res, *tmp;
1933
1934         list_for_each_entry_safe(res, tmp, vlan_list, list) {
1935                 if (res->vlan == vlan && res->port == (u8) port) {
1936                         if (!--res->ref_count) {
1937                                 list_del(&res->list);
1938                                 mlx4_release_resource(dev, slave, RES_VLAN,
1939                                                       1, port);
1940                                 kfree(res);
1941                         }
1942                         break;
1943                 }
1944         }
1945 }
1946
1947 static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
1948 {
1949         struct mlx4_priv *priv = mlx4_priv(dev);
1950         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1951         struct list_head *vlan_list =
1952                 &tracker->slave_list[slave].res_list[RES_VLAN];
1953         struct vlan_res *res, *tmp;
1954         int i;
1955
1956         list_for_each_entry_safe(res, tmp, vlan_list, list) {
1957                 list_del(&res->list);
1958                 /* drop the vlan once for each reference the slave held */
1959                 for (i = 0; i < res->ref_count; i++)
1960                         __mlx4_unregister_vlan(dev, res->port, res->vlan);
1961                 mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
1962                 kfree(res);
1963         }
1964 }
1965
1966 static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1967                           u64 in_param, u64 *out_param, int in_port)
1968 {
1969         struct mlx4_priv *priv = mlx4_priv(dev);
1970         struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
1971         int err;
1972         u16 vlan;
1973         int vlan_index;
1974         int port;
1975
1976         port = !in_port ? get_param_l(out_param) : in_port;
1977
1978         if (!port || op != RES_OP_RESERVE_AND_MAP)
1979                 return -EINVAL;
1980
1981         port = mlx4_slave_convert_port(
1982                         dev, slave, port);
1983
1984         if (port < 0)
1985                 return -EINVAL;
1986         /* older kernels made vlan reg/unreg a NOP; keep that behavior */
1987         if (!in_port && port > 0 && port <= dev->caps.num_ports) {
1988                 slave_state[slave].old_vlan_api = true;
1989                 return 0;
1990         }
1991
1992         vlan = (u16) in_param;
1993
1994         err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
1995         if (!err) {
1996                 set_param_l(out_param, (u32) vlan_index);
1997                 err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
1998                 if (err)
1999                         __mlx4_unregister_vlan(dev, port, vlan);
2000         }
2001         return err;
2002 }
2003
2004 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2005                              u64 in_param, u64 *out_param)
2006 {
2007         u32 index;
2008         int err;
2009
2010         if (op != RES_OP_RESERVE)
2011                 return -EINVAL;
2012
2013         err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
2014         if (err)
2015                 return err;
2016
2017         err = __mlx4_counter_alloc(dev, &index);
2018         if (err) {
2019                 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2020                 return err;
2021         }
2022
2023         err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2024         if (err) {
2025                 __mlx4_counter_free(dev, index);
2026                 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2027         } else {
2028                 set_param_l(out_param, index);
2029         }
2030
2031         return err;
2032 }
2033
2034 static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2035                            u64 in_param, u64 *out_param)
2036 {
2037         u32 xrcdn;
2038         int err;
2039
2040         if (op != RES_OP_RESERVE)
2041                 return -EINVAL;
2042
2043         err = __mlx4_xrcd_alloc(dev, &xrcdn);
2044         if (err)
2045                 return err;
2046
2047         err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2048         if (err)
2049                 __mlx4_xrcd_free(dev, xrcdn);
2050         else
2051                 set_param_l(out_param, xrcdn);
2052
2053         return err;
2054 }
2055
2056 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
2057                            struct mlx4_vhcr *vhcr,
2058                            struct mlx4_cmd_mailbox *inbox,
2059                            struct mlx4_cmd_mailbox *outbox,
2060                            struct mlx4_cmd_info *cmd)
2061 {
2062         int err;
2063         int alop = vhcr->op_modifier;
2064
2065         switch (vhcr->in_modifier & 0xFF) {
2066         case RES_QP:
2067                 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
2068                                    vhcr->in_param, &vhcr->out_param);
2069                 break;
2070
2071         case RES_MTT:
2072                 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2073                                     vhcr->in_param, &vhcr->out_param);
2074                 break;
2075
2076         case RES_MPT:
2077                 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2078                                     vhcr->in_param, &vhcr->out_param);
2079                 break;
2080
2081         case RES_CQ:
2082                 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2083                                    vhcr->in_param, &vhcr->out_param);
2084                 break;
2085
2086         case RES_SRQ:
2087                 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2088                                     vhcr->in_param, &vhcr->out_param);
2089                 break;
2090
2091         case RES_MAC:
2092                 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
2093                                     vhcr->in_param, &vhcr->out_param,
2094                                     (vhcr->in_modifier >> 8) & 0xFF);
2095                 break;
2096
2097         case RES_VLAN:
2098                 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
2099                                      vhcr->in_param, &vhcr->out_param,
2100                                      (vhcr->in_modifier >> 8) & 0xFF);
2101                 break;
2102
2103         case RES_COUNTER:
2104                 err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
2105                                         vhcr->in_param, &vhcr->out_param);
2106                 break;
2107
2108         case RES_XRCD:
2109                 err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
2110                                       vhcr->in_param, &vhcr->out_param);
2111                 break;
2112
2113         default:
2114                 err = -EINVAL;
2115                 break;
2116         }
2117
2118         return err;
2119 }
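/*
 * Editorial note: the dispatch above decodes the virtual HCR as follows:
 *
 *	vhcr->in_modifier, bits 0..7:  resource type (RES_QP, RES_MTT, ...)
 *	vhcr->in_modifier, bits 8..15: port, used only for RES_MAC/RES_VLAN
 *	vhcr->op_modifier:             RES_OP_* sub-operation
 *	vhcr->in_param/out_param:      per-type arguments and results
 *
 * mlx4_FREE_RES_wrapper() below uses the same encoding.
 */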
2120
2121 static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2122                        u64 in_param)
2123 {
2124         int err;
2125         int count;
2126         int base;
2127         int qpn;
2128
2129         switch (op) {
2130         case RES_OP_RESERVE:
2131                 base = get_param_l(&in_param) & 0x7fffff;
2132                 count = get_param_h(&in_param);
2133                 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
2134                 if (err)
2135                         break;
2136                 mlx4_release_resource(dev, slave, RES_QP, count, 0);
2137                 __mlx4_qp_release_range(dev, base, count);
2138                 break;
2139         case RES_OP_MAP_ICM:
2140                 qpn = get_param_l(&in_param) & 0x7fffff;
2141                 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
2142                                            NULL, 0);
2143                 if (err)
2144                         return err;
2145
2146                 if (!fw_reserved(dev, qpn))
2147                         __mlx4_qp_free_icm(dev, qpn);
2148
2149                 res_end_move(dev, slave, RES_QP, qpn);
2150
2151                 if (valid_reserved(dev, slave, qpn))
2152                         err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
2153                 break;
2154         default:
2155                 err = -EINVAL;
2156                 break;
2157         }
2158         return err;
2159 }
2160
2161 static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2162                         u64 in_param, u64 *out_param)
2163 {
2164         int err = -EINVAL;
2165         int base;
2166         int order;
2167
2168         if (op != RES_OP_RESERVE_AND_MAP)
2169                 return err;
2170
2171         base = get_param_l(&in_param);
2172         order = get_param_h(&in_param);
2173         err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
2174         if (!err) {
2175                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
2176                 __mlx4_free_mtt_range(dev, base, order);
2177         }
2178         return err;
2179 }
2180
2181 static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2182                         u64 in_param)
2183 {
2184         int err = -EINVAL;
2185         int index;
2186         int id;
2187         struct res_mpt *mpt;
2188
2189         switch (op) {
2190         case RES_OP_RESERVE:
2191                 index = get_param_l(&in_param);
2192                 id = index & mpt_mask(dev);
2193                 err = get_res(dev, slave, id, RES_MPT, &mpt);
2194                 if (err)
2195                         break;
2196                 index = mpt->key;
2197                 put_res(dev, slave, id, RES_MPT);
2198
2199                 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
2200                 if (err)
2201                         break;
2202                 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
2203                 __mlx4_mpt_release(dev, index);
2204                 break;
2205         case RES_OP_MAP_ICM:
2206                 index = get_param_l(&in_param);
2207                 id = index & mpt_mask(dev);
2208                 err = mr_res_start_move_to(dev, slave, id,
2209                                            RES_MPT_RESERVED, &mpt);
2210                 if (err)
2211                         return err;
2212
2213                 __mlx4_mpt_free_icm(dev, mpt->key);
2214                 res_end_move(dev, slave, RES_MPT, id);
2215                 break;
2216
2217         default:
2218                 err = -EINVAL;
2219                 break;
2220         }
2221         return err;
2222 }
2223
2224 static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2225                        u64 in_param, u64 *out_param)
2226 {
2227         int cqn;
2228         int err;
2229
2230         switch (op) {
2231         case RES_OP_RESERVE_AND_MAP:
2232                 cqn = get_param_l(&in_param);
2233                 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2234                 if (err)
2235                         break;
2236
2237                 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
2238                 __mlx4_cq_free_icm(dev, cqn);
2239                 break;
2240
2241         default:
2242                 err = -EINVAL;
2243                 break;
2244         }
2245
2246         return err;
2247 }
2248
2249 static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2250                         u64 in_param, u64 *out_param)
2251 {
2252         int srqn;
2253         int err;
2254
2255         switch (op) {
2256         case RES_OP_RESERVE_AND_MAP:
2257                 srqn = get_param_l(&in_param);
2258                 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2259                 if (err)
2260                         break;
2261
2262                 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
2263                 __mlx4_srq_free_icm(dev, srqn);
2264                 break;
2265
2266         default:
2267                 err = -EINVAL;
2268                 break;
2269         }
2270
2271         return err;
2272 }
2273
2274 static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2275                             u64 in_param, u64 *out_param, int in_port)
2276 {
2277         int port;
2278         int err = 0;
2279
2280         switch (op) {
2281         case RES_OP_RESERVE_AND_MAP:
2282                 port = !in_port ? get_param_l(out_param) : in_port;
2283                 port = mlx4_slave_convert_port(
2284                                 dev, slave, port);
2285
2286                 if (port < 0)
2287                         return -EINVAL;
2288                 mac_del_from_slave(dev, slave, in_param, port);
2289                 __mlx4_unregister_mac(dev, port, in_param);
2290                 break;
2291         default:
2292                 err = -EINVAL;
2293                 break;
2294         }
2295
2296         return err;
2297
2298 }
2299
2300 static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2301                             u64 in_param, u64 *out_param, int port)
2302 {
2303         struct mlx4_priv *priv = mlx4_priv(dev);
2304         struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2305         int err = 0;
2306
2307         port = mlx4_slave_convert_port(
2308                         dev, slave, port);
2309
2310         if (port < 0)
2311                 return -EINVAL;
2312         switch (op) {
2313         case RES_OP_RESERVE_AND_MAP:
2314                 if (slave_state[slave].old_vlan_api)
2315                         return 0;
2316                 if (!port)
2317                         return -EINVAL;
2318                 vlan_del_from_slave(dev, slave, in_param, port);
2319                 __mlx4_unregister_vlan(dev, port, in_param);
2320                 break;
2321         default:
2322                 err = -EINVAL;
2323                 break;
2324         }
2325
2326         return err;
2327 }
2328
2329 static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2330                             u64 in_param, u64 *out_param)
2331 {
2332         int index;
2333         int err;
2334
2335         if (op != RES_OP_RESERVE)
2336                 return -EINVAL;
2337
2338         index = get_param_l(&in_param);
2339         err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2340         if (err)
2341                 return err;
2342
2343         __mlx4_counter_free(dev, index);
2344         mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2345
2346         return err;
2347 }
2348
2349 static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2350                           u64 in_param, u64 *out_param)
2351 {
2352         int xrcdn;
2353         int err;
2354
2355         if (op != RES_OP_RESERVE)
2356                 return -EINVAL;
2357
2358         xrcdn = get_param_l(&in_param);
2359         err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2360         if (err)
2361                 return err;
2362
2363         __mlx4_xrcd_free(dev, xrcdn);
2364
2365         return err;
2366 }
2367
2368 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2369                           struct mlx4_vhcr *vhcr,
2370                           struct mlx4_cmd_mailbox *inbox,
2371                           struct mlx4_cmd_mailbox *outbox,
2372                           struct mlx4_cmd_info *cmd)
2373 {
2374         int err = -EINVAL;
2375         int alop = vhcr->op_modifier;
2376
2377         switch (vhcr->in_modifier & 0xFF) {
2378         case RES_QP:
2379                 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2380                                   vhcr->in_param);
2381                 break;
2382
2383         case RES_MTT:
2384                 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2385                                    vhcr->in_param, &vhcr->out_param);
2386                 break;
2387
2388         case RES_MPT:
2389                 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2390                                    vhcr->in_param);
2391                 break;
2392
2393         case RES_CQ:
2394                 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2395                                   vhcr->in_param, &vhcr->out_param);
2396                 break;
2397
2398         case RES_SRQ:
2399                 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2400                                    vhcr->in_param, &vhcr->out_param);
2401                 break;
2402
2403         case RES_MAC:
2404                 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
2405                                    vhcr->in_param, &vhcr->out_param,
2406                                    (vhcr->in_modifier >> 8) & 0xFF);
2407                 break;
2408
2409         case RES_VLAN:
2410                 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
2411                                     vhcr->in_param, &vhcr->out_param,
2412                                     (vhcr->in_modifier >> 8) & 0xFF);
2413                 break;
2414
2415         case RES_COUNTER:
2416                 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2417                                        vhcr->in_param, &vhcr->out_param);
2418                 break;
2419
2420         case RES_XRCD:
2421                 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2422                                      vhcr->in_param, &vhcr->out_param);
2423                 break;
2424         default:
2425                 break;
2426         }
2427         return err;
2428 }
2429
2430 /* ugly but other choices are uglier */
2431 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2432 {
2433         return (be32_to_cpu(mpt->flags) >> 9) & 1;
2434 }
2435
2436 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
2437 {
2438         return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
2439 }
2440
2441 static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2442 {
2443         return be32_to_cpu(mpt->mtt_sz);
2444 }
2445
2446 static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2447 {
2448         return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2449 }
2450
2451 static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2452 {
2453         return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2454 }
2455
2456 static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2457 {
2458         return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2459 }
2460
2461 static int mr_is_region(struct mlx4_mpt_entry *mpt)
2462 {
2463         return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2464 }
2465
2466 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
2467 {
2468         return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2469 }
2470
2471 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
2472 {
2473         return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2474 }
2475
2476 static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2477 {
2478         int page_shift = (qpc->log_page_size & 0x3f) + 12;
2479         int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2480         int log_sq_stride = qpc->sq_size_stride & 7;
2481         int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2482         int log_rq_stride = qpc->rq_size_stride & 7;
2483         int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2484         int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
2485         u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2486         int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
2487         int sq_size;
2488         int rq_size;
2489         int total_pages;
2490         int total_mem;
2491         int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2492
2493         sq_size = 1 << (log_sq_size + log_sq_stride + 4);
2494         rq_size = (srq | rss | xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2495         total_mem = sq_size + rq_size;
2496         total_pages =
2497                 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
2498                                    page_shift);
2499
2500         return total_pages;
2501 }
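/*
 * Editorial note: a worked example of the computation above.  Assuming
 * log_sq_size = 10, log_sq_stride = 2, log_rq_size = 8, log_rq_stride = 2,
 * no SRQ/RSS/XRC, page_shift = 12 and page_offset = 0:
 *
 *	sq_size     = 1 << (10 + 2 + 4)               = 65536 bytes
 *	rq_size     = 1 << (8 + 2 + 4)                = 16384 bytes
 *	total_mem   = 65536 + 16384                   = 81920 bytes
 *	total_pages = roundup_pow_of_two(81920 >> 12) = 32
 */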
2502
2503 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2504                            int size, struct res_mtt *mtt)
2505 {
2506         int res_start = mtt->com.res_id;
2507         int res_size = (1 << mtt->order);
2508
2509         if (start < res_start || start + size > res_start + res_size)
2510                 return -EPERM;
2511         return 0;
2512 }
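/*
 * Editorial note: this is a pure containment check.  The window
 * [start, start + size) must fall inside the slave's reserved range
 * [res_id, res_id + (1 << order)); for example, a range reserved at
 * offset 0x100 with order 4 only covers MTT entries 0x100..0x10f.
 */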
2513
2514 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2515                            struct mlx4_vhcr *vhcr,
2516                            struct mlx4_cmd_mailbox *inbox,
2517                            struct mlx4_cmd_mailbox *outbox,
2518                            struct mlx4_cmd_info *cmd)
2519 {
2520         int err;
2521         int index = vhcr->in_modifier;
2522         struct res_mtt *mtt;
2523         struct res_mpt *mpt;
2524         int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2525         int phys;
2526         int id;
2527         u32 pd;
2528         int pd_slave;
2529
2530         id = index & mpt_mask(dev);
2531         err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2532         if (err)
2533                 return err;
2534
2535         /* Disable memory windows for VFs. */
2536         if (!mr_is_region(inbox->buf)) {
2537                 err = -EPERM;
2538                 goto ex_abort;
2539         }
2540
2541         /* Make sure the PD's slave-id bits are either zero or match this slave. */
2542         pd = mr_get_pd(inbox->buf);
2543         pd_slave = (pd >> 17) & 0x7f;
2544         if (pd_slave != 0 && pd_slave != slave) {
2545                 err = -EPERM;
2546                 goto ex_abort;
2547         }
2548
2549         if (mr_is_fmr(inbox->buf)) {
2550                 /* FMR and Bind Enable are forbidden in slave devices. */
2551                 if (mr_is_bind_enabled(inbox->buf)) {
2552                         err = -EPERM;
2553                         goto ex_abort;
2554                 }
2555                 /* FMR and Memory Windows are also forbidden. */
2556                 if (!mr_is_region(inbox->buf)) {
2557                         err = -EPERM;
2558                         goto ex_abort;
2559                 }
2560         }
2561
2562         phys = mr_phys_mpt(inbox->buf);
2563         if (!phys) {
2564                 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2565                 if (err)
2566                         goto ex_abort;
2567
2568                 err = check_mtt_range(dev, slave, mtt_base,
2569                                       mr_get_mtt_size(inbox->buf), mtt);
2570                 if (err)
2571                         goto ex_put;
2572
2573                 mpt->mtt = mtt;
2574         }
2575
2576         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2577         if (err)
2578                 goto ex_put;
2579
2580         if (!phys) {
2581                 atomic_inc(&mtt->ref_count);
2582                 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2583         }
2584
2585         res_end_move(dev, slave, RES_MPT, id);
2586         return 0;
2587
2588 ex_put:
2589         if (!phys)
2590                 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2591 ex_abort:
2592         res_abort_move(dev, slave, RES_MPT, id);
2593
2594         return err;
2595 }
2596
2597 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2598                            struct mlx4_vhcr *vhcr,
2599                            struct mlx4_cmd_mailbox *inbox,
2600                            struct mlx4_cmd_mailbox *outbox,
2601                            struct mlx4_cmd_info *cmd)
2602 {
2603         int err;
2604         int index = vhcr->in_modifier;
2605         struct res_mpt *mpt;
2606         int id;
2607
2608         id = index & mpt_mask(dev);
2609         err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2610         if (err)
2611                 return err;
2612
2613         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2614         if (err)
2615                 goto ex_abort;
2616
2617         if (mpt->mtt)
2618                 atomic_dec(&mpt->mtt->ref_count);
2619
2620         res_end_move(dev, slave, RES_MPT, id);
2621         return 0;
2622
2623 ex_abort:
2624         res_abort_move(dev, slave, RES_MPT, id);
2625
2626         return err;
2627 }
2628
2629 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2630                            struct mlx4_vhcr *vhcr,
2631                            struct mlx4_cmd_mailbox *inbox,
2632                            struct mlx4_cmd_mailbox *outbox,
2633                            struct mlx4_cmd_info *cmd)
2634 {
2635         int err;
2636         int index = vhcr->in_modifier;
2637         struct res_mpt *mpt;
2638         int id;
2639
2640         id = index & mpt_mask(dev);
2641         err = get_res(dev, slave, id, RES_MPT, &mpt);
2642         if (err)
2643                 return err;
2644
2645         if (mpt->com.from_state == RES_MPT_MAPPED) {
2646                 /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
2647                  * that, the VF must read the MPT. But since the MPT entry memory is not
2648                  * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
2649                  * entry contents. To guarantee that the MPT cannot be changed, the driver
2650                  * must perform HW2SW_MPT before this query and return the MPT entry to HW
2651                  * ownership fofollowing the change. The change here allows the VF to
2652                  * perform QUERY_MPT also when the entry is in SW ownership.
2653                  */
2654                 struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
2655                                         &mlx4_priv(dev)->mr_table.dmpt_table,
2656                                         mpt->key, NULL);
2657
2658                 if (!mpt_entry || !outbox->buf) {
2659                         err = -EINVAL;
2660                         goto out;
2661                 }
2662
2663                 memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));
2664
2665                 err = 0;
2666         } else if (mpt->com.from_state == RES_MPT_HW) {
2667                 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2668         } else {
2669                 err = -EBUSY;
2670                 goto out;
2671         }
2672
2673
2674 out:
2675         put_res(dev, slave, id, RES_MPT);
2676         return err;
2677 }
2678
2679 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2680 {
2681         return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2682 }
2683
2684 static int qp_get_scqn(struct mlx4_qp_context *qpc)
2685 {
2686         return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2687 }
2688
2689 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2690 {
2691         return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2692 }
2693
2694 static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2695                                   struct mlx4_qp_context *context)
2696 {
2697         u32 qpn = vhcr->in_modifier & 0xffffff;
2698         u32 qkey = 0;
2699
2700         if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2701                 return;
2702
2703         /* adjust qkey in qp context */
2704         context->qkey = cpu_to_be32(qkey);
2705 }
2706
2707 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2708                              struct mlx4_vhcr *vhcr,
2709                              struct mlx4_cmd_mailbox *inbox,
2710                              struct mlx4_cmd_mailbox *outbox,
2711                              struct mlx4_cmd_info *cmd)
2712 {
2713         int err;
2714         int qpn = vhcr->in_modifier & 0x7fffff;
2715         struct res_mtt *mtt;
2716         struct res_qp *qp;
2717         struct mlx4_qp_context *qpc = inbox->buf + 8;
2718         int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2719         int mtt_size = qp_get_mtt_size(qpc);
2720         struct res_cq *rcq;
2721         struct res_cq *scq;
2722         int rcqn = qp_get_rcqn(qpc);
2723         int scqn = qp_get_scqn(qpc);
2724         u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2725         int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2726         struct res_srq *srq;
2727         int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2728
2729         err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2730         if (err)
2731                 return err;
2732         qp->local_qpn = local_qpn;
2733         qp->sched_queue = 0;
2734         qp->param3 = 0;
2735         qp->vlan_control = 0;
2736         qp->fvl_rx = 0;
2737         qp->pri_path_fl = 0;
2738         qp->vlan_index = 0;
2739         qp->feup = 0;
2740         qp->qpc_flags = be32_to_cpu(qpc->flags);
2741
2742         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2743         if (err)
2744                 goto ex_abort;
2745
2746         err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2747         if (err)
2748                 goto ex_put_mtt;
2749
2750         err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2751         if (err)
2752                 goto ex_put_mtt;
2753
2754         if (scqn != rcqn) {
2755                 err = get_res(dev, slave, scqn, RES_CQ, &scq);
2756                 if (err)
2757                         goto ex_put_rcq;
2758         } else
2759                 scq = rcq;
2760
2761         if (use_srq) {
2762                 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2763                 if (err)
2764                         goto ex_put_scq;
2765         }
2766
2767         adjust_proxy_tun_qkey(dev, vhcr, qpc);
2768         update_pkey_index(dev, slave, inbox);
2769         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2770         if (err)
2771                 goto ex_put_srq;
2772         atomic_inc(&mtt->ref_count);
2773         qp->mtt = mtt;
2774         atomic_inc(&rcq->ref_count);
2775         qp->rcq = rcq;
2776         atomic_inc(&scq->ref_count);
2777         qp->scq = scq;
2778
2779         if (scqn != rcqn)
2780                 put_res(dev, slave, scqn, RES_CQ);
2781
2782         if (use_srq) {
2783                 atomic_inc(&srq->ref_count);
2784                 put_res(dev, slave, srqn, RES_SRQ);
2785                 qp->srq = srq;
2786         }
2787         put_res(dev, slave, rcqn, RES_CQ);
2788         put_res(dev, slave, mtt_base, RES_MTT);
2789         res_end_move(dev, slave, RES_QP, qpn);
2790
2791         return 0;
2792
2793 ex_put_srq:
2794         if (use_srq)
2795                 put_res(dev, slave, srqn, RES_SRQ);
2796 ex_put_scq:
2797         if (scqn != rcqn)
2798                 put_res(dev, slave, scqn, RES_CQ);
2799 ex_put_rcq:
2800         put_res(dev, slave, rcqn, RES_CQ);
2801 ex_put_mtt:
2802         put_res(dev, slave, mtt_base, RES_MTT);
2803 ex_abort:
2804         res_abort_move(dev, slave, RES_QP, qpn);
2805
2806         return err;
2807 }
2808
2809 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2810 {
2811         return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2812 }
2813
2814 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2815 {
2816         int log_eq_size = eqc->log_eq_size & 0x1f;
2817         int page_shift = (eqc->log_page_size & 0x3f) + 12;
2818
2819         if (log_eq_size + 5 < page_shift)
2820                 return 1;
2821
2822         return 1 << (log_eq_size + 5 - page_shift);
2823 }
2824
2825 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
2826 {
2827         return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2828 }
2829
2830 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2831 {
2832         int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
2833         int page_shift = (cqc->log_page_size & 0x3f) + 12;
2834
2835         if (log_cq_size + 5 < page_shift)
2836                 return 1;
2837
2838         return 1 << (log_cq_size + 5 - page_shift);
2839 }
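/*
 * Editorial note: in both *_get_mtt_size() helpers above, the "+ 5" term
 * reflects the 32-byte (1 << 5) size of an EQE/CQE, so the result is the
 * number of pages needed to hold all entries, clamped to at least one
 * page.  For example, log_cq_size = 10 with page_shift = 12 yields
 * 1 << (10 + 5 - 12) = 8 pages.
 */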
2840
2841 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2842                           struct mlx4_vhcr *vhcr,
2843                           struct mlx4_cmd_mailbox *inbox,
2844                           struct mlx4_cmd_mailbox *outbox,
2845                           struct mlx4_cmd_info *cmd)
2846 {
2847         int err;
2848         int eqn = vhcr->in_modifier;
2849         int res_id = (slave << 8) | eqn;
2850         struct mlx4_eq_context *eqc = inbox->buf;
2851         int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
2852         int mtt_size = eq_get_mtt_size(eqc);
2853         struct res_eq *eq;
2854         struct res_mtt *mtt;
2855
2856         err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2857         if (err)
2858                 return err;
2859         err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
2860         if (err)
2861                 goto out_add;
2862
2863         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2864         if (err)
2865                 goto out_move;
2866
2867         err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2868         if (err)
2869                 goto out_put;
2870
2871         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2872         if (err)
2873                 goto out_put;
2874
2875         atomic_inc(&mtt->ref_count);
2876         eq->mtt = mtt;
2877         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2878         res_end_move(dev, slave, RES_EQ, res_id);
2879         return 0;
2880
2881 out_put:
2882         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2883 out_move:
2884         res_abort_move(dev, slave, RES_EQ, res_id);
2885 out_add:
2886         rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2887         return err;
2888 }
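/*
 * Editorial note: EQs are tracked with a composite key, (slave << 8) | eqn,
 * so different slaves may own the same EQ number without colliding in the
 * RES_EQ tree.  mlx4_HW2SW_EQ_wrapper() below reconstructs the same res_id
 * when tearing the EQ down.
 */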
2889
2890 int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
2891                             struct mlx4_vhcr *vhcr,
2892                             struct mlx4_cmd_mailbox *inbox,
2893                             struct mlx4_cmd_mailbox *outbox,
2894                             struct mlx4_cmd_info *cmd)
2895 {
2896         int err;
2897         u8 get = vhcr->op_modifier;
2898
2899         if (get != 1)
2900                 return -EPERM;
2901
2902         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2903
2904         return err;
2905 }
2906
2907 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2908                               int len, struct res_mtt **res)
2909 {
2910         struct mlx4_priv *priv = mlx4_priv(dev);
2911         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2912         struct res_mtt *mtt;
2913         int err = -EINVAL;
2914
2915         spin_lock_irq(mlx4_tlock(dev));
2916         list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
2917                             com.list) {
2918                 if (!check_mtt_range(dev, slave, start, len, mtt)) {
2919                         *res = mtt;
2920                         mtt->com.from_state = mtt->com.state;
2921                         mtt->com.state = RES_MTT_BUSY;
2922                         err = 0;
2923                         break;
2924                 }
2925         }
2926         spin_unlock_irq(mlx4_tlock(dev));
2927
2928         return err;
2929 }
2930
2931 static int verify_qp_parameters(struct mlx4_dev *dev,
2932                                 struct mlx4_vhcr *vhcr,
2933                                 struct mlx4_cmd_mailbox *inbox,
2934                                 enum qp_transition transition, u8 slave)
2935 {
2936         u32                     qp_type;
2937         u32                     qpn;
2938         struct mlx4_qp_context  *qp_ctx;
2939         enum mlx4_qp_optpar     optpar;
2940         int port;
2941         int num_gids;
2942
2943         qp_ctx  = inbox->buf + 8;
2944         qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2945         optpar  = be32_to_cpu(*(__be32 *) inbox->buf);
2946
2947         switch (qp_type) {
2948         case MLX4_QP_ST_RC:
2949         case MLX4_QP_ST_XRC:
2950         case MLX4_QP_ST_UC:
2951                 switch (transition) {
2952                 case QP_TRANS_INIT2RTR:
2953                 case QP_TRANS_RTR2RTS:
2954                 case QP_TRANS_RTS2RTS:
2955                 case QP_TRANS_SQD2SQD:
2956                 case QP_TRANS_SQD2RTS:
2957                         if (slave != mlx4_master_func_num(dev)) {
2958                                 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
2959                                         port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
2960                                         if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
2961                                                 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
2962                                         else
2963                                                 num_gids = 1;
2964                                         if (qp_ctx->pri_path.mgid_index >= num_gids)
2965                                                 return -EINVAL;
2966                                 }
2967                                 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
2968                                         port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
2969                                         if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
2970                                                 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
2971                                         else
2972                                                 num_gids = 1;
2973                                         if (qp_ctx->alt_path.mgid_index >= num_gids)
2974                                                 return -EINVAL;
2975                                 }
                             }
2976                         break;
2977                 default:
2978                         break;
2979                 }
2980                 break;
2981
2982         case MLX4_QP_ST_MLX:
2983                 qpn = vhcr->in_modifier & 0x7fffff;
2984                 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
2985                 if (transition == QP_TRANS_INIT2RTR &&
2986                     slave != mlx4_master_func_num(dev) &&
2987                     mlx4_is_qp_reserved(dev, qpn) &&
2988                     !mlx4_vf_smi_enabled(dev, slave, port)) {
2989                         /* only enabled VFs may create MLX proxy QPs */
2990                         mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
2991                                  __func__, slave, port);
2992                         return -EPERM;
2993                 }
2994                 break;
2995
2996         default:
2997                 break;
2998         }
2999
3000         return 0;
3001 }
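
     /* A worked example of the port decode used above (illustrative
      * values, not taken from a real mailbox): bit 6 of sched_queue
      * selects the physical port, so
      *
      *     sched_queue = 0x83 -> (0x83 >> 6 & 1) + 1 = 1  (port 1)
      *     sched_queue = 0xc3 -> (0xc3 >> 6 & 1) + 1 = 2  (port 2)
      *
      * and mgid_index is then bounded by the number of GIDs the slave
      * owns on that port.
      */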
3002
3003 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
3004                            struct mlx4_vhcr *vhcr,
3005                            struct mlx4_cmd_mailbox *inbox,
3006                            struct mlx4_cmd_mailbox *outbox,
3007                            struct mlx4_cmd_info *cmd)
3008 {
3009         struct mlx4_mtt mtt;
3010         __be64 *page_list = inbox->buf;
3011         u64 *pg_list = (u64 *)page_list;
3012         int i;
3013         struct res_mtt *rmtt = NULL;
3014         int start = be64_to_cpu(page_list[0]);
3015         int npages = vhcr->in_modifier;
3016         int err;
3017
3018         err = get_containing_mtt(dev, slave, start, npages, &rmtt);
3019         if (err)
3020                 return err;
3021
3022         /* Call the SW implementation of write_mtt:
3023          * - Prepare a dummy mtt struct
3024          * - Translate inbox contents to simple addresses in host endianness */
3025         mtt.offset = 0;  /* TBD: this is broken, but it is not handled since
3026                             we don't really use the offset */
3027         mtt.order = 0;
3028         mtt.page_shift = 0;
3029         for (i = 0; i < npages; ++i)
3030                 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
3031
3032         err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
3033                                ((u64 *)page_list + 2));
3034
3035         if (rmtt)
3036                 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
3037
3038         return err;
3039 }
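
     /* For reference, the WRITE_MTT inbox layout assumed above (a sketch
      * inferred from how the buffer is used here, not quoted from the
      * firmware spec):
      *
      *     qword 0:             starting MTT offset
      *     qword 1:             reserved
      *     qword 2..npages+1:   page DMA addresses, bit 0 = present flag
      *
      * The loop strips the present bit (& ~1ULL) so that
      * __mlx4_write_mtt() receives plain host-endian addresses.
      */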
3040
3041 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3042                           struct mlx4_vhcr *vhcr,
3043                           struct mlx4_cmd_mailbox *inbox,
3044                           struct mlx4_cmd_mailbox *outbox,
3045                           struct mlx4_cmd_info *cmd)
3046 {
3047         int eqn = vhcr->in_modifier;
3048         int res_id = eqn | (slave << 8);
3049         struct res_eq *eq;
3050         int err;
3051
3052         err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
3053         if (err)
3054                 return err;
3055
3056         err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
3057         if (err)
3058                 goto ex_abort;
3059
3060         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3061         if (err)
3062                 goto ex_put;
3063
3064         atomic_dec(&eq->mtt->ref_count);
3065         put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3066         res_end_move(dev, slave, RES_EQ, res_id);
3067         rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3068
3069         return 0;
3070
3071 ex_put:
3072         put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3073 ex_abort:
3074         res_abort_move(dev, slave, RES_EQ, res_id);
3075
3076         return err;
3077 }
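
     /* EQ resource ids are packed as (slave << 8) | eqn, e.g. slave 3
      * with eqn 5 yields res_id 0x305, keeping per-slave EQ entries
      * unique in the shared resource tracker.
      */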
3078
3079 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3080 {
3081         struct mlx4_priv *priv = mlx4_priv(dev);
3082         struct mlx4_slave_event_eq_info *event_eq;
3083         struct mlx4_cmd_mailbox *mailbox;
3084         u32 in_modifier = 0;
3085         int err;
3086         int res_id;
3087         struct res_eq *req;
3088
3089         if (!priv->mfunc.master.slave_state)
3090                 return -EINVAL;
3091
3092         event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
3093
3094         /* Create the event only if the slave is registered */
3095         if (event_eq->eqn < 0)
3096                 return 0;
3097
3098         mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3099         res_id = (slave << 8) | event_eq->eqn;
3100         err = get_res(dev, slave, res_id, RES_EQ, &req);
3101         if (err)
3102                 goto unlock;
3103
3104         if (req->com.from_state != RES_EQ_HW) {
3105                 err = -EINVAL;
3106                 goto put;
3107         }
3108
3109         mailbox = mlx4_alloc_cmd_mailbox(dev);
3110         if (IS_ERR(mailbox)) {
3111                 err = PTR_ERR(mailbox);
3112                 goto put;
3113         }
3114
3115         if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3116                 ++event_eq->token;
3117                 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3118         }
3119
3120         memcpy(mailbox->buf, (u8 *) eqe, 28);
3121
3122         in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
3123
3124         err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3125                        MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3126                        MLX4_CMD_NATIVE);
3127
3128         put_res(dev, slave, res_id, RES_EQ);
3129         mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3130         mlx4_free_cmd_mailbox(dev, mailbox);
3131         return err;
3132
3133 put:
3134         put_res(dev, slave, res_id, RES_EQ);
3135
3136 unlock:
3137         mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3138         return err;
3139 }
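
     /* The GEN_EQE in_modifier packs the slave in bits 0-7 and the
      * target EQN in bits 16-23; e.g. slave 3 posting to eqn 5 gives
      * 0x00050003 (illustrative values).  Only 28 of the EQE's 32 bytes
      * are copied; the trailing reserved bytes and the ownership byte
      * are not taken from the slave.
      */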
3140
3141 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3142                           struct mlx4_vhcr *vhcr,
3143                           struct mlx4_cmd_mailbox *inbox,
3144                           struct mlx4_cmd_mailbox *outbox,
3145                           struct mlx4_cmd_info *cmd)
3146 {
3147         int eqn = vhcr->in_modifier;
3148         int res_id = eqn | (slave << 8);
3149         struct res_eq *eq;
3150         int err;
3151
3152         err = get_res(dev, slave, res_id, RES_EQ, &eq);
3153         if (err)
3154                 return err;
3155
3156         if (eq->com.from_state != RES_EQ_HW) {
3157                 err = -EINVAL;
3158                 goto ex_put;
3159         }
3160
3161         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3162
3163 ex_put:
3164         put_res(dev, slave, res_id, RES_EQ);
3165         return err;
3166 }
3167
3168 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3169                           struct mlx4_vhcr *vhcr,
3170                           struct mlx4_cmd_mailbox *inbox,
3171                           struct mlx4_cmd_mailbox *outbox,
3172                           struct mlx4_cmd_info *cmd)
3173 {
3174         int err;
3175         int cqn = vhcr->in_modifier;
3176         struct mlx4_cq_context *cqc = inbox->buf;
3177         int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3178         struct res_cq *cq;
3179         struct res_mtt *mtt;
3180
3181         err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3182         if (err)
3183                 return err;
3184         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3185         if (err)
3186                 goto out_move;
3187         err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3188         if (err)
3189                 goto out_put;
3190         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3191         if (err)
3192                 goto out_put;
3193         atomic_inc(&mtt->ref_count);
3194         cq->mtt = mtt;
3195         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3196         res_end_move(dev, slave, RES_CQ, cqn);
3197         return 0;
3198
3199 out_put:
3200         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3201 out_move:
3202         res_abort_move(dev, slave, RES_CQ, cqn);
3203         return err;
3204 }
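
     /* cq_get_mtt_addr() returns a byte offset into the MTT table;
      * dividing by mtt_entry_sz (typically 8 bytes, as reported by
      * QUERY_DEV_CAP) converts it to the first MTT index, which is what
      * the resource tracker stores.
      */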
3205
3206 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3207                           struct mlx4_vhcr *vhcr,
3208                           struct mlx4_cmd_mailbox *inbox,
3209                           struct mlx4_cmd_mailbox *outbox,
3210                           struct mlx4_cmd_info *cmd)
3211 {
3212         int err;
3213         int cqn = vhcr->in_modifier;
3214         struct res_cq *cq;
3215
3216         err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3217         if (err)
3218                 return err;
3219         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3220         if (err)
3221                 goto out_move;
3222         atomic_dec(&cq->mtt->ref_count);
3223         res_end_move(dev, slave, RES_CQ, cqn);
3224         return 0;
3225
3226 out_move:
3227         res_abort_move(dev, slave, RES_CQ, cqn);
3228         return err;
3229 }
3230
3231 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3232                           struct mlx4_vhcr *vhcr,
3233                           struct mlx4_cmd_mailbox *inbox,
3234                           struct mlx4_cmd_mailbox *outbox,
3235                           struct mlx4_cmd_info *cmd)
3236 {
3237         int cqn = vhcr->in_modifier;
3238         struct res_cq *cq;
3239         int err;
3240
3241         err = get_res(dev, slave, cqn, RES_CQ, &cq);
3242         if (err)
3243                 return err;
3244
3245         if (cq->com.from_state != RES_CQ_HW)
3246                 goto ex_put;
3247
3248         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3249 ex_put:
3250         put_res(dev, slave, cqn, RES_CQ);
3251
3252         return err;
3253 }
3254
3255 static int handle_resize(struct mlx4_dev *dev, int slave,
3256                          struct mlx4_vhcr *vhcr,
3257                          struct mlx4_cmd_mailbox *inbox,
3258                          struct mlx4_cmd_mailbox *outbox,
3259                          struct mlx4_cmd_info *cmd,
3260                          struct res_cq *cq)
3261 {
3262         int err;
3263         struct res_mtt *orig_mtt;
3264         struct res_mtt *mtt;
3265         struct mlx4_cq_context *cqc = inbox->buf;
3266         int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3267
3268         err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3269         if (err)
3270                 return err;
3271
3272         if (orig_mtt != cq->mtt) {
3273                 err = -EINVAL;
3274                 goto ex_put;
3275         }
3276
3277         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3278         if (err)
3279                 goto ex_put;
3280
3281         err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3282         if (err)
3283                 goto ex_put1;
3284         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3285         if (err)
3286                 goto ex_put1;
3287         atomic_dec(&orig_mtt->ref_count);
3288         put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3289         atomic_inc(&mtt->ref_count);
3290         cq->mtt = mtt;
3291         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3292         return 0;
3293
3294 ex_put1:
3295         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3296 ex_put:
3297         put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3298
3299         return err;
3300
3301 }
3302
3303 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3304                            struct mlx4_vhcr *vhcr,
3305                            struct mlx4_cmd_mailbox *inbox,
3306                            struct mlx4_cmd_mailbox *outbox,
3307                            struct mlx4_cmd_info *cmd)
3308 {
3309         int cqn = vhcr->in_modifier;
3310         struct res_cq *cq;
3311         int err;
3312
3313         err = get_res(dev, slave, cqn, RES_CQ, &cq);
3314         if (err)
3315                 return err;
3316
3317         if (cq->com.from_state != RES_CQ_HW)
3318                 goto ex_put;
3319
3320         if (vhcr->op_modifier == 0) {
3321                 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3322                 goto ex_put;
3323         }
3324
3325         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3326 ex_put:
3327         put_res(dev, slave, cqn, RES_CQ);
3328
3329         return err;
3330 }
3331
3332 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3333 {
3334         int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3335         int log_rq_stride = srqc->logstride & 7;
3336         int page_shift = (srqc->log_page_size & 0x3f) + 12;
3337
3338         if (log_srq_size + log_rq_stride + 4 < page_shift)
3339                 return 1;
3340
3341         return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3342 }
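
     /* Worked example (illustrative numbers): log_srq_size = 10 and
      * log_rq_stride = 2 describe 2^10 WQEs of 2^(2 + 4) = 64 bytes,
      * i.e. a 2^16 = 64KB buffer.  With 4KB pages (log_page_size = 0,
      * so page_shift = 12) that takes 1 << (16 - 12) = 16 MTT entries.
      */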
3343
3344 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3345                            struct mlx4_vhcr *vhcr,
3346                            struct mlx4_cmd_mailbox *inbox,
3347                            struct mlx4_cmd_mailbox *outbox,
3348                            struct mlx4_cmd_info *cmd)
3349 {
3350         int err;
3351         int srqn = vhcr->in_modifier;
3352         struct res_mtt *mtt;
3353         struct res_srq *srq;
3354         struct mlx4_srq_context *srqc = inbox->buf;
3355         int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3356
3357         if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3358                 return -EINVAL;
3359
3360         err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3361         if (err)
3362                 return err;
3363         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3364         if (err)
3365                 goto ex_abort;
3366         err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3367                               mtt);
3368         if (err)
3369                 goto ex_put_mtt;
3370
3371         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3372         if (err)
3373                 goto ex_put_mtt;
3374
3375         atomic_inc(&mtt->ref_count);
3376         srq->mtt = mtt;
3377         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3378         res_end_move(dev, slave, RES_SRQ, srqn);
3379         return 0;
3380
3381 ex_put_mtt:
3382         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3383 ex_abort:
3384         res_abort_move(dev, slave, RES_SRQ, srqn);
3385
3386         return err;
3387 }
3388
3389 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3390                            struct mlx4_vhcr *vhcr,
3391                            struct mlx4_cmd_mailbox *inbox,
3392                            struct mlx4_cmd_mailbox *outbox,
3393                            struct mlx4_cmd_info *cmd)
3394 {
3395         int err;
3396         int srqn = vhcr->in_modifier;
3397         struct res_srq *srq;
3398
3399         err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3400         if (err)
3401                 return err;
3402         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3403         if (err)
3404                 goto ex_abort;
3405         atomic_dec(&srq->mtt->ref_count);
3406         if (srq->cq)
3407                 atomic_dec(&srq->cq->ref_count);
3408         res_end_move(dev, slave, RES_SRQ, srqn);
3409
3410         return 0;
3411
3412 ex_abort:
3413         res_abort_move(dev, slave, RES_SRQ, srqn);
3414
3415         return err;
3416 }
3417
3418 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3419                            struct mlx4_vhcr *vhcr,
3420                            struct mlx4_cmd_mailbox *inbox,
3421                            struct mlx4_cmd_mailbox *outbox,
3422                            struct mlx4_cmd_info *cmd)
3423 {
3424         int err;
3425         int srqn = vhcr->in_modifier;
3426         struct res_srq *srq;
3427
3428         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3429         if (err)
3430                 return err;
3431         if (srq->com.from_state != RES_SRQ_HW) {
3432                 err = -EBUSY;
3433                 goto out;
3434         }
3435         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3436 out:
3437         put_res(dev, slave, srqn, RES_SRQ);
3438         return err;
3439 }
3440
3441 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3442                          struct mlx4_vhcr *vhcr,
3443                          struct mlx4_cmd_mailbox *inbox,
3444                          struct mlx4_cmd_mailbox *outbox,
3445                          struct mlx4_cmd_info *cmd)
3446 {
3447         int err;
3448         int srqn = vhcr->in_modifier;
3449         struct res_srq *srq;
3450
3451         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3452         if (err)
3453                 return err;
3454
3455         if (srq->com.from_state != RES_SRQ_HW) {
3456                 err = -EBUSY;
3457                 goto out;
3458         }
3459
3460         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3461 out:
3462         put_res(dev, slave, srqn, RES_SRQ);
3463         return err;
3464 }
3465
3466 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3467                         struct mlx4_vhcr *vhcr,
3468                         struct mlx4_cmd_mailbox *inbox,
3469                         struct mlx4_cmd_mailbox *outbox,
3470                         struct mlx4_cmd_info *cmd)
3471 {
3472         int err;
3473         int qpn = vhcr->in_modifier & 0x7fffff;
3474         struct res_qp *qp;
3475
3476         err = get_res(dev, slave, qpn, RES_QP, &qp);
3477         if (err)
3478                 return err;
3479         if (qp->com.from_state != RES_QP_HW) {
3480                 err = -EBUSY;
3481                 goto out;
3482         }
3483
3484         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3485 out:
3486         put_res(dev, slave, qpn, RES_QP);
3487         return err;
3488 }
3489
3490 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3491                               struct mlx4_vhcr *vhcr,
3492                               struct mlx4_cmd_mailbox *inbox,
3493                               struct mlx4_cmd_mailbox *outbox,
3494                               struct mlx4_cmd_info *cmd)
3495 {
3496         struct mlx4_qp_context *context = inbox->buf + 8;
3497         adjust_proxy_tun_qkey(dev, vhcr, context);
3498         update_pkey_index(dev, slave, inbox);
3499         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3500 }
3501
3502 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3503                                   struct mlx4_qp_context *qpc,
3504                                   struct mlx4_cmd_mailbox *inbox)
3505 {
3506         enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3507         u8 pri_sched_queue;
3508         int port = mlx4_slave_convert_port(
3509                    dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3510
3511         if (port < 0)
3512                 return -EINVAL;
3513
3514         pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3515                           ((port & 1) << 6);
3516
3517         if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH ||
3518             mlx4_is_eth(dev, port + 1)) {
3519                 qpc->pri_path.sched_queue = pri_sched_queue;
3520         }
3521
3522         if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3523                 port = mlx4_slave_convert_port(
3524                                 dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3525                                 + 1) - 1;
3526                 if (port < 0)
3527                         return -EINVAL;
3528                 qpc->alt_path.sched_queue =
3529                         (qpc->alt_path.sched_queue & ~(1 << 6)) |
3530                         (port & 1) << 6;
3531         }
3532         return 0;
3533 }
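
     /* mlx4_slave_convert_port() maps the slave's notion of the port to
      * the physical port; the function above then rewrites bit 6 of the
      * sched_queue fields so the firmware schedules on the real port.
      */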
3534
3535 static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3536                                 struct mlx4_qp_context *qpc,
3537                                 struct mlx4_cmd_mailbox *inbox)
3538 {
3539         u64 mac;
3540         int port;
3541         u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3542         u8 sched = *(u8 *)(inbox->buf + 64);
3543         u8 smac_ix;
3544
3545         port = (sched >> 6 & 1) + 1;
3546         if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3547                 smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3548                 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3549                         return -ENOENT;
3550         }
3551         return 0;
3552 }
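
     /* On a RoCE (Ethernet) port the low 7 bits of pri_path.grh_mylmc
      * carry the source-MAC table index, so the check above rejects a
      * transition whose smac index does not resolve to a MAC registered
      * to this slave.
      */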
3553
3554 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3555                              struct mlx4_vhcr *vhcr,
3556                              struct mlx4_cmd_mailbox *inbox,
3557                              struct mlx4_cmd_mailbox *outbox,
3558                              struct mlx4_cmd_info *cmd)
3559 {
3560         int err;
3561         struct mlx4_qp_context *qpc = inbox->buf + 8;
3562         int qpn = vhcr->in_modifier & 0x7fffff;
3563         struct res_qp *qp;
3564         u8 orig_sched_queue;
3565         __be32  orig_param3 = qpc->param3;
3566         u8 orig_vlan_control = qpc->pri_path.vlan_control;
3567         u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3568         u8 orig_pri_path_fl = qpc->pri_path.fl;
3569         u8 orig_vlan_index = qpc->pri_path.vlan_index;
3570         u8 orig_feup = qpc->pri_path.feup;
3571
3572         err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3573         if (err)
3574                 return err;
3575         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
3576         if (err)
3577                 return err;
3578
3579         if (roce_verify_mac(dev, slave, qpc, inbox))
3580                 return -EINVAL;
3581
3582         update_pkey_index(dev, slave, inbox);
3583         update_gid(dev, inbox, (u8)slave);
3584         adjust_proxy_tun_qkey(dev, vhcr, qpc);
3585         orig_sched_queue = qpc->pri_path.sched_queue;
3586         err = update_vport_qp_param(dev, inbox, slave, qpn);
3587         if (err)
3588                 return err;
3589
3590         err = get_res(dev, slave, qpn, RES_QP, &qp);
3591         if (err)
3592                 return err;
3593         if (qp->com.from_state != RES_QP_HW) {
3594                 err = -EBUSY;
3595                 goto out;
3596         }
3597
3598         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3599 out:
3600         /* If there was no error, save the sched_queue value passed in by the
3601          * VF.  This is essentially the QoS value provided by the VF, and will
3602          * be useful if we allow dynamic changes from VST back to VGT.
3603          */
3604         if (!err) {
3605                 qp->sched_queue = orig_sched_queue;
3606                 qp->param3      = orig_param3;
3607                 qp->vlan_control = orig_vlan_control;
3608                 qp->fvl_rx      =  orig_fvl_rx;
3609                 qp->pri_path_fl = orig_pri_path_fl;
3610                 qp->vlan_index  = orig_vlan_index;
3611                 qp->feup        = orig_feup;
3612         }
3613         put_res(dev, slave, qpn, RES_QP);
3614         return err;
3615 }
3616
3617 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3618                             struct mlx4_vhcr *vhcr,
3619                             struct mlx4_cmd_mailbox *inbox,
3620                             struct mlx4_cmd_mailbox *outbox,
3621                             struct mlx4_cmd_info *cmd)
3622 {
3623         int err;
3624         struct mlx4_qp_context *context = inbox->buf + 8;
3625
3626         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3627         if (err)
3628                 return err;
3629         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
3630         if (err)
3631                 return err;
3632
3633         update_pkey_index(dev, slave, inbox);
3634         update_gid(dev, inbox, (u8)slave);
3635         adjust_proxy_tun_qkey(dev, vhcr, context);
3636         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3637 }
3638
3639 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3640                             struct mlx4_vhcr *vhcr,
3641                             struct mlx4_cmd_mailbox *inbox,
3642                             struct mlx4_cmd_mailbox *outbox,
3643                             struct mlx4_cmd_info *cmd)
3644 {
3645         int err;
3646         struct mlx4_qp_context *context = inbox->buf + 8;
3647
3648         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3649         if (err)
3650                 return err;
3651         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
3652         if (err)
3653                 return err;
3654
3655         update_pkey_index(dev, slave, inbox);
3656         update_gid(dev, inbox, (u8)slave);
3657         adjust_proxy_tun_qkey(dev, vhcr, context);
3658         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3659 }
3660
3661
3662 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3663                               struct mlx4_vhcr *vhcr,
3664                               struct mlx4_cmd_mailbox *inbox,
3665                               struct mlx4_cmd_mailbox *outbox,
3666                               struct mlx4_cmd_info *cmd)
3667 {
3668         struct mlx4_qp_context *context = inbox->buf + 8;
3669         int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3670         if (err)
3671                 return err;
3672         adjust_proxy_tun_qkey(dev, vhcr, context);
3673         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3674 }
3675
3676 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3677                             struct mlx4_vhcr *vhcr,
3678                             struct mlx4_cmd_mailbox *inbox,
3679                             struct mlx4_cmd_mailbox *outbox,
3680                             struct mlx4_cmd_info *cmd)
3681 {
3682         int err;
3683         struct mlx4_qp_context *context = inbox->buf + 8;
3684
3685         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3686         if (err)
3687                 return err;
3688         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
3689         if (err)
3690                 return err;
3691
3692         adjust_proxy_tun_qkey(dev, vhcr, context);
3693         update_gid(dev, inbox, (u8)slave);
3694         update_pkey_index(dev, slave, inbox);
3695         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3696 }
3697
3698 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3699                             struct mlx4_vhcr *vhcr,
3700                             struct mlx4_cmd_mailbox *inbox,
3701                             struct mlx4_cmd_mailbox *outbox,
3702                             struct mlx4_cmd_info *cmd)
3703 {
3704         int err;
3705         struct mlx4_qp_context *context = inbox->buf + 8;
3706
3707         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3708         if (err)
3709                 return err;
3710         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
3711         if (err)
3712                 return err;
3713
3714         adjust_proxy_tun_qkey(dev, vhcr, context);
3715         update_gid(dev, inbox, (u8)slave);
3716         update_pkey_index(dev, slave, inbox);
3717         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3718 }
3719
3720 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3721                          struct mlx4_vhcr *vhcr,
3722                          struct mlx4_cmd_mailbox *inbox,
3723                          struct mlx4_cmd_mailbox *outbox,
3724                          struct mlx4_cmd_info *cmd)
3725 {
3726         int err;
3727         int qpn = vhcr->in_modifier & 0x7fffff;
3728         struct res_qp *qp;
3729
3730         err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3731         if (err)
3732                 return err;
3733         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3734         if (err)
3735                 goto ex_abort;
3736
3737         atomic_dec(&qp->mtt->ref_count);
3738         atomic_dec(&qp->rcq->ref_count);
3739         atomic_dec(&qp->scq->ref_count);
3740         if (qp->srq)
3741                 atomic_dec(&qp->srq->ref_count);
3742         res_end_move(dev, slave, RES_QP, qpn);
3743         return 0;
3744
3745 ex_abort:
3746         res_abort_move(dev, slave, RES_QP, qpn);
3747
3748         return err;
3749 }
3750
3751 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3752                                 struct res_qp *rqp, u8 *gid)
3753 {
3754         struct res_gid *res;
3755
3756         list_for_each_entry(res, &rqp->mcg_list, list) {
3757                 if (!memcmp(res->gid, gid, 16))
3758                         return res;
3759         }
3760         return NULL;
3761 }
3762
3763 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3764                        u8 *gid, enum mlx4_protocol prot,
3765                        enum mlx4_steer_type steer, u64 reg_id)
3766 {
3767         struct res_gid *res;
3768         int err;
3769
3770         res = kzalloc(sizeof *res, GFP_KERNEL);
3771         if (!res)
3772                 return -ENOMEM;
3773
3774         spin_lock_irq(&rqp->mcg_spl);
3775         if (find_gid(dev, slave, rqp, gid)) {
3776                 kfree(res);
3777                 err = -EEXIST;
3778         } else {
3779                 memcpy(res->gid, gid, 16);
3780                 res->prot = prot;
3781                 res->steer = steer;
3782                 res->reg_id = reg_id;
3783                 list_add_tail(&res->list, &rqp->mcg_list);
3784                 err = 0;
3785         }
3786         spin_unlock_irq(&rqp->mcg_spl);
3787
3788         return err;
3789 }
3790
3791 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3792                        u8 *gid, enum mlx4_protocol prot,
3793                        enum mlx4_steer_type steer, u64 *reg_id)
3794 {
3795         struct res_gid *res;
3796         int err;
3797
3798         spin_lock_irq(&rqp->mcg_spl);
3799         res = find_gid(dev, slave, rqp, gid);
3800         if (!res || res->prot != prot || res->steer != steer)
3801                 err = -EINVAL;
3802         else {
3803                 *reg_id = res->reg_id;
3804                 list_del(&res->list);
3805                 kfree(res);
3806                 err = 0;
3807         }
3808         spin_unlock_irq(&rqp->mcg_spl);
3809
3810         return err;
3811 }
3812
3813 static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
3814                      u8 gid[16], int block_loopback, enum mlx4_protocol prot,
3815                      enum mlx4_steer_type type, u64 *reg_id)
3816 {
3817         switch (dev->caps.steering_mode) {
3818         case MLX4_STEERING_MODE_DEVICE_MANAGED: {
3819                 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3820                 if (port < 0)
3821                         return port;
3822                 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
3823                                                 block_loopback, prot,
3824                                                 reg_id);
3825         }
3826         case MLX4_STEERING_MODE_B0:
3827                 if (prot == MLX4_PROT_ETH) {
3828                         int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3829                         if (port < 0)
3830                                 return port;
3831                         gid[5] = port;
3832                 }
3833                 return mlx4_qp_attach_common(dev, qp, gid,
3834                                             block_loopback, prot, type);
3835         default:
3836                 return -EINVAL;
3837         }
3838 }
3839
3840 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
3841                      u8 gid[16], enum mlx4_protocol prot,
3842                      enum mlx4_steer_type type, u64 reg_id)
3843 {
3844         switch (dev->caps.steering_mode) {
3845         case MLX4_STEERING_MODE_DEVICE_MANAGED:
3846                 return mlx4_flow_detach(dev, reg_id);
3847         case MLX4_STEERING_MODE_B0:
3848                 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
3849         default:
3850                 return -EINVAL;
3851         }
3852 }
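
     /* Note the asymmetry with qp_attach(): device-managed steering
      * identifies the rule purely by the reg_id returned at attach time,
      * while B0 steering detaches by repeating the (gid, prot, type)
      * triplet.
      */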
3853
3854 static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
3855                             u8 *gid, enum mlx4_protocol prot)
3856 {
3857         int real_port;
3858
3859         if (prot != MLX4_PROT_ETH)
3860                 return 0;
3861
3862         if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
3863             dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
3864                 real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
3865                 if (real_port < 0)
3866                         return -EINVAL;
3867                 gid[5] = real_port;
3868         }
3869
3870         return 0;
3871 }
3872
3873 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3874                                struct mlx4_vhcr *vhcr,
3875                                struct mlx4_cmd_mailbox *inbox,
3876                                struct mlx4_cmd_mailbox *outbox,
3877                                struct mlx4_cmd_info *cmd)
3878 {
3879         struct mlx4_qp qp; /* dummy for calling attach/detach */
3880         u8 *gid = inbox->buf;
3881         enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
3882         int err;
3883         int qpn;
3884         struct res_qp *rqp;
3885         u64 reg_id = 0;
3886         int attach = vhcr->op_modifier;
3887         int block_loopback = vhcr->in_modifier >> 31;
3888         u8 steer_type_mask = 2;
3889         enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
3890
3891         qpn = vhcr->in_modifier & 0xffffff;
3892         err = get_res(dev, slave, qpn, RES_QP, &rqp);
3893         if (err)
3894                 return err;
3895
3896         qp.qpn = qpn;
3897         if (attach) {
3898                 err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
3899                                 type, &reg_id);
3900                 if (err) {
3901                         pr_err("Failed to attach rule to qp 0x%x\n", qpn);
3902                         goto ex_put;
3903                 }
3904                 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
3905                 if (err)
3906                         goto ex_detach;
3907         } else {
3908                 err = mlx4_adjust_port(dev, slave, gid, prot);
3909                 if (err)
3910                         goto ex_put;
3911
3912                 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
3913                 if (err)
3914                         goto ex_put;
3915
3916                 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
3917                 if (err)
3918                         pr_err("Failed to detach rule from qp 0x%x, reg_id = 0x%llx\n",
3919                                qpn, reg_id);
3920         }
3921         put_res(dev, slave, qpn, RES_QP);
3922         return err;
3923
3924 ex_detach:
3925         qp_detach(dev, &qp, gid, prot, type, reg_id);
3926 ex_put:
3927         put_res(dev, slave, qpn, RES_QP);
3928         return err;
3929 }
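
     /* The encoding unpacked above, with illustrative values: bits 0-23
      * of in_modifier carry the qpn, bits 28-30 the protocol and bit 31
      * the block-loopback flag, so in_modifier 0x90000042 means "attach
      * qpn 0x42, prot 1, block loopback".  The steering type is bit 1 of
      * gid[7].
      */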
3930
3931 /*
3932  * MAC validation for Flow Steering rules.
3933  * A VF can attach rules only with a MAC address that is assigned to it.
3934  */
3935 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3936                                    struct list_head *rlist)
3937 {
3938         struct mac_res *res, *tmp;
3939         __be64 be_mac;
3940
3941         /* make sure it isn't a multicast or broadcast MAC */
3942         if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3943             !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3944                 list_for_each_entry_safe(res, tmp, rlist, list) {
3945                         be_mac = cpu_to_be64(res->mac << 16);
3946                         if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
3947                                 return 0;
3948                 }
3949                 pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
3950                        eth_header->eth.dst_mac, slave);
3951                 return -EINVAL;
3952         }
3953         return 0;
3954 }
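
     /* cpu_to_be64(mac << 16) left-aligns the 48-bit MAC in a u64 so its
      * first six bytes line up with dst_mac for ether_addr_equal(); e.g.
      * (illustrative) mac = 0x001122334455 becomes the byte sequence
      * 00 11 22 33 44 55 00 00 in memory.
      */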
3955
3956 /*
3957  * If the eth header is missing, prepend an eth header with a MAC
3958  * address assigned to the VF.
3959  */
3960 static int add_eth_header(struct mlx4_dev *dev, int slave,
3961                           struct mlx4_cmd_mailbox *inbox,
3962                           struct list_head *rlist, int header_id)
3963 {
3964         struct mac_res *res, *tmp;
3965         u8 port;
3966         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3967         struct mlx4_net_trans_rule_hw_eth *eth_header;
3968         struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3969         struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3970         __be64 be_mac = 0;
3971         __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3972
3973         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3974         port = ctrl->port;
3975         eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3976
3977         /* Clear a space in the inbox for eth header */
3978         switch (header_id) {
3979         case MLX4_NET_TRANS_RULE_ID_IPV4:
3980                 ip_header =
3981                         (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3982                 memmove(ip_header, eth_header,
3983                         sizeof(*ip_header) + sizeof(*l4_header));
3984                 break;
3985         case MLX4_NET_TRANS_RULE_ID_TCP:
3986         case MLX4_NET_TRANS_RULE_ID_UDP:
3987                 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
3988                             (eth_header + 1);
3989                 memmove(l4_header, eth_header, sizeof(*l4_header));
3990                 break;
3991         default:
3992                 return -EINVAL;
3993         }
3994         list_for_each_entry_safe(res, tmp, rlist, list) {
3995                 if (port == res->port) {
3996                         be_mac = cpu_to_be64(res->mac << 16);
3997                         break;
3998                 }
3999         }
4000         if (!be_mac) {
4001                 pr_err("Failed adding eth header to FS rule; can't find a matching MAC for port %d\n",
4002                        port);
4003                 return -EINVAL;
4004         }
4005
4006         memset(eth_header, 0, sizeof(*eth_header));
4007         eth_header->size = sizeof(*eth_header) >> 2;
4008         eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
4009         memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
4010         memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
4011
4012         return 0;
4013
4014 }
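
     /* Illustrative before/after of the rule buffer for the IPv4 case:
      *
      *     before: [ctrl][ipv4][tcp/udp]
      *     after:  [ctrl][eth][ipv4][tcp/udp]
      *
      * The caller must then grow vhcr->in_modifier by the eth header size
      * in dwords, as mlx4_QP_FLOW_STEERING_ATTACH_wrapper() does below.
      */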
4015
4016 #define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
4017 int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
4018                            struct mlx4_vhcr *vhcr,
4019                            struct mlx4_cmd_mailbox *inbox,
4020                            struct mlx4_cmd_mailbox *outbox,
4021                            struct mlx4_cmd_info *cmd_info)
4022 {
4023         int err;
4024         u32 qpn = vhcr->in_modifier & 0xffffff;
4025         struct res_qp *rqp;
4026         u64 mac;
4027         unsigned port;
4028         u64 pri_addr_path_mask;
4029         struct mlx4_update_qp_context *cmd;
4030         int smac_index;
4031
4032         cmd = (struct mlx4_update_qp_context *)inbox->buf;
4033
4034         pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
4035         if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
4036             (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
4037                 return -EPERM;
4038
4039         /* Just change the smac for the QP */
4040         err = get_res(dev, slave, qpn, RES_QP, &rqp);
4041         if (err) {
4042                 mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
4043                 return err;
4044         }
4045
4046         port = (rqp->sched_queue >> 6 & 1) + 1;
4047
4048         if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
4049                 smac_index = cmd->qp_context.pri_path.grh_mylmc;
4050                 err = mac_find_smac_ix_in_slave(dev, slave, port,
4051                                                 smac_index, &mac);
4052
4053                 if (err) {
4054                         mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
4055                                  qpn, smac_index);
4056                         goto err_mac;
4057                 }
4058         }
4059
4060         err = mlx4_cmd(dev, inbox->dma,
4061                        vhcr->in_modifier, 0,
4062                        MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
4063                        MLX4_CMD_NATIVE);
4064         if (err) {
4065                 mlx4_err(dev, "Failed to update qpn 0x%x, command failed\n", qpn);
4066                 goto err_mac;
4067         }
4068
4069 err_mac:
4070         put_res(dev, slave, qpn, RES_QP);
4071         return err;
4072 }
4073
4074 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4075                                          struct mlx4_vhcr *vhcr,
4076                                          struct mlx4_cmd_mailbox *inbox,
4077                                          struct mlx4_cmd_mailbox *outbox,
4078                                          struct mlx4_cmd_info *cmd)
4079 {
4080
4081         struct mlx4_priv *priv = mlx4_priv(dev);
4082         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4083         struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
4084         int err;
4085         int qpn;
4086         struct res_qp *rqp;
4087         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4088         struct _rule_hw  *rule_header;
4089         int header_id;
4090
4091         if (dev->caps.steering_mode !=
4092             MLX4_STEERING_MODE_DEVICE_MANAGED)
4093                 return -EOPNOTSUPP;
4094
4095         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4096         err = mlx4_slave_convert_port(dev, slave, ctrl->port);
4097         if (err <= 0)
4098                 return -EINVAL;
             ctrl->port = err;       /* ctrl->port is u8; assign only after the range check */
4099         qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
4100         err = get_res(dev, slave, qpn, RES_QP, &rqp);
4101         if (err) {
4102                 pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
4103                 return err;
4104         }
4105         rule_header = (struct _rule_hw *)(ctrl + 1);
4106         header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4107
4108         switch (header_id) {
4109         case MLX4_NET_TRANS_RULE_ID_ETH:
4110                 if (validate_eth_header_mac(slave, rule_header, rlist)) {
4111                         err = -EINVAL;
4112                         goto err_put;
4113                 }
4114                 break;
4115         case MLX4_NET_TRANS_RULE_ID_IB:
4116                 break;
4117         case MLX4_NET_TRANS_RULE_ID_IPV4:
4118         case MLX4_NET_TRANS_RULE_ID_TCP:
4119         case MLX4_NET_TRANS_RULE_ID_UDP:
4120                 pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
4121                 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4122                         err = -EINVAL;
4123                         goto err_put;
4124                 }
4125                 vhcr->in_modifier +=
4126                         sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4127                 break;
4128         default:
4129                 pr_err("Corrupted mailbox\n");
4130                 err = -EINVAL;
4131                 goto err_put;
4132         }
4133
4134         err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4135                            vhcr->in_modifier, 0,
4136                            MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4137                            MLX4_CMD_NATIVE);
4138         if (err)
4139                 goto err_put;
4140
4141         err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
4142         if (err) {
4143                 mlx4_err(dev, "Failed to add flow steering resources\n");
4144                 /* detach rule */
4145                 mlx4_cmd(dev, vhcr->out_param, 0, 0,
4146                          MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4147                          MLX4_CMD_NATIVE);
4148                 goto err_put;
4149         }
4150         atomic_inc(&rqp->ref_count);
4151 err_put:
4152         put_res(dev, slave, qpn, RES_QP);
4153         return err;
4154 }
4155
4156 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4157                                          struct mlx4_vhcr *vhcr,
4158                                          struct mlx4_cmd_mailbox *inbox,
4159                                          struct mlx4_cmd_mailbox *outbox,
4160                                          struct mlx4_cmd_info *cmd)
4161 {
4162         int err;
4163         struct res_qp *rqp;
4164         struct res_fs_rule *rrule;
4165
4166         if (dev->caps.steering_mode !=
4167             MLX4_STEERING_MODE_DEVICE_MANAGED)
4168                 return -EOPNOTSUPP;
4169
4170         err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4171         if (err)
4172                 return err;
4173         /* Release the rule from busy state before removal */
4174         put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4175         err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
4176         if (err)
4177                 return err;
4178
4179         err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4180         if (err) {
4181                 mlx4_err(dev, "Failed to remove flow steering resources\n");
4182                 goto out;
4183         }
4184
4185         err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4186                        MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4187                        MLX4_CMD_NATIVE);
4188         if (!err)
4189                 atomic_dec(&rqp->ref_count);
4190 out:
4191         put_res(dev, slave, rrule->qpn, RES_QP);
4192         return err;
4193 }
4194
4195 enum {
4196         BUSY_MAX_RETRIES = 10
4197 };
4198
4199 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4200                                struct mlx4_vhcr *vhcr,
4201                                struct mlx4_cmd_mailbox *inbox,
4202                                struct mlx4_cmd_mailbox *outbox,
4203                                struct mlx4_cmd_info *cmd)
4204 {
4205         int err;
4206         int index = vhcr->in_modifier & 0xffff;
4207
4208         err = get_res(dev, slave, index, RES_COUNTER, NULL);
4209         if (err)
4210                 return err;
4211
4212         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4213         put_res(dev, slave, index, RES_COUNTER);
4214         return err;
4215 }
4216
4217 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4218 {
4219         struct res_gid *rgid;
4220         struct res_gid *tmp;
4221         struct mlx4_qp qp; /* dummy for calling attach/detach */
4222
4223         list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
4224                 switch (dev->caps.steering_mode) {
4225                 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4226                         mlx4_flow_detach(dev, rgid->reg_id);
4227                         break;
4228                 case MLX4_STEERING_MODE_B0:
4229                         qp.qpn = rqp->local_qpn;
4230                         (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4231                                                      rgid->prot, rgid->steer);
4232                         break;
4233                 }
4234                 list_del(&rgid->list);
4235                 kfree(rgid);
4236         }
4237 }
4238
4239 static int _move_all_busy(struct mlx4_dev *dev, int slave,
4240                           enum mlx4_resource type, int print)
4241 {
4242         struct mlx4_priv *priv = mlx4_priv(dev);
4243         struct mlx4_resource_tracker *tracker =
4244                 &priv->mfunc.master.res_tracker;
4245         struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4246         struct res_common *r;
4247         struct res_common *tmp;
4248         int busy;
4249
4250         busy = 0;
4251         spin_lock_irq(mlx4_tlock(dev));
4252         list_for_each_entry_safe(r, tmp, rlist, list) {
4253                 if (r->owner == slave) {
4254                         if (!r->removing) {
4255                                 if (r->state == RES_ANY_BUSY) {
4256                                         if (print)
4257                                                 mlx4_dbg(dev,
4258                                                          "%s id 0x%llx is busy\n",
4259                                                           resource_str(type),
4260                                                           r->res_id);
4261                                         ++busy;
4262                                 } else {
4263                                         r->from_state = r->state;
4264                                         r->state = RES_ANY_BUSY;
4265                                         r->removing = 1;
4266                                 }
4267                         }
4268                 }
4269         }
4270         spin_unlock_irq(mlx4_tlock(dev));
4271
4272         return busy;
4273 }
4274
4275 static int move_all_busy(struct mlx4_dev *dev, int slave,
4276                          enum mlx4_resource type)
4277 {
4278         unsigned long begin;
4279         int busy;
4280
4281         begin = jiffies;
4282         do {
4283                 busy = _move_all_busy(dev, slave, type, 0);
4284                 if (time_after(jiffies, begin + 5 * HZ))
4285                         break;
4286                 if (busy)
4287                         cond_resched();
4288         } while (busy);
4289
4290         if (busy)
4291                 busy = _move_all_busy(dev, slave, type, 1);
4292
4293         return busy;
4294 }
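
     /* move_all_busy() retries for at most ~5 seconds (5 * HZ jiffies),
      * calling cond_resched() between passes, and only in the final pass
      * asks _move_all_busy() to log which resources are still busy.
      */
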
4295 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4296 {
4297         struct mlx4_priv *priv = mlx4_priv(dev);
4298         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4299         struct list_head *qp_list =
4300                 &tracker->slave_list[slave].res_list[RES_QP];
4301         struct res_qp *qp;
4302         struct res_qp *tmp;
4303         int state;
4304         u64 in_param;
4305         int qpn;
4306         int err;
4307
4308         err = move_all_busy(dev, slave, RES_QP);
4309         if (err)
4310                 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4311                           slave);
4312
4313         spin_lock_irq(mlx4_tlock(dev));
4314         list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4315                 spin_unlock_irq(mlx4_tlock(dev));
4316                 if (qp->com.owner == slave) {
4317                         qpn = qp->com.res_id;
4318                         detach_qp(dev, slave, qp);
4319                         state = qp->com.from_state;
4320                         while (state != 0) {
4321                                 switch (state) {
4322                                 case RES_QP_RESERVED:
4323                                         spin_lock_irq(mlx4_tlock(dev));
4324                                         rb_erase(&qp->com.node,
4325                                                  &tracker->res_tree[RES_QP]);
4326                                         list_del(&qp->com.list);
4327                                         spin_unlock_irq(mlx4_tlock(dev));
4328                                         if (!valid_reserved(dev, slave, qpn)) {
4329                                                 __mlx4_qp_release_range(dev, qpn, 1);
4330                                                 mlx4_release_resource(dev, slave,
4331                                                                       RES_QP, 1, 0);
4332                                         }
4333                                         kfree(qp);
4334                                         state = 0;
4335                                         break;
4336                                 case RES_QP_MAPPED:
4337                                         if (!valid_reserved(dev, slave, qpn))
4338                                                 __mlx4_qp_free_icm(dev, qpn);
4339                                         state = RES_QP_RESERVED;
4340                                         break;
4341                                 case RES_QP_HW:
4342                                         in_param = slave;
4343                                         err = mlx4_cmd(dev, in_param,
4344                                                        qp->local_qpn, 2,
4345                                                        MLX4_CMD_2RST_QP,
4346                                                        MLX4_CMD_TIME_CLASS_A,
4347                                                        MLX4_CMD_NATIVE);
4348                                         if (err)
4349                                                 mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4350                                                          slave, qp->local_qpn);
4351                                         atomic_dec(&qp->rcq->ref_count);
4352                                         atomic_dec(&qp->scq->ref_count);
4353                                         atomic_dec(&qp->mtt->ref_count);
4354                                         if (qp->srq)
4355                                                 atomic_dec(&qp->srq->ref_count);
4356                                         state = RES_QP_MAPPED;
4357                                         break;
4358                                 default:
4359                                         state = 0;
4360                                 }
4361                         }
4362                 }
4363                 spin_lock_irq(mlx4_tlock(dev));
4364         }
4365         spin_unlock_irq(mlx4_tlock(dev));
4366 }
4367
static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *srq_list =
		&tracker->slave_list[slave].res_list[RES_SRQ];
	struct res_srq *srq;
	struct res_srq *tmp;
	int state;
	u64 in_param;
	int srqn;
	int err;

	err = move_all_busy(dev, slave, RES_SRQ);
	if (err)
		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (srq->com.owner == slave) {
			srqn = srq->com.res_id;
			state = srq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_SRQ_ALLOCATED:
					__mlx4_srq_free_icm(dev, srqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&srq->com.node,
						 &tracker->res_tree[RES_SRQ]);
					list_del(&srq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_SRQ, 1, 0);
					kfree(srq);
					state = 0;
					break;

				case RES_SRQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, srqn, 1,
						       MLX4_CMD_HW2SW_SRQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
							 slave, srqn);

					atomic_dec(&srq->mtt->ref_count);
					if (srq->cq)
						atomic_dec(&srq->cq->ref_count);
					state = RES_SRQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

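/*
 * Reclaim all CQs still owned by @slave.  CQs still referenced by other
 * tracked resources (ref_count != 0) are skipped; their counts are
 * dropped as the referencing QPs and SRQs are torn down.
 */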
static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *cq_list =
		&tracker->slave_list[slave].res_list[RES_CQ];
	struct res_cq *cq;
	struct res_cq *tmp;
	int state;
	u64 in_param;
	int cqn;
	int err;

	err = move_all_busy(dev, slave, RES_CQ);
	if (err)
		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
			cqn = cq->com.res_id;
			state = cq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_CQ_ALLOCATED:
					__mlx4_cq_free_icm(dev, cqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&cq->com.node,
						 &tracker->res_tree[RES_CQ]);
					list_del(&cq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_CQ, 1, 0);
					kfree(cq);
					state = 0;
					break;

				case RES_CQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, cqn, 1,
						       MLX4_CMD_HW2SW_CQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
							 slave, cqn);
					atomic_dec(&cq->mtt->ref_count);
					state = RES_CQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

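/*
 * Reclaim all MPTs (memory regions) still owned by @slave, unwinding
 * RES_MPT_HW -> RES_MPT_MAPPED -> RES_MPT_RESERVED: HW2SW_MPT returns
 * the entry to SW ownership, then the ICM backing and finally the key
 * reservation are released.
 */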
static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mpt_list =
		&tracker->slave_list[slave].res_list[RES_MPT];
	struct res_mpt *mpt;
	struct res_mpt *tmp;
	int state;
	u64 in_param;
	int mptn;
	int err;

	err = move_all_busy(dev, slave, RES_MPT);
	if (err)
		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mpt->com.owner == slave) {
			mptn = mpt->com.res_id;
			state = mpt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MPT_RESERVED:
					__mlx4_mpt_release(dev, mpt->key);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mpt->com.node,
						 &tracker->res_tree[RES_MPT]);
					list_del(&mpt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_MPT, 1, 0);
					kfree(mpt);
					state = 0;
					break;

				case RES_MPT_MAPPED:
					__mlx4_mpt_free_icm(dev, mpt->key);
					state = RES_MPT_RESERVED;
					break;

				case RES_MPT_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, mptn, 0,
						       MLX4_CMD_HW2SW_MPT,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
							 slave, mptn);
					if (mpt->mtt)
						atomic_dec(&mpt->mtt->ref_count);
					state = RES_MPT_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

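/*
 * Reclaim all MTT ranges still owned by @slave.  MTTs are tracked as
 * power-of-two ranges, so freeing one entry returns 1 << order segments
 * to the allocator and to the slave's quota.
 */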
static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *mtt_list =
		&tracker->slave_list[slave].res_list[RES_MTT];
	struct res_mtt *mtt;
	struct res_mtt *tmp;
	int state;
	int base;
	int err;

	err = move_all_busy(dev, slave, RES_MTT);
	if (err)
		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mtt->com.owner == slave) {
			base = mtt->com.res_id;
			state = mtt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MTT_ALLOCATED:
					__mlx4_free_mtt_range(dev, base,
							      mtt->order);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mtt->com.node,
						 &tracker->res_tree[RES_MTT]);
					list_del(&mtt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave, RES_MTT,
							      1 << mtt->order, 0);
					kfree(mtt);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

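/*
 * Detach and free all flow steering rules still owned by @slave.  The
 * tracker id (which here serves as the rule's firmware registration id)
 * is passed as the in_param of the detach command before the entry is
 * dropped.
 */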
static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *fs_rule_list =
		&tracker->slave_list[slave].res_list[RES_FS_RULE];
	struct res_fs_rule *fs_rule;
	struct res_fs_rule *tmp;
	int state;
	u64 base;
	int err;

	err = move_all_busy(dev, slave, RES_FS_RULE);
	if (err)
		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (fs_rule->com.owner == slave) {
			base = fs_rule->com.res_id;
			state = fs_rule->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_FS_RULE_ALLOCATED:
					/* detach rule */
					err = mlx4_cmd(dev, base, 0, 0,
						       MLX4_QP_FLOW_STEERING_DETACH,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_fs_rule: failed to detach rule for slave %d\n",
							 slave);

					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&fs_rule->com.node,
						 &tracker->res_tree[RES_FS_RULE]);
					list_del(&fs_rule->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(fs_rule);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

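/*
 * Reclaim all EQs still owned by @slave.  Per the firmware spec,
 * HW2SW_EQ takes the slave number in the in_param, the EQN (low byte)
 * in the in_modifier and opcode modifier 1, and returns nothing, so no
 * output mailbox is used.
 */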
static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *eq_list =
		&tracker->slave_list[slave].res_list[RES_EQ];
	struct res_eq *eq;
	struct res_eq *tmp;
	int err;
	int state;
	int eqn;

	err = move_all_busy(dev, slave, RES_EQ);
	if (err)
		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (eq->com.owner == slave) {
			eqn = eq->com.res_id;
			state = eq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_EQ_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&eq->com.node,
						 &tracker->res_tree[RES_EQ]);
					list_del(&eq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(eq);
					state = 0;
					break;

				case RES_EQ_HW:
					err = mlx4_cmd(dev, slave, eqn & 0xff,
						       1, MLX4_CMD_HW2SW_EQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eq %d to SW ownership\n",
							 slave, eqn);
					atomic_dec(&eq->mtt->ref_count);
					state = RES_EQ_RESERVED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

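/*
 * Reclaim all counters still owned by @slave.  Counters have no HW/SW
 * ownership state to unwind, so they are unlinked and freed directly
 * under the tracker lock.
 */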
static void rem_slave_counters(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *counter_list =
		&tracker->slave_list[slave].res_list[RES_COUNTER];
	struct res_counter *counter;
	struct res_counter *tmp;
	int err;
	int index;

	err = move_all_busy(dev, slave, RES_COUNTER);
	if (err)
		mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
		if (counter->com.owner == slave) {
			index = counter->com.res_id;
			rb_erase(&counter->com.node,
				 &tracker->res_tree[RES_COUNTER]);
			list_del(&counter->com.list);
			kfree(counter);
			__mlx4_counter_free(dev, index);
			mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

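/*
 * Reclaim all XRC domains still owned by @slave; like counters, they
 * are single-state and released directly.
 */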
static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *xrcdn_list =
		&tracker->slave_list[slave].res_list[RES_XRCD];
	struct res_xrcdn *xrcd;
	struct res_xrcdn *tmp;
	int err;
	int xrcdn;

	err = move_all_busy(dev, slave, RES_XRCD);
	if (err)
		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
		if (xrcd->com.owner == slave) {
			xrcdn = xrcd->com.res_id;
			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
			list_del(&xrcd->com.list);
			kfree(xrcd);
			__mlx4_xrcd_free(dev, xrcdn);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

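/*
 * Tear down everything a slave owns (e.g. on VF teardown).  The order
 * matters: QPs are removed before the CQs, SRQs and MRs whose
 * ref_counts they hold, and MTTs are freed only after all their users;
 * the per-slave mutex serializes this against the slave's resource
 * commands.
 */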
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_reset_roce_gids(dev, slave);
	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
	rem_slave_vlans(dev, slave);
	rem_slave_macs(dev, slave);
	rem_slave_fs_rule(dev, slave);
	rem_slave_qps(dev, slave);
	rem_slave_srqs(dev, slave);
	rem_slave_cqs(dev, slave);
	rem_slave_mrs(dev, slave);
	rem_slave_eqs(dev, slave);
	rem_slave_mtts(dev, slave);
	rem_slave_counters(dev, slave);
	rem_slave_xrcdns(dev, slave);
	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}

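/*
 * Deferred work: push an administrative VLAN/QoS change out to all of a
 * slave's Ethernet QPs with UPDATE_QP.  qp_path_mask lists the path
 * fields rewritten for every QP; the extra vlan_ctrl block bits are
 * applied only to non-RC QPs (see the MLX4_QP_ST_RC check below).
 */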
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
{
	struct mlx4_vf_immed_vlan_work *work =
		container_of(_work, struct mlx4_vf_immed_vlan_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_update_qp_context *upd_context;
	struct mlx4_dev *dev = &work->priv->dev;
	struct mlx4_resource_tracker *tracker =
		&work->priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[work->slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	u64 qp_path_mask_vlan_ctrl =
		       ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));

	u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));

	int err;
	int port, errors = 0;
	u8 vlan_control;

	if (mlx4_is_slave(dev)) {
		mlx4_warn(dev, "Trying to update-qp in slave %d\n",
			  work->slave);
		goto out;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto out;
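
	/*
	 * Select the VLAN filtering policy written to each QP path:
	 * block all traffic while the VF link is forced down; with no
	 * VLAN configured, block tagged frames in both directions;
	 * otherwise (VST) block tagged egress (the enforced tag is
	 * inserted by the HW, MLX4_FL_CV below) and untagged or
	 * priority-tagged ingress.
	 */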
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else if (!work->vlan_id)
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;

	upd_context = mailbox->buf;
	upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == work->slave) {
			if (qp->com.from_state != RES_QP_HW ||
			    !qp->sched_queue ||  /* no INIT2RTR trans yet */
			    mlx4_is_qp_reserved(dev, qp->local_qpn) ||
			    qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
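			/* bit 6 of sched_queue holds the 0-based port;
			 * convert and skip QPs on other ports
			 */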
			port = (qp->sched_queue >> 6 & 1) + 1;
			if (port != work->port) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
				upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
			else
				upd_context->primary_addr_path_mask =
					cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
			if (work->vlan_id == MLX4_VGT) {
				upd_context->qp_context.param3 = qp->param3;
				upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
				upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
				upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
				upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
				upd_context->qp_context.pri_path.feup = qp->feup;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue;
			} else {
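				/*
				 * VST: rewrite the saved path for the
				 * enforced VLAN - clear MLX4_STRIP_VLAN,
				 * select the new vlan_index, force tag
				 * insertion (CV) while hiding it from
				 * CQEs, and encode the QoS priority in
				 * sched_queue bits 5:3.
				 */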
				upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
				upd_context->qp_context.pri_path.vlan_control = vlan_control;
				upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
				upd_context->qp_context.pri_path.fvl_rx =
					qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.fl =
					qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
				upd_context->qp_context.pri_path.feup =
					qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue & 0xC7;
				upd_context->qp_context.pri_path.sched_queue |=
					((work->qos & 0x7) << 3);
			}

			err = mlx4_cmd(dev, mailbox->dma,
				       qp->local_qpn & 0xffffff,
				       0, MLX4_CMD_UPDATE_QP,
				       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
			if (err) {
				mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
					  work->slave, port, qp->local_qpn, err);
				errors++;
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
	mlx4_free_cmd_mailbox(dev, mailbox);

	if (errors)
		mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
			 errors, work->slave, work->port);

	/* Unregister the previous vlan_id, if any, but only if all the
	 * QP updates succeeded.
	 */
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
	    NO_INDX != work->orig_vlan_ix)
		__mlx4_unregister_vlan(&work->priv->dev, work->port,
				       work->orig_vlan_id);
out:
	kfree(work);
}