net/mlx4_core: Implement resource quota enforcement
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"

#define MLX4_MAC_VALID          (1ull << 63)

struct mac_res {
        struct list_head list;
        u64 mac;
        u8 port;
};

struct vlan_res {
        struct list_head list;
        u16 vlan;
        int ref_count;
        int vlan_index;
        u8 port;
};

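/*
 * Common bookkeeping header shared by all tracked resource types.  Each
 * entry is keyed by res_id in a per-type red-black tree and is also
 * linked into the owning slave's per-type resource list.
 */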
struct res_common {
        struct list_head        list;
        struct rb_node          node;
        u64                     res_id;
        int                     owner;
        int                     state;
        int                     from_state;
        int                     to_state;
        int                     removing;
};

enum {
        RES_ANY_BUSY = 1
};

struct res_gid {
        struct list_head        list;
        u8                      gid[16];
        enum mlx4_protocol      prot;
        enum mlx4_steer_type    steer;
        u64                     reg_id;
};

enum res_qp_states {
        RES_QP_BUSY = RES_ANY_BUSY,

        /* QP number was allocated */
        RES_QP_RESERVED,

        /* ICM memory for QP context was mapped */
        RES_QP_MAPPED,

        /* QP is in hw ownership */
        RES_QP_HW
};

struct res_qp {
        struct res_common       com;
        struct res_mtt         *mtt;
        struct res_cq          *rcq;
        struct res_cq          *scq;
        struct res_srq         *srq;
        struct list_head        mcg_list;
        spinlock_t              mcg_spl;
        int                     local_qpn;
        atomic_t                ref_count;
        u32                     qpc_flags;
        u8                      sched_queue;
};

enum res_mtt_states {
        RES_MTT_BUSY = RES_ANY_BUSY,
        RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
        switch (state) {
        case RES_MTT_BUSY: return "RES_MTT_BUSY";
        case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
        default: return "Unknown";
        }
}

struct res_mtt {
        struct res_common       com;
        int                     order;
        atomic_t                ref_count;
};

enum res_mpt_states {
        RES_MPT_BUSY = RES_ANY_BUSY,
        RES_MPT_RESERVED,
        RES_MPT_MAPPED,
        RES_MPT_HW,
};

struct res_mpt {
        struct res_common       com;
        struct res_mtt         *mtt;
        int                     key;
};

enum res_eq_states {
        RES_EQ_BUSY = RES_ANY_BUSY,
        RES_EQ_RESERVED,
        RES_EQ_HW,
};

struct res_eq {
        struct res_common       com;
        struct res_mtt         *mtt;
};

enum res_cq_states {
        RES_CQ_BUSY = RES_ANY_BUSY,
        RES_CQ_ALLOCATED,
        RES_CQ_HW,
};

struct res_cq {
        struct res_common       com;
        struct res_mtt         *mtt;
        atomic_t                ref_count;
};

enum res_srq_states {
        RES_SRQ_BUSY = RES_ANY_BUSY,
        RES_SRQ_ALLOCATED,
        RES_SRQ_HW,
};

struct res_srq {
        struct res_common       com;
        struct res_mtt         *mtt;
        struct res_cq          *cq;
        atomic_t                ref_count;
};

enum res_counter_states {
        RES_COUNTER_BUSY = RES_ANY_BUSY,
        RES_COUNTER_ALLOCATED,
};

struct res_counter {
        struct res_common       com;
        int                     port;
};

enum res_xrcdn_states {
        RES_XRCD_BUSY = RES_ANY_BUSY,
        RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
        struct res_common       com;
        int                     port;
};

enum res_fs_rule_states {
        RES_FS_RULE_BUSY = RES_ANY_BUSY,
        RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
        struct res_common       com;
        int                     qpn;
};

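/*
 * Per-type red-black trees map a resource id to its tracker entry.
 * Callers hold the tracker lock (mlx4_tlock) across lookup and insert.
 */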
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
        struct rb_node *node = root->rb_node;

        while (node) {
                struct res_common *res = container_of(node, struct res_common,
                                                      node);

                if (res_id < res->res_id)
                        node = node->rb_left;
                else if (res_id > res->res_id)
                        node = node->rb_right;
                else
                        return res;
        }
        return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
        struct rb_node **new = &(root->rb_node), *parent = NULL;

        /* Figure out where to put new node */
        while (*new) {
                struct res_common *this = container_of(*new, struct res_common,
                                                       node);

                parent = *new;
                if (res->res_id < this->res_id)
                        new = &((*new)->rb_left);
                else if (res->res_id > this->res_id)
                        new = &((*new)->rb_right);
                else
                        return -EEXIST;
        }

        /* Add new node and rebalance tree. */
        rb_link_node(&res->node, parent, new);
        rb_insert_color(&res->node, root);

        return 0;
}

enum qp_transition {
        QP_TRANS_INIT2RTR,
        QP_TRANS_RTR2RTS,
        QP_TRANS_RTS2RTS,
        QP_TRANS_SQERR2RTS,
        QP_TRANS_SQD2SQD,
        QP_TRANS_SQD2RTS
};

/* For Debug uses */
static const char *ResourceType(enum mlx4_resource rt)
{
        switch (rt) {
        case RES_QP: return "RES_QP";
        case RES_CQ: return "RES_CQ";
        case RES_SRQ: return "RES_SRQ";
        case RES_MPT: return "RES_MPT";
        case RES_MTT: return "RES_MTT";
        case RES_MAC: return "RES_MAC";
        case RES_VLAN: return "RES_VLAN";
        case RES_EQ: return "RES_EQ";
        case RES_COUNTER: return "RES_COUNTER";
        case RES_FS_RULE: return "RES_FS_RULE";
        case RES_XRCD: return "RES_XRCD";
        default: return "Unknown resource type !!!";
        }
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
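/*
 * Quota enforcement: each function (PF or VF) has a hard quota (upper
 * bound) and a guaranteed minimum for every resource type.  Allocations
 * within the guarantee always succeed; anything beyond it is drawn from
 * the shared free pool, but only as long as the pool does not dip into
 * the reserves backing the other functions' guarantees.
 */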
static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
                                      enum mlx4_resource res_type, int count,
                                      int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct resource_allocator *res_alloc =
                &priv->mfunc.master.res_tracker.res_alloc[res_type];
        int err = -EINVAL;
        int allocated, free, reserved, guaranteed, from_free;

        if (slave > dev->num_vfs)
                return -EINVAL;

        spin_lock(&res_alloc->alloc_lock);
        allocated = (port > 0) ?
                res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
                res_alloc->allocated[slave];
        free = (port > 0) ? res_alloc->res_port_free[port - 1] :
                res_alloc->res_free;
        reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
                res_alloc->res_reserved;
        guaranteed = res_alloc->guaranteed[slave];

        if (allocated + count > res_alloc->quota[slave])
                goto out;

        if (allocated + count <= guaranteed) {
                err = 0;
        } else {
                /* portion may need to be obtained from free area */
                if (guaranteed - allocated > 0)
                        from_free = count - (guaranteed - allocated);
                else
                        from_free = count;

                if (free - from_free > reserved)
                        err = 0;
        }

        if (!err) {
                /* grant the request */
                if (port > 0) {
                        res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
                        res_alloc->res_port_free[port - 1] -= count;
                } else {
                        res_alloc->allocated[slave] += count;
                        res_alloc->res_free -= count;
                }
        }

out:
        spin_unlock(&res_alloc->alloc_lock);
        return err;
}

static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
                                         enum mlx4_resource res_type, int count,
                                         int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct resource_allocator *res_alloc =
                &priv->mfunc.master.res_tracker.res_alloc[res_type];

        if (slave > dev->num_vfs)
                return;

        spin_lock(&res_alloc->alloc_lock);
        if (port > 0) {
                res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
                res_alloc->res_port_free[port - 1] += count;
        } else {
                res_alloc->allocated[slave] -= count;
                res_alloc->res_free += count;
        }

        spin_unlock(&res_alloc->alloc_lock);
}

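/*
 * Default split: every function is guaranteed 1/(2 * (num_vfs + 1)) of
 * the resource and may allocate at most half the resource on top of its
 * guarantee.  For example, with 1000 instances and 3 VFs (4 functions),
 * each function is guaranteed 125 instances and has a quota of 625.
 */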
static inline void initialize_res_quotas(struct mlx4_dev *dev,
                                         struct resource_allocator *res_alloc,
                                         enum mlx4_resource res_type,
                                         int vf, int num_instances)
{
        res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1));
        res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
        if (vf == mlx4_master_func_num(dev)) {
                res_alloc->res_free = num_instances;
                if (res_type == RES_MTT) {
                        /* reserved mtts will be taken out of the PF allocation */
                        res_alloc->res_free += dev->caps.reserved_mtts;
                        res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
                        res_alloc->quota[vf] += dev->caps.reserved_mtts;
                }
        }
}

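/*
 * Export the PF's quotas to dev->quotas.  On a non-multifunction device
 * there is no quota enforcement, so the quota is simply everything the
 * firmware has not reserved.
 */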
void mlx4_init_quotas(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int pf;

        /* quotas for VFs are initialized in mlx4_slave_cap */
        if (mlx4_is_slave(dev))
                return;

        if (!mlx4_is_mfunc(dev)) {
                dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
                        mlx4_num_reserved_sqps(dev);
                dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
                dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
                dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
                dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
                return;
        }

        pf = mlx4_master_func_num(dev);
        dev->quotas.qp =
                priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
        dev->quotas.cq =
                priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
        dev->quotas.srq =
                priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
        dev->quotas.mtt =
                priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
        dev->quotas.mpt =
                priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}
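
/*
 * Build the per-slave resource lists, the per-type trees and the quota
 * arrays.  MAC and VLAN allocations are tracked per port, so their
 * "allocated" arrays have one slot per (port, function) pair.
 */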
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i, j;
        int t;

        priv->mfunc.master.res_tracker.slave_list =
                kzalloc(dev->num_slaves * sizeof(struct slave_list),
                        GFP_KERNEL);
        if (!priv->mfunc.master.res_tracker.slave_list)
                return -ENOMEM;

        for (i = 0; i < dev->num_slaves; i++) {
                for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
                        INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
                                       slave_list[i].res_list[t]);
                mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
        }

        mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
                 dev->num_slaves);
        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
                priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                struct resource_allocator *res_alloc =
                        &priv->mfunc.master.res_tracker.res_alloc[i];
                res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
                res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
                if (i == RES_MAC || i == RES_VLAN)
                        res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
                                                       (dev->num_vfs + 1) * sizeof(int),
                                                        GFP_KERNEL);
                else
                        res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);

                if (!res_alloc->quota || !res_alloc->guaranteed ||
                    !res_alloc->allocated)
                        goto no_mem_err;

                spin_lock_init(&res_alloc->alloc_lock);
                for (t = 0; t < dev->num_vfs + 1; t++) {
                        switch (i) {
                        case RES_QP:
                                initialize_res_quotas(dev, res_alloc, RES_QP,
                                                      t, dev->caps.num_qps -
                                                      dev->caps.reserved_qps -
                                                      mlx4_num_reserved_sqps(dev));
                                break;
                        case RES_CQ:
                                initialize_res_quotas(dev, res_alloc, RES_CQ,
                                                      t, dev->caps.num_cqs -
                                                      dev->caps.reserved_cqs);
                                break;
                        case RES_SRQ:
                                initialize_res_quotas(dev, res_alloc, RES_SRQ,
                                                      t, dev->caps.num_srqs -
                                                      dev->caps.reserved_srqs);
                                break;
                        case RES_MPT:
                                initialize_res_quotas(dev, res_alloc, RES_MPT,
                                                      t, dev->caps.num_mpts -
                                                      dev->caps.reserved_mrws);
                                break;
                        case RES_MTT:
                                initialize_res_quotas(dev, res_alloc, RES_MTT,
                                                      t, dev->caps.num_mtts -
                                                      dev->caps.reserved_mtts);
                                break;
                        case RES_MAC:
                                if (t == mlx4_master_func_num(dev)) {
                                        res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
                                        res_alloc->guaranteed[t] = 2;
                                        for (j = 0; j < MLX4_MAX_PORTS; j++)
                                                res_alloc->res_port_free[j] = MLX4_MAX_MAC_NUM;
                                } else {
                                        res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
                                        res_alloc->guaranteed[t] = 2;
                                }
                                break;
                        case RES_VLAN:
                                if (t == mlx4_master_func_num(dev)) {
                                        res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
                                        res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
                                        for (j = 0; j < MLX4_MAX_PORTS; j++)
                                                res_alloc->res_port_free[j] =
                                                        res_alloc->quota[t];
                                } else {
                                        res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
                                        res_alloc->guaranteed[t] = 0;
                                }
                                break;
                        case RES_COUNTER:
                                res_alloc->quota[t] = dev->caps.max_counters;
                                res_alloc->guaranteed[t] = 0;
                                if (t == mlx4_master_func_num(dev))
                                        res_alloc->res_free = res_alloc->quota[t];
                                break;
                        default:
                                break;
                        }
                        if (i == RES_MAC || i == RES_VLAN) {
                                for (j = 0; j < MLX4_MAX_PORTS; j++)
                                        res_alloc->res_port_rsvd[j] +=
                                                res_alloc->guaranteed[t];
                        } else {
                                res_alloc->res_reserved += res_alloc->guaranteed[t];
                        }
                }
        }
        spin_lock_init(&priv->mfunc.master.res_tracker.lock);
        return 0;

no_mem_err:
        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
                priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
                priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
                priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
        }
        return -ENOMEM;
}

void mlx4_free_resource_tracker(struct mlx4_dev *dev,
                                enum mlx4_res_tracker_free_type type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;

        if (priv->mfunc.master.res_tracker.slave_list) {
                if (type != RES_TR_FREE_STRUCTS_ONLY) {
                        for (i = 0; i < dev->num_slaves; i++) {
                                if (type == RES_TR_FREE_ALL ||
                                    dev->caps.function != i)
                                        mlx4_delete_all_resources_for_slave(dev, i);
                        }
                        /* free master's vlans */
                        i = dev->caps.function;
                        mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
                        rem_slave_vlans(dev, i);
                        mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
                }

                if (type != RES_TR_FREE_SLAVES_ONLY) {
                        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
                                priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
                                priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
                                priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
                        }
                        kfree(priv->mfunc.master.res_tracker.slave_list);
                        priv->mfunc.master.res_tracker.slave_list = NULL;
                }
        }
}

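/*
 * Paravirtualize the pkey index in a wrapped QP modify command: replace
 * the slave's virtual index (byte 35 of the mailbox) with the physical
 * index from the master's virt2phys_pkey table, for the port encoded in
 * the sched_queue byte at offset 64.
 */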
static void update_pkey_index(struct mlx4_dev *dev, int slave,
                              struct mlx4_cmd_mailbox *inbox)
{
        u8 sched = *(u8 *)(inbox->buf + 64);
        u8 orig_index = *(u8 *)(inbox->buf + 35);
        u8 new_index;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int port;

        port = (sched >> 6 & 1) + 1;

        new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
        *(u8 *)(inbox->buf + 35) = new_index;
}

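/*
 * Pin the gid index a slave may use: UD QPs get mgid_index 0x80 | slave,
 * while RC/UC address paths are forced to the slave's own index, so a
 * slave always transmits with its own GID entry.
 */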
static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
                       u8 slave)
{
        struct mlx4_qp_context  *qp_ctx = inbox->buf + 8;
        enum mlx4_qp_optpar     optpar = be32_to_cpu(*(__be32 *) inbox->buf);
        u32                     ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;

        if (MLX4_QP_ST_UD == ts)
                qp_ctx->pri_path.mgid_index = 0x80 | slave;

        if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
                if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
                        qp_ctx->pri_path.mgid_index = slave & 0x7F;
                if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
                        qp_ctx->alt_path.mgid_index = slave & 0x7F;
        }
}

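/*
 * Apply the administrator's per-VF vport settings (VST vlan, default
 * QoS, spoof-check MAC) to the QP context of a wrapped modify-QP
 * command before it reaches the firmware.
 */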
static int update_vport_qp_param(struct mlx4_dev *dev,
                                 struct mlx4_cmd_mailbox *inbox,
                                 u8 slave, u32 qpn)
{
        struct mlx4_qp_context  *qpc = inbox->buf + 8;
        struct mlx4_vport_oper_state *vp_oper;
        struct mlx4_priv *priv;
        u32 qp_type;
        int port;

        port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
        priv = mlx4_priv(dev);
        vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];

        if (MLX4_VGT != vp_oper->state.default_vlan) {
                qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
                if (MLX4_QP_ST_RC == qp_type ||
                    (MLX4_QP_ST_UD == qp_type &&
                     !mlx4_is_qp_reserved(dev, qpn)))
                        return -EINVAL;

                /* the reserved QPs (special, proxy, tunnel)
                 * do not operate over vlans
                 */
                if (mlx4_is_qp_reserved(dev, qpn))
                        return 0;

                /* force strip vlan by clear vsd */
                qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);

                if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
                    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
                } else if (0 != vp_oper->state.default_vlan) {
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
                } else { /* priority tagged */
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
                }

                qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
                qpc->pri_path.vlan_index = vp_oper->vlan_idx;
                qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
                qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
                qpc->pri_path.sched_queue &= 0xC7;
                qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
        }
        if (vp_oper->state.spoofchk) {
                qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
                qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
        }
        return 0;
}

static int mpt_mask(struct mlx4_dev *dev)
{
        return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, u64 res_id,
                      enum mlx4_resource type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
                                  res_id);
}

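/*
 * get_res()/put_res() bracket access to a tracked resource: get_res()
 * verifies ownership and marks the entry busy so concurrent commands
 * cannot move it; put_res() restores the saved state.
 */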
static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
                   enum mlx4_resource type,
                   void *res)
{
        struct res_common *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (!r) {
                err = -ENONET;
                goto exit;
        }

        if (r->state == RES_ANY_BUSY) {
                err = -EBUSY;
                goto exit;
        }

        if (r->owner != slave) {
                err = -EPERM;
                goto exit;
        }

        r->from_state = r->state;
        r->state = RES_ANY_BUSY;

        if (res)
                *((struct res_common **)res) = r;

exit:
        spin_unlock_irq(mlx4_tlock(dev));
        return err;
}

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
                                    enum mlx4_resource type,
                                    u64 res_id, int *slave)
{
        struct res_common *r;
        int err = -ENOENT;
        int id = res_id;

        if (type == RES_QP)
                id &= 0x7fffff;
        spin_lock(mlx4_tlock(dev));

        r = find_res(dev, id, type);
        if (r) {
                *slave = r->owner;
                err = 0;
        }
        spin_unlock(mlx4_tlock(dev));

        return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
                    enum mlx4_resource type)
{
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (r)
                r->state = r->from_state;
        spin_unlock_irq(mlx4_tlock(dev));
}

static struct res_common *alloc_qp_tr(int id)
{
        struct res_qp *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_QP_RESERVED;
        ret->local_qpn = id;
        INIT_LIST_HEAD(&ret->mcg_list);
        spin_lock_init(&ret->mcg_spl);
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
        struct res_mtt *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->order = order;
        ret->com.state = RES_MTT_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
        struct res_mpt *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_MPT_RESERVED;
        ret->key = key;

        return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
        struct res_eq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_EQ_RESERVED;

        return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
        struct res_cq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_CQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
        struct res_srq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_SRQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
        struct res_counter *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_COUNTER_ALLOCATED;

        return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
        struct res_xrcdn *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_XRCD_ALLOCATED;

        return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
        struct res_fs_rule *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_FS_RULE_ALLOCATED;
        ret->qpn = qpn;
        return &ret->com;
}

static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
                                   int extra)
{
        struct res_common *ret;

        switch (type) {
        case RES_QP:
                ret = alloc_qp_tr(id);
                break;
        case RES_MPT:
                ret = alloc_mpt_tr(id, extra);
                break;
        case RES_MTT:
                ret = alloc_mtt_tr(id, extra);
                break;
        case RES_EQ:
                ret = alloc_eq_tr(id);
                break;
        case RES_CQ:
                ret = alloc_cq_tr(id);
                break;
        case RES_SRQ:
                ret = alloc_srq_tr(id);
                break;
        case RES_MAC:
                pr_err("implementation missing\n");
                return NULL;
        case RES_COUNTER:
                ret = alloc_counter_tr(id);
                break;
        case RES_XRCD:
                ret = alloc_xrcdn_tr(id);
                break;
        case RES_FS_RULE:
                ret = alloc_fs_rule_tr(id, extra);
                break;
        default:
                return NULL;
        }
        if (ret)
                ret->owner = slave;

        return ret;
}

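/*
 * Register a contiguous range of resource ids for a slave: trackers are
 * allocated outside the lock, then inserted into the tree and the
 * slave's list atomically, backing out all insertions on any failure.
 */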
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
{
        int i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct res_common **res_arr;
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct rb_root *root = &tracker->res_tree[type];

        res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
        if (!res_arr)
                return -ENOMEM;

        for (i = 0; i < count; ++i) {
                res_arr[i] = alloc_tr(base + i, type, slave, extra);
                if (!res_arr[i]) {
                        for (--i; i >= 0; --i)
                                kfree(res_arr[i]);

                        kfree(res_arr);
                        return -ENOMEM;
                }
        }

        spin_lock_irq(mlx4_tlock(dev));
        for (i = 0; i < count; ++i) {
                if (find_res(dev, base + i, type)) {
                        err = -EEXIST;
                        goto undo;
                }
                err = res_tracker_insert(root, res_arr[i]);
                if (err)
                        goto undo;
                list_add_tail(&res_arr[i]->list,
                              &tracker->slave_list[slave].res_list[type]);
        }
        spin_unlock_irq(mlx4_tlock(dev));
        kfree(res_arr);

        return 0;

undo:
        /* i indexes res_arr, so unwind to 0 (not to base), and unlink
         * each entry from the slave's list before it is freed below.
         */
        for (--i; i >= 0; --i) {
                rb_erase(&res_arr[i]->node, root);
                list_del(&res_arr[i]->list);
        }

        spin_unlock_irq(mlx4_tlock(dev));

        for (i = 0; i < count; ++i)
                kfree(res_arr[i]);

        kfree(res_arr);

        return err;
}

static int remove_qp_ok(struct res_qp *res)
{
        if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
            !list_empty(&res->mcg_list)) {
                pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
                       res->com.state, atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_QP_RESERVED) {
                return -EPERM;
        }

        return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
        if (res->com.state == RES_MTT_BUSY ||
            atomic_read(&res->ref_count)) {
                printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
                       __func__, __LINE__,
                       mtt_states_str(res->com.state),
                       atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_MTT_ALLOCATED)
                return -EPERM;
        else if (res->order != order)
                return -EINVAL;

        return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
        if (res->com.state == RES_MPT_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_MPT_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
        if (res->com.state == RES_EQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_EQ_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
        if (res->com.state == RES_COUNTER_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_COUNTER_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
        if (res->com.state == RES_XRCD_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_XRCD_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
        if (res->com.state == RES_FS_RULE_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_FS_RULE_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
        if (res->com.state == RES_CQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_CQ_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
        if (res->com.state == RES_SRQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_SRQ_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
        switch (type) {
        case RES_QP:
                return remove_qp_ok((struct res_qp *)res);
        case RES_CQ:
                return remove_cq_ok((struct res_cq *)res);
        case RES_SRQ:
                return remove_srq_ok((struct res_srq *)res);
        case RES_MPT:
                return remove_mpt_ok((struct res_mpt *)res);
        case RES_MTT:
                return remove_mtt_ok((struct res_mtt *)res, extra);
        case RES_MAC:
                return -ENOSYS;
        case RES_EQ:
                return remove_eq_ok((struct res_eq *)res);
        case RES_COUNTER:
                return remove_counter_ok((struct res_counter *)res);
        case RES_XRCD:
                return remove_xrcdn_ok((struct res_xrcdn *)res);
        case RES_FS_RULE:
                return remove_fs_rule_ok((struct res_fs_rule *)res);
        default:
                return -EINVAL;
        }
}

static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
{
        u64 i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        for (i = base; i < base + count; ++i) {
                r = res_tracker_lookup(&tracker->res_tree[type], i);
                if (!r) {
                        err = -ENOENT;
                        goto out;
                }
                if (r->owner != slave) {
                        err = -EPERM;
                        goto out;
                }
                err = remove_ok(r, type, extra);
                if (err)
                        goto out;
        }

        for (i = base; i < base + count; ++i) {
                r = res_tracker_lookup(&tracker->res_tree[type], i);
                rb_erase(&r->node, &tracker->res_tree[type]);
                list_del(&r->list);
                kfree(r);
        }
        err = 0;

out:
        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

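/*
 * State transitions are two-phase: *_res_start_move_to() validates the
 * requested transition, saves from_state/to_state and parks the entry
 * in the BUSY state; the caller then either commits with res_end_move()
 * or rolls back with res_abort_move().
 */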
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
                                enum res_qp_states state, struct res_qp **qp,
                                int alloc)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_qp *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_QP_BUSY:
                        mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
                                 __func__, r->com.res_id);
                        err = -EBUSY;
                        break;

                case RES_QP_RESERVED:
                        if (r->com.state == RES_QP_MAPPED && !alloc)
                                break;

                        mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
                        err = -EINVAL;
                        break;

                case RES_QP_MAPPED:
                        if ((r->com.state == RES_QP_RESERVED && alloc) ||
                            r->com.state == RES_QP_HW)
                                break;
                        else {
                                mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
                                          r->com.res_id);
                                err = -EINVAL;
                        }

                        break;

                case RES_QP_HW:
                        if (r->com.state != RES_QP_MAPPED)
                                err = -EINVAL;
                        break;
                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_QP_BUSY;
                        if (qp)
                                *qp = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                enum res_mpt_states state, struct res_mpt **mpt)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_mpt *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_MPT_BUSY:
                        err = -EINVAL;
                        break;

                case RES_MPT_RESERVED:
                        if (r->com.state != RES_MPT_MAPPED)
                                err = -EINVAL;
                        break;

                case RES_MPT_MAPPED:
                        if (r->com.state != RES_MPT_RESERVED &&
                            r->com.state != RES_MPT_HW)
                                err = -EINVAL;
                        break;

                case RES_MPT_HW:
                        if (r->com.state != RES_MPT_MAPPED)
                                err = -EINVAL;
                        break;
                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_MPT_BUSY;
                        if (mpt)
                                *mpt = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                enum res_eq_states state, struct res_eq **eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_eq *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_EQ_BUSY:
                        err = -EINVAL;
                        break;

                case RES_EQ_RESERVED:
                        if (r->com.state != RES_EQ_HW)
                                err = -EINVAL;
                        break;

                case RES_EQ_HW:
                        if (r->com.state != RES_EQ_RESERVED)
                                err = -EINVAL;
                        break;

                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_EQ_BUSY;
                        if (eq)
                                *eq = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
                                enum res_cq_states state, struct res_cq **cq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_cq *r;
        int err;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_CQ_BUSY:
                        err = -EBUSY;
                        break;

                case RES_CQ_ALLOCATED:
                        if (r->com.state != RES_CQ_HW)
                                err = -EINVAL;
                        else if (atomic_read(&r->ref_count))
                                err = -EBUSY;
                        else
                                err = 0;
                        break;

                case RES_CQ_HW:
                        if (r->com.state != RES_CQ_ALLOCATED)
                                err = -EINVAL;
                        else
                                err = 0;
                        break;

                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_CQ_BUSY;
                        if (cq)
                                *cq = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                 enum res_srq_states state, struct res_srq **srq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_srq *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_SRQ_BUSY:
                        err = -EINVAL;
                        break;

                case RES_SRQ_ALLOCATED:
                        if (r->com.state != RES_SRQ_HW)
                                err = -EINVAL;
                        else if (atomic_read(&r->ref_count))
                                err = -EBUSY;
                        break;

                case RES_SRQ_HW:
                        if (r->com.state != RES_SRQ_ALLOCATED)
                                err = -EINVAL;
                        break;

                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_SRQ_BUSY;
                        if (srq)
                                *srq = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

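/*
 * Complete or roll back a transition started by *_res_start_move_to():
 * res_end_move() commits to_state, res_abort_move() restores from_state.
 */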
static void res_abort_move(struct mlx4_dev *dev, int slave,
                           enum mlx4_resource type, int id)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[type], id);
        if (r && (r->owner == slave))
                r->state = r->from_state;
        spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
                         enum mlx4_resource type, int id)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[type], id);
        if (r && (r->owner == slave))
                r->state = r->to_state;
        spin_unlock_irq(mlx4_tlock(dev));
}

static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
        return mlx4_is_qp_reserved(dev, qpn) &&
                (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}

static int fw_reserved(struct mlx4_dev *dev, int qpn)
{
        return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
}

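/*
 * Allocation wrappers follow a common pattern: charge the slave's quota
 * via mlx4_grant_resource(), perform the real allocation with the
 * __mlx4_* helper, then record the range in the tracker, unwinding the
 * earlier steps if any stage fails.
 */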
1469 static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1470                         u64 in_param, u64 *out_param)
1471 {
1472         int err;
1473         int count;
1474         int align;
1475         int base;
1476         int qpn;
1477
1478         switch (op) {
1479         case RES_OP_RESERVE:
1480                 count = get_param_l(&in_param);
1481                 align = get_param_h(&in_param);
1482                 err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
1483                 if (err)
1484                         return err;
1485
1486                 err = __mlx4_qp_reserve_range(dev, count, align, &base);
1487                 if (err) {
1488                         mlx4_release_resource(dev, slave, RES_QP, count, 0);
1489                         return err;
1490                 }
1491
1492                 err = add_res_range(dev, slave, base, count, RES_QP, 0);
1493                 if (err) {
1494                         mlx4_release_resource(dev, slave, RES_QP, count, 0);
1495                         __mlx4_qp_release_range(dev, base, count);
1496                         return err;
1497                 }
1498                 set_param_l(out_param, base);
1499                 break;
1500         case RES_OP_MAP_ICM:
1501                 qpn = get_param_l(&in_param) & 0x7fffff;
1502                 if (valid_reserved(dev, slave, qpn)) {
1503                         err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1504                         if (err)
1505                                 return err;
1506                 }
1507
1508                 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1509                                            NULL, 1);
1510                 if (err)
1511                         return err;
1512
1513                 if (!fw_reserved(dev, qpn)) {
1514                         err = __mlx4_qp_alloc_icm(dev, qpn);
1515                         if (err) {
1516                                 res_abort_move(dev, slave, RES_QP, qpn);
1517                                 return err;
1518                         }
1519                 }
1520
1521                 res_end_move(dev, slave, RES_QP, qpn);
1522                 break;
1523
1524         default:
1525                 err = -EINVAL;
1526                 break;
1527         }
1528         return err;
1529 }
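
/*
 * Editor's note: qp_alloc_res() and the other *_alloc_res() helpers below
 * all follow the quota-enforcement shape this patch introduces: charge the
 * slave's quota with mlx4_grant_resource() before touching the real
 * allocator, then record the range in the tracker with add_res_range(),
 * unwinding in strict reverse order on any failure.  Sketched abstractly;
 * RES_FOO, __alloc_foo() and __free_foo() are placeholders, not real
 * symbols:
 *
 *        err = mlx4_grant_resource(dev, slave, RES_FOO, count, 0);
 *        if (err)
 *                return err;        (over quota)
 *        err = __alloc_foo(dev, count, &base);
 *        if (err)
 *                goto release_quota;
 *        err = add_res_range(dev, slave, base, count, RES_FOO, 0);
 *        if (err)
 *                goto free_foo;
 *        set_param_l(out_param, base);
 *        return 0;
 *
 * free_foo:
 *        __free_foo(dev, base, count);
 * release_quota:
 *        mlx4_release_resource(dev, slave, RES_FOO, count, 0);
 *        return err;
 */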
1530
1531 static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1532                          u64 in_param, u64 *out_param)
1533 {
1534         int err = -EINVAL;
1535         int base;
1536         int order;
1537
1538         if (op != RES_OP_RESERVE_AND_MAP)
1539                 return err;
1540
1541         order = get_param_l(&in_param);
1542
1543         err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1544         if (err)
1545                 return err;
1546
1547         base = __mlx4_alloc_mtt_range(dev, order);
1548         if (base == -1) {
1549                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1550                 return -ENOMEM;
1551         }
1552
1553         err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1554         if (err) {
1555                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1556                 __mlx4_free_mtt_range(dev, base, order);
1557         } else {
1558                 set_param_l(out_param, base);
1559         }
1560
1561         return err;
1562 }
1563
1564 static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1565                          u64 in_param, u64 *out_param)
1566 {
1567         int err = -EINVAL;
1568         int index;
1569         int id;
1570         struct res_mpt *mpt;
1571
1572         switch (op) {
1573         case RES_OP_RESERVE:
1574                 err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1575                 if (err)
1576                         break;
1577
1578                 index = __mlx4_mpt_reserve(dev);
1579                 if (index == -1) {
1580                         mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1581                         break;
1582                 }
1583                 id = index & mpt_mask(dev);
1584
1585                 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1586                 if (err) {
1587                         mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1588                         __mlx4_mpt_release(dev, index);
1589                         break;
1590                 }
1591                 set_param_l(out_param, index);
1592                 break;
1593         case RES_OP_MAP_ICM:
1594                 index = get_param_l(&in_param);
1595                 id = index & mpt_mask(dev);
1596                 err = mr_res_start_move_to(dev, slave, id,
1597                                            RES_MPT_MAPPED, &mpt);
1598                 if (err)
1599                         return err;
1600
1601                 err = __mlx4_mpt_alloc_icm(dev, mpt->key);
1602                 if (err) {
1603                         res_abort_move(dev, slave, RES_MPT, id);
1604                         return err;
1605                 }
1606
1607                 res_end_move(dev, slave, RES_MPT, id);
1608                 break;
1609         }
1610         return err;
1611 }
1612
1613 static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1614                         u64 in_param, u64 *out_param)
1615 {
1616         int cqn;
1617         int err;
1618
1619         switch (op) {
1620         case RES_OP_RESERVE_AND_MAP:
1621                 err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
1622                 if (err)
1623                         break;
1624
1625                 err = __mlx4_cq_alloc_icm(dev, &cqn);
1626                 if (err) {
1627                         mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1628                         break;
1629                 }
1630
1631                 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1632                 if (err) {
1633                         mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1634                         __mlx4_cq_free_icm(dev, cqn);
1635                         break;
1636                 }
1637
1638                 set_param_l(out_param, cqn);
1639                 break;
1640
1641         default:
1642                 err = -EINVAL;
1643         }
1644
1645         return err;
1646 }
1647
1648 static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1649                          u64 in_param, u64 *out_param)
1650 {
1651         int srqn;
1652         int err;
1653
1654         switch (op) {
1655         case RES_OP_RESERVE_AND_MAP:
1656                 err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
1657                 if (err)
1658                         break;
1659
1660                 err = __mlx4_srq_alloc_icm(dev, &srqn);
1661                 if (err) {
1662                         mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1663                         break;
1664                 }
1665
1666                 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1667                 if (err) {
1668                         mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1669                         __mlx4_srq_free_icm(dev, srqn);
1670                         break;
1671                 }
1672
1673                 set_param_l(out_param, srqn);
1674                 break;
1675
1676         default:
1677                 err = -EINVAL;
1678         }
1679
1680         return err;
1681 }
1682
1683 static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
1684 {
1685         struct mlx4_priv *priv = mlx4_priv(dev);
1686         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1687         struct mac_res *res;
1688
1689         if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
1690                 return -EINVAL;
1691         res = kzalloc(sizeof(*res), GFP_KERNEL);
1692         if (!res) {
1693                 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1694                 return -ENOMEM;
1695         }
1696         res->mac = mac;
1697         res->port = (u8) port;
1698         list_add_tail(&res->list,
1699                       &tracker->slave_list[slave].res_list[RES_MAC]);
1700         return 0;
1701 }
1702
1703 static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1704                                int port)
1705 {
1706         struct mlx4_priv *priv = mlx4_priv(dev);
1707         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1708         struct list_head *mac_list =
1709                 &tracker->slave_list[slave].res_list[RES_MAC];
1710         struct mac_res *res, *tmp;
1711
1712         list_for_each_entry_safe(res, tmp, mac_list, list) {
1713                 if (res->mac == mac && res->port == (u8) port) {
1714                         list_del(&res->list);
1715                         mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1716                         kfree(res);
1717                         break;
1718                 }
1719         }
1720 }
1721
1722 static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1723 {
1724         struct mlx4_priv *priv = mlx4_priv(dev);
1725         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1726         struct list_head *mac_list =
1727                 &tracker->slave_list[slave].res_list[RES_MAC];
1728         struct mac_res *res, *tmp;
1729
1730         list_for_each_entry_safe(res, tmp, mac_list, list) {
1731                 list_del(&res->list);
1732                 __mlx4_unregister_mac(dev, res->port, res->mac);
1733                 mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
1734                 kfree(res);
1735         }
1736 }
1737
1738 static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1739                          u64 in_param, u64 *out_param, int in_port)
1740 {
1741         int err = -EINVAL;
1742         int port;
1743         u64 mac;
1744
1745         if (op != RES_OP_RESERVE_AND_MAP)
1746                 return err;
1747
1748         port = !in_port ? get_param_l(out_param) : in_port;
1749         mac = in_param;
1750
1751         err = __mlx4_register_mac(dev, port, mac);
1752         if (err >= 0) {
1753                 set_param_l(out_param, err);
1754                 err = 0;
1755         }
1756
1757         if (!err) {
1758                 err = mac_add_to_slave(dev, slave, mac, port);
1759                 if (err)
1760                         __mlx4_unregister_mac(dev, port, mac);
1761         }
1762         return err;
1763 }
1764
1765 static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1766                              int port, int vlan_index)
1767 {
1768         struct mlx4_priv *priv = mlx4_priv(dev);
1769         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1770         struct list_head *vlan_list =
1771                 &tracker->slave_list[slave].res_list[RES_VLAN];
1772         struct vlan_res *res, *tmp;
1773
1774         list_for_each_entry_safe(res, tmp, vlan_list, list) {
1775                 if (res->vlan == vlan && res->port == (u8) port) {
1776                         /* vlan found. update ref count */
1777                         ++res->ref_count;
1778                         return 0;
1779                 }
1780         }
1781
1782         if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
1783                 return -EINVAL;
1784         res = kzalloc(sizeof(*res), GFP_KERNEL);
1785         if (!res) {
1786                 mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
1787                 return -ENOMEM;
1788         }
1789         res->vlan = vlan;
1790         res->port = (u8) port;
1791         res->vlan_index = vlan_index;
1792         res->ref_count = 1;
1793         list_add_tail(&res->list,
1794                       &tracker->slave_list[slave].res_list[RES_VLAN]);
1795         return 0;
1796 }
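
/*
 * Editor's note: unlike MACs above, VLAN registrations are reference
 * counted per (vlan, port) pair, so a slave registering the same VLAN
 * twice consumes only one quota unit and one VLAN table index.  For
 * example (sketch):
 *
 *        vlan_add_to_slave(dev, slave, 100, 1, idx);    ref_count = 1, quota +1
 *        vlan_add_to_slave(dev, slave, 100, 1, idx);    ref_count = 2, no charge
 *        vlan_del_from_slave(dev, slave, 100, 1);       ref_count = 1, kept
 *        vlan_del_from_slave(dev, slave, 100, 1);       entry freed, quota -1
 */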
1797
1798
1799 static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1800                                 int port)
1801 {
1802         struct mlx4_priv *priv = mlx4_priv(dev);
1803         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1804         struct list_head *vlan_list =
1805                 &tracker->slave_list[slave].res_list[RES_VLAN];
1806         struct vlan_res *res, *tmp;
1807
1808         list_for_each_entry_safe(res, tmp, vlan_list, list) {
1809                 if (res->vlan == vlan && res->port == (u8) port) {
1810                         if (!--res->ref_count) {
1811                                 list_del(&res->list);
1812                                 mlx4_release_resource(dev, slave, RES_VLAN,
1813                                                       1, port);
1814                                 kfree(res);
1815                         }
1816                         break;
1817                 }
1818         }
1819 }
1820
1821 static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
1822 {
1823         struct mlx4_priv *priv = mlx4_priv(dev);
1824         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1825         struct list_head *vlan_list =
1826                 &tracker->slave_list[slave].res_list[RES_VLAN];
1827         struct vlan_res *res, *tmp;
1828         int i;
1829
1830         list_for_each_entry_safe(res, tmp, vlan_list, list) {
1831                 list_del(&res->list);
1832                 /* unregister the vlan as many times as the slave referenced it */
1833                 for (i = 0; i < res->ref_count; i++)
1834                         __mlx4_unregister_vlan(dev, res->port, res->vlan);
1835                 mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
1836                 kfree(res);
1837         }
1838 }
1839
1840 static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1841                           u64 in_param, u64 *out_param, int in_port)
1842 {
1843         struct mlx4_priv *priv = mlx4_priv(dev);
1844         struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
1845         int err;
1846         u16 vlan;
1847         int vlan_index;
1848         int port;
1849
1850         port = !in_port ? get_param_l(out_param) : in_port;
1851
1852         if (!port || op != RES_OP_RESERVE_AND_MAP)
1853                 return -EINVAL;
1854
1855         /* older upstream kernels treated vlan reg/unreg as a NOP; keep that behavior */
1856         if (!in_port && port > 0 && port <= dev->caps.num_ports) {
1857                 slave_state[slave].old_vlan_api = true;
1858                 return 0;
1859         }
1860
1861         vlan = (u16) in_param;
1862
1863         err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
1864         if (!err) {
1865                 set_param_l(out_param, (u32) vlan_index);
1866                 err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
1867                 if (err)
1868                         __mlx4_unregister_vlan(dev, port, vlan);
1869         }
1870         return err;
1871 }
1872
1873 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1874                              u64 in_param, u64 *out_param)
1875 {
1876         u32 index;
1877         int err;
1878
1879         if (op != RES_OP_RESERVE)
1880                 return -EINVAL;
1881
1882         err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
1883         if (err)
1884                 return err;
1885
1886         err = __mlx4_counter_alloc(dev, &index);
1887         if (err) {
1888                 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
1889                 return err;
1890         }
1891
1892         err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
1893         if (err) {
1894                 __mlx4_counter_free(dev, index);
1895                 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
1896         } else {
1897                 set_param_l(out_param, index);
1898         }
1899
1900         return err;
1901 }
1902
1903 static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1904                            u64 in_param, u64 *out_param)
1905 {
1906         u32 xrcdn;
1907         int err;
1908
1909         if (op != RES_OP_RESERVE)
1910                 return -EINVAL;
1911
1912         err = __mlx4_xrcd_alloc(dev, &xrcdn);
1913         if (err)
1914                 return err;
1915
1916         err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
1917         if (err)
1918                 __mlx4_xrcd_free(dev, xrcdn);
1919         else
1920                 set_param_l(out_param, xrcdn);
1921
1922         return err;
1923 }
1924
1925 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
1926                            struct mlx4_vhcr *vhcr,
1927                            struct mlx4_cmd_mailbox *inbox,
1928                            struct mlx4_cmd_mailbox *outbox,
1929                            struct mlx4_cmd_info *cmd)
1930 {
1931         int err;
1932         int alop = vhcr->op_modifier;
1933
1934         switch (vhcr->in_modifier & 0xFF) {
1935         case RES_QP:
1936                 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
1937                                    vhcr->in_param, &vhcr->out_param);
1938                 break;
1939
1940         case RES_MTT:
1941                 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1942                                     vhcr->in_param, &vhcr->out_param);
1943                 break;
1944
1945         case RES_MPT:
1946                 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1947                                     vhcr->in_param, &vhcr->out_param);
1948                 break;
1949
1950         case RES_CQ:
1951                 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1952                                    vhcr->in_param, &vhcr->out_param);
1953                 break;
1954
1955         case RES_SRQ:
1956                 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1957                                     vhcr->in_param, &vhcr->out_param);
1958                 break;
1959
1960         case RES_MAC:
1961                 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
1962                                     vhcr->in_param, &vhcr->out_param,
1963                                     (vhcr->in_modifier >> 8) & 0xFF);
1964                 break;
1965
1966         case RES_VLAN:
1967                 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
1968                                      vhcr->in_param, &vhcr->out_param,
1969                                      (vhcr->in_modifier >> 8) & 0xFF);
1970                 break;
1971
1972         case RES_COUNTER:
1973                 err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
1974                                         vhcr->in_param, &vhcr->out_param);
1975                 break;
1976
1977         case RES_XRCD:
1978                 err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
1979                                       vhcr->in_param, &vhcr->out_param);
1980                 break;
1981
1982         default:
1983                 err = -EINVAL;
1984                 break;
1985         }
1986
1987         return err;
1988 }
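
/*
 * Editor's note: the wrapper above demultiplexes a slave's ALLOC_RES
 * command purely from VHCR fields: the low byte of in_modifier selects the
 * resource type, bits 8-15 carry the port for MAC/VLAN, op_modifier holds
 * the RES_OP_* opcode, and the 64-bit in_param/out_param are split into
 * 32-bit halves by get_param_l()/get_param_h().  Assuming that layout, a
 * request to reserve a range of 8 QPs aligned to 8 looks roughly like:
 *
 *        vhcr->in_modifier = RES_QP;             (no port byte for QPs)
 *        vhcr->op_modifier = RES_OP_RESERVE;
 *        set_param_l(&vhcr->in_param, 8);        (count)
 *        set_param_h(&vhcr->in_param, 8);        (alignment)
 *
 * and on success the base QPN comes back in the low half of out_param.
 */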
1989
1990 static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1991                        u64 in_param)
1992 {
1993         int err;
1994         int count;
1995         int base;
1996         int qpn;
1997
1998         switch (op) {
1999         case RES_OP_RESERVE:
2000                 base = get_param_l(&in_param) & 0x7fffff;
2001                 count = get_param_h(&in_param);
2002                 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
2003                 if (err)
2004                         break;
2005                 mlx4_release_resource(dev, slave, RES_QP, count, 0);
2006                 __mlx4_qp_release_range(dev, base, count);
2007                 break;
2008         case RES_OP_MAP_ICM:
2009                 qpn = get_param_l(&in_param) & 0x7fffff;
2010                 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
2011                                            NULL, 0);
2012                 if (err)
2013                         return err;
2014
2015                 if (!fw_reserved(dev, qpn))
2016                         __mlx4_qp_free_icm(dev, qpn);
2017
2018                 res_end_move(dev, slave, RES_QP, qpn);
2019
2020                 if (valid_reserved(dev, slave, qpn))
2021                         err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
2022                 break;
2023         default:
2024                 err = -EINVAL;
2025                 break;
2026         }
2027         return err;
2028 }
2029
2030 static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2031                         u64 in_param, u64 *out_param)
2032 {
2033         int err = -EINVAL;
2034         int base;
2035         int order;
2036
2037         if (op != RES_OP_RESERVE_AND_MAP)
2038                 return err;
2039
2040         base = get_param_l(&in_param);
2041         order = get_param_h(&in_param);
2042         err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
2043         if (!err) {
2044                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
2045                 __mlx4_free_mtt_range(dev, base, order);
2046         }
2047         return err;
2048 }
2049
2050 static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2051                         u64 in_param)
2052 {
2053         int err = -EINVAL;
2054         int index;
2055         int id;
2056         struct res_mpt *mpt;
2057
2058         switch (op) {
2059         case RES_OP_RESERVE:
2060                 index = get_param_l(&in_param);
2061                 id = index & mpt_mask(dev);
2062                 err = get_res(dev, slave, id, RES_MPT, &mpt);
2063                 if (err)
2064                         break;
2065                 index = mpt->key;
2066                 put_res(dev, slave, id, RES_MPT);
2067
2068                 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
2069                 if (err)
2070                         break;
2071                 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
2072                 __mlx4_mpt_release(dev, index);
2073                 break;
2074         case RES_OP_MAP_ICM:
2075                 index = get_param_l(&in_param);
2076                 id = index & mpt_mask(dev);
2077                 err = mr_res_start_move_to(dev, slave, id,
2078                                            RES_MPT_RESERVED, &mpt);
2079                 if (err)
2080                         return err;
2081 
2082                 __mlx4_mpt_free_icm(dev, mpt->key);
2083                 res_end_move(dev, slave, RES_MPT, id);
2084                 break;
2085 
2086         default:
2087                 err = -EINVAL;
2088                 break;
2089         }
2090         return err;
2091 }
2092
2093 static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2094                        u64 in_param, u64 *out_param)
2095 {
2096         int cqn;
2097         int err;
2098
2099         switch (op) {
2100         case RES_OP_RESERVE_AND_MAP:
2101                 cqn = get_param_l(&in_param);
2102                 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2103                 if (err)
2104                         break;
2105
2106                 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
2107                 __mlx4_cq_free_icm(dev, cqn);
2108                 break;
2109
2110         default:
2111                 err = -EINVAL;
2112                 break;
2113         }
2114
2115         return err;
2116 }
2117
2118 static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2119                         u64 in_param, u64 *out_param)
2120 {
2121         int srqn;
2122         int err;
2123
2124         switch (op) {
2125         case RES_OP_RESERVE_AND_MAP:
2126                 srqn = get_param_l(&in_param);
2127                 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2128                 if (err)
2129                         break;
2130
2131                 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
2132                 __mlx4_srq_free_icm(dev, srqn);
2133                 break;
2134
2135         default:
2136                 err = -EINVAL;
2137                 break;
2138         }
2139
2140         return err;
2141 }
2142
2143 static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2144                             u64 in_param, u64 *out_param, int in_port)
2145 {
2146         int port;
2147         int err = 0;
2148
2149         switch (op) {
2150         case RES_OP_RESERVE_AND_MAP:
2151                 port = !in_port ? get_param_l(out_param) : in_port;
2152                 mac_del_from_slave(dev, slave, in_param, port);
2153                 __mlx4_unregister_mac(dev, port, in_param);
2154                 break;
2155         default:
2156                 err = -EINVAL;
2157                 break;
2158         }
2159
2160         return err;
2161 }
2163
2164 static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2165                             u64 in_param, u64 *out_param, int port)
2166 {
2167         struct mlx4_priv *priv = mlx4_priv(dev);
2168         struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2169         int err = 0;
2170
2171         switch (op) {
2172         case RES_OP_RESERVE_AND_MAP:
2173                 if (slave_state[slave].old_vlan_api)
2174                         return 0;
2175                 if (!port)
2176                         return -EINVAL;
2177                 vlan_del_from_slave(dev, slave, in_param, port);
2178                 __mlx4_unregister_vlan(dev, port, in_param);
2179                 break;
2180         default:
2181                 err = -EINVAL;
2182                 break;
2183         }
2184
2185         return err;
2186 }
2187
2188 static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2189                             u64 in_param, u64 *out_param)
2190 {
2191         int index;
2192         int err;
2193
2194         if (op != RES_OP_RESERVE)
2195                 return -EINVAL;
2196
2197         index = get_param_l(&in_param);
2198         err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2199         if (err)
2200                 return err;
2201
2202         __mlx4_counter_free(dev, index);
2203         mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2204
2205         return err;
2206 }
2207
2208 static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2209                           u64 in_param, u64 *out_param)
2210 {
2211         int xrcdn;
2212         int err;
2213
2214         if (op != RES_OP_RESERVE)
2215                 return -EINVAL;
2216
2217         xrcdn = get_param_l(&in_param);
2218         err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2219         if (err)
2220                 return err;
2221
2222         __mlx4_xrcd_free(dev, xrcdn);
2223
2224         return err;
2225 }
2226
2227 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2228                           struct mlx4_vhcr *vhcr,
2229                           struct mlx4_cmd_mailbox *inbox,
2230                           struct mlx4_cmd_mailbox *outbox,
2231                           struct mlx4_cmd_info *cmd)
2232 {
2233         int err = -EINVAL;
2234         int alop = vhcr->op_modifier;
2235
2236         switch (vhcr->in_modifier & 0xFF) {
2237         case RES_QP:
2238                 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2239                                   vhcr->in_param);
2240                 break;
2241
2242         case RES_MTT:
2243                 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2244                                    vhcr->in_param, &vhcr->out_param);
2245                 break;
2246
2247         case RES_MPT:
2248                 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2249                                    vhcr->in_param);
2250                 break;
2251
2252         case RES_CQ:
2253                 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2254                                   vhcr->in_param, &vhcr->out_param);
2255                 break;
2256
2257         case RES_SRQ:
2258                 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2259                                    vhcr->in_param, &vhcr->out_param);
2260                 break;
2261
2262         case RES_MAC:
2263                 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
2264                                    vhcr->in_param, &vhcr->out_param,
2265                                    (vhcr->in_modifier >> 8) & 0xFF);
2266                 break;
2267
2268         case RES_VLAN:
2269                 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
2270                                     vhcr->in_param, &vhcr->out_param,
2271                                     (vhcr->in_modifier >> 8) & 0xFF);
2272                 break;
2273
2274         case RES_COUNTER:
2275                 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2276                                        vhcr->in_param, &vhcr->out_param);
2277                 break;
2278
2279         case RES_XRCD:
2280                 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2281                                      vhcr->in_param, &vhcr->out_param);
2282                 break;
2283         default:
2284                 break;
2285         }
2286         return err;
2287 }
2288
2289 /* ugly but other choices are uglier */
2290 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2291 {
2292         return (be32_to_cpu(mpt->flags) >> 9) & 1;
2293 }
2294
2295 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
2296 {
2297         return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
2298 }
2299
2300 static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2301 {
2302         return be32_to_cpu(mpt->mtt_sz);
2303 }
2304
2305 static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2306 {
2307         return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2308 }
2309
2310 static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2311 {
2312         return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2313 }
2314
2315 static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2316 {
2317         return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2318 }
2319
2320 static int mr_is_region(struct mlx4_mpt_entry *mpt)
2321 {
2322         return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2323 }
2324
2325 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
2326 {
2327         return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2328 }
2329
2330 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
2331 {
2332         return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2333 }
2334
2335 static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2336 {
2337         int page_shift = (qpc->log_page_size & 0x3f) + 12;
2338         int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2339         int log_sq_stride = qpc->sq_size_stride & 7;
2340         int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2341         int log_rq_stride = qpc->rq_size_stride & 7;
2342         int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2343         int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
2344         u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2345         int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
2346         int sq_size;
2347         int rq_size;
2348         int total_pages;
2349         int total_mem;
2350         int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2351
2352         sq_size = 1 << (log_sq_size + log_sq_stride + 4);
2353         rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2354         total_mem = sq_size + rq_size;
2355         total_pages =
2356                 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
2357                                    page_shift);
2358
2359         return total_pages;
2360 }
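
/*
 * Editor's note: a worked example of qp_get_mtt_size(), assuming 4 KB
 * pages (log_page_size = 0, so page_shift = 12), log_sq_size = 10,
 * log_sq_stride = 2 (64-byte WQEs; the "+ 4" is the 16-byte basic stride
 * unit), page_offset = 0 and no RQ (one of srq/rss/xrc set):
 *
 *        sq_size     = 1 << (10 + 2 + 4) = 64 KB
 *        rq_size     = 0
 *        total_pages = roundup_pow_of_two(65536 >> 12) = 16
 *
 * i.e. the QP needs 16 MTT entries, and check_mtt_range() below verifies
 * that the slave really owns a reservation that large.
 */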
2361
2362 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2363                            int size, struct res_mtt *mtt)
2364 {
2365         int res_start = mtt->com.res_id;
2366         int res_size = (1 << mtt->order);
2367
2368         if (start < res_start || start + size > res_start + res_size)
2369                 return -EPERM;
2370         return 0;
2371 }
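
/*
 * Editor's note: check_mtt_range() is a pure interval-containment test: an
 * MTT reservation of order n starting at res_start covers entries
 * [res_start, res_start + 2^n).  With res_start = 256 and order = 4 the
 * window is [256, 272), so start = 260, size = 8 passes while start = 268,
 * size = 8 fails with -EPERM.
 */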
2372
2373 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2374                            struct mlx4_vhcr *vhcr,
2375                            struct mlx4_cmd_mailbox *inbox,
2376                            struct mlx4_cmd_mailbox *outbox,
2377                            struct mlx4_cmd_info *cmd)
2378 {
2379         int err;
2380         int index = vhcr->in_modifier;
2381         struct res_mtt *mtt;
2382         struct res_mpt *mpt;
2383         int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2384         int phys;
2385         int id;
2386         u32 pd;
2387         int pd_slave;
2388
2389         id = index & mpt_mask(dev);
2390         err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2391         if (err)
2392                 return err;
2393
2394         /* Disable memory windows for VFs. */
2395         if (!mr_is_region(inbox->buf)) {
2396                 err = -EPERM;
2397                 goto ex_abort;
2398         }
2399
2400         /* Make sure that the PD bits related to the slave id are zeros. */
2401         pd = mr_get_pd(inbox->buf);
2402         pd_slave = (pd >> 17) & 0x7f;
2403         if (pd_slave != 0 && pd_slave != slave) {
2404                 err = -EPERM;
2405                 goto ex_abort;
2406         }
2407
2408         if (mr_is_fmr(inbox->buf)) {
2409                 /* FMR and Bind Enable are forbidden in slave devices. */
2410                 if (mr_is_bind_enabled(inbox->buf)) {
2411                         err = -EPERM;
2412                         goto ex_abort;
2413                 }
2414                 /* FMR and Memory Windows are also forbidden. */
2415                 if (!mr_is_region(inbox->buf)) {
2416                         err = -EPERM;
2417                         goto ex_abort;
2418                 }
2419         }
2420
2421         phys = mr_phys_mpt(inbox->buf);
2422         if (!phys) {
2423                 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2424                 if (err)
2425                         goto ex_abort;
2426
2427                 err = check_mtt_range(dev, slave, mtt_base,
2428                                       mr_get_mtt_size(inbox->buf), mtt);
2429                 if (err)
2430                         goto ex_put;
2431
2432                 mpt->mtt = mtt;
2433         }
2434
2435         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2436         if (err)
2437                 goto ex_put;
2438
2439         if (!phys) {
2440                 atomic_inc(&mtt->ref_count);
2441                 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2442         }
2443
2444         res_end_move(dev, slave, RES_MPT, id);
2445         return 0;
2446
2447 ex_put:
2448         if (!phys)
2449                 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2450 ex_abort:
2451         res_abort_move(dev, slave, RES_MPT, id);
2452
2453         return err;
2454 }
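
/*
 * Editor's note: the PD ownership test above relies on the mlx4 convention
 * that the owning function number is folded into bits 17-23 of the PD.
 * For example, a PD value of 0x2a0005 gives
 * pd_slave = (0x2a0005 >> 17) & 0x7f = 21, so only slave 21 (or a PD with
 * zero in those bits) may reference it from an MPT.
 */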
2455
2456 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2457                            struct mlx4_vhcr *vhcr,
2458                            struct mlx4_cmd_mailbox *inbox,
2459                            struct mlx4_cmd_mailbox *outbox,
2460                            struct mlx4_cmd_info *cmd)
2461 {
2462         int err;
2463         int index = vhcr->in_modifier;
2464         struct res_mpt *mpt;
2465         int id;
2466
2467         id = index & mpt_mask(dev);
2468         err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2469         if (err)
2470                 return err;
2471
2472         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2473         if (err)
2474                 goto ex_abort;
2475
2476         if (mpt->mtt)
2477                 atomic_dec(&mpt->mtt->ref_count);
2478
2479         res_end_move(dev, slave, RES_MPT, id);
2480         return 0;
2481
2482 ex_abort:
2483         res_abort_move(dev, slave, RES_MPT, id);
2484
2485         return err;
2486 }
2487
2488 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2489                            struct mlx4_vhcr *vhcr,
2490                            struct mlx4_cmd_mailbox *inbox,
2491                            struct mlx4_cmd_mailbox *outbox,
2492                            struct mlx4_cmd_info *cmd)
2493 {
2494         int err;
2495         int index = vhcr->in_modifier;
2496         struct res_mpt *mpt;
2497         int id;
2498
2499         id = index & mpt_mask(dev);
2500         err = get_res(dev, slave, id, RES_MPT, &mpt);
2501         if (err)
2502                 return err;
2503
2504         if (mpt->com.from_state != RES_MPT_HW) {
2505                 err = -EBUSY;
2506                 goto out;
2507         }
2508
2509         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2510
2511 out:
2512         put_res(dev, slave, id, RES_MPT);
2513         return err;
2514 }
2515
2516 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2517 {
2518         return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2519 }
2520
2521 static int qp_get_scqn(struct mlx4_qp_context *qpc)
2522 {
2523         return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2524 }
2525
2526 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2527 {
2528         return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2529 }
2530
2531 static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2532                                   struct mlx4_qp_context *context)
2533 {
2534         u32 qpn = vhcr->in_modifier & 0xffffff;
2535         u32 qkey = 0;
2536
2537         if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2538                 return;
2539
2540         /* adjust qkey in qp context */
2541         context->qkey = cpu_to_be32(qkey);
2542 }
2543
2544 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2545                              struct mlx4_vhcr *vhcr,
2546                              struct mlx4_cmd_mailbox *inbox,
2547                              struct mlx4_cmd_mailbox *outbox,
2548                              struct mlx4_cmd_info *cmd)
2549 {
2550         int err;
2551         int qpn = vhcr->in_modifier & 0x7fffff;
2552         struct res_mtt *mtt;
2553         struct res_qp *qp;
2554         struct mlx4_qp_context *qpc = inbox->buf + 8;
2555         int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2556         int mtt_size = qp_get_mtt_size(qpc);
2557         struct res_cq *rcq;
2558         struct res_cq *scq;
2559         int rcqn = qp_get_rcqn(qpc);
2560         int scqn = qp_get_scqn(qpc);
2561         u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2562         int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2563         struct res_srq *srq;
2564         int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2565
2566         err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2567         if (err)
2568                 return err;
2569         qp->local_qpn = local_qpn;
2570         qp->sched_queue = 0;
2571         qp->qpc_flags = be32_to_cpu(qpc->flags);
2572
2573         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2574         if (err)
2575                 goto ex_abort;
2576
2577         err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2578         if (err)
2579                 goto ex_put_mtt;
2580
2581         err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2582         if (err)
2583                 goto ex_put_mtt;
2584
2585         if (scqn != rcqn) {
2586                 err = get_res(dev, slave, scqn, RES_CQ, &scq);
2587                 if (err)
2588                         goto ex_put_rcq;
2589         } else
2590                 scq = rcq;
2591
2592         if (use_srq) {
2593                 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2594                 if (err)
2595                         goto ex_put_scq;
2596         }
2597
2598         adjust_proxy_tun_qkey(dev, vhcr, qpc);
2599         update_pkey_index(dev, slave, inbox);
2600         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2601         if (err)
2602                 goto ex_put_srq;
2603         atomic_inc(&mtt->ref_count);
2604         qp->mtt = mtt;
2605         atomic_inc(&rcq->ref_count);
2606         qp->rcq = rcq;
2607         atomic_inc(&scq->ref_count);
2608         qp->scq = scq;
2609
2610         if (scqn != rcqn)
2611                 put_res(dev, slave, scqn, RES_CQ);
2612
2613         if (use_srq) {
2614                 atomic_inc(&srq->ref_count);
2615                 put_res(dev, slave, srqn, RES_SRQ);
2616                 qp->srq = srq;
2617         }
2618         put_res(dev, slave, rcqn, RES_CQ);
2619         put_res(dev, slave, mtt_base, RES_MTT);
2620         res_end_move(dev, slave, RES_QP, qpn);
2621
2622         return 0;
2623
2624 ex_put_srq:
2625         if (use_srq)
2626                 put_res(dev, slave, srqn, RES_SRQ);
2627 ex_put_scq:
2628         if (scqn != rcqn)
2629                 put_res(dev, slave, scqn, RES_CQ);
2630 ex_put_rcq:
2631         put_res(dev, slave, rcqn, RES_CQ);
2632 ex_put_mtt:
2633         put_res(dev, slave, mtt_base, RES_MTT);
2634 ex_abort:
2635         res_abort_move(dev, slave, RES_QP, qpn);
2636
2637         return err;
2638 }
2639
2640 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2641 {
2642         return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2643 }
2644
2645 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2646 {
2647         int log_eq_size = eqc->log_eq_size & 0x1f;
2648         int page_shift = (eqc->log_page_size & 0x3f) + 12;
2649
2650         if (log_eq_size + 5 < page_shift)
2651                 return 1;
2652
2653         return 1 << (log_eq_size + 5 - page_shift);
2654 }
2655
2656 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
2657 {
2658         return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2659 }
2660
2661 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2662 {
2663         int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
2664         int page_shift = (cqc->log_page_size & 0x3f) + 12;
2665
2666         if (log_cq_size + 5 < page_shift)
2667                 return 1;
2668
2669         return 1 << (log_cq_size + 5 - page_shift);
2670 }
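
/*
 * Editor's note: eq_get_mtt_size() and cq_get_mtt_size() share one
 * formula: EQEs and CQEs are both 32 bytes here (hence the "+ 5"), so the
 * queue occupies 1 << (log_size + 5) bytes, rounded up to a minimum of one
 * page.  For example, with 4 KB pages, log_cq_size = 10 needs
 * 1 << (10 + 5 - 12) = 8 MTT entries, while log_cq_size = 6 is only 2 KB
 * and still consumes the one-page minimum.
 */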
2671
2672 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2673                           struct mlx4_vhcr *vhcr,
2674                           struct mlx4_cmd_mailbox *inbox,
2675                           struct mlx4_cmd_mailbox *outbox,
2676                           struct mlx4_cmd_info *cmd)
2677 {
2678         int err;
2679         int eqn = vhcr->in_modifier;
2680         int res_id = (slave << 8) | eqn;
2681         struct mlx4_eq_context *eqc = inbox->buf;
2682         int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
2683         int mtt_size = eq_get_mtt_size(eqc);
2684         struct res_eq *eq;
2685         struct res_mtt *mtt;
2686
2687         err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2688         if (err)
2689                 return err;
2690         err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
2691         if (err)
2692                 goto out_add;
2693
2694         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2695         if (err)
2696                 goto out_move;
2697
2698         err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2699         if (err)
2700                 goto out_put;
2701
2702         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2703         if (err)
2704                 goto out_put;
2705
2706         atomic_inc(&mtt->ref_count);
2707         eq->mtt = mtt;
2708         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2709         res_end_move(dev, slave, RES_EQ, res_id);
2710         return 0;
2711
2712 out_put:
2713         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2714 out_move:
2715         res_abort_move(dev, slave, RES_EQ, res_id);
2716 out_add:
2717         rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2718         return err;
2719 }
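
/*
 * Editor's note: EQ numbers are only unique within a function, so the
 * tracker keys them as res_id = (slave << 8) | eqn throughout this file;
 * EQ 5 of slave 3 is tracked as 0x305 and never collides with EQ 5 of
 * slave 2 (0x205).  Also note that, unlike most resources, EQs never go
 * through mlx4_ALLOC_RES_wrapper(): the tracker entry is created right
 * here in the SW2HW_EQ path and removed again in HW2SW_EQ.
 */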
2720
2721 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2722                               int len, struct res_mtt **res)
2723 {
2724         struct mlx4_priv *priv = mlx4_priv(dev);
2725         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2726         struct res_mtt *mtt;
2727         int err = -EINVAL;
2728
2729         spin_lock_irq(mlx4_tlock(dev));
2730         list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
2731                             com.list) {
2732                 if (!check_mtt_range(dev, slave, start, len, mtt)) {
2733                         *res = mtt;
2734                         mtt->com.from_state = mtt->com.state;
2735                         mtt->com.state = RES_MTT_BUSY;
2736                         err = 0;
2737                         break;
2738                 }
2739         }
2740         spin_unlock_irq(mlx4_tlock(dev));
2741
2742         return err;
2743 }
2744
2745 static int verify_qp_parameters(struct mlx4_dev *dev,
2746                                 struct mlx4_cmd_mailbox *inbox,
2747                                 enum qp_transition transition, u8 slave)
2748 {
2749         u32                     qp_type;
2750         struct mlx4_qp_context  *qp_ctx;
2751         enum mlx4_qp_optpar     optpar;
2752
2753         qp_ctx  = inbox->buf + 8;
2754         qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2755         optpar  = be32_to_cpu(*(__be32 *) inbox->buf);
2756
2757         switch (qp_type) {
2758         case MLX4_QP_ST_RC:
2759         case MLX4_QP_ST_UC:
2760                 switch (transition) {
2761                 case QP_TRANS_INIT2RTR:
2762                 case QP_TRANS_RTR2RTS:
2763                 case QP_TRANS_RTS2RTS:
2764                 case QP_TRANS_SQD2SQD:
2765                 case QP_TRANS_SQD2RTS:
2766                         if (slave != mlx4_master_func_num(dev)) {
2767                                 /* slaves have only gid index 0 */
2768                                 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
2769                                         if (qp_ctx->pri_path.mgid_index)
2770                                                 return -EINVAL;
2771                                 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
2772                                         if (qp_ctx->alt_path.mgid_index)
2773                                                 return -EINVAL;
2774                         }
2775                         break;
2776                 default:
2777                         break;
2778                 }
2779                 break;
2780         default:
2781                 break;
2782         }
2783
2784         return 0;
2785 }
2786
2787 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
2788                            struct mlx4_vhcr *vhcr,
2789                            struct mlx4_cmd_mailbox *inbox,
2790                            struct mlx4_cmd_mailbox *outbox,
2791                            struct mlx4_cmd_info *cmd)
2792 {
2793         struct mlx4_mtt mtt;
2794         __be64 *page_list = inbox->buf;
2795         u64 *pg_list = (u64 *)page_list;
2796         int i;
2797         struct res_mtt *rmtt = NULL;
2798         int start = be64_to_cpu(page_list[0]);
2799         int npages = vhcr->in_modifier;
2800         int err;
2801
2802         err = get_containing_mtt(dev, slave, start, npages, &rmtt);
2803         if (err)
2804                 return err;
2805
2806         /* Call the SW implementation of write_mtt:
2807          * - Prepare a dummy mtt struct
2808          * - Translate inbox contents to simple addresses in host endianness */
2809         mtt.offset = 0;  /* TBD: offset handling is incomplete, but the
2810                             offset is never actually used in this path */
2811         mtt.order = 0;
2812         mtt.page_shift = 0;
2813         for (i = 0; i < npages; ++i)
2814                 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
2815
2816         err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
2817                                ((u64 *)page_list + 2));
2818
2819         if (rmtt)
2820                 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
2821
2822         return err;
2823 }
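
/*
 * Editor's note: the WRITE_MTT inbox layout assumed above is the standard
 * mlx4 one: page_list[0] holds the index of the first MTT entry to write,
 * page_list[1] is reserved, and the physical addresses start at
 * page_list[2].  Bit 0 of each address is the MTT "present" bit, which is
 * masked off with ~1ULL before the list is handed to __mlx4_write_mtt().
 */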
2824
2825 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2826                           struct mlx4_vhcr *vhcr,
2827                           struct mlx4_cmd_mailbox *inbox,
2828                           struct mlx4_cmd_mailbox *outbox,
2829                           struct mlx4_cmd_info *cmd)
2830 {
2831         int eqn = vhcr->in_modifier;
2832         int res_id = eqn | (slave << 8);
2833         struct res_eq *eq;
2834         int err;
2835
2836         err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
2837         if (err)
2838                 return err;
2839
2840         err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
2841         if (err)
2842                 goto ex_abort;
2843
2844         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2845         if (err)
2846                 goto ex_put;
2847
2848         atomic_dec(&eq->mtt->ref_count);
2849         put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2850         res_end_move(dev, slave, RES_EQ, res_id);
2851         rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2852
2853         return 0;
2854
2855 ex_put:
2856         put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2857 ex_abort:
2858         res_abort_move(dev, slave, RES_EQ, res_id);
2859
2860         return err;
2861 }
2862
2863 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
2864 {
2865         struct mlx4_priv *priv = mlx4_priv(dev);
2866         struct mlx4_slave_event_eq_info *event_eq;
2867         struct mlx4_cmd_mailbox *mailbox;
2868         u32 in_modifier = 0;
2869         int err;
2870         int res_id;
2871         struct res_eq *req;
2872
2873         if (!priv->mfunc.master.slave_state)
2874                 return -EINVAL;
2875
2876         event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
2877
2878         /* Create the event only if the slave is registered */
2879         if (event_eq->eqn < 0)
2880                 return 0;
2881
2882         mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2883         res_id = (slave << 8) | event_eq->eqn;
2884         err = get_res(dev, slave, res_id, RES_EQ, &req);
2885         if (err)
2886                 goto unlock;
2887
2888         if (req->com.from_state != RES_EQ_HW) {
2889                 err = -EINVAL;
2890                 goto put;
2891         }
2892
2893         mailbox = mlx4_alloc_cmd_mailbox(dev);
2894         if (IS_ERR(mailbox)) {
2895                 err = PTR_ERR(mailbox);
2896                 goto put;
2897         }
2898
2899         if (eqe->type == MLX4_EVENT_TYPE_CMD) {
2900                 ++event_eq->token;
2901                 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
2902         }
2903
2904         memcpy(mailbox->buf, (u8 *) eqe, 28);
2905
2906         in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
2907
2908         err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
2909                        MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
2910                        MLX4_CMD_NATIVE);
2911
2912         put_res(dev, slave, res_id, RES_EQ);
2913         mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2914         mlx4_free_cmd_mailbox(dev, mailbox);
2915         return err;
2916
2917 put:
2918         put_res(dev, slave, res_id, RES_EQ);
2919
2920 unlock:
2921         mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2922         return err;
2923 }
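
/*
 * Editor's note: mlx4_GEN_EQE() is how the master reflects asynchronous
 * events into a slave: it looks up the EQ that the slave registered for
 * this event type and injects the 28 event bytes into it via the GEN_EQE
 * firmware command.  For command-interface events the token is regenerated
 * from the per-slave counter, since the master's token space is not
 * visible to the guest.
 */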
2924
2925 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
2926                           struct mlx4_vhcr *vhcr,
2927                           struct mlx4_cmd_mailbox *inbox,
2928                           struct mlx4_cmd_mailbox *outbox,
2929                           struct mlx4_cmd_info *cmd)
2930 {
2931         int eqn = vhcr->in_modifier;
2932         int res_id = eqn | (slave << 8);
2933         struct res_eq *eq;
2934         int err;
2935
2936         err = get_res(dev, slave, res_id, RES_EQ, &eq);
2937         if (err)
2938                 return err;
2939
2940         if (eq->com.from_state != RES_EQ_HW) {
2941                 err = -EINVAL;
2942                 goto ex_put;
2943         }
2944
2945         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2946
2947 ex_put:
2948         put_res(dev, slave, res_id, RES_EQ);
2949         return err;
2950 }
2951
2952 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2953                           struct mlx4_vhcr *vhcr,
2954                           struct mlx4_cmd_mailbox *inbox,
2955                           struct mlx4_cmd_mailbox *outbox,
2956                           struct mlx4_cmd_info *cmd)
2957 {
2958         int err;
2959         int cqn = vhcr->in_modifier;
2960         struct mlx4_cq_context *cqc = inbox->buf;
2961         int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2962         struct res_cq *cq;
2963         struct res_mtt *mtt;
2964
2965         err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
2966         if (err)
2967                 return err;
2968         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2969         if (err)
2970                 goto out_move;
2971         err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2972         if (err)
2973                 goto out_put;
2974         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2975         if (err)
2976                 goto out_put;
2977         atomic_inc(&mtt->ref_count);
2978         cq->mtt = mtt;
2979         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2980         res_end_move(dev, slave, RES_CQ, cqn);
2981         return 0;
2982
2983 out_put:
2984         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2985 out_move:
2986         res_abort_move(dev, slave, RES_CQ, cqn);
2987         return err;
2988 }
2989
2990 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2991                           struct mlx4_vhcr *vhcr,
2992                           struct mlx4_cmd_mailbox *inbox,
2993                           struct mlx4_cmd_mailbox *outbox,
2994                           struct mlx4_cmd_info *cmd)
2995 {
2996         int err;
2997         int cqn = vhcr->in_modifier;
2998         struct res_cq *cq;
2999
3000         err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3001         if (err)
3002                 return err;
3003         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3004         if (err)
3005                 goto out_move;
3006         atomic_dec(&cq->mtt->ref_count);
3007         res_end_move(dev, slave, RES_CQ, cqn);
3008         return 0;
3009
3010 out_move:
3011         res_abort_move(dev, slave, RES_CQ, cqn);
3012         return err;
3013 }
3014
3015 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3016                           struct mlx4_vhcr *vhcr,
3017                           struct mlx4_cmd_mailbox *inbox,
3018                           struct mlx4_cmd_mailbox *outbox,
3019                           struct mlx4_cmd_info *cmd)
3020 {
3021         int cqn = vhcr->in_modifier;
3022         struct res_cq *cq;
3023         int err;
3024
3025         err = get_res(dev, slave, cqn, RES_CQ, &cq);
3026         if (err)
3027                 return err;
3028         err = -EBUSY;
3029         if (cq->com.from_state != RES_CQ_HW)
3030                 goto ex_put;
3031
3032         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3033 ex_put:
3034         put_res(dev, slave, cqn, RES_CQ);
3035
3036         return err;
3037 }
3038
3039 static int handle_resize(struct mlx4_dev *dev, int slave,
3040                          struct mlx4_vhcr *vhcr,
3041                          struct mlx4_cmd_mailbox *inbox,
3042                          struct mlx4_cmd_mailbox *outbox,
3043                          struct mlx4_cmd_info *cmd,
3044                          struct res_cq *cq)
3045 {
3046         int err;
3047         struct res_mtt *orig_mtt;
3048         struct res_mtt *mtt;
3049         struct mlx4_cq_context *cqc = inbox->buf;
3050         int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3051
3052         err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3053         if (err)
3054                 return err;
3055
3056         if (orig_mtt != cq->mtt) {
3057                 err = -EINVAL;
3058                 goto ex_put;
3059         }
3060
3061         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3062         if (err)
3063                 goto ex_put;
3064
3065         err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3066         if (err)
3067                 goto ex_put1;
3068         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3069         if (err)
3070                 goto ex_put1;
3071         atomic_dec(&orig_mtt->ref_count);
3072         put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3073         atomic_inc(&mtt->ref_count);
3074         cq->mtt = mtt;
3075         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3076         return 0;
3077
3078 ex_put1:
3079         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3080 ex_put:
3081         put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3082 
3083         return err;
3084 }
3086
3087 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3088                            struct mlx4_vhcr *vhcr,
3089                            struct mlx4_cmd_mailbox *inbox,
3090                            struct mlx4_cmd_mailbox *outbox,
3091                            struct mlx4_cmd_info *cmd)
3092 {
3093         int cqn = vhcr->in_modifier;
3094         struct res_cq *cq;
3095         int err;
3096
3097         err = get_res(dev, slave, cqn, RES_CQ, &cq);
3098         if (err)
3099                 return err;
3100
3101         if (cq->com.from_state != RES_CQ_HW)
3102                 goto ex_put;
3103
3104         if (vhcr->op_modifier == 0) {
3105                 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3106                 goto ex_put;
3107         }
3108
3109         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3110 ex_put:
3111         put_res(dev, slave, cqn, RES_CQ);
3112
3113         return err;
3114 }
3115
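/*
 * srq_get_mtt_size() computes how many MTT entries back the SRQ buffer:
 * total bytes = 2^(log_srq_size + log_rq_stride + 4), where the +4
 * appears to reflect the 16-byte units of the encoded stride, divided by
 * the page size 2^page_shift, with a floor of one entry. A worked
 * example with hypothetical values: log_srq_size = 10, log_rq_stride = 2
 * and page_shift = 12 give 1 << (10 + 2 + 4 - 12) = 16 entries.
 */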
3116 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3117 {
3118         int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3119         int log_rq_stride = srqc->logstride & 7;
3120         int page_shift = (srqc->log_page_size & 0x3f) + 12;
3121
3122         if (log_srq_size + log_rq_stride + 4 < page_shift)
3123                 return 1;
3124
3125         return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3126 }
3127
3128 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3129                            struct mlx4_vhcr *vhcr,
3130                            struct mlx4_cmd_mailbox *inbox,
3131                            struct mlx4_cmd_mailbox *outbox,
3132                            struct mlx4_cmd_info *cmd)
3133 {
3134         int err;
3135         int srqn = vhcr->in_modifier;
3136         struct res_mtt *mtt;
3137         struct res_srq *srq;
3138         struct mlx4_srq_context *srqc = inbox->buf;
3139         int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3140
3141         if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3142                 return -EINVAL;
3143
3144         err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3145         if (err)
3146                 return err;
3147         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3148         if (err)
3149                 goto ex_abort;
3150         err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3151                               mtt);
3152         if (err)
3153                 goto ex_put_mtt;
3154
3155         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3156         if (err)
3157                 goto ex_put_mtt;
3158
3159         atomic_inc(&mtt->ref_count);
3160         srq->mtt = mtt;
3161         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3162         res_end_move(dev, slave, RES_SRQ, srqn);
3163         return 0;
3164
3165 ex_put_mtt:
3166         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3167 ex_abort:
3168         res_abort_move(dev, slave, RES_SRQ, srqn);
3169
3170         return err;
3171 }
3172
3173 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3174                            struct mlx4_vhcr *vhcr,
3175                            struct mlx4_cmd_mailbox *inbox,
3176                            struct mlx4_cmd_mailbox *outbox,
3177                            struct mlx4_cmd_info *cmd)
3178 {
3179         int err;
3180         int srqn = vhcr->in_modifier;
3181         struct res_srq *srq;
3182
3183         err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3184         if (err)
3185                 return err;
3186         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3187         if (err)
3188                 goto ex_abort;
3189         atomic_dec(&srq->mtt->ref_count);
3190         if (srq->cq)
3191                 atomic_dec(&srq->cq->ref_count);
3192         res_end_move(dev, slave, RES_SRQ, srqn);
3193
3194         return 0;
3195
3196 ex_abort:
3197         res_abort_move(dev, slave, RES_SRQ, srqn);
3198
3199         return err;
3200 }
3201
3202 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3203                            struct mlx4_vhcr *vhcr,
3204                            struct mlx4_cmd_mailbox *inbox,
3205                            struct mlx4_cmd_mailbox *outbox,
3206                            struct mlx4_cmd_info *cmd)
3207 {
3208         int err;
3209         int srqn = vhcr->in_modifier;
3210         struct res_srq *srq;
3211
3212         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3213         if (err)
3214                 return err;
3215         if (srq->com.from_state != RES_SRQ_HW) {
3216                 err = -EBUSY;
3217                 goto out;
3218         }
3219         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3220 out:
3221         put_res(dev, slave, srqn, RES_SRQ);
3222         return err;
3223 }
3224
3225 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3226                          struct mlx4_vhcr *vhcr,
3227                          struct mlx4_cmd_mailbox *inbox,
3228                          struct mlx4_cmd_mailbox *outbox,
3229                          struct mlx4_cmd_info *cmd)
3230 {
3231         int err;
3232         int srqn = vhcr->in_modifier;
3233         struct res_srq *srq;
3234
3235         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3236         if (err)
3237                 return err;
3238
3239         if (srq->com.from_state != RES_SRQ_HW) {
3240                 err = -EBUSY;
3241                 goto out;
3242         }
3243
3244         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3245 out:
3246         put_res(dev, slave, srqn, RES_SRQ);
3247         return err;
3248 }
3249
3250 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3251                         struct mlx4_vhcr *vhcr,
3252                         struct mlx4_cmd_mailbox *inbox,
3253                         struct mlx4_cmd_mailbox *outbox,
3254                         struct mlx4_cmd_info *cmd)
3255 {
3256         int err;
3257         int qpn = vhcr->in_modifier & 0x7fffff;
3258         struct res_qp *qp;
3259
3260         err = get_res(dev, slave, qpn, RES_QP, &qp);
3261         if (err)
3262                 return err;
3263         if (qp->com.from_state != RES_QP_HW) {
3264                 err = -EBUSY;
3265                 goto out;
3266         }
3267
3268         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3269 out:
3270         put_res(dev, slave, qpn, RES_QP);
3271         return err;
3272 }
3273
3274 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3275                               struct mlx4_vhcr *vhcr,
3276                               struct mlx4_cmd_mailbox *inbox,
3277                               struct mlx4_cmd_mailbox *outbox,
3278                               struct mlx4_cmd_info *cmd)
3279 {
3280         struct mlx4_qp_context *context = inbox->buf + 8;
3281         adjust_proxy_tun_qkey(dev, vhcr, context);
3282         update_pkey_index(dev, slave, inbox);
3283         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3284 }
3285
3286 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3287                              struct mlx4_vhcr *vhcr,
3288                              struct mlx4_cmd_mailbox *inbox,
3289                              struct mlx4_cmd_mailbox *outbox,
3290                              struct mlx4_cmd_info *cmd)
3291 {
3292         int err;
3293         struct mlx4_qp_context *qpc = inbox->buf + 8;
3294         int qpn = vhcr->in_modifier & 0x7fffff;
3295         struct res_qp *qp;
3296         u8 orig_sched_queue;
3297
3298         err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
3299         if (err)
3300                 return err;
3301
3302         update_pkey_index(dev, slave, inbox);
3303         update_gid(dev, inbox, (u8)slave);
3304         adjust_proxy_tun_qkey(dev, vhcr, qpc);
3305         orig_sched_queue = qpc->pri_path.sched_queue;
3306         err = update_vport_qp_param(dev, inbox, slave, qpn);
3307         if (err)
3308                 return err;
3309
3310         err = get_res(dev, slave, qpn, RES_QP, &qp);
3311         if (err)
3312                 return err;
3313         if (qp->com.from_state != RES_QP_HW) {
3314                 err = -EBUSY;
3315                 goto out;
3316         }
3317
3318         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3319 out:
3320         /* If no error, save the sched_queue value passed in by the VF. This
3321          * is essentially the QoS value provided by the VF, and will be
3322          * useful if we later allow dynamic changes from VST back to VGT.
3323          */
3324         if (!err)
3325                 qp->sched_queue = orig_sched_queue;
3326
3327         put_res(dev, slave, qpn, RES_QP);
3328         return err;
3329 }
3330
3331 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3332                             struct mlx4_vhcr *vhcr,
3333                             struct mlx4_cmd_mailbox *inbox,
3334                             struct mlx4_cmd_mailbox *outbox,
3335                             struct mlx4_cmd_info *cmd)
3336 {
3337         int err;
3338         struct mlx4_qp_context *context = inbox->buf + 8;
3339
3340         err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
3341         if (err)
3342                 return err;
3343
3344         update_pkey_index(dev, slave, inbox);
3345         update_gid(dev, inbox, (u8)slave);
3346         adjust_proxy_tun_qkey(dev, vhcr, context);
3347         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3348 }
3349
3350 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3351                             struct mlx4_vhcr *vhcr,
3352                             struct mlx4_cmd_mailbox *inbox,
3353                             struct mlx4_cmd_mailbox *outbox,
3354                             struct mlx4_cmd_info *cmd)
3355 {
3356         int err;
3357         struct mlx4_qp_context *context = inbox->buf + 8;
3358
3359         err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
3360         if (err)
3361                 return err;
3362
3363         update_pkey_index(dev, slave, inbox);
3364         update_gid(dev, inbox, (u8)slave);
3365         adjust_proxy_tun_qkey(dev, vhcr, context);
3366         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3367 }
3368
3369
3370 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3371                               struct mlx4_vhcr *vhcr,
3372                               struct mlx4_cmd_mailbox *inbox,
3373                               struct mlx4_cmd_mailbox *outbox,
3374                               struct mlx4_cmd_info *cmd)
3375 {
3376         struct mlx4_qp_context *context = inbox->buf + 8;
3377         adjust_proxy_tun_qkey(dev, vhcr, context);
3378         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3379 }
3380
3381 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3382                             struct mlx4_vhcr *vhcr,
3383                             struct mlx4_cmd_mailbox *inbox,
3384                             struct mlx4_cmd_mailbox *outbox,
3385                             struct mlx4_cmd_info *cmd)
3386 {
3387         int err;
3388         struct mlx4_qp_context *context = inbox->buf + 8;
3389
3390         err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
3391         if (err)
3392                 return err;
3393
3394         adjust_proxy_tun_qkey(dev, vhcr, context);
3395         update_gid(dev, inbox, (u8)slave);
3396         update_pkey_index(dev, slave, inbox);
3397         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3398 }
3399
3400 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3401                             struct mlx4_vhcr *vhcr,
3402                             struct mlx4_cmd_mailbox *inbox,
3403                             struct mlx4_cmd_mailbox *outbox,
3404                             struct mlx4_cmd_info *cmd)
3405 {
3406         int err;
3407         struct mlx4_qp_context *context = inbox->buf + 8;
3408
3409         err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
3410         if (err)
3411                 return err;
3412
3413         adjust_proxy_tun_qkey(dev, vhcr, context);
3414         update_gid(dev, inbox, (u8)slave);
3415         update_pkey_index(dev, slave, inbox);
3416         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3417 }
3418
3419 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3420                          struct mlx4_vhcr *vhcr,
3421                          struct mlx4_cmd_mailbox *inbox,
3422                          struct mlx4_cmd_mailbox *outbox,
3423                          struct mlx4_cmd_info *cmd)
3424 {
3425         int err;
3426         int qpn = vhcr->in_modifier & 0x7fffff;
3427         struct res_qp *qp;
3428
3429         err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3430         if (err)
3431                 return err;
3432         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3433         if (err)
3434                 goto ex_abort;
3435
3436         atomic_dec(&qp->mtt->ref_count);
3437         atomic_dec(&qp->rcq->ref_count);
3438         atomic_dec(&qp->scq->ref_count);
3439         if (qp->srq)
3440                 atomic_dec(&qp->srq->ref_count);
3441         res_end_move(dev, slave, RES_QP, qpn);
3442         return 0;
3443
3444 ex_abort:
3445         res_abort_move(dev, slave, RES_QP, qpn);
3446
3447         return err;
3448 }
3449
3450 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3451                                 struct res_qp *rqp, u8 *gid)
3452 {
3453         struct res_gid *res;
3454
3455         list_for_each_entry(res, &rqp->mcg_list, list) {
3456                 if (!memcmp(res->gid, gid, 16))
3457                         return res;
3458         }
3459         return NULL;
3460 }
3461
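/*
 * GID bookkeeping for multicast attach: each QP carries an mcg_list of
 * res_gid entries protected by the mcg_spl spinlock. Note the design
 * choice in add_mcg_res() below: the entry is allocated with GFP_KERNEL
 * *before* the spinlock is taken, since a sleeping allocation is not
 * allowed under spin_lock_irq(); a duplicate found under the lock is
 * simply freed again and -EEXIST returned.
 */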
3462 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3463                        u8 *gid, enum mlx4_protocol prot,
3464                        enum mlx4_steer_type steer, u64 reg_id)
3465 {
3466         struct res_gid *res;
3467         int err;
3468
3469         res = kzalloc(sizeof(*res), GFP_KERNEL);
3470         if (!res)
3471                 return -ENOMEM;
3472
3473         spin_lock_irq(&rqp->mcg_spl);
3474         if (find_gid(dev, slave, rqp, gid)) {
3475                 kfree(res);
3476                 err = -EEXIST;
3477         } else {
3478                 memcpy(res->gid, gid, 16);
3479                 res->prot = prot;
3480                 res->steer = steer;
3481                 res->reg_id = reg_id;
3482                 list_add_tail(&res->list, &rqp->mcg_list);
3483                 err = 0;
3484         }
3485         spin_unlock_irq(&rqp->mcg_spl);
3486
3487         return err;
3488 }
3489
3490 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3491                        u8 *gid, enum mlx4_protocol prot,
3492                        enum mlx4_steer_type steer, u64 *reg_id)
3493 {
3494         struct res_gid *res;
3495         int err;
3496
3497         spin_lock_irq(&rqp->mcg_spl);
3498         res = find_gid(dev, slave, rqp, gid);
3499         if (!res || res->prot != prot || res->steer != steer)
3500                 err = -EINVAL;
3501         else {
3502                 *reg_id = res->reg_id;
3503                 list_del(&res->list);
3504                 kfree(res);
3505                 err = 0;
3506         }
3507         spin_unlock_irq(&rqp->mcg_spl);
3508
3509         return err;
3510 }
3511
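/*
 * qp_attach()/qp_detach() dispatch on the device steering mode:
 * device-managed flow steering (DMFS) translates the legacy attach into a
 * flow rule and returns a reg_id for the later detach, while B0 steering
 * uses the common QP attach path. In the DMFS case, gid[5] appears to
 * carry the port number for the translated rule (hedged: inferred from
 * the call site, not verified against mlx4_trans_to_dmfs_attach()).
 */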
3512 static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3513                      int block_loopback, enum mlx4_protocol prot,
3514                      enum mlx4_steer_type type, u64 *reg_id)
3515 {
3516         switch (dev->caps.steering_mode) {
3517         case MLX4_STEERING_MODE_DEVICE_MANAGED:
3518                 return mlx4_trans_to_dmfs_attach(dev, qp, gid, gid[5],
3519                                                 block_loopback, prot,
3520                                                 reg_id);
3521         case MLX4_STEERING_MODE_B0:
3522                 return mlx4_qp_attach_common(dev, qp, gid,
3523                                             block_loopback, prot, type);
3524         default:
3525                 return -EINVAL;
3526         }
3527 }
3528
3529 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3530                      enum mlx4_protocol prot, enum mlx4_steer_type type,
3531                      u64 reg_id)
3532 {
3533         switch (dev->caps.steering_mode) {
3534         case MLX4_STEERING_MODE_DEVICE_MANAGED:
3535                 return mlx4_flow_detach(dev, reg_id);
3536         case MLX4_STEERING_MODE_B0:
3537                 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
3538         default:
3539                 return -EINVAL;
3540         }
3541 }
3542
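/*
 * QP_ATTACH mailbox encoding, as unpacked below: vhcr->in_modifier packs
 * the QPN in bits [23:0], the protocol in bits [30:28] and the
 * block-loopback flag in bit 31, while bit 1 of gid[7] selects the
 * steering type. A sketch of the decode:
 *
 *	qpn            = in_modifier & 0xffffff;
 *	prot           = (in_modifier >> 28) & 0x7;
 *	block_loopback = in_modifier >> 31;
 *	type           = (gid[7] & 0x2) >> 1;
 */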
3543 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3544                                struct mlx4_vhcr *vhcr,
3545                                struct mlx4_cmd_mailbox *inbox,
3546                                struct mlx4_cmd_mailbox *outbox,
3547                                struct mlx4_cmd_info *cmd)
3548 {
3549         struct mlx4_qp qp; /* dummy for calling attach/detach */
3550         u8 *gid = inbox->buf;
3551         enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
3552         int err;
3553         int qpn;
3554         struct res_qp *rqp;
3555         u64 reg_id = 0;
3556         int attach = vhcr->op_modifier;
3557         int block_loopback = vhcr->in_modifier >> 31;
3558         u8 steer_type_mask = 2;
3559         enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
3560
3561         qpn = vhcr->in_modifier & 0xffffff;
3562         err = get_res(dev, slave, qpn, RES_QP, &rqp);
3563         if (err)
3564                 return err;
3565
3566         qp.qpn = qpn;
3567         if (attach) {
3568                 err = qp_attach(dev, &qp, gid, block_loopback, prot,
3569                                 type, &reg_id);
3570                 if (err) {
3571                         pr_err("Failed to attach rule to qp 0x%x\n", qpn);
3572                         goto ex_put;
3573                 }
3574                 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
3575                 if (err)
3576                         goto ex_detach;
3577         } else {
3578                 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
3579                 if (err)
3580                         goto ex_put;
3581
3582                 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
3583                 if (err)
3584                         pr_err("Failed to detach rule from qp 0x%x reg_id = 0x%llx\n",
3585                                qpn, reg_id);
3586         }
3587         put_res(dev, slave, qpn, RES_QP);
3588         return err;
3589
3590 ex_detach:
3591         qp_detach(dev, &qp, gid, prot, type, reg_id);
3592 ex_put:
3593         put_res(dev, slave, qpn, RES_QP);
3594         return err;
3595 }
3596
3597 /*
3598  * MAC validation for Flow Steering rules.
3599  * A VF can attach rules only with a MAC address that is assigned to it.
3600  */
3601 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3602                                    struct list_head *rlist)
3603 {
3604         struct mac_res *res, *tmp;
3605         __be64 be_mac;
3606
3607         /* make sure it isn't a multicast or broadcast MAC */
3608         if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3609             !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3610                 list_for_each_entry_safe(res, tmp, rlist, list) {
3611                         be_mac = cpu_to_be64(res->mac << 16);
3612                         if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
3613                                 return 0;
3614                 }
3615                 pr_err("MAC %pM doesn't belong to VF %d, steering rule rejected\n",
3616                        eth_header->eth.dst_mac, slave);
3617                 return -EINVAL;
3618         }
3619         return 0;
3620 }
3621
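/*
 * A note on the MAC comparison above: res->mac holds the 48-bit address
 * in the low bytes of a host-order u64, so (res->mac << 16) left-aligns
 * it and cpu_to_be64() lays the six address bytes out first in memory,
 * letting memcmp() compare ETH_ALEN bytes directly against the rule's
 * dst_mac. For example, 00:11:22:33:44:55 is held as 0x001122334455,
 * shifted to 0x0011223344550000 and stored big-endian as the byte
 * sequence 00 11 22 33 44 55 00 00.
 */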
3622 /*
3623  * If the eth header is missing, prepend an eth header with a MAC address
3624  * assigned to the VF.
3625  */
3626 static int add_eth_header(struct mlx4_dev *dev, int slave,
3627                           struct mlx4_cmd_mailbox *inbox,
3628                           struct list_head *rlist, int header_id)
3629 {
3630         struct mac_res *res, *tmp;
3631         u8 port;
3632         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3633         struct mlx4_net_trans_rule_hw_eth *eth_header;
3634         struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3635         struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3636         __be64 be_mac = 0;
3637         __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3638
3639         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3640         port = ctrl->port;
3641         eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3642
3643         /* Make room in the inbox for the eth header */
3644         switch (header_id) {
3645         case MLX4_NET_TRANS_RULE_ID_IPV4:
3646                 ip_header =
3647                         (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3648                 memmove(ip_header, eth_header,
3649                         sizeof(*ip_header) + sizeof(*l4_header));
3650                 break;
3651         case MLX4_NET_TRANS_RULE_ID_TCP:
3652         case MLX4_NET_TRANS_RULE_ID_UDP:
3653                 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
3654                             (eth_header + 1);
3655                 memmove(l4_header, eth_header, sizeof(*l4_header));
3656                 break;
3657         default:
3658                 return -EINVAL;
3659         }
3660         list_for_each_entry_safe(res, tmp, rlist, list) {
3661                 if (port == res->port) {
3662                         be_mac = cpu_to_be64(res->mac << 16);
3663                         break;
3664                 }
3665         }
3666         if (!be_mac) {
3667                 pr_err("Failed adding eth header to FS rule; can't find a matching MAC for port %d\n",
3668                        port);
3669                 return -EINVAL;
3670         }
3671
3672         memset(eth_header, 0, sizeof(*eth_header));
3673         eth_header->size = sizeof(*eth_header) >> 2;
3674         eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
3675         memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
3676         memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
3677
3678         return 0;
3679
3680 }
3681
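/*
 * After add_eth_header() succeeds for, say, an IPv4/TCP rule, the inbox
 * holds [ctrl][eth][ipv4][tcp] instead of [ctrl][ipv4][tcp]; the attach
 * wrapper then grows vhcr->in_modifier by sizeof(eth header) >> 2, which
 * suggests the command length is counted in dwords (hedged: inferred
 * from the shift, not from documentation).
 */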
3682 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3683                                          struct mlx4_vhcr *vhcr,
3684                                          struct mlx4_cmd_mailbox *inbox,
3685                                          struct mlx4_cmd_mailbox *outbox,
3686                                          struct mlx4_cmd_info *cmd)
3687 {
3688
3689         struct mlx4_priv *priv = mlx4_priv(dev);
3690         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3691         struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
3692         int err;
3693         int qpn;
3694         struct res_qp *rqp;
3695         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3696         struct _rule_hw  *rule_header;
3697         int header_id;
3698
3699         if (dev->caps.steering_mode !=
3700             MLX4_STEERING_MODE_DEVICE_MANAGED)
3701                 return -EOPNOTSUPP;
3702
3703         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3704         qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
3705         err = get_res(dev, slave, qpn, RES_QP, &rqp);
3706         if (err) {
3707                 pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
3708                 return err;
3709         }
3710         rule_header = (struct _rule_hw *)(ctrl + 1);
3711         header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
3712
3713         switch (header_id) {
3714         case MLX4_NET_TRANS_RULE_ID_ETH:
3715                 if (validate_eth_header_mac(slave, rule_header, rlist)) {
3716                         err = -EINVAL;
3717                         goto err_put;
3718                 }
3719                 break;
3720         case MLX4_NET_TRANS_RULE_ID_IB:
3721                 break;
3722         case MLX4_NET_TRANS_RULE_ID_IPV4:
3723         case MLX4_NET_TRANS_RULE_ID_TCP:
3724         case MLX4_NET_TRANS_RULE_ID_UDP:
3725                 pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
3726                 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
3727                         err = -EINVAL;
3728                         goto err_put;
3729                 }
3730                 vhcr->in_modifier +=
3731                         sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
3732                 break;
3733         default:
3734                 pr_err("Corrupted mailbox.\n");
3735                 err = -EINVAL;
3736                 goto err_put;
3737         }
3738
3739         err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
3740                            vhcr->in_modifier, 0,
3741                            MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
3742                            MLX4_CMD_NATIVE);
3743         if (err)
3744                 goto err_put;
3745
3746         err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
3747         if (err) {
3748                 mlx4_err(dev, "Failed to add flow steering resources\n");
3749                 /* detach rule */
3750                 mlx4_cmd(dev, vhcr->out_param, 0, 0,
3751                          MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3752                          MLX4_CMD_NATIVE);
3753                 goto err_put;
3754         }
3755         atomic_inc(&rqp->ref_count);
3756 err_put:
3757         put_res(dev, slave, qpn, RES_QP);
3758         return err;
3759 }
3760
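/*
 * Lock ordering note for the detach wrapper below (hedged): the FS rule
 * is looked up and immediately released from its busy state before the
 * owning QP is taken busy, so the wrapper never holds two tracked
 * resources busy at once.
 */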
3761 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
3762                                          struct mlx4_vhcr *vhcr,
3763                                          struct mlx4_cmd_mailbox *inbox,
3764                                          struct mlx4_cmd_mailbox *outbox,
3765                                          struct mlx4_cmd_info *cmd)
3766 {
3767         int err;
3768         struct res_qp *rqp;
3769         struct res_fs_rule *rrule;
3770
3771         if (dev->caps.steering_mode !=
3772             MLX4_STEERING_MODE_DEVICE_MANAGED)
3773                 return -EOPNOTSUPP;
3774
3775         err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
3776         if (err)
3777                 return err;
3778         /* Release the rule from busy state before removal */
3779         put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
3780         err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
3781         if (err)
3782                 return err;
3783
3784         err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
3785         if (err) {
3786                 mlx4_err(dev, "Failed to remove flow steering resources\n");
3787                 goto out;
3788         }
3789
3790         err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
3791                        MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3792                        MLX4_CMD_NATIVE);
3793         if (!err)
3794                 atomic_dec(&rqp->ref_count);
3795 out:
3796         put_res(dev, slave, rrule->qpn, RES_QP);
3797         return err;
3798 }
3799
3800 enum {
3801         BUSY_MAX_RETRIES = 10
3802 };
3803
3804 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
3805                                struct mlx4_vhcr *vhcr,
3806                                struct mlx4_cmd_mailbox *inbox,
3807                                struct mlx4_cmd_mailbox *outbox,
3808                                struct mlx4_cmd_info *cmd)
3809 {
3810         int err;
3811         int index = vhcr->in_modifier & 0xffff;
3812
3813         err = get_res(dev, slave, index, RES_COUNTER, NULL);
3814         if (err)
3815                 return err;
3816
3817         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3818         put_res(dev, slave, index, RES_COUNTER);
3819         return err;
3820 }
3821
3822 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
3823 {
3824         struct res_gid *rgid;
3825         struct res_gid *tmp;
3826         struct mlx4_qp qp; /* dummy for calling attach/detach */
3827
3828         list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
3829                 switch (dev->caps.steering_mode) {
3830                 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3831                         mlx4_flow_detach(dev, rgid->reg_id);
3832                         break;
3833                 case MLX4_STEERING_MODE_B0:
3834                         qp.qpn = rqp->local_qpn;
3835                         (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
3836                                                      rgid->prot, rgid->steer);
3837                         break;
3838                 }
3839                 list_del(&rgid->list);
3840                 kfree(rgid);
3841         }
3842 }
3843
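/*
 * Slave teardown is two-phase: _move_all_busy() marks every resource the
 * slave owns as RES_ANY_BUSY with removing set, so no new state
 * transitions can start, and move_all_busy() keeps retrying that sweep
 * for up to five seconds (5 * HZ) with cond_resched() in between before
 * making one final pass that logs, at debug level, whatever is still
 * busy.
 */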
3844 static int _move_all_busy(struct mlx4_dev *dev, int slave,
3845                           enum mlx4_resource type, int print)
3846 {
3847         struct mlx4_priv *priv = mlx4_priv(dev);
3848         struct mlx4_resource_tracker *tracker =
3849                 &priv->mfunc.master.res_tracker;
3850         struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
3851         struct res_common *r;
3852         struct res_common *tmp;
3853         int busy;
3854
3855         busy = 0;
3856         spin_lock_irq(mlx4_tlock(dev));
3857         list_for_each_entry_safe(r, tmp, rlist, list) {
3858                 if (r->owner == slave) {
3859                         if (!r->removing) {
3860                                 if (r->state == RES_ANY_BUSY) {
3861                                         if (print)
3862                                                 mlx4_dbg(dev,
3863                                                          "%s id 0x%llx is busy\n",
3864                                                           ResourceType(type),
3865                                                           r->res_id);
3866                                         ++busy;
3867                                 } else {
3868                                         r->from_state = r->state;
3869                                         r->state = RES_ANY_BUSY;
3870                                         r->removing = 1;
3871                                 }
3872                         }
3873                 }
3874         }
3875         spin_unlock_irq(mlx4_tlock(dev));
3876
3877         return busy;
3878 }
3879
3880 static int move_all_busy(struct mlx4_dev *dev, int slave,
3881                          enum mlx4_resource type)
3882 {
3883         unsigned long begin;
3884         int busy;
3885
3886         begin = jiffies;
3887         do {
3888                 busy = _move_all_busy(dev, slave, type, 0);
3889                 if (time_after(jiffies, begin + 5 * HZ))
3890                         break;
3891                 if (busy)
3892                         cond_resched();
3893         } while (busy);
3894
3895         if (busy)
3896                 busy = _move_all_busy(dev, slave, type, 1);
3897
3898         return busy;
3899 }
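
/*
 * rem_slave_qps() unwinds each of the slave's QPs through its states in
 * reverse: RES_QP_HW issues 2RST_QP and drops the CQ/SRQ/MTT reference
 * counts, RES_QP_MAPPED frees the ICM (unless the QPN is a reserved
 * one), and RES_QP_RESERVED releases the range and returns quota via
 * mlx4_release_resource(). The rem_slave_*() helpers below all follow
 * this pattern.
 */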
3900 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
3901 {
3902         struct mlx4_priv *priv = mlx4_priv(dev);
3903         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3904         struct list_head *qp_list =
3905                 &tracker->slave_list[slave].res_list[RES_QP];
3906         struct res_qp *qp;
3907         struct res_qp *tmp;
3908         int state;
3909         u64 in_param;
3910         int qpn;
3911         int err;
3912
3913         err = move_all_busy(dev, slave, RES_QP);
3914         if (err)
3915                 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
3916                           slave);
3917
3918         spin_lock_irq(mlx4_tlock(dev));
3919         list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
3920                 spin_unlock_irq(mlx4_tlock(dev));
3921                 if (qp->com.owner == slave) {
3922                         qpn = qp->com.res_id;
3923                         detach_qp(dev, slave, qp);
3924                         state = qp->com.from_state;
3925                         while (state != 0) {
3926                                 switch (state) {
3927                                 case RES_QP_RESERVED:
3928                                         spin_lock_irq(mlx4_tlock(dev));
3929                                         rb_erase(&qp->com.node,
3930                                                  &tracker->res_tree[RES_QP]);
3931                                         list_del(&qp->com.list);
3932                                         spin_unlock_irq(mlx4_tlock(dev));
3933                                         if (!valid_reserved(dev, slave, qpn)) {
3934                                                 __mlx4_qp_release_range(dev, qpn, 1);
3935                                                 mlx4_release_resource(dev, slave,
3936                                                                       RES_QP, 1, 0);
3937                                         }
3938                                         kfree(qp);
3939                                         state = 0;
3940                                         break;
3941                                 case RES_QP_MAPPED:
3942                                         if (!valid_reserved(dev, slave, qpn))
3943                                                 __mlx4_qp_free_icm(dev, qpn);
3944                                         state = RES_QP_RESERVED;
3945                                         break;
3946                                 case RES_QP_HW:
3947                                         in_param = slave;
3948                                         err = mlx4_cmd(dev, in_param,
3949                                                        qp->local_qpn, 2,
3950                                                        MLX4_CMD_2RST_QP,
3951                                                        MLX4_CMD_TIME_CLASS_A,
3952                                                        MLX4_CMD_NATIVE);
3953                                         if (err)
3954                                                 mlx4_dbg(dev,
3955                                                          "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
3956                                                          slave,
3957                                                          qp->local_qpn);
3958                                         atomic_dec(&qp->rcq->ref_count);
3959                                         atomic_dec(&qp->scq->ref_count);
3960                                         atomic_dec(&qp->mtt->ref_count);
3961                                         if (qp->srq)
3962                                                 atomic_dec(&qp->srq->ref_count);
3963                                         state = RES_QP_MAPPED;
3964                                         break;
3965                                 default:
3966                                         state = 0;
3967                                 }
3968                         }
3969                 }
3970                 spin_lock_irq(mlx4_tlock(dev));
3971         }
3972         spin_unlock_irq(mlx4_tlock(dev));
3973 }
3974
3975 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
3976 {
3977         struct mlx4_priv *priv = mlx4_priv(dev);
3978         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3979         struct list_head *srq_list =
3980                 &tracker->slave_list[slave].res_list[RES_SRQ];
3981         struct res_srq *srq;
3982         struct res_srq *tmp;
3983         int state;
3984         u64 in_param;
3985         LIST_HEAD(tlist);
3986         int srqn;
3987         int err;
3988
3989         err = move_all_busy(dev, slave, RES_SRQ);
3990         if (err)
3991                 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to busy for slave %d\n",
3992                           slave);
3993
3994         spin_lock_irq(mlx4_tlock(dev));
3995         list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
3996                 spin_unlock_irq(mlx4_tlock(dev));
3997                 if (srq->com.owner == slave) {
3998                         srqn = srq->com.res_id;
3999                         state = srq->com.from_state;
4000                         while (state != 0) {
4001                                 switch (state) {
4002                                 case RES_SRQ_ALLOCATED:
4003                                         __mlx4_srq_free_icm(dev, srqn);
4004                                         spin_lock_irq(mlx4_tlock(dev));
4005                                         rb_erase(&srq->com.node,
4006                                                  &tracker->res_tree[RES_SRQ]);
4007                                         list_del(&srq->com.list);
4008                                         spin_unlock_irq(mlx4_tlock(dev));
4009                                         mlx4_release_resource(dev, slave,
4010                                                               RES_SRQ, 1, 0);
4011                                         kfree(srq);
4012                                         state = 0;
4013                                         break;
4014
4015                                 case RES_SRQ_HW:
4016                                         in_param = slave;
4017                                         err = mlx4_cmd(dev, in_param, srqn, 1,
4018                                                        MLX4_CMD_HW2SW_SRQ,
4019                                                        MLX4_CMD_TIME_CLASS_A,
4020                                                        MLX4_CMD_NATIVE);
4021                                         if (err)
4022                                                 mlx4_dbg(dev,
4023                                                          "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4024                                                          slave,
4025                                                          srqn);
4026
4027                                         atomic_dec(&srq->mtt->ref_count);
4028                                         if (srq->cq)
4029                                                 atomic_dec(&srq->cq->ref_count);
4030                                         state = RES_SRQ_ALLOCATED;
4031                                         break;
4032
4033                                 default:
4034                                         state = 0;
4035                                 }
4036                         }
4037                 }
4038                 spin_lock_irq(mlx4_tlock(dev));
4039         }
4040         spin_unlock_irq(mlx4_tlock(dev));
4041 }
4042
4043 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4044 {
4045         struct mlx4_priv *priv = mlx4_priv(dev);
4046         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4047         struct list_head *cq_list =
4048                 &tracker->slave_list[slave].res_list[RES_CQ];
4049         struct res_cq *cq;
4050         struct res_cq *tmp;
4051         int state;
4052         u64 in_param;
4053         LIST_HEAD(tlist);
4054         int cqn;
4055         int err;
4056
4057         err = move_all_busy(dev, slave, RES_CQ);
4058         if (err)
4059                 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to busy for slave %d\n",
4060                           slave);
4061
4062         spin_lock_irq(mlx4_tlock(dev));
4063         list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4064                 spin_unlock_irq(mlx4_tlock(dev));
4065                 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4066                         cqn = cq->com.res_id;
4067                         state = cq->com.from_state;
4068                         while (state != 0) {
4069                                 switch (state) {
4070                                 case RES_CQ_ALLOCATED:
4071                                         __mlx4_cq_free_icm(dev, cqn);
4072                                         spin_lock_irq(mlx4_tlock(dev));
4073                                         rb_erase(&cq->com.node,
4074                                                  &tracker->res_tree[RES_CQ]);
4075                                         list_del(&cq->com.list);
4076                                         spin_unlock_irq(mlx4_tlock(dev));
4077                                         mlx4_release_resource(dev, slave,
4078                                                               RES_CQ, 1, 0);
4079                                         kfree(cq);
4080                                         state = 0;
4081                                         break;
4082
4083                                 case RES_CQ_HW:
4084                                         in_param = slave;
4085                                         err = mlx4_cmd(dev, in_param, cqn, 1,
4086                                                        MLX4_CMD_HW2SW_CQ,
4087                                                        MLX4_CMD_TIME_CLASS_A,
4088                                                        MLX4_CMD_NATIVE);
4089                                         if (err)
4090                                                 mlx4_dbg(dev,
4091                                                          "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
4092                                                          slave,
4093                                                          cqn);
4094                                         atomic_dec(&cq->mtt->ref_count);
4095                                         state = RES_CQ_ALLOCATED;
4096                                         break;
4097
4098                                 default:
4099                                         state = 0;
4100                                 }
4101                         }
4102                 }
4103                 spin_lock_irq(mlx4_tlock(dev));
4104         }
4105         spin_unlock_irq(mlx4_tlock(dev));
4106 }
4107
4108 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4109 {
4110         struct mlx4_priv *priv = mlx4_priv(dev);
4111         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4112         struct list_head *mpt_list =
4113                 &tracker->slave_list[slave].res_list[RES_MPT];
4114         struct res_mpt *mpt;
4115         struct res_mpt *tmp;
4116         int state;
4117         u64 in_param;
4118         LIST_HEAD(tlist);
4119         int mptn;
4120         int err;
4121
4122         err = move_all_busy(dev, slave, RES_MPT);
4123         if (err)
4124                 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to busy for slave %d\n",
4125                           slave);
4126
4127         spin_lock_irq(mlx4_tlock(dev));
4128         list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4129                 spin_unlock_irq(mlx4_tlock(dev));
4130                 if (mpt->com.owner == slave) {
4131                         mptn = mpt->com.res_id;
4132                         state = mpt->com.from_state;
4133                         while (state != 0) {
4134                                 switch (state) {
4135                                 case RES_MPT_RESERVED:
4136                                         __mlx4_mpt_release(dev, mpt->key);
4137                                         spin_lock_irq(mlx4_tlock(dev));
4138                                         rb_erase(&mpt->com.node,
4139                                                  &tracker->res_tree[RES_MPT]);
4140                                         list_del(&mpt->com.list);
4141                                         spin_unlock_irq(mlx4_tlock(dev));
4142                                         mlx4_release_resource(dev, slave,
4143                                                               RES_MPT, 1, 0);
4144                                         kfree(mpt);
4145                                         state = 0;
4146                                         break;
4147
4148                                 case RES_MPT_MAPPED:
4149                                         __mlx4_mpt_free_icm(dev, mpt->key);
4150                                         state = RES_MPT_RESERVED;
4151                                         break;
4152
4153                                 case RES_MPT_HW:
4154                                         in_param = slave;
4155                                         err = mlx4_cmd(dev, in_param, mptn, 0,
4156                                                      MLX4_CMD_HW2SW_MPT,
4157                                                      MLX4_CMD_TIME_CLASS_A,
4158                                                      MLX4_CMD_NATIVE);
4159                                         if (err)
4160                                                 mlx4_dbg(dev,
4161                                                          "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
4162                                                          slave,
4163                                                          mptn);
4164                                         if (mpt->mtt)
4165                                                 atomic_dec(&mpt->mtt->ref_count);
4166                                         state = RES_MPT_MAPPED;
4167                                         break;
4168                                 default:
4169                                         state = 0;
4170                                 }
4171                         }
4172                 }
4173                 spin_lock_irq(mlx4_tlock(dev));
4174         }
4175         spin_unlock_irq(mlx4_tlock(dev));
4176 }
4177
4178 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4179 {
4180         struct mlx4_priv *priv = mlx4_priv(dev);
4181         struct mlx4_resource_tracker *tracker =
4182                 &priv->mfunc.master.res_tracker;
4183         struct list_head *mtt_list =
4184                 &tracker->slave_list[slave].res_list[RES_MTT];
4185         struct res_mtt *mtt;
4186         struct res_mtt *tmp;
4187         int state;
4188         LIST_HEAD(tlist);
4189         int base;
4190         int err;
4191
4192         err = move_all_busy(dev, slave, RES_MTT);
4193         if (err)
4194                 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to busy for slave %d\n",
4195                           slave);
4196
4197         spin_lock_irq(mlx4_tlock(dev));
4198         list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4199                 spin_unlock_irq(mlx4_tlock(dev));
4200                 if (mtt->com.owner == slave) {
4201                         base = mtt->com.res_id;
4202                         state = mtt->com.from_state;
4203                         while (state != 0) {
4204                                 switch (state) {
4205                                 case RES_MTT_ALLOCATED:
4206                                         __mlx4_free_mtt_range(dev, base,
4207                                                               mtt->order);
4208                                         spin_lock_irq(mlx4_tlock(dev));
4209                                         rb_erase(&mtt->com.node,
4210                                                  &tracker->res_tree[RES_MTT]);
4211                                         list_del(&mtt->com.list);
4212                                         spin_unlock_irq(mlx4_tlock(dev));
4213                                         mlx4_release_resource(dev, slave, RES_MTT,
4214                                                               1 << mtt->order, 0);
4215                                         kfree(mtt);
4216                                         state = 0;
4217                                         break;
4218
4219                                 default:
4220                                         state = 0;
4221                                 }
4222                         }
4223                 }
4224                 spin_lock_irq(mlx4_tlock(dev));
4225         }
4226         spin_unlock_irq(mlx4_tlock(dev));
4227 }
4228
4229 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
4230 {
4231         struct mlx4_priv *priv = mlx4_priv(dev);
4232         struct mlx4_resource_tracker *tracker =
4233                 &priv->mfunc.master.res_tracker;
4234         struct list_head *fs_rule_list =
4235                 &tracker->slave_list[slave].res_list[RES_FS_RULE];
4236         struct res_fs_rule *fs_rule;
4237         struct res_fs_rule *tmp;
4238         int state;
4239         u64 base;
4240         int err;
4241
4242         err = move_all_busy(dev, slave, RES_FS_RULE);
4243         if (err)
4244                 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
4245                           slave);
4246
4247         spin_lock_irq(mlx4_tlock(dev));
4248         list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
4249                 spin_unlock_irq(mlx4_tlock(dev));
4250                 if (fs_rule->com.owner == slave) {
4251                         base = fs_rule->com.res_id;
4252                         state = fs_rule->com.from_state;
4253                         while (state != 0) {
4254                                 switch (state) {
4255                                 case RES_FS_RULE_ALLOCATED:
4256                                         /* detach rule */
4257                                         err = mlx4_cmd(dev, base, 0, 0,
4258                                                        MLX4_QP_FLOW_STEERING_DETACH,
4259                                                        MLX4_CMD_TIME_CLASS_A,
4260                                                        MLX4_CMD_NATIVE);
4261
4262                                         spin_lock_irq(mlx4_tlock(dev));
4263                                         rb_erase(&fs_rule->com.node,
4264                                                  &tracker->res_tree[RES_FS_RULE]);
4265                                         list_del(&fs_rule->com.list);
4266                                         spin_unlock_irq(mlx4_tlock(dev));
4267                                         kfree(fs_rule);
4268                                         state = 0;
4269                                         break;
4270
4271                                 default:
4272                                         state = 0;
4273                                 }
4274                         }
4275                 }
4276                 spin_lock_irq(mlx4_tlock(dev));
4277         }
4278         spin_unlock_irq(mlx4_tlock(dev));
4279 }
4280
4281 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4282 {
4283         struct mlx4_priv *priv = mlx4_priv(dev);
4284         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4285         struct list_head *eq_list =
4286                 &tracker->slave_list[slave].res_list[RES_EQ];
4287         struct res_eq *eq;
4288         struct res_eq *tmp;
4289         int err;
4290         int state;
4291         LIST_HEAD(tlist);
4292         int eqn;
4293         struct mlx4_cmd_mailbox *mailbox;
4294
4295         err = move_all_busy(dev, slave, RES_EQ);
4296         if (err)
4297                 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to busy for slave %d\n",
4298                           slave);
4299
4300         spin_lock_irq(mlx4_tlock(dev));
4301         list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
4302                 spin_unlock_irq(mlx4_tlock(dev));
4303                 if (eq->com.owner == slave) {
4304                         eqn = eq->com.res_id;
4305                         state = eq->com.from_state;
4306                         while (state != 0) {
4307                                 switch (state) {
4308                                 case RES_EQ_RESERVED:
4309                                         spin_lock_irq(mlx4_tlock(dev));
4310                                         rb_erase(&eq->com.node,
4311                                                  &tracker->res_tree[RES_EQ]);
4312                                         list_del(&eq->com.list);
4313                                         spin_unlock_irq(mlx4_tlock(dev));
4314                                         kfree(eq);
4315                                         state = 0;
4316                                         break;
4317
4318                                 case RES_EQ_HW:
4319                                         mailbox = mlx4_alloc_cmd_mailbox(dev);
4320                                         if (IS_ERR(mailbox)) {
4321                                                 cond_resched();
4322                                                 continue;
4323                                         }
4324                                         err = mlx4_cmd_box(dev, slave, 0,
4325                                                            eqn & 0xff, 0,
4326                                                            MLX4_CMD_HW2SW_EQ,
4327                                                            MLX4_CMD_TIME_CLASS_A,
4328                                                            MLX4_CMD_NATIVE);
4329                                         if (err)
4330                                                 mlx4_dbg(dev,
4331                                                          "rem_slave_eqs: failed to move slave %d eq %d to SW ownership\n",
4332                                                          slave, eqn);
4333                                         mlx4_free_cmd_mailbox(dev, mailbox);
4334                                         atomic_dec(&eq->mtt->ref_count);
4335                                         state = RES_EQ_RESERVED;
4336                                         break;
4337
4338                                 default:
4339                                         state = 0;
4340                                 }
4341                         }
4342                 }
4343                 spin_lock_irq(mlx4_tlock(dev));
4344         }
4345         spin_unlock_irq(mlx4_tlock(dev));
4346 }
4347
4348 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
4349 {
4350         struct mlx4_priv *priv = mlx4_priv(dev);
4351         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4352         struct list_head *counter_list =
4353                 &tracker->slave_list[slave].res_list[RES_COUNTER];
4354         struct res_counter *counter;
4355         struct res_counter *tmp;
4356         int err;
4357         int index;
4358
4359         err = move_all_busy(dev, slave, RES_COUNTER);
4360         if (err)
4361                 mlx4_warn(dev, "rem_slave_counters: Could not move all counters to busy for slave %d\n",
4362                           slave);
4363
4364         spin_lock_irq(mlx4_tlock(dev));
4365         list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
4366                 if (counter->com.owner == slave) {
4367                         index = counter->com.res_id;
4368                         rb_erase(&counter->com.node,
4369                                  &tracker->res_tree[RES_COUNTER]);
4370                         list_del(&counter->com.list);
4371                         kfree(counter);
4372                         __mlx4_counter_free(dev, index);
4373                         mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
4374                 }
4375         }
4376         spin_unlock_irq(mlx4_tlock(dev));
4377 }
4378
static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *xrcdn_list =
		&tracker->slave_list[slave].res_list[RES_XRCD];
	struct res_xrcdn *xrcd;
	struct res_xrcdn *tmp;
	int err;
	int xrcdn;

	err = move_all_busy(dev, slave, RES_XRCD);
	if (err)
		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
		if (xrcd->com.owner == slave) {
			xrcdn = xrcd->com.res_id;
			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
			list_del(&xrcd->com.list);
			kfree(xrcd);
			__mlx4_xrcd_free(dev, xrcdn);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

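/* Tear down every resource a slave still owns, e.g. when the VF is reset
 * or shut down.  The ordering releases referencing objects before the
 * objects they point to: steering rules, VLANs and MACs first, then QPs
 * ahead of the SRQs, CQs and MTTs they may reference.
 */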
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
	rem_slave_vlans(dev, slave);
	rem_slave_macs(dev, slave);
	rem_slave_fs_rule(dev, slave);
	rem_slave_qps(dev, slave);
	rem_slave_srqs(dev, slave);
	rem_slave_cqs(dev, slave);
	rem_slave_mrs(dev, slave);
	rem_slave_eqs(dev, slave);
	rem_slave_mtts(dev, slave);
	rem_slave_counters(dev, slave);
	rem_slave_xrcdns(dev, slave);
	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}

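/* Deferred work that pushes an immediate VLAN/QoS change for a VF out to
 * all of its active QPs via UPDATE_QP.  Runs on the master (PF) only.
 */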
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
{
	struct mlx4_vf_immed_vlan_work *work =
		container_of(_work, struct mlx4_vf_immed_vlan_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_update_qp_context *upd_context;
	struct mlx4_dev *dev = &work->priv->dev;
	struct mlx4_resource_tracker *tracker =
		&work->priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[work->slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
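	/* QP context fields that UPDATE_QP is asked to modify here: the
	 * VLAN blocking controls, the VLAN index and the sched_queue.
	 */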
	u64 qp_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
	int err;
	int port, errors = 0;
	u8 vlan_control;

	if (mlx4_is_slave(dev)) {
		mlx4_warn(dev, "Trying to update-qp in slave %d\n",
			  work->slave);
		goto out;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto out;
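
	/* Choose the VLAN blocking policy: block all traffic when the VF
	 * link is forced down; with no VLAN configured, block only tagged
	 * frames; with a VLAN set (VST), the VF sends untagged frames and
	 * receives only frames carrying the hypervisor-assigned tag.
	 */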
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else if (!work->vlan_id)
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;

	upd_context = mailbox->buf;
	upd_context->primary_addr_path_mask = cpu_to_be64(qp_mask);
	upd_context->qp_context.pri_path.vlan_control = vlan_control;
	upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;

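	/* The tracker lock is dropped around each iteration: mlx4_cmd()
	 * sleeps while waiting for the firmware, which is not allowed
	 * under a spinlock.
	 */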
	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == work->slave) {
			if (qp->com.from_state != RES_QP_HW ||
			    !qp->sched_queue ||  /* no INIT2RTR trans yet */
			    mlx4_is_qp_reserved(dev, qp->local_qpn) ||
			    qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
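			/* Bit 6 of sched_queue encodes the port (0 based);
			 * only QPs on the port being updated are touched.
			 */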
			port = (qp->sched_queue >> 6 & 1) + 1;
			if (port != work->port) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
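			/* Keep the port and queue selection (0xC7 preserves
			 * bits 7:6 and 2:0) and splice the new QoS user
			 * priority into bits 5:3.
			 */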
			upd_context->qp_context.pri_path.sched_queue =
				qp->sched_queue & 0xC7;
			upd_context->qp_context.pri_path.sched_queue |=
				((work->qos & 0x7) << 3);

			err = mlx4_cmd(dev, mailbox->dma,
				       qp->local_qpn & 0xffffff,
				       0, MLX4_CMD_UPDATE_QP,
				       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
			if (err) {
				mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
					  work->slave, port, qp->local_qpn, err);
				errors++;
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
	mlx4_free_cmd_mailbox(dev, mailbox);

	if (errors)
		mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
			 errors, work->slave, work->port);

	/* unregister the previous vlan_id, if one was set, provided there
	 * were no errors while updating the QPs
	 */
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
	    work->orig_vlan_ix != NO_INDX)
		__mlx4_unregister_vlan(&work->priv->dev, work->port,
				       work->orig_vlan_id);
out:
	kfree(work);
}