/* bnx2x_sriov.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2013 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
 *             Ariel Elior <ariele@broadcom.com>
 *
 */
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"
#include <linux/crc32.h>
#include <linux/if_vlan.h>

/* General service functions */
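/* The two helpers below write per-function firmware state into the internal
 * memory of each of the four STORM processors (X, C, T and U), keyed by the
 * function's absolute FID: the VF-to-PF mapping and the function-enable
 * flag, respectively.
 */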
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
                                  u16 pf_id)
{
        REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
                                 u8 enable)
{
        REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
}

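/* Returns the index of the VF with the given absolute FID, or an
 * out-of-range index (BNX2X_NR_VIRTFN(bp)) if no such VF exists;
 * bnx2x_vf_by_abs_fid() below relies on this sentinel to return NULL.
 */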
int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
        int idx;

        for_each_vf(bp, idx)
                if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
                        break;
        return idx;
}

static
struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
        u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
        return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
}

static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                u8 igu_sb_id, u8 segment, u16 index, u8 op,
                                u8 update)
{
        /* acking a VF sb through the PF - use the GRC */
        u32 ctl;
        u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
        u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
        u32 func_encode = vf->abs_vfid;
        u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
        struct igu_regular cmd_data = {0};

        cmd_data.sb_id_and_flags =
                        ((index << IGU_REGULAR_SB_INDEX_SHIFT) |
                         (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
                         (update << IGU_REGULAR_BUPDATE_SHIFT) |
                         (op << IGU_REGULAR_ENABLE_INT_SHIFT));

        ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT         |
              func_encode << IGU_CTRL_REG_FID_SHIFT             |
              IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

        DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
           cmd_data.sb_id_and_flags, igu_addr_data);
        REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
        mmiowb();
        barrier();

        DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
           ctl, igu_addr_ctl);
        REG_WR(bp, igu_addr_ctl, ctl);
        mmiowb();
        barrier();
}

/* VFOP - VF slow-path operation support */

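/* A vfop is a small per-VF state machine used to run slow-path flows in
 * stages. Each handler below switches on vfop->state, issues the relevant
 * ramrod(s), and calls bnx2x_vfop_finalize() to either fall through to the
 * next state, yield until the ramrod completes, or terminate. Operations
 * are queued with bnx2x_vfop_opset()/bnx2x_vfop_transition() and carry a
 * struct bnx2x_vfop_cmd holding a 'done' callback (for chaining) and a
 * 'block' flag (to wait for completion synchronously).
 *
 * Illustrative sketch only - a blocking, unchained queue setup:
 *
 *      struct bnx2x_vfop_cmd cmd = {
 *              .done = NULL,           (no follow-up operation)
 *              .block = true,          (wait for the op to complete)
 *      };
 *      rc = bnx2x_vfop_qsetup_cmd(bp, vf, &cmd, qid);
 */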
#define BNX2X_VFOP_FILTER_ADD_CNT_MAX           0x10000

/* VFOP operation states */
enum bnx2x_vfop_qctor_state {
        BNX2X_VFOP_QCTOR_INIT,
        BNX2X_VFOP_QCTOR_SETUP,
        BNX2X_VFOP_QCTOR_INT_EN
};

enum bnx2x_vfop_qdtor_state {
        BNX2X_VFOP_QDTOR_HALT,
        BNX2X_VFOP_QDTOR_TERMINATE,
        BNX2X_VFOP_QDTOR_CFCDEL,
        BNX2X_VFOP_QDTOR_DONE
};

enum bnx2x_vfop_vlan_mac_state {
        BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
        BNX2X_VFOP_VLAN_MAC_CLEAR,
        BNX2X_VFOP_VLAN_MAC_CHK_DONE,
        BNX2X_VFOP_MAC_CONFIG_LIST,
        BNX2X_VFOP_VLAN_CONFIG_LIST,
        BNX2X_VFOP_VLAN_CONFIG_LIST_0
};

enum bnx2x_vfop_qsetup_state {
        BNX2X_VFOP_QSETUP_CTOR,
        BNX2X_VFOP_QSETUP_VLAN0,
        BNX2X_VFOP_QSETUP_DONE
};

enum bnx2x_vfop_mcast_state {
        BNX2X_VFOP_MCAST_DEL,
        BNX2X_VFOP_MCAST_ADD,
        BNX2X_VFOP_MCAST_CHK_DONE
};

enum bnx2x_vfop_qflr_state {
        BNX2X_VFOP_QFLR_CLR_VLAN,
        BNX2X_VFOP_QFLR_CLR_MAC,
        BNX2X_VFOP_QFLR_TERMINATE,
        BNX2X_VFOP_QFLR_DONE
};

enum bnx2x_vfop_flr_state {
        BNX2X_VFOP_FLR_QUEUES,
        BNX2X_VFOP_FLR_HW
};

enum bnx2x_vfop_close_state {
        BNX2X_VFOP_CLOSE_QUEUES,
        BNX2X_VFOP_CLOSE_HW
};

enum bnx2x_vfop_rxmode_state {
        BNX2X_VFOP_RXMODE_CONFIG,
        BNX2X_VFOP_RXMODE_DONE
};

enum bnx2x_vfop_qteardown_state {
        BNX2X_VFOP_QTEARDOWN_RXMODE,
        BNX2X_VFOP_QTEARDOWN_CLR_VLAN,
        BNX2X_VFOP_QTEARDOWN_CLR_MAC,
        BNX2X_VFOP_QTEARDOWN_QDTOR,
        BNX2X_VFOP_QTEARDOWN_DONE
};

#define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0)

void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
                              struct bnx2x_queue_init_params *init_params,
                              struct bnx2x_queue_setup_params *setup_params,
                              u16 q_idx, u16 sb_idx)
{
        DP(BNX2X_MSG_IOV,
           "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d",
           vf->abs_vfid,
           q_idx,
           sb_idx,
           init_params->tx.sb_cq_index,
           init_params->tx.hc_rate,
           setup_params->flags,
           setup_params->txq_params.traffic_type);
}

void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
                              struct bnx2x_queue_init_params *init_params,
                              struct bnx2x_queue_setup_params *setup_params,
                              u16 q_idx, u16 sb_idx)
{
        struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;

        DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
           "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
           vf->abs_vfid,
           q_idx,
           sb_idx,
           init_params->rx.sb_cq_index,
           init_params->rx.hc_rate,
           setup_params->gen_params.mtu,
           rxq_params->buf_sz,
           rxq_params->sge_buf_sz,
           rxq_params->max_sges_pkt,
           rxq_params->tpa_agg_sz,
           setup_params->flags,
           rxq_params->drop_flags,
           rxq_params->cache_line_log);
}

void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
                           struct bnx2x_virtf *vf,
                           struct bnx2x_vf_queue *q,
                           struct bnx2x_vfop_qctor_params *p,
                           unsigned long q_type)
{
        struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
        struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;

        /* INIT */

        /* Enable host coalescing in the transition to INIT state */
        if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
                __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);

        if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
                __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);

        /* FW SB ID */
        init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
        init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);

        /* context */
        init_p->cxts[0] = q->cxt;

        /* SETUP */

        /* Setup-op general parameters */
        setup_p->gen_params.spcl_id = vf->sp_cl_id;
        setup_p->gen_params.stat_id = vfq_stat_id(vf, q);

        /* Setup-op pause params:
         * Nothing to do, the pause thresholds are set by default to 0 which
         * effectively turns off the feature for this queue. We don't want
         * one queue (VF) interfering with another queue (another VF)
         */
        if (vf->cfg_flags & VF_CFG_FW_FC)
                BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
                          vf->abs_vfid);
        /* Setup-op flags:
         * collect statistics, zero statistics, local-switching, security,
         * OV for Flex10, RSS and MCAST for leading
         */
        if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
                __set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);

        /* for VFs, enable tx switching, bd coherency, and mac address
         * anti-spoofing
         */
        __set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
        __set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
        __set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);

        if (vfq_is_leading(q)) {
                __set_bit(BNX2X_Q_FLG_LEADING_RSS, &setup_p->flags);
                __set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
        }

        /* Setup-op rx parameters */
        if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
                struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;

                rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
                rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
                rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);

                if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
                        rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
        }

        /* Setup-op tx parameters */
        if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
                setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
                setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
        }
}

/* VFOP queue construction */
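/* Runs the queue through the INIT and SETUP ramrods and finally enables
 * its status-block interrupt via the IGU. If the queue is already active
 * the operation completes gracefully without error.
 */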
static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
        struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
        struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor;
        struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
        enum bnx2x_vfop_qctor_state state = vfop->state;

        bnx2x_vfop_reset_wq(vf);

        if (vfop->rc < 0)
                goto op_err;

        DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

        switch (state) {
        case BNX2X_VFOP_QCTOR_INIT:

                /* has this queue already been opened? */
                if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
                    BNX2X_Q_LOGICAL_STATE_ACTIVE) {
                        DP(BNX2X_MSG_IOV,
                           "Entered qctor but queue was already up. Aborting gracefully\n");
                        goto op_done;
                }

                /* next state */
                vfop->state = BNX2X_VFOP_QCTOR_SETUP;

                q_params->cmd = BNX2X_Q_CMD_INIT;
                vfop->rc = bnx2x_queue_state_change(bp, q_params);

                bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

        case BNX2X_VFOP_QCTOR_SETUP:
                /* next state */
                vfop->state = BNX2X_VFOP_QCTOR_INT_EN;

                /* copy pre-prepared setup params to the queue-state params */
                vfop->op_p->qctor.qstate.params.setup =
                        vfop->op_p->qctor.prep_qsetup;

                q_params->cmd = BNX2X_Q_CMD_SETUP;
                vfop->rc = bnx2x_queue_state_change(bp, q_params);

                bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

        case BNX2X_VFOP_QCTOR_INT_EN:

                /* enable interrupts */
                bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx),
                                    USTORM_ID, 0, IGU_INT_ENABLE, 0);
                goto op_done;
        default:
                bnx2x_vfop_default(state);
        }
op_err:
        BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n",
                  vf->abs_vfid, args->qid, q_params->cmd, vfop->rc);
op_done:
        bnx2x_vfop_end(bp, vf, vfop);
op_pending:
        return;
}

static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp,
                                struct bnx2x_virtf *vf,
                                struct bnx2x_vfop_cmd *cmd,
                                int qid)
{
        struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

        if (vfop) {
                vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);

                vfop->args.qctor.qid = qid;
                vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx);

                bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT,
                                 bnx2x_vfop_qctor, cmd->done);
                return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor,
                                             cmd->block);
        }
        return -ENOMEM;
}

/* VFOP queue destruction */
static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
        struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
        struct bnx2x_vfop_args_qdtor *qdtor = &vfop->args.qdtor;
        struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
        enum bnx2x_vfop_qdtor_state state = vfop->state;

        bnx2x_vfop_reset_wq(vf);

        if (vfop->rc < 0)
                goto op_err;

        DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

        switch (state) {
        case BNX2X_VFOP_QDTOR_HALT:

                /* has this queue already been stopped? */
                if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
                    BNX2X_Q_LOGICAL_STATE_STOPPED) {
                        DP(BNX2X_MSG_IOV,
                           "Entered qdtor but queue was already stopped. Aborting gracefully\n");
                        goto op_done;
                }

                /* next state */
                vfop->state = BNX2X_VFOP_QDTOR_TERMINATE;

                q_params->cmd = BNX2X_Q_CMD_HALT;
                vfop->rc = bnx2x_queue_state_change(bp, q_params);

                bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

        case BNX2X_VFOP_QDTOR_TERMINATE:
                /* next state */
                vfop->state = BNX2X_VFOP_QDTOR_CFCDEL;

                q_params->cmd = BNX2X_Q_CMD_TERMINATE;
                vfop->rc = bnx2x_queue_state_change(bp, q_params);

                bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

        case BNX2X_VFOP_QDTOR_CFCDEL:
                /* next state */
                vfop->state = BNX2X_VFOP_QDTOR_DONE;

                q_params->cmd = BNX2X_Q_CMD_CFC_DEL;
                vfop->rc = bnx2x_queue_state_change(bp, q_params);

                bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
        BNX2X_ERR("QDTOR[%d:%d] error: cmd %d, rc %d\n",
                  vf->abs_vfid, qdtor->qid, q_params->cmd, vfop->rc);
op_done:
        case BNX2X_VFOP_QDTOR_DONE:
                /* invalidate the context */
                qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
                qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
                bnx2x_vfop_end(bp, vf, vfop);
                return;
        default:
                bnx2x_vfop_default(state);
        }
op_pending:
        return;
}

static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
                                struct bnx2x_virtf *vf,
                                struct bnx2x_vfop_cmd *cmd,
                                int qid)
{
        struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

        if (vfop) {
                struct bnx2x_queue_state_params *qstate =
                        &vf->op_params.qctor.qstate;

                memset(qstate, 0, sizeof(*qstate));
                qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);

                vfop->args.qdtor.qid = qid;
                vfop->args.qdtor.cxt = bnx2x_vfq(vf, qid, cxt);

                bnx2x_vfop_opset(BNX2X_VFOP_QDTOR_HALT,
                                 bnx2x_vfop_qdtor, cmd->done);
                return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
                                             cmd->block);
        }
        DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop.\n", vf->abs_vfid);
        return -ENOMEM;
}

static void
bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
{
        struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
        if (vf) {
                if (!vf_sb_count(vf))
                        vf->igu_base_id = igu_sb_id;
                ++vf_sb_count(vf);
        }
}

/* VFOP MAC/VLAN helpers */
static inline void bnx2x_vfop_credit(struct bnx2x *bp,
                                     struct bnx2x_vfop *vfop,
                                     struct bnx2x_vlan_mac_obj *obj)
{
        struct bnx2x_vfop_args_filters *args = &vfop->args.filters;

        /* update credit only if there is no error
         * and a valid credit counter
         */
        if (!vfop->rc && args->credit) {
                struct list_head *pos;
                int read_lock;
                int cnt = 0;

                read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
                if (read_lock)
                        DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");

                list_for_each(pos, &obj->head)
                        cnt++;

                if (!read_lock)
                        bnx2x_vlan_mac_h_read_unlock(bp, obj);

                atomic_set(args->credit, cnt);
        }
}

static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
                                   struct bnx2x_vfop_filter *pos,
                                   struct bnx2x_vlan_mac_data *user_req)
{
        user_req->cmd = pos->add ? BNX2X_VLAN_MAC_ADD :
                BNX2X_VLAN_MAC_DEL;

        switch (pos->type) {
        case BNX2X_VFOP_FILTER_MAC:
                memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN);
                break;
        case BNX2X_VFOP_FILTER_VLAN:
                user_req->u.vlan.vlan = pos->vid;
                break;
        default:
                BNX2X_ERR("Invalid filter type, skipping\n");
                return 1;
        }
        return 0;
}

static int
bnx2x_vfop_config_vlan0(struct bnx2x *bp,
                        struct bnx2x_vlan_mac_ramrod_params *vlan_mac,
                        bool add)
{
        int rc;

        vlan_mac->user_req.cmd = add ? BNX2X_VLAN_MAC_ADD :
                BNX2X_VLAN_MAC_DEL;
        vlan_mac->user_req.u.vlan.vlan = 0;

        rc = bnx2x_config_vlan_mac(bp, vlan_mac);
        if (rc == -EEXIST)
                rc = 0;
        return rc;
}

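/* Apply a list of vlan/mac filter commands. Entries that were applied
 * successfully are moved to a rollback list; on failure, or if more rules
 * were added than filters->add_cnt allows, every applied entry is reversed
 * and -EINVAL is returned (unless a ramrod error code is already set). On
 * return, filters->add_cnt holds the net number of rules added.
 */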
static int bnx2x_vfop_config_list(struct bnx2x *bp,
                                  struct bnx2x_vfop_filters *filters,
                                  struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
{
        struct bnx2x_vfop_filter *pos, *tmp;
        struct list_head rollback_list, *filters_list = &filters->head;
        struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req;
        int rc = 0, cnt = 0;

        INIT_LIST_HEAD(&rollback_list);

        list_for_each_entry_safe(pos, tmp, filters_list, link) {
                if (bnx2x_vfop_set_user_req(bp, pos, user_req))
                        continue;

                rc = bnx2x_config_vlan_mac(bp, vlan_mac);
                if (rc >= 0) {
                        cnt += pos->add ? 1 : -1;
                        list_move(&pos->link, &rollback_list);
                        rc = 0;
                } else if (rc == -EEXIST) {
                        rc = 0;
                } else {
                        BNX2X_ERR("Failed to add a new vlan_mac command\n");
                        break;
                }
        }

        /* rollback if error or too many rules added */
        if (rc || cnt > filters->add_cnt) {
                BNX2X_ERR("error or too many rules added. Performing rollback\n");
                list_for_each_entry_safe(pos, tmp, &rollback_list, link) {
                        pos->add = !pos->add;   /* reverse op */
                        bnx2x_vfop_set_user_req(bp, pos, user_req);
                        bnx2x_config_vlan_mac(bp, vlan_mac);
                        list_del(&pos->link);
                }
                cnt = 0;
                if (!rc)
                        rc = -EINVAL;
        }
        filters->add_cnt = cnt;
        return rc;
}

/* VFOP set VLAN/MAC */
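/* A single state machine serves four entry points: clear-all
 * (BNX2X_VFOP_VLAN_MAC_CLEAR), single add/del (..._CONFIG_SINGLE), MAC list
 * config (..._MAC_CONFIG_LIST) and VLAN list config (..._VLAN_CONFIG_LIST).
 * The VLAN list flow temporarily removes vlan 0 and restores it if the list
 * ends up empty; all paths converge on BNX2X_VFOP_VLAN_MAC_CHK_DONE, which
 * waits until no command is pending.
 */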
static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
        struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
        struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac;
        struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj;
        struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter;

        enum bnx2x_vfop_vlan_mac_state state = vfop->state;

        if (vfop->rc < 0)
                goto op_err;

        DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

        bnx2x_vfop_reset_wq(vf);

        switch (state) {
        case BNX2X_VFOP_VLAN_MAC_CLEAR:
                /* next state */
                vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

                /* do delete */
                vfop->rc = obj->delete_all(bp, obj,
                                           &vlan_mac->user_req.vlan_mac_flags,
                                           &vlan_mac->ramrod_flags);

                bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

        case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE:
                /* next state */
                vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

                /* do config */
                vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
                if (vfop->rc == -EEXIST)
                        vfop->rc = 0;

                bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

        case BNX2X_VFOP_VLAN_MAC_CHK_DONE:
                vfop->rc = !!obj->raw.check_pending(&obj->raw);
                bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

        case BNX2X_VFOP_MAC_CONFIG_LIST:
                /* next state */
                vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

                /* do list config */
                vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
                if (vfop->rc)
                        goto op_err;

                set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
                vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
                bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

        case BNX2X_VFOP_VLAN_CONFIG_LIST:
                /* next state */
                vfop->state = BNX2X_VFOP_VLAN_CONFIG_LIST_0;

                /* remove vlan0 - could be no-op */
                vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, false);
                if (vfop->rc)
                        goto op_err;

                /* Do vlan list config. If this operation fails we try to
                 * restore vlan0 to keep the queue in working order
                 */
                vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
                if (!vfop->rc) {
                        set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
                        vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
                }
                bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); /* fall-through */

        case BNX2X_VFOP_VLAN_CONFIG_LIST_0:
                /* next state */
                vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

                if (list_empty(&obj->head))
                        /* add vlan0 */
                        vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, true);
                bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

        default:
                bnx2x_vfop_default(state);
        }
op_err:
        BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc);
op_done:
        kfree(filters);
        bnx2x_vfop_credit(bp, vfop, obj);
        bnx2x_vfop_end(bp, vf, vfop);
op_pending:
        return;
}

struct bnx2x_vfop_vlan_mac_flags {
        bool drv_only;
        bool dont_consume;
        bool single_cmd;
        bool add;
};

static void
bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
                                struct bnx2x_vfop_vlan_mac_flags *flags)
{
        struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req;

        memset(ramrod, 0, sizeof(*ramrod));

        /* ramrod flags */
        if (flags->drv_only)
                set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags);
        if (flags->single_cmd)
                set_bit(RAMROD_EXEC, &ramrod->ramrod_flags);

        /* mac_vlan flags */
        if (flags->dont_consume)
                set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags);

        /* cmd */
        ureq->cmd = flags->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL;
}

static inline void
bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
                           struct bnx2x_vfop_vlan_mac_flags *flags)
{
        bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags);
        set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags);
}

static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
                                     struct bnx2x_virtf *vf,
                                     struct bnx2x_vfop_cmd *cmd,
                                     int qid, bool drv_only)
{
        struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

        if (vfop) {
                struct bnx2x_vfop_args_filters filters = {
                        .multi_filter = NULL,   /* single */
                        .credit = NULL,         /* consume credit */
                };
                struct bnx2x_vfop_vlan_mac_flags flags = {
                        .drv_only = drv_only,
                        .dont_consume = (filters.credit != NULL),
                        .single_cmd = true,
                        .add = false /* don't care */,
                };
                struct bnx2x_vlan_mac_ramrod_params *ramrod =
                        &vf->op_params.vlan_mac;

                /* set ramrod params */
                bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);

                /* set object */
                ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);

                /* set extra args */
                vfop->args.filters = filters;

                bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
                                 bnx2x_vfop_vlan_mac, cmd->done);
                return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
                                             cmd->block);
        }
        return -ENOMEM;
}

int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
                            struct bnx2x_virtf *vf,
                            struct bnx2x_vfop_cmd *cmd,
                            struct bnx2x_vfop_filters *macs,
                            int qid, bool drv_only)
{
        struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

        if (vfop) {
                struct bnx2x_vfop_args_filters filters = {
                        .multi_filter = macs,
                        .credit = NULL,         /* consume credit */
                };
                struct bnx2x_vfop_vlan_mac_flags flags = {
                        .drv_only = drv_only,
                        .dont_consume = (filters.credit != NULL),
                        .single_cmd = false,
                        .add = false, /* don't care since only the items in the
                                       * filters list affect the sp operation,
                                       * not the list itself
                                       */
                };
                struct bnx2x_vlan_mac_ramrod_params *ramrod =
                        &vf->op_params.vlan_mac;

                /* set ramrod params */
                bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);

                /* set object */
                ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);

                /* set extra args */
                filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX;
                vfop->args.filters = filters;

                bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST,
                                 bnx2x_vfop_vlan_mac, cmd->done);
                return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
                                             cmd->block);
        }
        return -ENOMEM;
}

int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
                            struct bnx2x_virtf *vf,
                            struct bnx2x_vfop_cmd *cmd,
                            int qid, u16 vid, bool add)
{
        struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

        if (vfop) {
                struct bnx2x_vfop_args_filters filters = {
                        .multi_filter = NULL, /* single command */
                        .credit = &bnx2x_vfq(vf, qid, vlan_count),
                };
                struct bnx2x_vfop_vlan_mac_flags flags = {
                        .drv_only = false,
                        .dont_consume = (filters.credit != NULL),
                        .single_cmd = true,
                        .add = add,
                };
                struct bnx2x_vlan_mac_ramrod_params *ramrod =
                        &vf->op_params.vlan_mac;

                /* set ramrod params */
                bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
                ramrod->user_req.u.vlan.vlan = vid;

                /* set object */
                ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

                /* set extra args */
                vfop->args.filters = filters;

                bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
                                 bnx2x_vfop_vlan_mac, cmd->done);
                return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
                                             cmd->block);
        }
        return -ENOMEM;
}

static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
                                      struct bnx2x_virtf *vf,
                                      struct bnx2x_vfop_cmd *cmd,
                                      int qid, bool drv_only)
{
        struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

        if (vfop) {
                struct bnx2x_vfop_args_filters filters = {
                        .multi_filter = NULL, /* single command */
                        .credit = &bnx2x_vfq(vf, qid, vlan_count),
                };
                struct bnx2x_vfop_vlan_mac_flags flags = {
                        .drv_only = drv_only,
                        .dont_consume = (filters.credit != NULL),
                        .single_cmd = true,
                        .add = false, /* don't care */
                };
                struct bnx2x_vlan_mac_ramrod_params *ramrod =
                        &vf->op_params.vlan_mac;

                /* set ramrod params */
                bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);

                /* set object */
                ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

                /* set extra args */
                vfop->args.filters = filters;

                bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
                                 bnx2x_vfop_vlan_mac, cmd->done);
                return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
                                             cmd->block);
        }
        return -ENOMEM;
}

int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
                             struct bnx2x_virtf *vf,
                             struct bnx2x_vfop_cmd *cmd,
                             struct bnx2x_vfop_filters *vlans,
                             int qid, bool drv_only)
{
        struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

        if (vfop) {
                struct bnx2x_vfop_args_filters filters = {
                        .multi_filter = vlans,
                        .credit = &bnx2x_vfq(vf, qid, vlan_count),
                };
                struct bnx2x_vfop_vlan_mac_flags flags = {
                        .drv_only = drv_only,
                        .dont_consume = (filters.credit != NULL),
                        .single_cmd = false,
                        .add = false, /* don't care */
                };
                struct bnx2x_vlan_mac_ramrod_params *ramrod =
                        &vf->op_params.vlan_mac;

                /* set ramrod params */
                bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);

                /* set object */
                ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

                /* set extra args */
                filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) -
                        atomic_read(filters.credit);

                vfop->args.filters = filters;

                bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST,
                                 bnx2x_vfop_vlan_mac, cmd->done);
                return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
                                             cmd->block);
        }
        return -ENOMEM;
}

/* VFOP queue setup (queue constructor + set vlan 0) */
static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
        struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
        int qid = vfop->args.qctor.qid;
        enum bnx2x_vfop_qsetup_state state = vfop->state;
        struct bnx2x_vfop_cmd cmd = {
                .done = bnx2x_vfop_qsetup,
                .block = false,
        };

        if (vfop->rc < 0)
                goto op_err;

        DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

        switch (state) {
        case BNX2X_VFOP_QSETUP_CTOR:
                /* init the queue ctor command */
                vfop->state = BNX2X_VFOP_QSETUP_VLAN0;
                vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid);
                if (vfop->rc)
                        goto op_err;
                return;

        case BNX2X_VFOP_QSETUP_VLAN0:
                /* skip if non-leading or FPGA/EMU */
                if (qid)
                        goto op_done;

                /* init the queue set-vlan command (for vlan 0) */
                vfop->state = BNX2X_VFOP_QSETUP_DONE;
                vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true);
                if (vfop->rc)
                        goto op_err;
                return;
op_err:
        BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
op_done:
        case BNX2X_VFOP_QSETUP_DONE:
                vf->cfg_flags |= VF_CFG_VLAN;
                smp_mb__before_clear_bit();
                set_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
                        &bp->sp_rtnl_state);
                smp_mb__after_clear_bit();
                schedule_delayed_work(&bp->sp_rtnl_task, 0);
                bnx2x_vfop_end(bp, vf, vfop);
                return;
        default:
                bnx2x_vfop_default(state);
        }
}

int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
                          struct bnx2x_virtf *vf,
                          struct bnx2x_vfop_cmd *cmd,
                          int qid)
{
        struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

        if (vfop) {
                vfop->args.qctor.qid = qid;

                bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR,
                                 bnx2x_vfop_qsetup, cmd->done);
                return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup,
                                             cmd->block);
        }
        return -ENOMEM;
}

/* VFOP queue FLR handling (clear vlans, clear macs, queue destructor) */
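/* The vlan/mac clear stages below run in driver-only mode (drv_only ==
 * true, which sets RAMROD_DRV_CLR_ONLY): the driver's bookkeeping is
 * updated without sending per-rule ramrods, since the queue is being
 * cleaned up after an FLR.
 */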
static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
        struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
        int qid = vfop->args.qx.qid;
        enum bnx2x_vfop_qflr_state state = vfop->state;
        struct bnx2x_queue_state_params *qstate;
        struct bnx2x_vfop_cmd cmd;

        bnx2x_vfop_reset_wq(vf);

        if (vfop->rc < 0)
                goto op_err;

        DP(BNX2X_MSG_IOV, "VF[%d] STATE: %d\n", vf->abs_vfid, state);

        cmd.done = bnx2x_vfop_qflr;
        cmd.block = false;

        switch (state) {
        case BNX2X_VFOP_QFLR_CLR_VLAN:
                /* vlan-clear-all: driver-only, don't consume credit */
                vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
                vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true);
                if (vfop->rc)
                        goto op_err;
                return;

        case BNX2X_VFOP_QFLR_CLR_MAC:
                /* mac-clear-all: driver-only, consume credit */
                vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
                vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true);
                DP(BNX2X_MSG_IOV,
                   "VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d",
                   vf->abs_vfid, vfop->rc);
                if (vfop->rc)
                        goto op_err;
                return;

        case BNX2X_VFOP_QFLR_TERMINATE:
                qstate = &vfop->op_p->qctor.qstate;
                memset(qstate, 0, sizeof(*qstate));
                qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
                vfop->state = BNX2X_VFOP_QFLR_DONE;

                DP(BNX2X_MSG_IOV, "VF[%d] qstate during flr was %d\n",
                   vf->abs_vfid, qstate->q_obj->state);

                if (qstate->q_obj->state != BNX2X_Q_STATE_RESET) {
                        qstate->q_obj->state = BNX2X_Q_STATE_STOPPED;
                        qstate->cmd = BNX2X_Q_CMD_TERMINATE;
                        vfop->rc = bnx2x_queue_state_change(bp, qstate);
                        bnx2x_vfop_finalize(vf, vfop->rc, VFOP_VERIFY_PEND);
                } else {
                        goto op_done;
                }

op_err:
        BNX2X_ERR("QFLR[%d:%d] error: rc %d\n",
                  vf->abs_vfid, qid, vfop->rc);
op_done:
        case BNX2X_VFOP_QFLR_DONE:
                bnx2x_vfop_end(bp, vf, vfop);
                return;
        default:
                bnx2x_vfop_default(state);
        }
op_pending:
        return;
}

static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp,
                               struct bnx2x_virtf *vf,
                               struct bnx2x_vfop_cmd *cmd,
                               int qid)
{
        struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

        if (vfop) {
                vfop->args.qx.qid = qid;
                bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN,
                                 bnx2x_vfop_qflr, cmd->done);
                return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr,
                                             cmd->block);
        }
        return -ENOMEM;
}

/* VFOP multi-casts */
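/* Existing multicast filters are always deleted first; the new list (if
 * any) is then programmed, and the final state polls until no ramrod is
 * pending.
 */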
static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
        struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
        struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast;
        struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw;
        struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list;
        enum bnx2x_vfop_mcast_state state = vfop->state;
        int i;

        bnx2x_vfop_reset_wq(vf);

        if (vfop->rc < 0)
                goto op_err;

        DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

        switch (state) {
        case BNX2X_VFOP_MCAST_DEL:
                /* clear existing mcasts */
                vfop->state = BNX2X_VFOP_MCAST_ADD;
                vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
                bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

        case BNX2X_VFOP_MCAST_ADD:
                if (raw->check_pending(raw))
                        goto op_pending;

                if (args->mc_num) {
                        /* update mcast list on the ramrod params */
                        INIT_LIST_HEAD(&mcast->mcast_list);
                        for (i = 0; i < args->mc_num; i++)
                                list_add_tail(&(args->mc[i].link),
                                              &mcast->mcast_list);
                        /* add new mcasts */
                        vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
                        vfop->rc = bnx2x_config_mcast(bp, mcast,
                                                      BNX2X_MCAST_CMD_ADD);
                }
                bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

        case BNX2X_VFOP_MCAST_CHK_DONE:
                vfop->rc = raw->check_pending(raw) ? 1 : 0;
                bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
        default:
                bnx2x_vfop_default(state);
        }
op_err:
        BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc);
op_done:
        kfree(args->mc);
        bnx2x_vfop_end(bp, vf, vfop);
op_pending:
        return;
}

int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
                         struct bnx2x_virtf *vf,
                         struct bnx2x_vfop_cmd *cmd,
                         bnx2x_mac_addr_t *mcasts,
                         int mcast_num, bool drv_only)
{
        struct bnx2x_vfop *vfop = NULL;
        size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem);
        struct bnx2x_mcast_list_elem *mc = mc_sz ? kzalloc(mc_sz, GFP_KERNEL) :
                                           NULL;

        if (!mc_sz || mc) {
                vfop = bnx2x_vfop_add(bp, vf);
                if (vfop) {
                        int i;
                        struct bnx2x_mcast_ramrod_params *ramrod =
                                &vf->op_params.mcast;

                        /* set ramrod params */
                        memset(ramrod, 0, sizeof(*ramrod));
                        ramrod->mcast_obj = &vf->mcast_obj;
                        if (drv_only)
                                set_bit(RAMROD_DRV_CLR_ONLY,
                                        &ramrod->ramrod_flags);

                        /* copy mcasts pointers */
                        vfop->args.mc_list.mc_num = mcast_num;
                        vfop->args.mc_list.mc = mc;
                        for (i = 0; i < mcast_num; i++)
                                mc[i].mac = mcasts[i];

                        bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL,
                                         bnx2x_vfop_mcast, cmd->done);
                        return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast,
                                                     cmd->block);
                } else {
                        kfree(mc);
                }
        }
        return -ENOMEM;
}

/* VFOP rx-mode */
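/* The same accept_flags are applied to both the Rx and Tx sides of the
 * queue (RAMROD_RX and RAMROD_TX are both set below), so passing 0
 * ("accept none") drops traffic in both directions - this is what the
 * queue tear-down flow uses.
 */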
static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
        struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
        struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode;
        enum bnx2x_vfop_rxmode_state state = vfop->state;

        bnx2x_vfop_reset_wq(vf);

        if (vfop->rc < 0)
                goto op_err;

        DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

        switch (state) {
        case BNX2X_VFOP_RXMODE_CONFIG:
                /* next state */
                vfop->state = BNX2X_VFOP_RXMODE_DONE;

                vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
                bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
                BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc);
op_done:
        case BNX2X_VFOP_RXMODE_DONE:
                bnx2x_vfop_end(bp, vf, vfop);
                return;
        default:
                bnx2x_vfop_default(state);
        }
op_pending:
        return;
}

int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
                          struct bnx2x_virtf *vf,
                          struct bnx2x_vfop_cmd *cmd,
                          int qid, unsigned long accept_flags)
{
        struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
        struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

        if (vfop) {
                struct bnx2x_rx_mode_ramrod_params *ramrod =
                        &vf->op_params.rx_mode;

                memset(ramrod, 0, sizeof(*ramrod));

                /* Prepare ramrod parameters */
                ramrod->cid = vfq->cid;
                ramrod->cl_id = vfq_cl_id(vf, vfq);
                ramrod->rx_mode_obj = &bp->rx_mode_obj;
                ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);

                ramrod->rx_accept_flags = accept_flags;
                ramrod->tx_accept_flags = accept_flags;
                ramrod->pstate = &vf->filter_state;
                ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;

                set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
                set_bit(RAMROD_RX, &ramrod->ramrod_flags);
                set_bit(RAMROD_TX, &ramrod->ramrod_flags);

                ramrod->rdata =
                        bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
                ramrod->rdata_mapping =
                        bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);

                bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
                                 bnx2x_vfop_rxmode, cmd->done);
                return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode,
                                             cmd->block);
        }
        return -ENOMEM;
}

/* VFOP queue tear-down ('drop all' rx-mode, clear vlans, clear macs,
 * queue destructor)
 */
static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
        struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
        int qid = vfop->args.qx.qid;
        enum bnx2x_vfop_qteardown_state state = vfop->state;
        struct bnx2x_vfop_cmd cmd;

        if (vfop->rc < 0)
                goto op_err;

        DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

        cmd.done = bnx2x_vfop_qdown;
        cmd.block = false;

        switch (state) {
        case BNX2X_VFOP_QTEARDOWN_RXMODE:
                /* Drop all */
                vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
                vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0);
                if (vfop->rc)
                        goto op_err;
                return;

        case BNX2X_VFOP_QTEARDOWN_CLR_VLAN:
                /* vlan-clear-all: don't consume credit */
                vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MAC;
                vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, false);
                if (vfop->rc)
                        goto op_err;
                return;

        case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
                /* mac-clear-all: consume credit */
                vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
                vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
                if (vfop->rc)
                        goto op_err;
                return;

        case BNX2X_VFOP_QTEARDOWN_QDTOR:
                /* run the queue destruction flow */
                DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n");
                vfop->state = BNX2X_VFOP_QTEARDOWN_DONE;
                DP(BNX2X_MSG_IOV, "new state: BNX2X_VFOP_QTEARDOWN_DONE\n");
                vfop->rc = bnx2x_vfop_qdtor_cmd(bp, vf, &cmd, qid);
                DP(BNX2X_MSG_IOV, "returned from cmd\n");
                if (vfop->rc)
                        goto op_err;
                return;
op_err:
        BNX2X_ERR("QTEARDOWN[%d:%d] error: rc %d\n",
                  vf->abs_vfid, qid, vfop->rc);

        case BNX2X_VFOP_QTEARDOWN_DONE:
                bnx2x_vfop_end(bp, vf, vfop);
                return;
        default:
                bnx2x_vfop_default(state);
        }
}

int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
                         struct bnx2x_virtf *vf,
                         struct bnx2x_vfop_cmd *cmd,
                         int qid)
{
        struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

        if (vfop) {
                vfop->args.qx.qid = qid;
                bnx2x_vfop_opset(BNX2X_VFOP_QTEARDOWN_RXMODE,
                                 bnx2x_vfop_qdown, cmd->done);
                return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
                                             cmd->block);
        }

        return -ENOMEM;
}

/* VF enable primitives
 * when pretend is required, the caller is responsible
 * for calling pretend prior to calling these routines
 */
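/* Typical pretend pattern (as used by bnx2x_vf_enable_access() below):
 *
 *      bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
 *      ... GRC accesses now execute on behalf of the VF ...
 *      bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 *
 * The second call restores the PF context; it must not be skipped.
 */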

/* internal vf enable - until vf is enabled internally all transactions
 * are blocked. This routine should always be called last with pretend.
 */
static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
{
        REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
}

/* clears vf error in all semi blocks */
static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
        REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
        REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
        REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
        REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
}

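/* Clear the 'was error' indication for a VF in the PGLUE block. The per-VF
 * bits are spread across four 32-bit WAS_ERROR registers; the register is
 * selected by (2 * path + abs_vfid) >> 5 and the bit within it by the low
 * five bits of abs_vfid.
 */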
1368 static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
1369 {
1370         u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
1371         u32 was_err_reg = 0;
1372
1373         switch (was_err_group) {
1374         case 0:
1375             was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
1376             break;
1377         case 1:
1378             was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
1379             break;
1380         case 2:
1381             was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
1382             break;
1383         case 3:
1384             was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
1385             break;
1386         }
1387         REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
1388 }
1389
1390 static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
1391 {
1392         int i;
1393         u32 val;
1394
1395         /* Set VF masks and configuration - pretend */
1396         bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
1397
1398         REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
1399         REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
1400         REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
1401         REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
1402         REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
1403         REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
1404
1405         val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
1406         val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
1407         if (vf->cfg_flags & VF_CFG_INT_SIMD)
1408                 val |= IGU_VF_CONF_SINGLE_ISR_EN;
1409         val &= ~IGU_VF_CONF_PARENT_MASK;
1410         val |= BP_FUNC(bp) << IGU_VF_CONF_PARENT_SHIFT; /* parent PF */
1411         REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
1412
1413         DP(BNX2X_MSG_IOV,
1414            "value in IGU_REG_VF_CONFIGURATION of vf %d after write %x\n",
1415            vf->abs_vfid, REG_RD(bp, IGU_REG_VF_CONFIGURATION));
1416
1417         bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1418
1419         /* iterate over all queues, clear sb consumer */
1420         for (i = 0; i < vf_sb_count(vf); i++) {
1421                 u8 igu_sb_id = vf_igu_sb(vf, i);
1422
1423                 /* zero prod memory */
1424                 REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);
1425
1426                 /* clear sb state machine */
1427                 bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
1428                                        false /* VF */);
1429
1430                 /* disable + update */
1431                 bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
1432                                     IGU_INT_DISABLE, 1);
1433         }
1434 }
1435
1436 void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
1437 {
1438         /* set the VF-PF association in the FW */
1439         storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
1440         storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);
1441
1442         /* clear vf errors */
1443         bnx2x_vf_semi_clear_err(bp, abs_vfid);
1444         bnx2x_vf_pglue_clear_err(bp, abs_vfid);
1445
1446         /* internal vf-enable - pretend */
1447         bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
1448         DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
1449         bnx2x_vf_enable_internal(bp, true);
1450         bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1451 }
1452
1453 static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
1454 {
1455         /* Reset vf in IGU - interrupts are still disabled */
1456         bnx2x_vf_igu_reset(bp, vf);
1457
1458         /* pretend to enable the vf with the PBF */
1459         bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
1460         REG_WR(bp, PBF_REG_DISABLE_VF, 0);
1461         bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1462 }
1463
1464 static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
1465 {
1466         struct pci_dev *dev;
1467         struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
1468
1469         if (!vf)
1470                 return false;
1471
1472         dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
1473         if (dev)
1474                 return bnx2x_is_pcie_pending(dev);
1475         return false;
1476 }
1477
1478 int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
1479 {
1480         /* Verify no pending pci transactions */
1481         if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
1482                 BNX2X_ERR("PCIE Transactions still pending\n");
1483
1484         return 0;
1485 }
1486
1487 /* must be called after the number of PF queues and the number of VFs are
1488  * both known
1489  */
1490 static void
1491 bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
1492 {
1493         u16 vlan_count = 0;
1494
1495         /* will be set only during VF-ACQUIRE */
1496         resc->num_rxqs = 0;
1497         resc->num_txqs = 0;
1498
1499         /* no credit calculations for macs (just yet) */
1500         resc->num_mac_filters = 1;
1501
1502         /* divvy up vlan rules */
1503         vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
1504         vlan_count = 1 << ilog2(vlan_count);
1505         resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);
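        /* e.g. with hypothetical numbers: a pool of 96 vlan credits rounds
         * down to 64 (1 << ilog2(96)); 16 VFs then get 4 vlan filters each
         */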
1506
1507         /* no real limitation */
1508         resc->num_mc_filters = 0;
1509
1510         /* num_sbs already set */
1511 }
1512
1513 /* FLR routines: */
1514 static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
1515 {
1516         /* reset the state variables */
1517         bnx2x_iov_static_resc(bp, &vf->alloc_resc);
1518         vf->state = VF_FREE;
1519 }
1520
1521 static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
1522 {
1523         u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
1524
1525         /* DQ usage counter */
1526         bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
1527         bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
1528                                         "DQ VF usage counter timed out",
1529                                         poll_cnt);
1530         bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1531
1532         /* FW cleanup command - poll for the results */
1533         if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
1534                                    poll_cnt))
1535                 BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);
1536
1537         /* verify TX hw is flushed */
1538         bnx2x_tx_hw_flushed(bp, poll_cnt);
1539 }
1540
1541 static void bnx2x_vfop_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
1542 {
1543         struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
1544         struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
1545         enum bnx2x_vfop_flr_state state = vfop->state;
1546         struct bnx2x_vfop_cmd cmd = {
1547                 .done = bnx2x_vfop_flr,
1548                 .block = false,
1549         };
1550
1551         if (vfop->rc < 0)
1552                 goto op_err;
1553
1554         DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
1555
1556         switch (state) {
1557         case BNX2X_VFOP_FLR_QUEUES:
1558                 /* the cleanup operations are valid if and only if the VF
1559                  * was first acquired.
1560                  */
1561                 if (++(qx->qid) < vf_rxq_count(vf)) {
1562                         vfop->rc = bnx2x_vfop_qflr_cmd(bp, vf, &cmd,
1563                                                        qx->qid);
1564                         if (vfop->rc)
1565                                 goto op_err;
1566                         return;
1567                 }
1568                 /* remove multicasts */
1569                 vfop->state = BNX2X_VFOP_FLR_HW;
1570                 vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL,
1571                                                 0, true);
1572                 if (vfop->rc)
1573                         goto op_err;
1574                 return;
1575         case BNX2X_VFOP_FLR_HW:
1576
1577                 /* dispatch final cleanup and wait for HW queues to flush */
1578                 bnx2x_vf_flr_clnup_hw(bp, vf);
1579
1580                 /* release VF resources */
1581                 bnx2x_vf_free_resc(bp, vf);
1582
1583                 /* re-open the mailbox */
1584                 bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
1585
1586                 goto op_done;
1587         default:
1588                 bnx2x_vfop_default(state);
1589         }
1590 op_err:
1591         BNX2X_ERR("VF[%d] FLR error: rc %d\n", vf->abs_vfid, vfop->rc);
1592 op_done:
1593         vf->flr_clnup_stage = VF_FLR_ACK;
1594         bnx2x_vfop_end(bp, vf, vfop);
1595         bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
1596 }
1597
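/* Rough shape of the FLR state machine driven below; each step re-enters
 * bnx2x_vfop_flr() via cmd.done:
 *
 *	BNX2X_VFOP_FLR_QUEUES:	qflr queues 0..vf_rxq_count(vf)-1, one per
 *				pass, then remove the multicast list
 *	BNX2X_VFOP_FLR_HW:	final HW cleanup, free the VF resources and
 *				re-open the mailbox
 */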
1598 static int bnx2x_vfop_flr_cmd(struct bnx2x *bp,
1599                               struct bnx2x_virtf *vf,
1600                               vfop_handler_t done)
1601 {
1602         struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
1603         if (vfop) {
1604                 vfop->args.qx.qid = -1; /* loop */
1605                 bnx2x_vfop_opset(BNX2X_VFOP_FLR_QUEUES,
1606                                  bnx2x_vfop_flr, done);
1607                 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_flr, false);
1608         }
1609         return -ENOMEM;
1610 }
1611
1612 static void bnx2x_vf_flr_clnup(struct bnx2x *bp, struct bnx2x_virtf *prev_vf)
1613 {
1614         int i = prev_vf ? prev_vf->index + 1 : 0;
1615         struct bnx2x_virtf *vf;
1616
1617         /* find next VF to cleanup */
1618 next_vf_to_clean:
1619         for (;
1620              i < BNX2X_NR_VIRTFN(bp) &&
1621              (bnx2x_vf(bp, i, state) != VF_RESET ||
1622               bnx2x_vf(bp, i, flr_clnup_stage) != VF_FLR_CLN);
1623              i++)
1624                 ;
1625
1626         DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", i,
1627            BNX2X_NR_VIRTFN(bp));
1628
1629         if (i < BNX2X_NR_VIRTFN(bp)) {
1630                 vf = BP_VF(bp, i);
1631
1632                 /* lock the vf pf channel */
1633                 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
1634
1635                 /* invoke the VF FLR SM */
1636                 if (bnx2x_vfop_flr_cmd(bp, vf, bnx2x_vf_flr_clnup)) {
1637                         BNX2X_ERR("VF[%d]: FLR cleanup failed -ENOMEM\n",
1638                                   vf->abs_vfid);
1639
1640                         /* mark the VF to be ACKED and continue */
1641                         vf->flr_clnup_stage = VF_FLR_ACK;
1642                         goto next_vf_to_clean;
1643                 }
1644                 return;
1645         }
1646
1647         /* we are done, update vf records */
1648         for_each_vf(bp, i) {
1649                 vf = BP_VF(bp, i);
1650
1651                 if (vf->flr_clnup_stage != VF_FLR_ACK)
1652                         continue;
1653
1654                 vf->flr_clnup_stage = VF_FLR_EPILOG;
1655         }
1656
1657         /* Acknowledge the handled VFs.
1658          * We acknowledge all the vfs for which an flr was requested, even
1659          * those we never opened, since otherwise the mcp will interrupt us
1660          * again immediately if we only ack some of the bits, resulting in an
1661          * endless loop. This can happen for example in KVM, where an
1662          * 'all ones' flr request is sometimes given by the hypervisor
1663          */
1664         DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
1665            bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
1666         for (i = 0; i < FLRD_VFS_DWORDS; i++)
1667                 SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
1668                           bp->vfdb->flrd_vfs[i]);
1669
1670         bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);
1671
1672         /* clear the acked bits - better yet if the MCP implemented
1673          * write-to-clear semantics
1674          */
1675         for (i = 0; i < FLRD_VFS_DWORDS; i++)
1676                 SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
1677 }
1678
1679 void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
1680 {
1681         int i;
1682
1683         /* Read FLR'd VFs */
1684         for (i = 0; i < FLRD_VFS_DWORDS; i++)
1685                 bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);
1686
1687         DP(BNX2X_MSG_MCP,
1688            "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
1689            bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
1690
1691         for_each_vf(bp, i) {
1692                 struct bnx2x_virtf *vf = BP_VF(bp, i);
1693                 u32 reset = 0;
1694
1695                 if (vf->abs_vfid < 32)
1696                         reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
1697                 else
1698                         reset = bp->vfdb->flrd_vfs[1] &
1699                                 (1 << (vf->abs_vfid - 32));
1700
1701                 if (reset) {
1702                         /* set as reset and ready for cleanup */
1703                         vf->state = VF_RESET;
1704                         vf->flr_clnup_stage = VF_FLR_CLN;
1705
1706                         DP(BNX2X_MSG_IOV,
1707                            "Initiating Final cleanup for VF %d\n",
1708                            vf->abs_vfid);
1709                 }
1710         }
1711
1712         /* do the FLR cleanup for all marked VFs */
1713         bnx2x_vf_flr_clnup(bp, NULL);
1714 }
1715
1716 /* IOV global initialization routines  */
1717 void bnx2x_iov_init_dq(struct bnx2x *bp)
1718 {
1719         if (!IS_SRIOV(bp))
1720                 return;
1721
1722         /* Set the DQ such that the CID reflects the abs_vfid */
1723         REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
1724         REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));
1725
1726         /* Set VFs starting CID. If it is > 0, the preceding CIDs belong to
1727          * the PF L2 queues
1728          */
1729         REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);
1730
1731         /* The VF window size is the log2 of the max number of CIDs per VF */
1732         REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);
1733
1734         /* The VF doorbell size: 0 - 8B, 4 - 128B. We set it here to match
1735          * the PF doorbell size although the two are independent.
1736          */
1737         REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST,
1738                BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);
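        /* e.g. assuming BNX2X_DB_MIN_SHIFT of 3 (8B) and BNX2X_DB_SHIFT of
         * 7 (128B), the value written above is 7 - 3 = 4, i.e. 128B doorbells
         */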
1739
1740         /* No security checks for now -
1741          * configure single rule (out of 16) mask = 0x1, value = 0x0,
1742          * CID range 0 - 0x1ffff
1743          */
1744         REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
1745         REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
1746         REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
1747         REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
1748
1749         /* set the number of VF allowed doorbells to the full DQ range */
1750         REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);
1751
1752         /* set the VF doorbell threshold */
1753         REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
1754 }
1755
1756 void bnx2x_iov_init_dmae(struct bnx2x *bp)
1757 {
1758         DP(BNX2X_MSG_IOV, "SRIOV is %s\n", IS_SRIOV(bp) ? "ON" : "OFF");
1759         if (!IS_SRIOV(bp))
1760                 return;
1761
1762         REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
1763 }
1764
1765 static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
1766 {
1767         struct pci_dev *dev = bp->pdev;
1768         struct bnx2x_sriov *iov = &bp->vfdb->sriov;
1769
1770         return dev->bus->number + ((dev->devfn + iov->offset +
1771                                     iov->stride * vfid) >> 8);
1772 }
1773
1774 static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
1775 {
1776         struct pci_dev *dev = bp->pdev;
1777         struct bnx2x_sriov *iov = &bp->vfdb->sriov;
1778
1779         return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
1780 }
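
/* Worked example with a hypothetical SR-IOV config: a PF at devfn 0x00 with
 * offset 8 and stride 2 places VF 70 at devfn (8 + 2 * 70) & 0xff = 0x94,
 * on bus pf_bus + (148 >> 8) = pf_bus + 0.
 */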
1781
1782 static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
1783 {
1784         int i, n;
1785         struct pci_dev *dev = bp->pdev;
1786         struct bnx2x_sriov *iov = &bp->vfdb->sriov;
1787
1788         for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
1789                 u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
1790                 u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);
1791
1792                 size /= iov->total;
1793                 vf->bars[n].bar = start + size * vf->abs_vfid;
1794                 vf->bars[n].size = size;
1795         }
1796 }
1797
1798 static int bnx2x_ari_enabled(struct pci_dev *dev)
1799 {
1800         return dev->bus->self && dev->bus->self->ari_enabled;
1801 }
1802
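/* Scan the IGU CAM and record, per status block, which VF owns it. Assuming
 * the usual fid encoding (IS_PF flag in bit 6, VF number in bits 5:0, PF
 * number in bits 2:0), a fid of 0x22 denotes VF 34 while 0x45 denotes PF 5.
 */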
1803 static void
1804 bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
1805 {
1806         int sb_id;
1807         u32 val;
1808         u8 fid;
1809
1810         /* IGU in normal mode - read CAM */
1811         for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
1812                 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
1813                 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
1814                         continue;
1815                 fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
1816                 if (!(fid & IGU_FID_ENCODE_IS_PF))
1817                         bnx2x_vf_set_igu_info(bp, sb_id,
1818                                               (fid & IGU_FID_VF_NUM_MASK));
1819
1820                 DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
1821                    ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
1822                    ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
1823                    (fid & IGU_FID_VF_NUM_MASK)), sb_id,
1824                    GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
1825         }
1826 }
1827
1828 static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
1829 {
1830         if (bp->vfdb) {
1831                 kfree(bp->vfdb->vfqs);
1832                 kfree(bp->vfdb->vfs);
1833                 kfree(bp->vfdb);
1834         }
1835         bp->vfdb = NULL;
1836 }
1837
1838 static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
1839 {
1840         int pos;
1841         struct pci_dev *dev = bp->pdev;
1842
1843         pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
1844         if (!pos) {
1845                 BNX2X_ERR("failed to find SRIOV capability in device\n");
1846                 return -ENODEV;
1847         }
1848
1849         iov->pos = pos;
1850         DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
1851         pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
1852         pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
1853         pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
1854         pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
1855         pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
1856         pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
1857         pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
1858         pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
1859
1860         return 0;
1861 }
1862
1863 static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
1864 {
1865         u32 val;
1866
1867         /* read the SRIOV capability structure
1868          * The fields can be read via configuration read or
1869          * directly from the device (starting at offset PCICFG_OFFSET)
1870          */
1871         if (bnx2x_sriov_pci_cfg_info(bp, iov))
1872                 return -ENODEV;
1873
1874         /* get the number of SRIOV bars */
1875         iov->nres = 0;
1876
1877         /* read the first_vfid */
1878         val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
1879         iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
1880                                * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));
1881
1882         DP(BNX2X_MSG_IOV,
1883            "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
1884            BP_FUNC(bp),
1885            iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
1886            iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
1887
1888         return 0;
1889 }
1890
1891 static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
1892 {
1893         int i;
1894         u8 queue_count = 0;
1895
1896         if (IS_SRIOV(bp))
1897                 for_each_vf(bp, i)
1898                         queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);
1899
1900         return queue_count;
1901 }
1902
1903 /* must be called after PF bars are mapped */
1904 int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
1905                         int num_vfs_param)
1906 {
1907         int err, i, qcount;
1908         struct bnx2x_sriov *iov;
1909         struct pci_dev *dev = bp->pdev;
1910
1911         bp->vfdb = NULL;
1912
1913         /* verify this is a pf */
1914         if (IS_VF(bp))
1915                 return 0;
1916
1917         /* verify sriov capability is present in configuration space */
1918         if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
1919                 return 0;
1920
1921         /* verify chip revision */
1922         if (CHIP_IS_E1x(bp))
1923                 return 0;
1924
1925         /* check if SRIOV support is turned off */
1926         if (!num_vfs_param)
1927                 return 0;
1928
1929         /* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
1930         if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
1931                 BNX2X_ERR("PF cids %d spill over into vf space (starts at %d). Abort SRIOV\n",
1932                           BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
1933                 return 0;
1934         }
1935
1936         /* SRIOV can be enabled only with MSIX */
1937         if (int_mode_param == BNX2X_INT_MODE_MSI ||
1938             int_mode_param == BNX2X_INT_MODE_INTX) {
1939                 BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
1940                 return 0;
1941         }
1942
1943         err = -EIO;
1944         /* verify ari is enabled */
1945         if (!bnx2x_ari_enabled(bp->pdev)) {
1946                 BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV cannot be enabled\n");
1947                 return 0;
1948         }
1949
1950         /* verify igu is in normal mode */
1951         if (CHIP_INT_MODE_IS_BC(bp)) {
1952                 BNX2X_ERR("IGU not in normal mode, SRIOV cannot be enabled\n");
1953                 return 0;
1954         }
1955
1956         /* allocate the vfs database */
1957         bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
1958         if (!bp->vfdb) {
1959                 BNX2X_ERR("failed to allocate vf database\n");
1960                 err = -ENOMEM;
1961                 goto failed;
1962         }
1963
1964         /* get the sriov info - Linux already collected all the pertinent
1965          * information; however, the sriov structure is for the private use
1966          * of the pci module. Also, we want this information regardless
1967          * of the hypervisor.
1968          */
1969         iov = &(bp->vfdb->sriov);
1970         err = bnx2x_sriov_info(bp, iov);
1971         if (err)
1972                 goto failed;
1973
1974         /* SR-IOV capability was enabled but there are no VFs */
1975         if (iov->total == 0)
1976                 goto failed;
1977
1978         iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);
1979
1980         DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n",
1981            num_vfs_param, iov->nr_virtfn);
1982
1983         /* allocate the vf array */
1984         bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
1985                                 BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
1986         if (!bp->vfdb->vfs) {
1987                 BNX2X_ERR("failed to allocate vf array\n");
1988                 err = -ENOMEM;
1989                 goto failed;
1990         }
1991
1992         /* Initial VF init - index and abs_vfid - nr_virtfn must be set */
1993         for_each_vf(bp, i) {
1994                 bnx2x_vf(bp, i, index) = i;
1995                 bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
1996                 bnx2x_vf(bp, i, state) = VF_FREE;
1997                 INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
1998                 mutex_init(&bnx2x_vf(bp, i, op_mutex));
1999                 bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
2000         }
2001
2002         /* re-read the IGU CAM for VFs - index and abs_vfid must be set */
2003         bnx2x_get_vf_igu_cam_info(bp);
2004
2005         /* get the total queue count and allocate the global queue arrays */
2006         qcount = bnx2x_iov_get_max_queue_count(bp);
2007
2008         /* allocate the queue arrays for all VFs */
2009         bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue),
2010                                  GFP_KERNEL);
2011         if (!bp->vfdb->vfqs) {
2012                 BNX2X_ERR("failed to allocate vf queue array\n");
2013                 err = -ENOMEM;
2014                 goto failed;
2015         }
2016
2017         return 0;
2018 failed:
2019         DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
2020         __bnx2x_iov_free_vfdb(bp);
2021         return err;
2022 }
2023
2024 void bnx2x_iov_remove_one(struct bnx2x *bp)
2025 {
2026         /* if SRIOV is not enabled there's nothing to do */
2027         if (!IS_SRIOV(bp))
2028                 return;
2029
2030         DP(BNX2X_MSG_IOV, "about to call disable sriov\n");
2031         pci_disable_sriov(bp->pdev);
2032         DP(BNX2X_MSG_IOV, "sriov disabled\n");
2033
2034         /* free vf database */
2035         __bnx2x_iov_free_vfdb(bp);
2036 }
2037
2038 void bnx2x_iov_free_mem(struct bnx2x *bp)
2039 {
2040         int i;
2041
2042         if (!IS_SRIOV(bp))
2043                 return;
2044
2045         /* free vfs hw contexts */
2046         for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
2047                 struct hw_dma *cxt = &bp->vfdb->context[i];
2048                 BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
2049         }
2050
2051         BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
2052                        BP_VFDB(bp)->sp_dma.mapping,
2053                        BP_VFDB(bp)->sp_dma.size);
2054
2055         BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
2056                        BP_VF_MBX_DMA(bp)->mapping,
2057                        BP_VF_MBX_DMA(bp)->size);
2058
2059         BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr,
2060                        BP_VF_BULLETIN_DMA(bp)->mapping,
2061                        BP_VF_BULLETIN_DMA(bp)->size);
2062 }
2063
2064 int bnx2x_iov_alloc_mem(struct bnx2x *bp)
2065 {
2066         size_t tot_size;
2067         int i, rc = 0;
2068
2069         if (!IS_SRIOV(bp))
2070                 return rc;
2071
2072         /* allocate vfs hw contexts */
2073         tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
2074                 BNX2X_CIDS_PER_VF * sizeof(union cdu_context);
2075
2076         for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
2077                 struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
2078                 cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);
2079
2080                 if (cxt->size) {
2081                         BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size);
2082                 } else {
2083                         cxt->addr = NULL;
2084                         cxt->mapping = 0;
2085                 }
2086                 tot_size -= cxt->size;
2087         }
2088
2089         /* allocate vfs ramrods dma memory - client_init and set_mac */
2090         tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
2091         BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping,
2092                         tot_size);
2093         BP_VFDB(bp)->sp_dma.size = tot_size;
2094
2095         /* allocate mailboxes */
2096         tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
2097         BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping,
2098                         tot_size);
2099         BP_VF_MBX_DMA(bp)->size = tot_size;
2100
2101         /* allocate local bulletin boards */
2102         tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
2103         BNX2X_PCI_ALLOC(BP_VF_BULLETIN_DMA(bp)->addr,
2104                         &BP_VF_BULLETIN_DMA(bp)->mapping, tot_size);
2105         BP_VF_BULLETIN_DMA(bp)->size = tot_size;
2106
2107         return 0;
2108
2109 alloc_mem_err:
2110         return -ENOMEM;
2111 }
2112
2113 static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
2114                            struct bnx2x_vf_queue *q)
2115 {
2116         u8 cl_id = vfq_cl_id(vf, q);
2117         u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
2118         unsigned long q_type = 0;
2119
2120         set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
2121         set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
2122
2123         /* Queue State object */
2124         bnx2x_init_queue_obj(bp, &q->sp_obj,
2125                              cl_id, &q->cid, 1, func_id,
2126                              bnx2x_vf_sp(bp, vf, q_data),
2127                              bnx2x_vf_sp_map(bp, vf, q_data),
2128                              q_type);
2129
2130         DP(BNX2X_MSG_IOV,
2131            "initialized vf %d's queue object. func id set to %d\n",
2132            vf->abs_vfid, q->sp_obj.func_id);
2133
2134         /* mac/vlan objects are per queue, but only those
2135          * that belong to the leading queue are initialized
2136          */
2137         if (vfq_is_leading(q)) {
2138                 /* mac */
2139                 bnx2x_init_mac_obj(bp, &q->mac_obj,
2140                                    cl_id, q->cid, func_id,
2141                                    bnx2x_vf_sp(bp, vf, mac_rdata),
2142                                    bnx2x_vf_sp_map(bp, vf, mac_rdata),
2143                                    BNX2X_FILTER_MAC_PENDING,
2144                                    &vf->filter_state,
2145                                    BNX2X_OBJ_TYPE_RX_TX,
2146                                    &bp->macs_pool);
2147                 /* vlan */
2148                 bnx2x_init_vlan_obj(bp, &q->vlan_obj,
2149                                     cl_id, q->cid, func_id,
2150                                     bnx2x_vf_sp(bp, vf, vlan_rdata),
2151                                     bnx2x_vf_sp_map(bp, vf, vlan_rdata),
2152                                     BNX2X_FILTER_VLAN_PENDING,
2153                                     &vf->filter_state,
2154                                     BNX2X_OBJ_TYPE_RX_TX,
2155                                     &bp->vlans_pool);
2156
2157                 /* mcast */
2158                 bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
2159                                      q->cid, func_id, func_id,
2160                                      bnx2x_vf_sp(bp, vf, mcast_rdata),
2161                                      bnx2x_vf_sp_map(bp, vf, mcast_rdata),
2162                                      BNX2X_FILTER_MCAST_PENDING,
2163                                      &vf->filter_state,
2164                                      BNX2X_OBJ_TYPE_RX_TX);
2165
2166                 vf->leading_rss = cl_id;
2167         }
2168 }
2169
2170 /* called by bnx2x_nic_load */
2171 int bnx2x_iov_nic_init(struct bnx2x *bp)
2172 {
2173         int vfid, qcount, i;
2174
2175         if (!IS_SRIOV(bp)) {
2176                 DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
2177                 return 0;
2178         }
2179
2180         DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);
2181
2182         /* let FLR complete ... */
2183         msleep(100);
2184
2185         /* initialize vf database */
2186         for_each_vf(bp, vfid) {
2187                 struct bnx2x_virtf *vf = BP_VF(bp, vfid);
2188
2189                 int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
2190                         BNX2X_CIDS_PER_VF;
2191
2192                 union cdu_context *base_cxt = (union cdu_context *)
2193                         BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
2194                         (base_vf_cid & (ILT_PAGE_CIDS-1));
2195
2196                 DP(BNX2X_MSG_IOV,
2197                    "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
2198                    vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
2199                    BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);
2200
2201                 /* init statically provisioned resources */
2202                 bnx2x_iov_static_resc(bp, &vf->alloc_resc);
2203
2204                 /* queues are initialized during VF-ACQUIRE */
2205
2206                 /* reserve the vf vlan credit */
2207                 bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));
2208
2209                 vf->filter_state = 0;
2210                 vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
2211
2212                 /*  init mcast object - This object will be re-initialized
2213                  *  during VF-ACQUIRE with the proper cl_id and cid.
2214                  *  It needs to be initialized here so that it can be safely
2215                  *  handled by a subsequent FLR flow.
2216                  */
2217                 bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
2218                                      0xFF, 0xFF, 0xFF,
2219                                      bnx2x_vf_sp(bp, vf, mcast_rdata),
2220                                      bnx2x_vf_sp_map(bp, vf, mcast_rdata),
2221                                      BNX2X_FILTER_MCAST_PENDING,
2222                                      &vf->filter_state,
2223                                      BNX2X_OBJ_TYPE_RX_TX);
2224
2225                 /* set the mailbox message addresses */
2226                 BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
2227                         (((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
2228                         MBX_MSG_ALIGNED_SIZE);
2229
2230                 BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
2231                         vfid * MBX_MSG_ALIGNED_SIZE;
2232
2233                 /* Enable vf mailbox */
2234                 bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
2235         }
2236
2237         /* Final VF init */
2238         qcount = 0;
2239         for_each_vf(bp, i) {
2240                 struct bnx2x_virtf *vf = BP_VF(bp, i);
2241
2242                 /* fill in the BDF and bars */
2243                 vf->bus = bnx2x_vf_bus(bp, i);
2244                 vf->devfn = bnx2x_vf_devfn(bp, i);
2245                 bnx2x_vf_set_bars(bp, vf);
2246
2247                 DP(BNX2X_MSG_IOV,
2248                    "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
2249                    vf->abs_vfid, vf->bus, vf->devfn,
2250                    (unsigned)vf->bars[0].bar, vf->bars[0].size,
2251                    (unsigned)vf->bars[1].bar, vf->bars[1].size,
2252                    (unsigned)vf->bars[2].bar, vf->bars[2].size);
2253
2254                 /* set local queue arrays */
2255                 vf->vfqs = &bp->vfdb->vfqs[qcount];
2256                 qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs);
2257         }
2258
2259         return 0;
2260 }
2261
2262 /* called by bnx2x_chip_cleanup */
2263 int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
2264 {
2265         int i;
2266
2267         if (!IS_SRIOV(bp))
2268                 return 0;
2269
2270         /* release all the VFs */
2271         for_each_vf(bp, i)
2272                 bnx2x_vf_release(bp, BP_VF(bp, i), true); /* blocking */
2273
2274         return 0;
2275 }
2276
2277 /* called by bnx2x_init_hw_func, returns the next ilt line */
2278 int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
2279 {
2280         int i;
2281         struct bnx2x_ilt *ilt = BP_ILT(bp);
2282
2283         if (!IS_SRIOV(bp))
2284                 return line;
2285
2286         /* set vfs ilt lines */
2287         for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
2288                 struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);
2289
2290                 ilt->lines[line+i].page = hw_cxt->addr;
2291                 ilt->lines[line+i].page_mapping = hw_cxt->mapping;
2292                 ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
2293         }
2294         return line + i;
2295 }
2296
2297 static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
2298 {
2299         return ((cid >= BNX2X_FIRST_VF_CID) &&
2300                 ((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
2301 }
2302
2303 static
2304 void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
2305                                         struct bnx2x_vf_queue *vfq,
2306                                         union event_ring_elem *elem)
2307 {
2308         unsigned long ramrod_flags = 0;
2309         int rc = 0;
2310
2311         /* Always push next commands out, don't wait here */
2312         set_bit(RAMROD_CONT, &ramrod_flags);
2313
2314         switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
2315         case BNX2X_FILTER_MAC_PENDING:
2316                 rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
2317                                            &ramrod_flags);
2318                 break;
2319         case BNX2X_FILTER_VLAN_PENDING:
2320                 rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
2321                                             &ramrod_flags);
2322                 break;
2323         default:
2324                 BNX2X_ERR("Unsupported classification command: %d\n",
2325                           elem->message.data.eth_event.echo);
2326                 return;
2327         }
2328         if (rc < 0)
2329                 BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
2330         else if (rc > 0)
2331                 DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
2332 }
2333
2334 static
2335 void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
2336                                struct bnx2x_virtf *vf)
2337 {
2338         struct bnx2x_mcast_ramrod_params rparam = {NULL};
2339         int rc;
2340
2341         rparam.mcast_obj = &vf->mcast_obj;
2342         vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);
2343
2344         /* If there are pending mcast commands - send them */
2345         if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
2346                 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2347                 if (rc < 0)
2348                         BNX2X_ERR("Failed to send pending mcast commands: %d\n",
2349                                   rc);
2350         }
2351 }
2352
2353 static
2354 void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
2355                                  struct bnx2x_virtf *vf)
2356 {
2357         smp_mb__before_clear_bit();
2358         clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
2359         smp_mb__after_clear_bit();
2360 }
2361
2362 int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
2363 {
2364         struct bnx2x_virtf *vf;
2365         int qidx = 0, abs_vfid;
2366         u8 opcode;
2367         u16 cid = 0xffff;
2368
2369         if (!IS_SRIOV(bp))
2370                 return 1;
2371
2372         /* first get the cid - the only events we handle here are cfc-delete
2373          * and set-mac completion
2374          */
2375         opcode = elem->message.opcode;
2376
2377         switch (opcode) {
2378         case EVENT_RING_OPCODE_CFC_DEL:
2379                 cid = SW_CID((__force __le32)
2380                              elem->message.data.cfc_del_event.cid);
2381                 DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
2382                 break;
2383         case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
2384         case EVENT_RING_OPCODE_MULTICAST_RULES:
2385         case EVENT_RING_OPCODE_FILTERS_RULES:
2386                 cid = (elem->message.data.eth_event.echo &
2387                        BNX2X_SWCID_MASK);
2388                 DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
2389                 break;
2390         case EVENT_RING_OPCODE_VF_FLR:
2391                 abs_vfid = elem->message.data.vf_flr_event.vf_id;
2392                 DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
2393                    abs_vfid);
2394                 goto get_vf;
2395         case EVENT_RING_OPCODE_MALICIOUS_VF:
2396                 abs_vfid = elem->message.data.malicious_vf_event.vf_id;
2397                 DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
2398                    abs_vfid, elem->message.data.malicious_vf_event.err_id);
2399                 goto get_vf;
2400         default:
2401                 return 1;
2402         }
2403
2404         /* check if the cid is in the VF range */
2405         if (!bnx2x_iov_is_vf_cid(bp, cid)) {
2406                 DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
2407                 return 1;
2408         }
2409
2410         /* extract vf and rxq index from vf_cid - relies on the following:
2411          * 1. vfid on cid reflects the true abs_vfid
2412          * 2. The max number of VFs (per path) is 64
2413          */
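        /* e.g. assuming a 4-bit CID window (16 CIDs per VF): cid 0x32
         * decodes to rxq index (0x32 & 0xf) = 2 and abs_vfid (0x32 >> 4) = 3
         */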
2414         qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
2415         abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
2416 get_vf:
2417         vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
2418
2419         if (!vf) {
2420                 BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
2421                           cid, abs_vfid);
2422                 return 0;
2423         }
2424
2425         switch (opcode) {
2426         case EVENT_RING_OPCODE_CFC_DEL:
2427                 DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
2428                    vf->abs_vfid, qidx);
2429                 vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
2430                                                        &vfq_get(vf,
2431                                                                 qidx)->sp_obj,
2432                                                        BNX2X_Q_CMD_CFC_DEL);
2433                 break;
2434         case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
2435                 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
2436                    vf->abs_vfid, qidx);
2437                 bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
2438                 break;
2439         case EVENT_RING_OPCODE_MULTICAST_RULES:
2440                 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
2441                    vf->abs_vfid, qidx);
2442                 bnx2x_vf_handle_mcast_eqe(bp, vf);
2443                 break;
2444         case EVENT_RING_OPCODE_FILTERS_RULES:
2445                 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
2446                    vf->abs_vfid, qidx);
2447                 bnx2x_vf_handle_filters_eqe(bp, vf);
2448                 break;
2449         case EVENT_RING_OPCODE_VF_FLR:
2450                 DP(BNX2X_MSG_IOV, "got VF [%d] FLR notification\n",
2451                    vf->abs_vfid);
2452                 /* Do nothing for now */
2453                 break;
2454         case EVENT_RING_OPCODE_MALICIOUS_VF:
2455                 DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d error id %x\n",
2456                    abs_vfid, elem->message.data.malicious_vf_event.err_id);
2457                 /* Do nothing for now */
2458                 break;
2459         }
2460         /* SRIOV: reschedule any 'in_progress' operations */
2461         bnx2x_iov_sp_event(bp, cid, false);
2462
2463         return 0;
2464 }
2465
2466 static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
2467 {
2468         /* extract the vf from vf_cid - relies on the following:
2469          * 1. vfid on cid reflects the true abs_vfid
2470          * 2. The max number of VFs (per path) is 64
2471          */
2472         int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
2473         return bnx2x_vf_by_abs_fid(bp, abs_vfid);
2474 }
2475
2476 void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
2477                                 struct bnx2x_queue_sp_obj **q_obj)
2478 {
2479         struct bnx2x_virtf *vf;
2480
2481         if (!IS_SRIOV(bp))
2482                 return;
2483
2484         vf = bnx2x_vf_by_cid(bp, vf_cid);
2485
2486         if (vf) {
2487                 /* extract queue index from vf_cid - relies on the following:
2488                  * 1. vfid on cid reflects the true abs_vfid
2489                  * 2. The max number of VFs (per path) is 64
2490                  */
2491                 int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
2492                 *q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
2493         } else {
2494                 BNX2X_ERR("No vf matching cid %d\n", vf_cid);
2495         }
2496 }
2497
2498 void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
2499 {
2500         struct bnx2x_virtf *vf;
2501
2502         /* check if the cid is in the VF range */
2503         if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid))
2504                 return;
2505
2506         vf = bnx2x_vf_by_cid(bp, vf_cid);
2507         if (vf) {
2508                 /* set in_progress flag */
2509                 atomic_set(&vf->op_in_progress, 1);
2510                 if (queue_work)
2511                         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2512         }
2513 }
2514
2515 void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
2516 {
2517         int i;
2518         int first_queue_query_index, num_queues_req;
2519         dma_addr_t cur_data_offset;
2520         struct stats_query_entry *cur_query_entry;
2521         u8 stats_count = 0;
2522         bool is_fcoe = false;
2523
2524         if (!IS_SRIOV(bp))
2525                 return;
2526
2527         if (!NO_FCOE(bp))
2528                 is_fcoe = true;
2529
2530         /* fcoe adds one global request and one queue request */
2531         num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
2532         first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
2533                 (is_fcoe ? 0 : 1);
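        /* e.g. hypothetically, 4 ETH queues with FCoE enabled give
         * num_queues_req = 5; the VF queries are appended right after them
         */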
2534
2535         DP(BNX2X_MSG_IOV,
2536            "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non-virtual statistics query index is %d. Will add queries on top of that\n",
2537            BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
2538            first_queue_query_index + num_queues_req);
2539
2540         cur_data_offset = bp->fw_stats_data_mapping +
2541                 offsetof(struct bnx2x_fw_stats_data, queue_stats) +
2542                 num_queues_req * sizeof(struct per_queue_stats);
2543
2544         cur_query_entry = &bp->fw_stats_req->
2545                 query[first_queue_query_index + num_queues_req];
2546
2547         for_each_vf(bp, i) {
2548                 int j;
2549                 struct bnx2x_virtf *vf = BP_VF(bp, i);
2550
2551                 if (vf->state != VF_ENABLED) {
2552                         DP(BNX2X_MSG_IOV,
2553                            "vf %d not enabled so no stats for it\n",
2554                            vf->abs_vfid);
2555                         continue;
2556                 }
2557
2558                 DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
2559                 for_each_vfq(vf, j) {
2560                         struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
2561
2562                         /* collect stats from active queues only */
2563                         if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
2564                             BNX2X_Q_LOGICAL_STATE_STOPPED)
2565                                 continue;
2566
2567                         /* create stats query entry for this queue */
2568                         cur_query_entry->kind = STATS_TYPE_QUEUE;
2569                         cur_query_entry->index = vfq_cl_id(vf, rxq);
2570                         cur_query_entry->funcID =
2571                                 cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
2572                         cur_query_entry->address.hi =
2573                                 cpu_to_le32(U64_HI(vf->fw_stat_map));
2574                         cur_query_entry->address.lo =
2575                                 cpu_to_le32(U64_LO(vf->fw_stat_map));
2576                         DP(BNX2X_MSG_IOV,
2577                            "added address %x %x for vf %d queue %d client %d\n",
2578                            cur_query_entry->address.hi,
2579                            cur_query_entry->address.lo, cur_query_entry->funcID,
2580                            j, cur_query_entry->index);
2581                         cur_query_entry++;
2582                         cur_data_offset += sizeof(struct per_queue_stats);
2583                         stats_count++;
2584                 }
2585         }
2586         bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
2587 }
2588
2589 void bnx2x_iov_sp_task(struct bnx2x *bp)
2590 {
2591         int i;
2592
2593         if (!IS_SRIOV(bp))
2594                 return;
2595         /* Iterate over all VFs and invoke state transition for VFs with
2596          * 'in-progress' slow-path operations
2597          */
2598         DP(BNX2X_MSG_IOV, "searching for pending vf operations\n");
2599         for_each_vf(bp, i) {
2600                 struct bnx2x_virtf *vf = BP_VF(bp, i);
2601
2602                 if (!list_empty(&vf->op_list_head) &&
2603                     atomic_read(&vf->op_in_progress)) {
2604                         DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
2605                         bnx2x_vfop_cur(bp, vf)->transition(bp, vf);
2606                 }
2607         }
2608 }
2609
2610 static inline
2611 struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
2612 {
2613         int i;
2614         struct bnx2x_virtf *vf = NULL;
2615
2616         for_each_vf(bp, i) {
2617                 vf = BP_VF(bp, i);
2618                 if (stat_id >= vf->igu_base_id &&
2619                     stat_id < vf->igu_base_id + vf_sb_count(vf))
2620                         break;
2621         }
2622         return vf;
2623 }
2624
2625 /* VF API helpers */
2626 static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
2627                                 u8 enable)
2628 {
2629         u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
2630         u32 val = enable ? (abs_vfid | (1 << 6)) : 0;
2631
2632         REG_WR(bp, reg, val);
2633 }
2634
2635 static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
2636 {
2637         int i;
2638
2639         for_each_vfq(vf, i)
2640                 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
2641                                     vfq_qzone_id(vf, vfq_get(vf, i)), false);
2642 }
2643
2644 static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
2645 {
2646         u32 val;
2647
2648         /* clear the VF configuration - pretend */
2649         bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
2650         val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
2651         val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
2652                  IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
2653         REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
2654         bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
2655 }
2656
2657 u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
2658 {
2659         return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
2660                      BNX2X_VF_MAX_QUEUES);
2661 }
2662
2663 static
2664 int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
2665                             struct vf_pf_resc_request *req_resc)
2666 {
2667         u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
2668         u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
2669
2670         return ((req_resc->num_rxqs <= rxq_cnt) &&
2671                 (req_resc->num_txqs <= txq_cnt) &&
2672                 (req_resc->num_sbs <= vf_sb_count(vf))   &&
2673                 (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
2674                 (req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
2675 }
2676
2677 /* CORE VF API */
2678 int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
2679                      struct vf_pf_resc_request *resc)
2680 {
2681         int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
2682                 BNX2X_CIDS_PER_VF;
2683
2684         union cdu_context *base_cxt = (union cdu_context *)
2685                 BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
2686                 (base_vf_cid & (ILT_PAGE_CIDS-1));
2687         int i;
2688
2689         /* if state is 'acquired' the VF was not released or FLR'd; in
2690          * this case the returned resources match the already acquired
2691          * resources. Verify that the requested numbers do
2692          * not exceed the already acquired numbers.
2693          */
2694         if (vf->state == VF_ACQUIRED) {
2695                 DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
2696                    vf->abs_vfid);
2697
2698                 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
2699                         BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= the previously acquired numbers\n",
2700                                   vf->abs_vfid);
2701                         return -EINVAL;
2702                 }
2703                 return 0;
2704         }
2705
2706         /* Otherwise vf state must be 'free' or 'reset' */
2707         if (vf->state != VF_FREE && vf->state != VF_RESET) {
2708                 BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
2709                           vf->abs_vfid, vf->state);
2710                 return -EINVAL;
2711         }
2712
2713         /* static allocation:
2714          * the global maximum numbers are fixed per VF. Fail the request if
2715          * the requested numbers exceed these globals
2716          */
2717         if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
2718                 DP(BNX2X_MSG_IOV,
2719                    "cannot fulfill vf resource request. Placing maximal available values in response\n");
2720                 /* set the max resource in the vf */
2721                 return -ENOMEM;
2722         }
2723
2724         /* Set resource counters - a request of 0 means max available */
2725         vf_sb_count(vf) = resc->num_sbs;
2726         vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
2727         vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
2728         if (resc->num_mac_filters)
2729                 vf_mac_rules_cnt(vf) = resc->num_mac_filters;
2730         if (resc->num_vlan_filters)
2731                 vf_vlan_rules_cnt(vf) = resc->num_vlan_filters;
2732
2733         DP(BNX2X_MSG_IOV,
2734            "Fulfilling vf request: sb count %d, rx_count %d, tx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
2735            vf_sb_count(vf), vf_rxq_count(vf),
2736            vf_txq_count(vf), vf_mac_rules_cnt(vf),
2737            vf_vlan_rules_cnt(vf));
2738
2739         /* Initialize the queues */
2740         if (!vf->vfqs) {
2741                 DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
2742                 return -EINVAL;
2743         }
2744
2745         for_each_vfq(vf, i) {
2746                 struct bnx2x_vf_queue *q = vfq_get(vf, i);
2747
2748                 if (!q) {
2749                         DP(BNX2X_MSG_IOV, "q number %d was not allocated\n", i);
2750                         return -EINVAL;
2751                 }
2752
2753                 q->index = i;
2754                 q->cxt = &((base_cxt + i)->eth);
2755                 q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;
2756
2757                 DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
2758                    vf->abs_vfid, i, q->index, q->cid, q->cxt);
2759
2760                 /* init SP objects */
2761                 bnx2x_vfq_init(bp, vf, q);
2762         }
2763         vf->state = VF_ACQUIRED;
2764         return 0;
2765 }
2766
2767 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
2768 {
2769         struct bnx2x_func_init_params func_init = {0};
2770         u16 flags = 0;
2771         int i;
2772
2773         /* the sb resources are initialized at this point, do the
2774          * FW/HW initializations
2775          */
2776         for_each_vf_sb(vf, i)
2777                 bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
2778                               vf_igu_sb(vf, i), vf_igu_sb(vf, i));
2779
2780         /* Sanity checks */
2781         if (vf->state != VF_ACQUIRED) {
2782                 DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
2783                    vf->abs_vfid, vf->state);
2784                 return -EINVAL;
2785         }
2786
2787         /* let FLR complete ... */
2788         msleep(100);
2789
2790         /* FLR cleanup epilogue */
2791         if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
2792                 return -EBUSY;
2793
2794         /* reset IGU VF statistics: MSIX */
2795         REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);
2796
2797         /* vf init */
2798         if (vf->cfg_flags & VF_CFG_STATS)
2799                 flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);
2800
2801         if (vf->cfg_flags & VF_CFG_TPA)
2802                 flags |= FUNC_FLG_TPA;
2803
2804         if (is_vf_multi(vf))
2805                 flags |= FUNC_FLG_RSS;
2806
2807         /* function setup */
2808         func_init.func_flgs = flags;
2809         func_init.pf_id = BP_FUNC(bp);
2810         func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
2811         func_init.fw_stat_map = vf->fw_stat_map;
2812         func_init.spq_map = vf->spq_map;
2813         func_init.spq_prod = 0;
2814         bnx2x_func_init(bp, &func_init);
2815
2816         /* Enable the vf */
2817         bnx2x_vf_enable_access(bp, vf->abs_vfid);
2818         bnx2x_vf_enable_traffic(bp, vf);
2819
2820         /* queue protection table */
2821         for_each_vfq(vf, i)
2822                 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
2823                                     vfq_qzone_id(vf, vfq_get(vf, i)), true);
2824
2825         vf->state = VF_ENABLED;
2826
2827         /* update vf bulletin board */
2828         bnx2x_post_vf_bulletin(bp, vf->index);
2829
2830         return 0;
2831 }
2832
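/* Sketch of the VF state machine driven by the functions in this file
 * (error and FLR paths omitted; assumes bnx2x_vf_free_resc() returns
 * the VF to VF_FREE as its final step):
 *
 *	VF_FREE     --acquire-->  VF_ACQUIRED
 *	VF_ACQUIRED --init----->  VF_ENABLED
 *	VF_ENABLED  --close---->  VF_ACQUIRED
 *	VF_ACQUIRED --release-->  VF_FREE
 */
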
2833 /* VFOP close (teardown the queues, delete mcasts and close HW) */
2834 static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
2835 {
2836         struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
2837         struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
2838         enum bnx2x_vfop_close_state state = vfop->state;
2839         struct bnx2x_vfop_cmd cmd = {
2840                 .done = bnx2x_vfop_close,
2841                 .block = false,
2842         };
2843
2844         if (vfop->rc < 0)
2845                 goto op_err;
2846
2847         DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
2848
2849         switch (state) {
2850         case BNX2X_VFOP_CLOSE_QUEUES:
2851
2852                 if (++(qx->qid) < vf_rxq_count(vf)) {
2853                         vfop->rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qx->qid);
2854                         if (vfop->rc)
2855                                 goto op_err;
2856                         return;
2857                 }
2858
2859                 /* remove multicasts */
2860                 vfop->state = BNX2X_VFOP_CLOSE_HW;
2861                 vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
2862                 if (vfop->rc)
2863                         goto op_err;
2864                 return;
2865
2866         case BNX2X_VFOP_CLOSE_HW:
2867
2868                 /* disable the interrupts */
2869                 DP(BNX2X_MSG_IOV, "disabling igu\n");
2870                 bnx2x_vf_igu_disable(bp, vf);
2871
2872                 /* disable the VF */
2873                 DP(BNX2X_MSG_IOV, "clearing qtbl\n");
2874                 bnx2x_vf_clr_qtbl(bp, vf);
2875
2876                 goto op_done;
2877         default:
2878                 bnx2x_vfop_default(state);
2879         }
2880 op_err:
2881         BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc);
2882 op_done:
2883         vf->state = VF_ACQUIRED;
2884         DP(BNX2X_MSG_IOV, "set state to acquired\n");
2885         bnx2x_vfop_end(bp, vf, vfop);
2886 }
2887
2888 int bnx2x_vfop_close_cmd(struct bnx2x *bp,
2889                          struct bnx2x_virtf *vf,
2890                          struct bnx2x_vfop_cmd *cmd)
2891 {
2892         struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
2893         if (vfop) {
2894                 vfop->args.qx.qid = -1; /* loop */
2895                 bnx2x_vfop_opset(BNX2X_VFOP_CLOSE_QUEUES,
2896                                  bnx2x_vfop_close, cmd->done);
2897                 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_close,
2898                                              cmd->block);
2899         }
2900         return -ENOMEM;
2901 }
2902
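/* Illustrative usage (a hypothetical caller, not a call site in this
 * file): an asynchronous close is issued through the vfop command
 * interface; the .done callback fires when the state machine ends.
 */
#if 0
static void example_close_done(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	DP(BNX2X_MSG_IOV, "VF[%d] close completed\n", vf->abs_vfid);
}

static int example_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop_cmd cmd = {
		.done = example_close_done,
		.block = false,		/* don't wait for completion */
	};

	return bnx2x_vfop_close_cmd(bp, vf, &cmd);
}
#endif
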
2903 /* VF release can be called either when: 1. the VF was acquired but
2904  * not enabled, or 2. the VF was enabled or in the process of being
2905  * enabled
2906  */
2907 static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
2908 {
2909         struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
2910         struct bnx2x_vfop_cmd cmd = {
2911                 .done = bnx2x_vfop_release,
2912                 .block = false,
2913         };
2914
2915         DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
2916
2917         if (vfop->rc < 0)
2918                 goto op_err;
2919
2920         DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
2921            vf->state == VF_FREE ? "Free" :
2922            vf->state == VF_ACQUIRED ? "Acquired" :
2923            vf->state == VF_ENABLED ? "Enabled" :
2924            vf->state == VF_RESET ? "Reset" :
2925            "Unknown");
2926
2927         switch (vf->state) {
2928         case VF_ENABLED:
2929                 vfop->rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
2930                 if (vfop->rc)
2931                         goto op_err;
2932                 return;
2933
2934         case VF_ACQUIRED:
2935                 DP(BNX2X_MSG_IOV, "about to free resources\n");
2936                 bnx2x_vf_free_resc(bp, vf);
2937                 DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
2938                 goto op_done;
2939
2940         case VF_FREE:
2941         case VF_RESET:
2942                 /* do nothing */
2943                 goto op_done;
2944         default:
2945                 bnx2x_vfop_default(vf->state);
2946         }
2947 op_err:
2948         BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, vfop->rc);
2949 op_done:
2950         bnx2x_vfop_end(bp, vf, vfop);
2951 }
2952
2953 int bnx2x_vfop_release_cmd(struct bnx2x *bp,
2954                            struct bnx2x_virtf *vf,
2955                            struct bnx2x_vfop_cmd *cmd)
2956 {
2957         struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
2958         if (vfop) {
2959                 bnx2x_vfop_opset(-1, /* use vf->state */
2960                                  bnx2x_vfop_release, cmd->done);
2961                 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_release,
2962                                              cmd->block);
2963         }
2964         return -ENOMEM;
2965 }
2966
2967 /* VF release ~ VF close + VF release-resources
2968  * Release is the ultimate SW shutdown and is called whenever an
2969  * irrecoverable error is encountered.
2970  */
2971 void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block)
2972 {
2973         struct bnx2x_vfop_cmd cmd = {
2974                 .done = NULL,
2975                 .block = block,
2976         };
2977         int rc;
2978         bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
2979
2980         rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
2981         /* WARN() itself tests rc, so no separate if () is needed */
2982         WARN(rc,
2983              "VF[%d] failed to allocate resources for release op, rc %d\n",
2984              vf->abs_vfid, rc);
2985 }
2986
2987 static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
2988                               struct bnx2x_virtf *vf, u32 *sbdf)
2989 {
2990         *sbdf = vf->devfn | (vf->bus << 8);
2991 }
2992
2993 static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf,
2994                        struct bnx2x_vf_bar_info *bar_info)
2995 {
2996         int n;
2997
2998         bar_info->nr_bars = bp->vfdb->sriov.nres;
2999         for (n = 0; n < bar_info->nr_bars; n++)
3000                 bar_info->bars[n] = vf->bars[n];
3001 }
3002
3003 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
3004                               enum channel_tlvs tlv)
3005 {
3006         /* lock the channel */
3007         mutex_lock(&vf->op_mutex);
3008
3009         /* record the locking op */
3010         vf->op_current = tlv;
3011
3012         /* log the lock */
3013         DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
3014            vf->abs_vfid, tlv);
3015 }
3016
3017 void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
3018                                 enum channel_tlvs expected_tlv)
3019 {
3020         WARN(expected_tlv != vf->op_current,
3021              "lock mismatch: expected %d found %d\n", expected_tlv,
3022              vf->op_current);
3023
3024         /* unlock the channel */
3025         mutex_unlock(&vf->op_mutex);
3026
3027         /* log the unlock */
3028         DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
3029            vf->abs_vfid, vf->op_current);
3030
3031         /* clear the locking op */
3032         vf->op_current = CHANNEL_TLV_NONE;
3033 }
3034
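/* Illustrative pairing (mirrors bnx2x_set_vf_mac() below): a
 * PF-initiated flow takes the channel with the TLV naming the
 * operation and must release it with the same TLV, or the WARN in
 * bnx2x_unlock_vf_pf_channel() fires.
 */
#if 0
	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
	/* ... issue the MAC configuration ramrods ... */
	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
#endif
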
3035 int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
3036 {
3037         struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
3038
3039         DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
3040            num_vfs_param, BNX2X_NR_VIRTFN(bp));
3041
3042         /* HW channel is only operational when PF is up */
3043         if (bp->state != BNX2X_STATE_OPEN) {
3044                 BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n");
3045                 return -EINVAL;
3046         }
3047
3048         /* we are always bound by the total_vfs in the configuration space */
3049         if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) {
3050                 BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n",
3051                           num_vfs_param, BNX2X_NR_VIRTFN(bp));
3052                 num_vfs_param = BNX2X_NR_VIRTFN(bp);
3053         }
3054
3055         bp->requested_nr_virtfn = num_vfs_param;
3056         if (num_vfs_param == 0) {
3057                 pci_disable_sriov(dev);
3058                 return 0;
3059         } else {
3060                 return bnx2x_enable_sriov(bp);
3061         }
3062 }
3063
3064 int bnx2x_enable_sriov(struct bnx2x *bp)
3065 {
3066         int rc = 0, req_vfs = bp->requested_nr_virtfn;
3067
3068         rc = pci_enable_sriov(bp->pdev, req_vfs);
3069         if (rc) {
3070                 BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
3071                 return rc;
3072         }
3073         DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs);
3074         return req_vfs;
3075 }
3076
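/* For reference: bnx2x_sriov_configure() above is wired up as the
 * driver's PCI sriov_configure hook, so it is typically reached from
 * userspace via, e.g.:
 *
 *	echo 2 > /sys/bus/pci/devices/<BDF>/sriov_numvfs
 *
 * Writing 0 takes the pci_disable_sriov() branch.
 */
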
3077 void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
3078 {
3079         int vfidx;
3080         struct pf_vf_bulletin_content *bulletin;
3081
3082         DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
3083         for_each_vf(bp, vfidx) {
3084                 bulletin = BP_VF_BULLETIN(bp, vfidx);
3085                 if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN)
3086                         bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
3087         }
3088 }
3089
3090 void bnx2x_disable_sriov(struct bnx2x *bp)
3091 {
3092         pci_disable_sriov(bp->pdev);
3093 }
3094
3095 static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx,
3096                                struct bnx2x_virtf *vf)
3097 {
3098         if (bp->state != BNX2X_STATE_OPEN) {
3099                 BNX2X_ERR("vf ndo called while PF is down\n");
3100                 return -EINVAL;
3101         }
3102
3103         if (!IS_SRIOV(bp)) {
3104                 BNX2X_ERR("vf ndo called while sriov is disabled\n");
3105                 return -EINVAL;
3106         }
3107
3108         if (vfidx >= BNX2X_NR_VIRTFN(bp)) {
3109                 BNX2X_ERR("vf ndo called for uninitialized VF. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
3110                           vfidx, BNX2X_NR_VIRTFN(bp));
3111                 return -EINVAL;
3112         }
3113
3114         if (!vf) {
3115                 BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n",
3116                           vfidx);
3117                 return -EINVAL;
3118         }
3119
3120         return 0;
3121 }
3122
3123 int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
3124                         struct ifla_vf_info *ivi)
3125 {
3126         struct bnx2x *bp = netdev_priv(dev);
3127         struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
3128         struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
3129         struct bnx2x_vlan_mac_obj *vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj);
3130         struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
3131         int rc;
3132
3133         /* sanity */
3134         rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
3135         if (rc)
3136                 return rc;
3137         if (!mac_obj || !vlan_obj || !bulletin) {
3138                 BNX2X_ERR("VF partially initialized\n");
3139                 return -EINVAL;
3140         }
3141
3142         ivi->vf = vfidx;
3143         ivi->qos = 0;
3144         ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */
3145         ivi->spoofchk = 1; /* always enabled */
3146         if (vf->state == VF_ENABLED) {
3147                 /* mac and vlan are in vlan_mac objects */
3148                 mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
3149                                         0, ETH_ALEN);
3150                 vlan_obj->get_n_elements(bp, vlan_obj, 1, (u8 *)&ivi->vlan,
3151                                          0, VLAN_HLEN);
3152         } else {
3153                 /* mac */
3154                 if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
3155                         /* mac configured by ndo so it's in the bulletin board */
3156                         memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
3157                 else
3158                         /* function has not been loaded yet. Show mac as 0s */
3159                         memset(&ivi->mac, 0, ETH_ALEN);
3160
3161                 /* vlan */
3162                 if (bulletin->valid_bitmap & (1 << VLAN_VALID))
3163                         /* vlan configured by ndo so it's in the bulletin board */
3164                         memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
3165                 else
3166                         /* function has not been loaded yet. Show vlans as 0s */
3167                         memset(&ivi->vlan, 0, VLAN_HLEN);
3168         }
3169
3170         return 0;
3171 }
3172
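/* For reference: bnx2x_get_vf_config() above is the driver's
 * ndo_get_vf_config implementation, reached via the rtnl VF dump
 * (e.g. "ip link show <pf-dev>").
 */
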
3173 /* New mac for VF. Consider these cases:
3174  * 1. VF hasn't been acquired yet - save the mac in local bulletin board and
3175  *    supply at acquire.
3176  * 2. VF has already been acquired but has not yet initialized - store in local
3177  *    bulletin board. mac will be posted on VF bulletin board after VF init. VF
3178  *    will configure this mac when it is ready.
3179  * 3. VF has already initialized but has not yet setup a queue - post the new
3180  *    mac on VF's bulletin board right now. VF will configure this mac when it
3181  *    is ready.
3182  * 4. VF has already set a queue - delete any macs already configured for this
3183  *    queue and manually config the new mac.
3184  * In any event, once this function has been called refuse any attempts by the
3185  * VF to configure any mac for itself except for this mac. In case of a race
3186  * where the VF fails to see the new post on its bulletin board before sending a
3187  * mac configuration request, the PF will simply fail the request and VF can try
3188  * again after consulting its bulletin board.
3189  */
3190 int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
3191 {
3192         struct bnx2x *bp = netdev_priv(dev);
3193         int rc, q_logical_state;
3194         struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
3195         struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
3196
3197         /* sanity */
3198         rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
3199         if (rc)
3200                 return rc;
3201         if (!is_valid_ether_addr(mac)) {
3202                 BNX2X_ERR("mac address invalid\n");
3203                 return -EINVAL;
3204         }
3205
3206         /* update PF's copy of the VF's bulletin. Will no longer accept mac
3207          * configuration requests from the vf unless they match this mac
3208          */
3209         bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
3210         memcpy(bulletin->mac, mac, ETH_ALEN);
3211
3212         /* Post update on VF's bulletin board */
3213         rc = bnx2x_post_vf_bulletin(bp, vfidx);
3214         if (rc) {
3215                 BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
3216                 return rc;
3217         }
3218
3219         /* is vf initialized and queue set up? */
3220         q_logical_state =
3221                 bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
3222         if (vf->state == VF_ENABLED &&
3223             q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
3224                 /* configure the mac in device on this vf's queue */
3225                 unsigned long ramrod_flags = 0;
3226                 struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
3227
3228                 /* must lock vfpf channel to protect against vf flows */
3229                 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
3230
3231                 /* remove existing eth macs */
3232                 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
3233                 if (rc) {
3234                         BNX2X_ERR("failed to delete eth macs\n");
3235                         return -EINVAL;
3236                 }
3237
3238                 /* remove existing uc list macs */
3239                 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
3240                 if (rc) {
3241                         BNX2X_ERR("failed to delete uc_list macs\n");
3242                         return -EINVAL;
3243                 }
3244
3245                 /* configure the new mac to device */
3246                 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3247                 bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
3248                                   BNX2X_ETH_MAC, &ramrod_flags);
3249
3250                 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
3251         }
3252
3253         return 0;
3254 }
3255
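/* For reference: bnx2x_set_vf_mac() above is the driver's
 * ndo_set_vf_mac implementation, reached via, e.g.:
 *
 *	ip link set <pf-dev> vf <n> mac <addr>
 */
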
3256 int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
3257 {
3258         struct bnx2x *bp = netdev_priv(dev);
3259         int rc, q_logical_state;
3260         struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
3261         struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
3262
3263         /* sanity */
3264         rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
3265         if (rc)
3266                 return rc;
3267
3268         if (vlan > 4095) {
3269                 BNX2X_ERR("illegal vlan value %d\n", vlan);
3270                 return -EINVAL;
3271         }
3272
3273         DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
3274            vfidx, vlan, qos);
3275
3276         /* update PF's copy of the VF's bulletin. No point in posting the vlan
3277          * to the VF since it doesn't have anything to do with it. But it is
3278          * useful to store it here in case the VF is not up yet, so we can
3279          * configure the vlan later when it comes up.
3280          */
3281         bulletin->valid_bitmap |= 1 << VLAN_VALID;
3282         bulletin->vlan = vlan;
3283
3284         /* is vf initialized and queue set up? */
3285         q_logical_state =
3286                 bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
3287         if (vf->state == VF_ENABLED &&
3288             q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
3289                 /* configure the vlan in device on this vf's queue */
3290                 unsigned long ramrod_flags = 0;
3291                 unsigned long vlan_mac_flags = 0;
3292                 struct bnx2x_vlan_mac_obj *vlan_obj =
3293                         &bnx2x_vfq(vf, 0, vlan_obj);
3294                 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
3295                 struct bnx2x_queue_state_params q_params = {NULL};
3296                 struct bnx2x_queue_update_params *update_params;
3297
3298                 memset(&ramrod_param, 0, sizeof(ramrod_param));
3299
3300                 /* must lock vfpf channel to protect against vf flows */
3301                 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
3302
3303                 /* remove existing vlans */
3304                 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3305                 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
3306                                           &ramrod_flags);
3307                 if (rc) {
3308                         BNX2X_ERR("failed to delete vlans\n");
3309                         return -EINVAL;
3310                 }
3311
3312                 /* send queue update ramrod to configure default vlan and silent
3313                  * vlan removal
3314                  */
3315                 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
3316                 q_params.cmd = BNX2X_Q_CMD_UPDATE;
3317                 q_params.q_obj = &bnx2x_vfq(vf, 0, sp_obj);
3318                 update_params = &q_params.params.update;
3319                 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
3320                           &update_params->update_flags);
3321                 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
3322                           &update_params->update_flags);
3323
3324                 if (vlan == 0) {
3325                         /* if vlan is 0 then we want to leave the VF traffic
3326                          * untagged, and leave the incoming traffic untouched
3327                          * (i.e. do not remove any vlan tags).
3328                          */
3329                         __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
3330                                     &update_params->update_flags);
3331                         __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
3332                                     &update_params->update_flags);
3333                 } else {
3334                         /* configure the new vlan to device */
3335                         __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3336                         ramrod_param.vlan_mac_obj = vlan_obj;
3337                         ramrod_param.ramrod_flags = ramrod_flags;
3338                         ramrod_param.user_req.u.vlan.vlan = vlan;
3339                         ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
3340                         rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
3341                         if (rc) {
3342                                 BNX2X_ERR("failed to configure vlan\n");
3343                                 return -EINVAL;
3344                         }
3345
3346                         /* configure default vlan to vf queue and set silent
3347                          * vlan removal (the vf remains unaware of this vlan).
3348                          */
3349                         update_params = &q_params.params.update;
3350                         __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
3351                                   &update_params->update_flags);
3352                         __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
3353                                   &update_params->update_flags);
3354                         update_params->def_vlan = vlan;
3355                 }
3356
3357                 /* Update the Queue state */
3358                 rc = bnx2x_queue_state_change(bp, &q_params);
3359                 if (rc) {
3360                         BNX2X_ERR("Failed to configure default VLAN\n");
3361                         return rc;
3362                 }
3363
3364                 /* clear the flag indicating that this VF needs its vlan
3365                  * (will only be set if the HV configured the vlan before the
3366                  * vf was up and we were called because the VF came up later)
3367                  */
3368                 vf->cfg_flags &= ~VF_CFG_VLAN;
3369
3370                 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
3371         }
3372         return 0;
3373 }
3374
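/* For reference: bnx2x_set_vf_vlan() above is the driver's
 * ndo_set_vf_vlan implementation, reached via, e.g.:
 *
 *	ip link set <pf-dev> vf <n> vlan <vid>
 *
 * A vid of 0 removes the forced tag, as handled above.
 */
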
3375 /* crc is the first field in the bulletin board. Compute the crc over the
3376  * entire bulletin board excluding the crc field itself. Use the length field
3377  * as the Bulletin Board was posted by a PF with possibly a different version
3378  * from the vf which will sample it. Therefore, the length is computed by the
3379  * PF and then used blindly by the VF.
3380  */
3381 u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
3382                           struct pf_vf_bulletin_content *bulletin)
3383 {
3384         return crc32(BULLETIN_CRC_SEED,
3385                  ((u8 *)bulletin) + sizeof(bulletin->crc),
3386                  bulletin->length - sizeof(bulletin->crc));
3387 }
3388
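/* Illustrative check (the retry loop in bnx2x_sample_bulletin() below
 * does the equivalent): a VF validates a locally sampled copy before
 * trusting it, since the PF may have been mid-post.
 */
#if 0
	struct pf_vf_bulletin_content copy = bp->pf2vf_bulletin->content;

	if (copy.crc != bnx2x_crc_vf_bulletin(bp, &copy))
		; /* corrupted sample - resample and retry */
#endif
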
3389 /* Check for new posts on the bulletin board */
3390 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
3391 {
3392         struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
3393         int attempts;
3394
3395         /* bulletin board hasn't changed since last sample */
3396         if (bp->old_bulletin.version == bulletin.version)
3397                 return PFVF_BULLETIN_UNCHANGED;
3398
3399         /* validate crc of new bulletin board */
3400         if (bp->old_bulletin.version != bp->pf2vf_bulletin->content.version) {
3401                 /* sampling the structure mid-post may result in corrupted
3402                  * data; validate the crc to ensure coherency.
3403                  */
3404                 for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
3405                         bulletin = bp->pf2vf_bulletin->content;
3406                         if (bulletin.crc == bnx2x_crc_vf_bulletin(bp,
3407                                                                   &bulletin))
3408                                 break;
3409                         BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n",
3410                                   bulletin.crc,
3411                                   bnx2x_crc_vf_bulletin(bp, &bulletin));
3412                 }
3413                 if (attempts >= BULLETIN_ATTEMPTS) {
3414                         BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
3415                                   attempts);
3416                         return PFVF_BULLETIN_CRC_ERR;
3417                 }
3418         }
3419
3420         /* the mac address in bulletin board is valid and is new */
3421         if ((bulletin.valid_bitmap & (1 << MAC_ADDR_VALID)) &&
3422             memcmp(bulletin.mac, bp->old_bulletin.mac, ETH_ALEN)) {
3423                 /* update new mac to net device */
3424                 memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
3425         }
3426
3427         /* the vlan in bulletin board is valid and is new */
3428         if (bulletin.valid_bitmap & (1 << VLAN_VALID))
3429                 memcpy(&bulletin.vlan, &bp->old_bulletin.vlan, VLAN_HLEN);
3430
3431         /* copy new bulletin board to bp */
3432         bp->old_bulletin = bulletin;
3433
3434         return PFVF_BULLETIN_UPDATED;
3435 }
3436
3437 void bnx2x_timer_sriov(struct bnx2x *bp)
3438 {
3439         bnx2x_sample_bulletin(bp);
3440
3441         /* if channel is down we need to self destruct */
3442         if (bp->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
3443                 smp_mb__before_clear_bit();
3444                 set_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
3445                         &bp->sp_rtnl_state);
3446                 smp_mb__after_clear_bit();
3447                 schedule_delayed_work(&bp->sp_rtnl_task, 0);
3448         }
3449 }
3450
3451 void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
3452 {
3453         /* vf doorbells are embedded within the regview */
3454         return bp->regview + PXP_VF_ADDR_DB_START;
3455 }
3456
3457 int bnx2x_vf_pci_alloc(struct bnx2x *bp)
3458 {
3459         mutex_init(&bp->vf2pf_mutex);
3460
3461         /* allocate vf2pf mailbox for vf to pf channel */
3462         BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping,
3463                         sizeof(struct bnx2x_vf_mbx_msg));
3464
3465         /* allocate pf 2 vf bulletin board */
3466         BNX2X_PCI_ALLOC(bp->pf2vf_bulletin, &bp->pf2vf_bulletin_mapping,
3467                         sizeof(union pf_vf_bulletin));
3468
3469         return 0;
3470
3471 alloc_mem_err:
3472         BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
3473                        sizeof(struct bnx2x_vf_mbx_msg));
3474         BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
3475                        sizeof(union pf_vf_bulletin));
3476         return -ENOMEM;
3477 }
3478
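/* Note on the seemingly unreachable alloc_mem_err label above:
 * BNX2X_PCI_ALLOC() hides the error branch. Conceptually (abbreviated
 * sketch; the exact macro lives elsewhere in the driver):
 *
 *	#define BNX2X_PCI_ALLOC(x, y, size)			\
 *	do {							\
 *		x = dma_alloc_coherent(&bp->pdev->dev, size,	\
 *				       y, GFP_KERNEL);		\
 *		if (x == NULL)					\
 *			goto alloc_mem_err;			\
 *		memset((void *)x, 0, size);			\
 *	} while (0)
 */
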
3479 int bnx2x_open_epilog(struct bnx2x *bp)
3480 {
3481         /* Enable sriov via delayed work. This must be done via delayed work
3482          * because it causes the probe of the vf devices to be run, which
3483          * invokes register_netdevice, which must have the rtnl lock taken.
3484          * As we are holding the lock right now, that could only work if the
3485          * probe would not take the lock. However, as the probe of the vf may
3486          * be called from other contexts as well (such as when passthrough to
3487          * a vm fails) it can't assume the lock is being held for it. Using
3488          * delayed work here allows the probe code to simply take the lock
3489          * (i.e. wait for it to be released if it is being held). We only do
3490          * this if the number of VFs was set before the PF driver was loaded.
3491          */
3492         if (IS_SRIOV(bp) && BNX2X_NR_VIRTFN(bp)) {
3493                 smp_mb__before_clear_bit();
3494                 set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
3495                 smp_mb__after_clear_bit();
3496                 schedule_delayed_work(&bp->sp_rtnl_task, 0);
3497         }
3498
3499         return 0;
3500 }
3501
3502 void bnx2x_iov_channel_down(struct bnx2x *bp)
3503 {
3504         int vf_idx;
3505         struct pf_vf_bulletin_content *bulletin;
3506
3507         if (!IS_SRIOV(bp))
3508                 return;
3509
3510         for_each_vf(bp, vf_idx) {
3511                 /* locate this VF's bulletin board and update the channel down
3512                  * bit
3513                  */
3514                 bulletin = BP_VF_BULLETIN(bp, vf_idx);
3515                 bulletin->valid_bitmap |= 1 << CHANNEL_DOWN;
3516
3517                 /* update vf bulletin board */
3518                 bnx2x_post_vf_bulletin(bp, vf_idx);
3519         }
3520 }