qed: IOV l2 functionality
[cascardo/linux.git] / drivers/net/ethernet/qlogic/qed/qed_sriov.c
1 /* QLogic qed NIC Driver
2  * Copyright (c) 2015 QLogic Corporation
3  *
4  * This software is available under the terms of the GNU General Public License
5  * (GPL) Version 2, available from the file COPYING in the main directory of
6  * this source tree.
7  */
8
9 #include <linux/etherdevice.h>
10 #include <linux/qed/qed_iov_if.h>
11 #include "qed_cxt.h"
12 #include "qed_hsi.h"
13 #include "qed_hw.h"
14 #include "qed_init_ops.h"
15 #include "qed_int.h"
16 #include "qed_mcp.h"
17 #include "qed_reg_addr.h"
18 #include "qed_sp.h"
19 #include "qed_sriov.h"
20 #include "qed_vf.h"
21
22 /* IOV ramrods */
23 static int qed_sp_vf_start(struct qed_hwfn *p_hwfn,
24                            u32 concrete_vfid, u16 opaque_vfid)
25 {
26         struct vf_start_ramrod_data *p_ramrod = NULL;
27         struct qed_spq_entry *p_ent = NULL;
28         struct qed_sp_init_data init_data;
29         int rc = -EINVAL;
30
31         /* Get SPQ entry */
32         memset(&init_data, 0, sizeof(init_data));
33         init_data.cid = qed_spq_get_cid(p_hwfn);
34         init_data.opaque_fid = opaque_vfid;
35         init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
36
37         rc = qed_sp_init_request(p_hwfn, &p_ent,
38                                  COMMON_RAMROD_VF_START,
39                                  PROTOCOLID_COMMON, &init_data);
40         if (rc)
41                 return rc;
42
43         p_ramrod = &p_ent->ramrod.vf_start;
44
45         p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
46         p_ramrod->opaque_fid = cpu_to_le16(opaque_vfid);
47
48         p_ramrod->personality = PERSONALITY_ETH;
49
50         return qed_spq_post(p_hwfn, p_ent, NULL);
51 }
52
53 static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
54                           u32 concrete_vfid, u16 opaque_vfid)
55 {
56         struct vf_stop_ramrod_data *p_ramrod = NULL;
57         struct qed_spq_entry *p_ent = NULL;
58         struct qed_sp_init_data init_data;
59         int rc = -EINVAL;
60
61         /* Get SPQ entry */
62         memset(&init_data, 0, sizeof(init_data));
63         init_data.cid = qed_spq_get_cid(p_hwfn);
64         init_data.opaque_fid = opaque_vfid;
65         init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
66
67         rc = qed_sp_init_request(p_hwfn, &p_ent,
68                                  COMMON_RAMROD_VF_STOP,
69                                  PROTOCOLID_COMMON, &init_data);
70         if (rc)
71                 return rc;
72
73         p_ramrod = &p_ent->ramrod.vf_stop;
74
75         p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
76
77         return qed_spq_post(p_hwfn, p_ent, NULL);
78 }
79
80 bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
81                            int rel_vf_id, bool b_enabled_only)
82 {
83         if (!p_hwfn->pf_iov_info) {
84                 DP_NOTICE(p_hwfn->cdev, "No iov info\n");
85                 return false;
86         }
87
88         if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
89             (rel_vf_id < 0))
90                 return false;
91
92         if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
93             b_enabled_only)
94                 return false;
95
96         return true;
97 }
98
99 static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
100                                                u16 relative_vf_id,
101                                                bool b_enabled_only)
102 {
103         struct qed_vf_info *vf = NULL;
104
105         if (!p_hwfn->pf_iov_info) {
106                 DP_NOTICE(p_hwfn->cdev, "No iov info\n");
107                 return NULL;
108         }
109
110         if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only))
111                 vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
112         else
113                 DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
114                        relative_vf_id);
115
116         return vf;
117 }
118
119 static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
120 {
121         struct qed_hw_sriov_info *iov = cdev->p_iov_info;
122         int pos = iov->pos;
123
124         DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
125         pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
126
127         pci_read_config_word(cdev->pdev,
128                              pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
129         pci_read_config_word(cdev->pdev,
130                              pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);
131
132         pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
133         if (iov->num_vfs) {
134                 DP_VERBOSE(cdev,
135                            QED_MSG_IOV,
136                            "Number of VFs are already set to non-zero value. Ignoring PCI configuration value\n");
137                 iov->num_vfs = 0;
138         }
139
140         pci_read_config_word(cdev->pdev,
141                              pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
142
143         pci_read_config_word(cdev->pdev,
144                              pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
145
146         pci_read_config_word(cdev->pdev,
147                              pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);
148
149         pci_read_config_dword(cdev->pdev,
150                               pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
151
152         pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);
153
154         pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
155
156         DP_VERBOSE(cdev,
157                    QED_MSG_IOV,
158                    "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
159                    iov->nres,
160                    iov->cap,
161                    iov->ctrl,
162                    iov->total_vfs,
163                    iov->initial_vfs,
164                    iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
165
166         /* Some sanity checks */
167         if (iov->num_vfs > NUM_OF_VFS(cdev) ||
168             iov->total_vfs > NUM_OF_VFS(cdev)) {
169                 /* This can happen only due to a bug. In this case we set
170                  * num_vfs to zero to avoid memory corruption in the code that
171                  * assumes max number of vfs
172                  */
173                 DP_NOTICE(cdev,
174                           "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n",
175                           iov->num_vfs);
176
177                 iov->num_vfs = 0;
178                 iov->total_vfs = 0;
179         }
180
181         return 0;
182 }
183
184 static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn,
185                                         struct qed_ptt *p_ptt)
186 {
187         struct qed_igu_block *p_sb;
188         u16 sb_id;
189         u32 val;
190
191         if (!p_hwfn->hw_info.p_igu_info) {
192                 DP_ERR(p_hwfn,
193                        "qed_iov_clear_vf_igu_blocks IGU Info not initialized\n");
194                 return;
195         }
196
197         for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
198              sb_id++) {
199                 p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
200                 if ((p_sb->status & QED_IGU_STATUS_FREE) &&
201                     !(p_sb->status & QED_IGU_STATUS_PF)) {
202                         val = qed_rd(p_hwfn, p_ptt,
203                                      IGU_REG_MAPPING_MEMORY + sb_id * 4);
204                         SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
205                         qed_wr(p_hwfn, p_ptt,
206                                IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
207                 }
208         }
209 }
210
211 static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
212 {
213         struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
214         struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
215         struct qed_bulletin_content *p_bulletin_virt;
216         dma_addr_t req_p, rply_p, bulletin_p;
217         union pfvf_tlvs *p_reply_virt_addr;
218         union vfpf_tlvs *p_req_virt_addr;
219         u8 idx = 0;
220
221         memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));
222
223         p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
224         req_p = p_iov_info->mbx_msg_phys_addr;
225         p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
226         rply_p = p_iov_info->mbx_reply_phys_addr;
227         p_bulletin_virt = p_iov_info->p_bulletins;
228         bulletin_p = p_iov_info->bulletins_phys;
229         if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
230                 DP_ERR(p_hwfn,
231                        "qed_iov_setup_vfdb called without allocating mem first\n");
232                 return;
233         }
234
235         for (idx = 0; idx < p_iov->total_vfs; idx++) {
236                 struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
237                 u32 concrete;
238
239                 vf->vf_mbx.req_virt = p_req_virt_addr + idx;
240                 vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
241                 vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
242                 vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);
243
244                 vf->state = VF_STOPPED;
245                 vf->b_init = false;
246
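                /* Point this VF at its private slot inside the single
                 * DMA-coherent bulletin buffer allocated in
                 * qed_iov_allocate_vfdb().
                 */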
247                 vf->bulletin.phys = idx *
248                                     sizeof(struct qed_bulletin_content) +
249                                     bulletin_p;
250                 vf->bulletin.p_virt = p_bulletin_virt + idx;
251                 vf->bulletin.size = sizeof(struct qed_bulletin_content);
252
253                 vf->relative_vf_id = idx;
254                 vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
255                 concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
256                 vf->concrete_fid = concrete;
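                /* Compose the VF's opaque FID: keep the low byte of the PF's
                 * opaque FID and place the absolute VF id in bits 8-15.
                 */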
257                 vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
258                                  (vf->abs_vf_id << 8);
259                 vf->vport_id = idx + 1;
260         }
261 }
262
263 static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
264 {
265         struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
266         void **p_v_addr;
267         u16 num_vfs = 0;
268
269         num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
270
271         DP_VERBOSE(p_hwfn, QED_MSG_IOV,
272                    "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);
273
274         /* Allocate PF Mailbox buffer (per-VF) */
275         p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
276         p_v_addr = &p_iov_info->mbx_msg_virt_addr;
277         *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
278                                        p_iov_info->mbx_msg_size,
279                                        &p_iov_info->mbx_msg_phys_addr,
280                                        GFP_KERNEL);
281         if (!*p_v_addr)
282                 return -ENOMEM;
283
284         /* Allocate PF Mailbox Reply buffer (per-VF) */
285         p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
286         p_v_addr = &p_iov_info->mbx_reply_virt_addr;
287         *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
288                                        p_iov_info->mbx_reply_size,
289                                        &p_iov_info->mbx_reply_phys_addr,
290                                        GFP_KERNEL);
291         if (!*p_v_addr)
292                 return -ENOMEM;
293
294         p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
295                                      num_vfs;
296         p_v_addr = &p_iov_info->p_bulletins;
297         *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
298                                        p_iov_info->bulletins_size,
299                                        &p_iov_info->bulletins_phys,
300                                        GFP_KERNEL);
301         if (!*p_v_addr)
302                 return -ENOMEM;
303
304         DP_VERBOSE(p_hwfn,
305                    QED_MSG_IOV,
306                    "PF's Requests mailbox [%p virt 0x%llx phys],  Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
307                    p_iov_info->mbx_msg_virt_addr,
308                    (u64) p_iov_info->mbx_msg_phys_addr,
309                    p_iov_info->mbx_reply_virt_addr,
310                    (u64) p_iov_info->mbx_reply_phys_addr,
311                    p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);
312
313         return 0;
314 }
315
316 static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
317 {
318         struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
319
320         if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
321                 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
322                                   p_iov_info->mbx_msg_size,
323                                   p_iov_info->mbx_msg_virt_addr,
324                                   p_iov_info->mbx_msg_phys_addr);
325
326         if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
327                 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
328                                   p_iov_info->mbx_reply_size,
329                                   p_iov_info->mbx_reply_virt_addr,
330                                   p_iov_info->mbx_reply_phys_addr);
331
332         if (p_iov_info->p_bulletins)
333                 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
334                                   p_iov_info->bulletins_size,
335                                   p_iov_info->p_bulletins,
336                                   p_iov_info->bulletins_phys);
337 }
338
339 int qed_iov_alloc(struct qed_hwfn *p_hwfn)
340 {
341         struct qed_pf_iov *p_sriov;
342
343         if (!IS_PF_SRIOV(p_hwfn)) {
344                 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
345                            "No SR-IOV - no need for IOV db\n");
346                 return 0;
347         }
348
349         p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
350         if (!p_sriov) {
351                 DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n");
352                 return -ENOMEM;
353         }
354
355         p_hwfn->pf_iov_info = p_sriov;
356
357         return qed_iov_allocate_vfdb(p_hwfn);
358 }
359
360 void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
361 {
362         if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
363                 return;
364
365         qed_iov_setup_vfdb(p_hwfn);
366         qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
367 }
368
369 void qed_iov_free(struct qed_hwfn *p_hwfn)
370 {
371         if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
372                 qed_iov_free_vfdb(p_hwfn);
373                 kfree(p_hwfn->pf_iov_info);
374         }
375 }
376
377 void qed_iov_free_hw_info(struct qed_dev *cdev)
378 {
379         kfree(cdev->p_iov_info);
380         cdev->p_iov_info = NULL;
381 }
382
383 int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
384 {
385         struct qed_dev *cdev = p_hwfn->cdev;
386         int pos;
387         int rc;
388
389         if (IS_VF(p_hwfn->cdev))
390                 return 0;
391
392         /* Learn the PCI configuration */
393         pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
394                                       PCI_EXT_CAP_ID_SRIOV);
395         if (!pos) {
396                 DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
397                 return 0;
398         }
399
400         /* Allocate a new struct for IOV information */
401         cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
402         if (!cdev->p_iov_info) {
403                 DP_NOTICE(p_hwfn, "Can't support IOV due to lack of memory\n");
404                 return -ENOMEM;
405         }
406         cdev->p_iov_info->pos = pos;
407
408         rc = qed_iov_pci_cfg_info(cdev);
409         if (rc)
410                 return rc;
411
412         /* We want PF IOV to be synonymous with the existence of p_iov_info;
413          * In case the capability is published but there are no VFs, simply
414          * de-allocate the struct.
415          */
416         if (!cdev->p_iov_info->total_vfs) {
417                 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
418                            "IOV capabilities, but no VFs are published\n");
419                 kfree(cdev->p_iov_info);
420                 cdev->p_iov_info = NULL;
421                 return 0;
422         }
423
424         /* Calculate the first VF index - this is a bit tricky; Basically,
425          * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
426          * after the first engine's VFs.
427          */
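        /* Worked example (illustrative values, not taken from this driver):
         * with a PCI SR-IOV offset of 16 and abs_pf_id 0 on the first
         * engine, first_vf_in_pf = 16 + 0 - 16 = 0.
         */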
428         cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset +
429                                            p_hwfn->abs_pf_id - 16;
430         if (QED_PATH_ID(p_hwfn))
431                 cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
432
433         DP_VERBOSE(p_hwfn, QED_MSG_IOV,
434                    "First VF in hwfn 0x%08x\n",
435                    cdev->p_iov_info->first_vf_in_pf);
436
437         return 0;
438 }
439
440 static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
441 {
442         /* Check PF supports sriov */
443         if (!IS_QED_SRIOV(p_hwfn->cdev) || !IS_PF_SRIOV_ALLOC(p_hwfn))
444                 return false;
445
446         /* Check VF validity */
447         if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
448             !IS_PF_SRIOV_ALLOC(p_hwfn))
449                 return false;
450
451         return true;
452 }
453
454 static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
455                                       u16 rel_vf_id, u8 to_disable)
456 {
457         struct qed_vf_info *vf;
458         int i;
459
460         for_each_hwfn(cdev, i) {
461                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
462
463                 vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
464                 if (!vf)
465                         continue;
466
467                 vf->to_disable = to_disable;
468         }
469 }
470
471 void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
472 {
473         u16 i;
474
475         if (!IS_QED_SRIOV(cdev))
476                 return;
477
478         for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
479                 qed_iov_set_vf_to_disable(cdev, i, to_disable);
480 }
481
482 static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
483                                        struct qed_ptt *p_ptt, u8 abs_vfid)
484 {
485         qed_wr(p_hwfn, p_ptt,
486                PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
487                1 << (abs_vfid & 0x1f));
488 }
489
490 static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
491                                  struct qed_ptt *p_ptt, struct qed_vf_info *vf)
492 {
493         u16 igu_sb_id;
494         int i;
495
496         /* Set VF masks and configuration - pretend */
497         qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
498
499         qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);
500
501         DP_VERBOSE(p_hwfn, QED_MSG_IOV,
502                    "value in VF_CONFIGURATION of vf %d after write %x\n",
503                    vf->abs_vf_id,
504                    qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION));
505
506         /* unpretend */
507         qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
508
509         /* iterate over all queues, clear sb consumer */
510         for (i = 0; i < vf->num_sbs; i++) {
511                 igu_sb_id = vf->igu_sbs[i];
512                 /* Set then clear... */
513                 qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1,
514                                        vf->opaque_fid);
515                 qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0,
516                                        vf->opaque_fid);
517         }
518 }
519
520 static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
521                                    struct qed_ptt *p_ptt,
522                                    struct qed_vf_info *vf, bool enable)
523 {
524         u32 igu_vf_conf;
525
526         qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
527
528         igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);
529
530         if (enable)
531                 igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
532         else
533                 igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;
534
535         qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);
536
537         /* unpretend */
538         qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
539 }
540
541 static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
542                                     struct qed_ptt *p_ptt,
543                                     struct qed_vf_info *vf)
544 {
545         u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
546         int rc;
547
548         if (vf->to_disable)
549                 return 0;
550
551         DP_VERBOSE(p_hwfn,
552                    QED_MSG_IOV,
553                    "Enable internal access for vf %x [abs %x]\n",
554                    vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));
555
556         qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));
557
558         rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs);
559         if (rc)
560                 return rc;
561
562         qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
563
564         SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
565         STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);
566
567         qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
568                      p_hwfn->hw_info.hw_mode);
569
570         /* unpretend */
571         qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
572
573         if (vf->state != VF_STOPPED) {
574                 DP_NOTICE(p_hwfn, "VF[%02x] is already started\n",
575                           vf->abs_vf_id);
576                 return -EINVAL;
577         }
578
579         /* Start VF */
580         rc = qed_sp_vf_start(p_hwfn, vf->concrete_fid, vf->opaque_fid);
581         if (rc)
582                 DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
583
584         vf->state = VF_FREE;
585
586         return rc;
587 }
588
589 /**
590  * @brief qed_iov_config_perm_table - configure the permission
591  *      zone table.
592  *      In E4, queue zone permission table size is 320x9. There
593  *      are 320 VF queues for single engine device (256 for dual
594  *      engine device), and each entry has the following format:
595  *      {Valid, VF[7:0]}
596  * @param p_hwfn
597  * @param p_ptt
598  * @param vf
599  * @param enable
600  */
601 static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
602                                       struct qed_ptt *p_ptt,
603                                       struct qed_vf_info *vf, u8 enable)
604 {
605         u32 reg_addr, val;
606         u16 qzone_id = 0;
607         int qid;
608
609         for (qid = 0; qid < vf->num_rxqs; qid++) {
610                 qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
611                                 &qzone_id);
612
613                 reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
614                 val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
615                 qed_wr(p_hwfn, p_ptt, reg_addr, val);
616         }
617 }
618
619 static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn,
620                                       struct qed_ptt *p_ptt,
621                                       struct qed_vf_info *vf)
622 {
623         /* Reset vf in IGU - interrupts are still disabled */
624         qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
625
626         qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);
627
628         /* Permission Table */
629         qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
630 }
631
632 static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
633                                    struct qed_ptt *p_ptt,
634                                    struct qed_vf_info *vf, u16 num_rx_queues)
635 {
636         struct qed_igu_block *igu_blocks;
637         int qid = 0, igu_id = 0;
638         u32 val = 0;
639
640         igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;
641
642         if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
643                 num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
644         p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;
645
646         SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
647         SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
648         SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
649
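        /* Each free IGU CAM line claimed in the loop below is re-pointed at
         * the VF: function number = absolute VF id, PF-valid cleared, and
         * the per-queue vector number filled in per iteration.
         */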
650         while ((qid < num_rx_queues) &&
651                (igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev))) {
652                 if (igu_blocks[igu_id].status & QED_IGU_STATUS_FREE) {
653                         struct cau_sb_entry sb_entry;
654
655                         vf->igu_sbs[qid] = (u16)igu_id;
656                         igu_blocks[igu_id].status &= ~QED_IGU_STATUS_FREE;
657
658                         SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
659
660                         qed_wr(p_hwfn, p_ptt,
661                                IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
662                                val);
663
664                         /* Configure igu sb in CAU which was marked valid */
665                         qed_init_cau_sb_entry(p_hwfn, &sb_entry,
666                                               p_hwfn->rel_pf_id,
667                                               vf->abs_vf_id, 1);
668                         qed_dmae_host2grc(p_hwfn, p_ptt,
669                                           (u64)(uintptr_t)&sb_entry,
670                                           CAU_REG_SB_VAR_MEMORY +
671                                           igu_id * sizeof(u64), 2, 0);
672                         qid++;
673                 }
674                 igu_id++;
675         }
676
677         vf->num_sbs = (u8) num_rx_queues;
678
679         return vf->num_sbs;
680 }
681
682 static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
683                                     struct qed_ptt *p_ptt,
684                                     struct qed_vf_info *vf)
685 {
686         struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
687         int idx, igu_id;
688         u32 addr, val;
689
690         /* Invalidate igu CAM lines and mark them as free */
691         for (idx = 0; idx < vf->num_sbs; idx++) {
692                 igu_id = vf->igu_sbs[idx];
693                 addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;
694
695                 val = qed_rd(p_hwfn, p_ptt, addr);
696                 SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
697                 qed_wr(p_hwfn, p_ptt, addr, val);
698
699                 p_info->igu_map.igu_blocks[igu_id].status |=
700                     QED_IGU_STATUS_FREE;
701
702                 p_hwfn->hw_info.p_igu_info->free_blks++;
703         }
704
705         vf->num_sbs = 0;
706 }
707
708 static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
709                                   struct qed_ptt *p_ptt,
710                                   u16 rel_vf_id, u16 num_rx_queues)
711 {
712         u8 num_of_vf_avaiable_chains = 0;
713         struct qed_vf_info *vf = NULL;
714         int rc = 0;
715         u32 cids;
716         u8 i;
717
718         vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
719         if (!vf) {
720                 DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
721                 return -EINVAL;
722         }
723
724         if (vf->b_init) {
725                 DP_NOTICE(p_hwfn, "VF[%d] is already active.\n", rel_vf_id);
726                 return -EINVAL;
727         }
728
729         /* Limit number of queues according to number of CIDs */
730         qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
731         DP_VERBOSE(p_hwfn,
732                    QED_MSG_IOV,
733                    "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
734                    vf->relative_vf_id, num_rx_queues, (u16) cids);
735         num_rx_queues = min_t(u16, num_rx_queues, ((u16) cids));
736
737         num_of_vf_avaiable_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
738                                                              p_ptt,
739                                                              vf,
740                                                              num_rx_queues);
741         if (!num_of_vf_avaiable_chains) {
742                 DP_ERR(p_hwfn, "no available igu sbs\n");
743                 return -ENOMEM;
744         }
745
746         /* Choose queue number and index ranges */
747         vf->num_rxqs = num_of_vf_avaiable_chains;
748         vf->num_txqs = num_of_vf_avaiable_chains;
749
750         for (i = 0; i < vf->num_rxqs; i++) {
751                 u16 queue_id = qed_int_queue_id_from_sb_id(p_hwfn,
752                                                            vf->igu_sbs[i]);
753
754                 if (queue_id > RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
755                         DP_NOTICE(p_hwfn,
756                                   "VF[%d] will require utilizing of out-of-bounds queues - %04x\n",
757                                   vf->relative_vf_id, queue_id);
758                         return -EINVAL;
759                 }
760
761                 /* CIDs are per-VF, so no problem having them 0-based. */
762                 vf->vf_queues[i].fw_rx_qid = queue_id;
763                 vf->vf_queues[i].fw_tx_qid = queue_id;
764                 vf->vf_queues[i].fw_cid = i;
765
766                 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
767                            "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n",
768                            vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);
769         }
770         rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
771         if (!rc) {
772                 vf->b_init = true;
773
774                 if (IS_LEAD_HWFN(p_hwfn))
775                         p_hwfn->cdev->p_iov_info->num_vfs++;
776         }
777
778         return rc;
779 }
780
781 static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
782                                      struct qed_ptt *p_ptt, u16 rel_vf_id)
783 {
784         struct qed_vf_info *vf = NULL;
785         int rc = 0;
786
787         vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
788         if (!vf) {
789                 DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
790                 return -EINVAL;
791         }
792
793         if (vf->state != VF_STOPPED) {
794                 /* Stopping the VF */
795                 rc = qed_sp_vf_stop(p_hwfn, vf->concrete_fid, vf->opaque_fid);
796
797                 if (rc != 0) {
798                         DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
799                                rc);
800                         return rc;
801                 }
802
803                 vf->state = VF_STOPPED;
804         }
805
806         /* Disabling interrupts and resetting the permission table were done during
807          * vf-close; however, we could get here without going through vf_close.
808          */
809         /* Disable Interrupts for VF */
810         qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
811
812         /* Reset Permission table */
813         qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
814
815         vf->num_rxqs = 0;
816         vf->num_txqs = 0;
817         qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);
818
819         if (vf->b_init) {
820                 vf->b_init = false;
821
822                 if (IS_LEAD_HWFN(p_hwfn))
823                         p_hwfn->cdev->p_iov_info->num_vfs--;
824         }
825
826         return 0;
827 }
828
829 static bool qed_iov_tlv_supported(u16 tlvtype)
830 {
831         return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
832 }
833
834 /* place a given tlv on the tlv buffer, continuing current tlv list */
835 void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
836 {
837         struct channel_tlv *tl = (struct channel_tlv *)*offset;
838
839         tl->type = type;
840         tl->length = length;
841
842         /* Offset should keep pointing to next TLV (the end of the last) */
843         *offset += length;
844
845         /* Return a pointer to the start of the added tlv */
846         return *offset - length;
847 }
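
/* A reply built with qed_add_tlv() is a flat chain of { type, length,
 * payload } entries terminated by a CHANNEL_TLV_LIST_END entry, as done in
 * qed_iov_prepare_resp() below.
 */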
848
849 /* list the types and lengths of the tlvs on the buffer */
850 void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
851 {
852         u16 i = 1, total_length = 0;
853         struct channel_tlv *tlv;
854
855         do {
856                 tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);
857
858                 /* output tlv */
859                 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
860                            "TLV number %d: type %d, length %d\n",
861                            i, tlv->type, tlv->length);
862
863                 if (tlv->type == CHANNEL_TLV_LIST_END)
864                         return;
865
866                 /* Validate entry - protect against malicious VFs */
867                 if (!tlv->length) {
868                         DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
869                         return;
870                 }
871
872                 total_length += tlv->length;
873
874                 if (total_length >= sizeof(struct tlv_buffer_size)) {
875                         DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
876                         return;
877                 }
878
879                 i++;
880         } while (1);
881 }
882
883 static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
884                                   struct qed_ptt *p_ptt,
885                                   struct qed_vf_info *p_vf,
886                                   u16 length, u8 status)
887 {
888         struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
889         struct qed_dmae_params params;
890         u8 eng_vf_id;
891
892         mbx->reply_virt->default_resp.hdr.status = status;
893
894         qed_dp_tlv_list(p_hwfn, mbx->reply_virt);
895
896         eng_vf_id = p_vf->abs_vf_id;
897
898         memset(&params, 0, sizeof(struct qed_dmae_params));
899         params.flags = QED_DMAE_FLAG_VF_DST;
900         params.dst_vfid = eng_vf_id;
901
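        /* Copy the bulk of the reply first and the leading 8-byte header
         * (which carries the status) last, presumably so that a VF polling
         * on the header never observes a partially written reply.
         */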
902         qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
903                            mbx->req_virt->first_tlv.reply_address +
904                            sizeof(u64),
905                            (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
906                            &params);
907
908         qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
909                            mbx->req_virt->first_tlv.reply_address,
910                            sizeof(u64) / 4, &params);
911
912         REG_WR(p_hwfn,
913                GTT_BAR0_MAP_REG_USDM_RAM +
914                USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
915 }
916
917 static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
918                                 enum qed_iov_vport_update_flag flag)
919 {
920         switch (flag) {
921         case QED_IOV_VP_UPDATE_ACTIVATE:
922                 return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
923         case QED_IOV_VP_UPDATE_MCAST:
924                 return CHANNEL_TLV_VPORT_UPDATE_MCAST;
925         case QED_IOV_VP_UPDATE_ACCEPT_PARAM:
926                 return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
927         case QED_IOV_VP_UPDATE_RSS:
928                 return CHANNEL_TLV_VPORT_UPDATE_RSS;
929         default:
930                 return 0;
931         }
932 }
933
934 static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
935                                             struct qed_vf_info *p_vf,
936                                             struct qed_iov_vf_mbx *p_mbx,
937                                             u8 status,
938                                             u16 tlvs_mask, u16 tlvs_accepted)
939 {
940         struct pfvf_def_resp_tlv *resp;
941         u16 size, total_len, i;
942
943         memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
944         p_mbx->offset = (u8 *)p_mbx->reply_virt;
945         size = sizeof(struct pfvf_def_resp_tlv);
946         total_len = size;
947
948         qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
949
950         /* Prepare response for all extended tlvs if they are found by PF */
951         for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
952                 if (!(tlvs_mask & (1 << i)))
953                         continue;
954
955                 resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
956                                    qed_iov_vport_to_tlv(p_hwfn, i), size);
957
958                 if (tlvs_accepted & (1 << i))
959                         resp->hdr.status = status;
960                 else
961                         resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;
962
963                 DP_VERBOSE(p_hwfn,
964                            QED_MSG_IOV,
965                            "VF[%d] - vport_update response: TLV %d, status %02x\n",
966                            p_vf->relative_vf_id,
967                            qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);
968
969                 total_len += size;
970         }
971
972         qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
973                     sizeof(struct channel_list_end_tlv));
974
975         return total_len;
976 }
977
978 static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
979                                  struct qed_ptt *p_ptt,
980                                  struct qed_vf_info *vf_info,
981                                  u16 type, u16 length, u8 status)
982 {
983         struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;
984
985         mbx->offset = (u8 *)mbx->reply_virt;
986
987         qed_add_tlv(p_hwfn, &mbx->offset, type, length);
988         qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
989                     sizeof(struct channel_list_end_tlv));
990
991         qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
992 }
993
994 struct qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
995                                                       u16 relative_vf_id,
996                                                       bool b_enabled_only)
997 {
998         struct qed_vf_info *vf = NULL;
999
1000         vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
1001         if (!vf)
1002                 return NULL;
1003
1004         return &vf->p_vf_info;
1005 }
1006
1007 void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
1008 {
1009         struct qed_public_vf_info *vf_info;
1010
1011         vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);
1012
1013         if (!vf_info)
1014                 return;
1015
1016         /* Clear the VF mac */
1017         memset(vf_info->mac, 0, ETH_ALEN);
1018 }
1019
1020 static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
1021                                struct qed_vf_info *p_vf)
1022 {
1023         u32 i;
1024
1025         p_vf->vf_bulletin = 0;
1026         p_vf->vport_instance = 0;
1027         p_vf->num_mac_filters = 0;
1028         p_vf->num_vlan_filters = 0;
1029
1030         /* If VF previously requested fewer resources, go back to default */
1031         p_vf->num_rxqs = p_vf->num_sbs;
1032         p_vf->num_txqs = p_vf->num_sbs;
1033
1034         p_vf->num_active_rxqs = 0;
1035
1036         for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++)
1037                 p_vf->vf_queues[i].rxq_active = 0;
1038
1039         qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
1040 }
1041
1042 static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
1043                                    struct qed_ptt *p_ptt,
1044                                    struct qed_vf_info *vf)
1045 {
1046         struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1047         struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
1048         struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
1049         struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
1050         u8 i, vfpf_status = PFVF_STATUS_SUCCESS;
1051         struct pf_vf_resc *resc = &resp->resc;
1052
1053         /* Validate FW compatibility */
1054         if (req->vfdev_info.fw_major != FW_MAJOR_VERSION ||
1055             req->vfdev_info.fw_minor != FW_MINOR_VERSION ||
1056             req->vfdev_info.fw_revision != FW_REVISION_VERSION ||
1057             req->vfdev_info.fw_engineering != FW_ENGINEERING_VERSION) {
1058                 DP_INFO(p_hwfn,
1059                         "VF[%d] is running an incompatible driver [VF needs FW %02x:%02x:%02x:%02x but Hypervisor is using %02x:%02x:%02x:%02x]\n",
1060                         vf->abs_vf_id,
1061                         req->vfdev_info.fw_major,
1062                         req->vfdev_info.fw_minor,
1063                         req->vfdev_info.fw_revision,
1064                         req->vfdev_info.fw_engineering,
1065                         FW_MAJOR_VERSION,
1066                         FW_MINOR_VERSION,
1067                         FW_REVISION_VERSION, FW_ENGINEERING_VERSION);
1068                 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
1069                 goto out;
1070         }
1071
1072         /* On 100g PFs, prevent old VFs from loading */
1073         if ((p_hwfn->cdev->num_hwfns > 1) &&
1074             !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
1075                 DP_INFO(p_hwfn,
1076                         "VF[%d] is running an old driver that doesn't support 100g\n",
1077                         vf->abs_vf_id);
1078                 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
1079                 goto out;
1080         }
1081
1082         memset(resp, 0, sizeof(*resp));
1083
1084         /* Fill in vf info stuff */
1085         vf->opaque_fid = req->vfdev_info.opaque_fid;
1086         vf->num_mac_filters = 1;
1087         vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
1088
1089         vf->vf_bulletin = req->bulletin_addr;
1090         vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
1091                             vf->bulletin.size : req->bulletin_size;
1092
1093         /* fill in pfdev info */
1094         pfdev_info->chip_num = p_hwfn->cdev->chip_num;
1095         pfdev_info->db_size = 0;
1096         pfdev_info->indices_per_sb = PIS_PER_SB;
1097
1098         pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
1099                                    PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
1100         if (p_hwfn->cdev->num_hwfns > 1)
1101                 pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
1102
1103         pfdev_info->stats_info.mstats.address =
1104             PXP_VF_BAR0_START_MSDM_ZONE_B +
1105             offsetof(struct mstorm_vf_zone, non_trigger.eth_queue_stat);
1106         pfdev_info->stats_info.mstats.len =
1107             sizeof(struct eth_mstorm_per_queue_stat);
1108
1109         pfdev_info->stats_info.ustats.address =
1110             PXP_VF_BAR0_START_USDM_ZONE_B +
1111             offsetof(struct ustorm_vf_zone, non_trigger.eth_queue_stat);
1112         pfdev_info->stats_info.ustats.len =
1113             sizeof(struct eth_ustorm_per_queue_stat);
1114
1115         pfdev_info->stats_info.pstats.address =
1116             PXP_VF_BAR0_START_PSDM_ZONE_B +
1117             offsetof(struct pstorm_vf_zone, non_trigger.eth_queue_stat);
1118         pfdev_info->stats_info.pstats.len =
1119             sizeof(struct eth_pstorm_per_queue_stat);
1120
1121         pfdev_info->stats_info.tstats.address = 0;
1122         pfdev_info->stats_info.tstats.len = 0;
1123
1124         memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
1125
1126         pfdev_info->fw_major = FW_MAJOR_VERSION;
1127         pfdev_info->fw_minor = FW_MINOR_VERSION;
1128         pfdev_info->fw_rev = FW_REVISION_VERSION;
1129         pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
1130         pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
1131         qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);
1132
1133         pfdev_info->dev_type = p_hwfn->cdev->type;
1134         pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;
1135
1136         resc->num_rxqs = vf->num_rxqs;
1137         resc->num_txqs = vf->num_txqs;
1138         resc->num_sbs = vf->num_sbs;
1139         for (i = 0; i < resc->num_sbs; i++) {
1140                 resc->hw_sbs[i].hw_sb_id = vf->igu_sbs[i];
1141                 resc->hw_sbs[i].sb_qid = 0;
1142         }
1143
1144         for (i = 0; i < resc->num_rxqs; i++) {
1145                 qed_fw_l2_queue(p_hwfn, vf->vf_queues[i].fw_rx_qid,
1146                                 (u16 *)&resc->hw_qid[i]);
1147                 resc->cid[i] = vf->vf_queues[i].fw_cid;
1148         }
1149
1150         resc->num_mac_filters = min_t(u8, vf->num_mac_filters,
1151                                       req->resc_request.num_mac_filters);
1152         resc->num_vlan_filters = min_t(u8, vf->num_vlan_filters,
1153                                        req->resc_request.num_vlan_filters);
1154
1155         /* This isn't really required as VF isn't limited, but some VFs might
1156          * actually test this value, so we need to provide it.
1157          */
1158         resc->num_mc_filters = req->resc_request.num_mc_filters;
1159
1160         /* Fill agreed size of bulletin board in response */
1161         resp->bulletin_size = vf->bulletin.size;
1162
1163         DP_VERBOSE(p_hwfn,
1164                    QED_MSG_IOV,
1165                    "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
1166                    "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
1167                    vf->abs_vf_id,
1168                    resp->pfdev_info.chip_num,
1169                    resp->pfdev_info.db_size,
1170                    resp->pfdev_info.indices_per_sb,
1171                    resp->pfdev_info.capabilities,
1172                    resc->num_rxqs,
1173                    resc->num_txqs,
1174                    resc->num_sbs,
1175                    resc->num_mac_filters,
1176                    resc->num_vlan_filters);
1177         vf->state = VF_ACQUIRED;
1178
1179         /* Prepare Response */
1180 out:
1181         qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
1182                              sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
1183 }
1184
1185 static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
1186                                        struct qed_ptt *p_ptt,
1187                                        struct qed_vf_info *vf)
1188 {
1189         struct qed_sp_vport_start_params params = { 0 };
1190         struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1191         struct vfpf_vport_start_tlv *start;
1192         u8 status = PFVF_STATUS_SUCCESS;
1193         struct qed_vf_info *vf_info;
1194         int sb_id;
1195         int rc;
1196
1197         vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
1198         if (!vf_info) {
1199                 DP_NOTICE(p_hwfn->cdev,
1200                           "Failed to get VF info, invalid vfid [%d]\n",
1201                           vf->relative_vf_id);
1202                 return;
1203         }
1204
1205         vf->state = VF_ENABLED;
1206         start = &mbx->req_virt->start_vport;
1207
1208         /* Initialize Status block in CAU */
1209         for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
1210                 if (!start->sb_addr[sb_id]) {
1211                         DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1212                                    "VF[%d] did not fill the address of SB %d\n",
1213                                    vf->relative_vf_id, sb_id);
1214                         break;
1215                 }
1216
1217                 qed_int_cau_conf_sb(p_hwfn, p_ptt,
1218                                     start->sb_addr[sb_id],
1219                                     vf->igu_sbs[sb_id],
1220                                     vf->abs_vf_id, 1);
1221         }
1222         qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
1223
1224         vf->mtu = start->mtu;
1225
1226         params.tpa_mode = start->tpa_mode;
1227         params.remove_inner_vlan = start->inner_vlan_removal;
1228
1229         params.drop_ttl0 = false;
1230         params.concrete_fid = vf->concrete_fid;
1231         params.opaque_fid = vf->opaque_fid;
1232         params.vport_id = vf->vport_id;
1233         params.max_buffers_per_cqe = start->max_buffers_per_cqe;
1234         params.mtu = vf->mtu;
1235
1236         rc = qed_sp_eth_vport_start(p_hwfn, &params);
1237         if (rc != 0) {
1238                 DP_ERR(p_hwfn,
1239                        "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
1240                 status = PFVF_STATUS_FAILURE;
1241         } else {
1242                 vf->vport_instance++;
1243         }
1244         qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
1245                              sizeof(struct pfvf_def_resp_tlv), status);
1246 }
1247
1248 static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
1249                                       struct qed_ptt *p_ptt,
1250                                       struct qed_vf_info *vf)
1251 {
1252         u8 status = PFVF_STATUS_SUCCESS;
1253         int rc;
1254
1255         vf->vport_instance--;
1256
1257         rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
1258         if (rc != 0) {
1259                 DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
1260                        rc);
1261                 status = PFVF_STATUS_FAILURE;
1262         }
1263
1264         qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
1265                              sizeof(struct pfvf_def_resp_tlv), status);
1266 }
1267
1268 #define TSTORM_QZONE_START   PXP_VF_BAR0_START_SDM_ZONE_A
1269 #define MSTORM_QZONE_START(dev)   (TSTORM_QZONE_START + \
1270                                    (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
1271
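/* The offset returned to the VF in the START_RXQ response below points at
 * the rx_producers member of its hardware queue's MSTORM queue zone, i.e.
 * the BAR0 location the VF uses for its Rx producer updates.
 */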
1272 static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
1273                                           struct qed_ptt *p_ptt,
1274                                           struct qed_vf_info *vf, u8 status)
1275 {
1276         struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1277         struct pfvf_start_queue_resp_tlv *p_tlv;
1278         struct vfpf_start_rxq_tlv *req;
1279
1280         mbx->offset = (u8 *)mbx->reply_virt;
1281
1282         p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
1283                             sizeof(*p_tlv));
1284         qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
1285                     sizeof(struct channel_list_end_tlv));
1286
1287         /* Update the TLV with the response */
1288         if (status == PFVF_STATUS_SUCCESS) {
1289                 u16 hw_qid = 0;
1290
1291                 req = &mbx->req_virt->start_rxq;
1292                 qed_fw_l2_queue(p_hwfn, vf->vf_queues[req->rx_qid].fw_rx_qid,
1293                                 &hw_qid);
1294
1295                 p_tlv->offset = MSTORM_QZONE_START(p_hwfn->cdev) +
1296                                 hw_qid * MSTORM_QZONE_SIZE +
1297                                 offsetof(struct mstorm_eth_queue_zone,
1298                                          rx_producers);
1299         }
1300
1301         qed_iov_send_response(p_hwfn, p_ptt, vf, sizeof(*p_tlv), status);
1302 }
1303
1304 static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
1305                                      struct qed_ptt *p_ptt,
1306                                      struct qed_vf_info *vf)
1307 {
1308         struct qed_queue_start_common_params params;
1309         struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1310         u8 status = PFVF_STATUS_SUCCESS;
1311         struct vfpf_start_rxq_tlv *req;
1312         int rc;
1313
1314         memset(&params, 0, sizeof(params));
1315         req = &mbx->req_virt->start_rxq;
1316         params.queue_id =  vf->vf_queues[req->rx_qid].fw_rx_qid;
1317         params.vport_id = vf->vport_id;
1318         params.sb = req->hw_sb;
1319         params.sb_idx = req->sb_index;
1320
1321         rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
1322                                          vf->vf_queues[req->rx_qid].fw_cid,
1323                                          &params,
1324                                          vf->abs_vf_id + 0x10,
1325                                          req->bd_max_bytes,
1326                                          req->rxq_addr,
1327                                          req->cqe_pbl_addr, req->cqe_pbl_size);
1328
1329         if (rc) {
1330                 status = PFVF_STATUS_FAILURE;
1331         } else {
1332                 vf->vf_queues[req->rx_qid].rxq_active = true;
1333                 vf->num_active_rxqs++;
1334         }
1335
1336         qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status);
1337 }
1338
1339 static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
1340                                      struct qed_ptt *p_ptt,
1341                                      struct qed_vf_info *vf)
1342 {
1343         u16 length = sizeof(struct pfvf_def_resp_tlv);
1344         struct qed_queue_start_common_params params;
1345         struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1346         union qed_qm_pq_params pq_params;
1347         u8 status = PFVF_STATUS_SUCCESS;
1348         struct vfpf_start_txq_tlv *req;
1349         int rc;
1350
1351         /* Prepare the parameters which would choose the right PQ */
1352         memset(&pq_params, 0, sizeof(pq_params));
1353         pq_params.eth.is_vf = 1;
1354         pq_params.eth.vf_id = vf->relative_vf_id;
1355
1356         memset(&params, 0, sizeof(params));
1357         req = &mbx->req_virt->start_txq;
1358         params.queue_id =  vf->vf_queues[req->tx_qid].fw_tx_qid;
1359         params.vport_id = vf->vport_id;
1360         params.sb = req->hw_sb;
1361         params.sb_idx = req->sb_index;
1362
1363         rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
1364                                          vf->opaque_fid,
1365                                          vf->vf_queues[req->tx_qid].fw_cid,
1366                                          &params,
1367                                          vf->abs_vf_id + 0x10,
1368                                          req->pbl_addr,
1369                                          req->pbl_size, &pq_params);
1370
1371         if (rc)
1372                 status = PFVF_STATUS_FAILURE;
1373         else
1374                 vf->vf_queues[req->tx_qid].txq_active = true;
1375
1376         qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_START_TXQ,
1377                              length, status);
1378 }
1379
1380 static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
1381                                 struct qed_vf_info *vf,
1382                                 u16 rxq_id, u8 num_rxqs, bool cqe_completion)
1383 {
1384         int rc = 0;
1385         int qid;
1386
1387         if (rxq_id + num_rxqs > ARRAY_SIZE(vf->vf_queues))
1388                 return -EINVAL;
1389
1390         for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
1391                 if (vf->vf_queues[qid].rxq_active) {
1392                         rc = qed_sp_eth_rx_queue_stop(p_hwfn,
1393                                                       vf->vf_queues[qid].
1394                                                       fw_rx_qid, false,
1395                                                       cqe_completion);
1396
1397                         if (rc)
1398                                 return rc;
1399                 }
1400                 vf->vf_queues[qid].rxq_active = false;
1401                 vf->num_active_rxqs--;
1402         }
1403
1404         return rc;
1405 }
1406
1407 static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
1408                                 struct qed_vf_info *vf, u16 txq_id, u8 num_txqs)
1409 {
1410         int rc = 0;
1411         int qid;
1412
1413         if (txq_id + num_txqs > ARRAY_SIZE(vf->vf_queues))
1414                 return -EINVAL;
1415
1416         for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
1417                 if (vf->vf_queues[qid].txq_active) {
1418                         rc = qed_sp_eth_tx_queue_stop(p_hwfn,
1419                                                       vf->vf_queues[qid].
1420                                                       fw_tx_qid);
1421
1422                         if (rc)
1423                                 return rc;
1424                 }
1425                 vf->vf_queues[qid].txq_active = false;
1426         }
1427         return rc;
1428 }
1429
1430 static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
1431                                      struct qed_ptt *p_ptt,
1432                                      struct qed_vf_info *vf)
1433 {
1434         u16 length = sizeof(struct pfvf_def_resp_tlv);
1435         struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1436         u8 status = PFVF_STATUS_SUCCESS;
1437         struct vfpf_stop_rxqs_tlv *req;
1438         int rc;
1439
1440         /* The VF may ask to stop queues starting from qid != 0; in that
1441          * case we need to make sure that qid + num_qs doesn't exceed the
1442          * actual number of queues that exist.
1443          */
1444         req = &mbx->req_virt->stop_rxqs;
1445         rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
1446                                   req->num_rxqs, req->cqe_completion);
1447         if (rc)
1448                 status = PFVF_STATUS_FAILURE;
1449
1450         qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
1451                              length, status);
1452 }
1453
1454 static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
1455                                      struct qed_ptt *p_ptt,
1456                                      struct qed_vf_info *vf)
1457 {
1458         u16 length = sizeof(struct pfvf_def_resp_tlv);
1459         struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1460         u8 status = PFVF_STATUS_SUCCESS;
1461         struct vfpf_stop_txqs_tlv *req;
1462         int rc;
1463
1464         /* The VF may ask to stop queues starting from qid != 0; in that
1465          * case we need to make sure that qid + num_qs doesn't exceed the
1466          * actual number of queues that exist.
1467          */
1468         req = &mbx->req_virt->stop_txqs;
1469         rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
1470         if (rc)
1471                 status = PFVF_STATUS_FAILURE;
1472
1473         qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
1474                              length, status);
1475 }
1476
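/* Walk the TLV chain in the VF's request buffer and return a pointer to the
 * first TLV of the requested type. Returns NULL if a zero-length TLV is
 * found, if the chain would overrun TLV_BUFFER_SIZE, or if the list ends
 * without a match.
 */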
1477 void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
1478                                void *p_tlvs_list, u16 req_type)
1479 {
1480         struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
1481         int len = 0;
1482
1483         do {
1484                 if (!p_tlv->length) {
1485                         DP_NOTICE(p_hwfn, "Zero length TLV found\n");
1486                         return NULL;
1487                 }
1488
1489                 if (p_tlv->type == req_type) {
1490                         DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1491                                    "Extended tlv type %d, length %d found\n",
1492                                    p_tlv->type, p_tlv->length);
1493                         return p_tlv;
1494                 }
1495
1496                 len += p_tlv->length;
1497                 p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
1498
1499                 if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
1500                         DP_NOTICE(p_hwfn, "TLVs have overrun the buffer size\n");
1501                         return NULL;
1502                 }
1503         } while (p_tlv->type != CHANNEL_TLV_LIST_END);
1504
1505         return NULL;
1506 }
1507
1508 static void
1509 qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn,
1510                             struct qed_sp_vport_update_params *p_data,
1511                             struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
1512 {
1513         struct vfpf_vport_update_activate_tlv *p_act_tlv;
1514         u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
1515
1516         p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
1517                     qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
1518         if (!p_act_tlv)
1519                 return;
1520
1521         p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
1522         p_data->vport_active_rx_flg = p_act_tlv->active_rx;
1523         p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
1524         p_data->vport_active_tx_flg = p_act_tlv->active_tx;
1525         *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE;
1526 }
1527
1528 static void
1529 qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
1530                                   struct qed_sp_vport_update_params *p_data,
1531                                   struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
1532 {
1533         struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
1534         u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
1535
1536         p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
1537             qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
1538         if (!p_mcast_tlv)
1539                 return;
1540
1541         p_data->update_approx_mcast_flg = 1;
1542         memcpy(p_data->bins, p_mcast_tlv->bins,
1543                sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
1544         *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
1545 }
1546
1547 static void
1548 qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn,
1549                               struct qed_sp_vport_update_params *p_data,
1550                               struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
1551 {
1552         struct qed_filter_accept_flags *p_flags = &p_data->accept_flags;
1553         struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
1554         u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
1555
1556         p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
1557             qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
1558         if (!p_accept_tlv)
1559                 return;
1560
1561         p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
1562         p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
1563         p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
1564         p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
1565         *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM;
1566 }
1567
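/* If the vport-update request carries an RSS TLV, translate it into
 * qed_rss_params: copy the update flags, key and indirection table, and
 * convert each VF-relative table entry into its FW Rx queue id, falling
 * back to queue 0 for out-of-range or inactive queues.
 */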
1568 static void
1569 qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
1570                             struct qed_vf_info *vf,
1571                             struct qed_sp_vport_update_params *p_data,
1572                             struct qed_rss_params *p_rss,
1573                             struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
1574 {
1575         struct vfpf_vport_update_rss_tlv *p_rss_tlv;
1576         u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
1577         u16 i, q_idx, max_q_idx;
1578         u16 table_size;
1579
1580         p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
1581                     qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
1582         if (!p_rss_tlv) {
1583                 p_data->rss_params = NULL;
1584                 return;
1585         }
1586
1587         memset(p_rss, 0, sizeof(struct qed_rss_params));
1588
1589         p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags &
1590                                       VFPF_UPDATE_RSS_CONFIG_FLAG);
1591         p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags &
1592                                             VFPF_UPDATE_RSS_CAPS_FLAG);
1593         p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags &
1594                                          VFPF_UPDATE_RSS_IND_TABLE_FLAG);
1595         p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &
1596                                    VFPF_UPDATE_RSS_KEY_FLAG);
1597
1598         p_rss->rss_enable = p_rss_tlv->rss_enable;
1599         p_rss->rss_eng_id = vf->relative_vf_id + 1;
1600         p_rss->rss_caps = p_rss_tlv->rss_caps;
1601         p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
1602         memcpy(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
1603                sizeof(p_rss->rss_ind_table));
1604         memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));
1605
1606         table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
1607                            (1 << p_rss_tlv->rss_table_size_log));
1608
1609         max_q_idx = ARRAY_SIZE(vf->vf_queues);
1610
1611         for (i = 0; i < table_size; i++) {
1612                 u16 index = vf->vf_queues[0].fw_rx_qid;
1613
1614                 q_idx = p_rss->rss_ind_table[i];
1615                 if (q_idx >= max_q_idx)
1616                         DP_NOTICE(p_hwfn,
1617                                   "rss_ind_table[%d] = %d, rxq is out of range\n",
1618                                   i, q_idx);
1619                 else if (!vf->vf_queues[q_idx].rxq_active)
1620                         DP_NOTICE(p_hwfn,
1621                                   "rss_ind_table[%d] = %d, rxq is not active\n",
1622                                   i, q_idx);
1623                 else
1624                         index = vf->vf_queues[q_idx].fw_rx_qid;
1625                 p_rss->rss_ind_table[i] = index;
1626         }
1627
1628         p_data->rss_params = p_rss;
1629         *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
1630 }
1631
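/* Handle a VF's VPORT_UPDATE request: collect the extended TLVs present in
 * the message into qed_sp_vport_update_params, post the vport-update ramrod
 * and build a response reflecting which TLVs were processed.
 */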
1632 static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
1633                                         struct qed_ptt *p_ptt,
1634                                         struct qed_vf_info *vf)
1635 {
1636         struct qed_sp_vport_update_params params;
1637         struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1638         struct qed_rss_params rss_params;
1639         u8 status = PFVF_STATUS_SUCCESS;
1640         u16 tlvs_mask = 0;
1641         u16 length;
1642         int rc;
1643
1644         memset(&params, 0, sizeof(params));
1645         params.opaque_fid = vf->opaque_fid;
1646         params.vport_id = vf->vport_id;
1647         params.rss_params = NULL;
1648
1649         /* Search for extended tlvs list and update values
1650          * from VF in struct qed_sp_vport_update_params.
1651          */
1652         qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
1653         qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
1654         qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
1655         qed_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,
1656                                     mbx, &tlvs_mask);
1657
1658         /* Just log a message if there isn't a single extended TLV in the
1659          * buffer. Once the VF requests every vport-update ramrod feature as
1660          * an extended TLV, an error can be returned in the response when no
1661          * extended TLV is present in the buffer.
1662          */
1663         if (!tlvs_mask) {
1664                 DP_NOTICE(p_hwfn,
1665                           "No feature tlvs found for vport update\n");
1666                 status = PFVF_STATUS_NOT_SUPPORTED;
1667                 goto out;
1668         }
1669
1670         rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
1671
1672         if (rc)
1673                 status = PFVF_STATUS_FAILURE;
1674
1675 out:
1676         length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
1677                                                   tlvs_mask, tlvs_mask);
1678         qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
1679 }
1680
1681 int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
1682                       int vfid, struct qed_filter_ucast *params)
1683 {
1684         struct qed_public_vf_info *vf;
1685
1686         vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
1687         if (!vf)
1688                 return -EINVAL;
1689
1690         /* No real decision to make; store the configured MAC */
1691         if (params->type == QED_FILTER_MAC ||
1692             params->type == QED_FILTER_MAC_VLAN)
1693                 ether_addr_copy(vf->mac, params->mac);
1694
1695         return 0;
1696 }
1697
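/* Handle a VF's unicast filter request: build qed_filter_ucast from the TLV,
 * let the PF record the MAC via qed_iov_chk_ucast() and then post the
 * filter ramrod on the VF's vport.
 */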
1698 static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
1699                                         struct qed_ptt *p_ptt,
1700                                         struct qed_vf_info *vf)
1701 {
1702         struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1703         struct vfpf_ucast_filter_tlv *req;
1704         u8 status = PFVF_STATUS_SUCCESS;
1705         struct qed_filter_ucast params;
1706         int rc;
1707
1708         /* Prepare the unicast filter params */
1709         memset(&params, 0, sizeof(struct qed_filter_ucast));
1710         req = &mbx->req_virt->ucast_filter;
1711         params.opcode = (enum qed_filter_opcode)req->opcode;
1712         params.type = (enum qed_filter_ucast_type)req->type;
1713
1714         params.is_rx_filter = 1;
1715         params.is_tx_filter = 1;
1716         params.vport_to_remove_from = vf->vport_id;
1717         params.vport_to_add_to = vf->vport_id;
1718         memcpy(params.mac, req->mac, ETH_ALEN);
1719         params.vlan = req->vlan;
1720
1721         DP_VERBOSE(p_hwfn,
1722                    QED_MSG_IOV,
1723                    "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
1724                    vf->abs_vf_id, params.opcode, params.type,
1725                    params.is_rx_filter ? "RX" : "",
1726                    params.is_tx_filter ? "TX" : "",
1727                    params.vport_to_add_to,
1728                    params.mac[0], params.mac[1],
1729                    params.mac[2], params.mac[3],
1730                    params.mac[4], params.mac[5], params.vlan);
1731
1732         if (!vf->vport_instance) {
1733                 DP_VERBOSE(p_hwfn,
1734                            QED_MSG_IOV,
1735                            "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
1736                            vf->abs_vf_id);
1737                 status = PFVF_STATUS_FAILURE;
1738                 goto out;
1739         }
1740
1741         rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, &params);
1742         if (rc) {
1743                 status = PFVF_STATUS_FAILURE;
1744                 goto out;
1745         }
1746
1747         rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
1748                                      QED_SPQ_MODE_CB, NULL);
1749         if (rc)
1750                 status = PFVF_STATUS_FAILURE;
1751
1752 out:
1753         qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
1754                              sizeof(struct pfvf_def_resp_tlv), status);
1755 }
1756
1757 static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn,
1758                                        struct qed_ptt *p_ptt,
1759                                        struct qed_vf_info *vf)
1760 {
1761         int i;
1762
1763         /* Reset the SBs */
1764         for (i = 0; i < vf->num_sbs; i++)
1765                 qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
1766                                                 vf->igu_sbs[i],
1767                                                 vf->opaque_fid, false);
1768
1769         qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
1770                              sizeof(struct pfvf_def_resp_tlv),
1771                              PFVF_STATUS_SUCCESS);
1772 }
1773
1774 static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn,
1775                                  struct qed_ptt *p_ptt, struct qed_vf_info *vf)
1776 {
1777         u16 length = sizeof(struct pfvf_def_resp_tlv);
1778         u8 status = PFVF_STATUS_SUCCESS;
1779
1780         /* Disable Interrupts for VF */
1781         qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
1782
1783         /* Reset Permission table */
1784         qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
1785
1786         qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
1787                              length, status);
1788 }
1789
1790 static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
1791                                    struct qed_ptt *p_ptt,
1792                                    struct qed_vf_info *p_vf)
1793 {
1794         u16 length = sizeof(struct pfvf_def_resp_tlv);
1795
1796         qed_iov_vf_cleanup(p_hwfn, p_vf);
1797
1798         qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
1799                              length, PFVF_STATUS_SUCCESS);
1800 }
1801
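/* As part of FLR cleanup, pretend to be the VF and poll the DORQ VF usage
 * counter until it reads zero (up to ~1 second), i.e. until the VF's
 * doorbell activity has drained.
 */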
1802 static int
1803 qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
1804                          struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
1805 {
1806         int cnt;
1807         u32 val;
1808
1809         qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);
1810
1811         for (cnt = 0; cnt < 50; cnt++) {
1812                 val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
1813                 if (!val)
1814                         break;
1815                 msleep(20);
1816         }
1817         qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
1818
1819         if (cnt == 50) {
1820                 DP_ERR(p_hwfn,
1821                        "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
1822                        p_vf->abs_vf_id, val);
1823                 return -EBUSY;
1824         }
1825
1826         return 0;
1827 }
1828
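/* Sample the per-VOQ PBF allocated-blocks producer/consumer counters, then
 * poll until each consumer has advanced by at least the initially observed
 * distance, i.e. until blocks allocated before the FLR have been consumed.
 */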
1829 static int
1830 qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
1831                         struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
1832 {
1833         u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
1834         int i, cnt;
1835
1836         /* Read initial consumers & producers */
1837         for (i = 0; i < MAX_NUM_VOQS; i++) {
1838                 u32 prod;
1839
1840                 cons[i] = qed_rd(p_hwfn, p_ptt,
1841                                  PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
1842                                  i * 0x40);
1843                 prod = qed_rd(p_hwfn, p_ptt,
1844                               PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
1845                               i * 0x40);
1846                 distance[i] = prod - cons[i];
1847         }
1848
1849         /* Wait for consumers to pass the producers */
1850         i = 0;
1851         for (cnt = 0; cnt < 50; cnt++) {
1852                 for (; i < MAX_NUM_VOQS; i++) {
1853                         u32 tmp;
1854
1855                         tmp = qed_rd(p_hwfn, p_ptt,
1856                                      PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
1857                                      i * 0x40);
1858                         if (distance[i] > tmp - cons[i])
1859                                 break;
1860                 }
1861
1862                 if (i == MAX_NUM_VOQS)
1863                         break;
1864
1865                 msleep(20);
1866         }
1867
1868         if (cnt == 50) {
1869                 DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
1870                        p_vf->abs_vf_id, i);
1871                 return -EBUSY;
1872         }
1873
1874         return 0;
1875 }
1876
1877 static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn,
1878                                struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
1879 {
1880         int rc;
1881
1882         rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
1883         if (rc)
1884                 return rc;
1885
1886         rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
1887         if (rc)
1888                 return rc;
1889
1890         return 0;
1891 }
1892
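/* Perform FLR cleanup for a single VF that has a pending FLR indication:
 * clean SW state, wait for the HW blocks to drain, run the final cleanup,
 * re-enable VF access and mark the VF for an ACK towards the MFW.
 */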
1893 static int
1894 qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
1895                                struct qed_ptt *p_ptt,
1896                                u16 rel_vf_id, u32 *ack_vfs)
1897 {
1898         struct qed_vf_info *p_vf;
1899         int rc = 0;
1900
1901         p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
1902         if (!p_vf)
1903                 return 0;
1904
1905         if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
1906             (1ULL << (rel_vf_id % 64))) {
1907                 u16 vfid = p_vf->abs_vf_id;
1908
1909                 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1910                            "VF[%d] - Handling FLR\n", vfid);
1911
1912                 qed_iov_vf_cleanup(p_hwfn, p_vf);
1913
1914                 /* If VF isn't active, no need for anything but SW */
1915                 if (!p_vf->b_init)
1916                         goto cleanup;
1917
1918                 rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
1919                 if (rc)
1920                         goto cleanup;
1921
1922                 rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true);
1923                 if (rc) {
1924                         DP_ERR(p_hwfn, "Failed to handle FLR of VF[%d]\n", vfid);
1925                         return rc;
1926                 }
1927
1928                 /* VF_STOPPED has to be set only after final cleanup
1929                  * but prior to re-enabling the VF.
1930                  */
1931                 p_vf->state = VF_STOPPED;
1932
1933                 rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
1934                 if (rc) {
1935                         DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
1936                                vfid);
1937                         return rc;
1938                 }
1939 cleanup:
1940                 /* Mark VF for ack and clean pending state */
1941                 if (p_vf->state == VF_RESET)
1942                         p_vf->state = VF_STOPPED;
1943                 ack_vfs[vfid / 32] |= (1 << (vfid % 32));
1944                 p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
1945                     ~(1ULL << (rel_vf_id % 64));
1946                 p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
1947                     ~(1ULL << (rel_vf_id % 64));
1948         }
1949
1950         return rc;
1951 }
1952
1953 int qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1954 {
1955         u32 ack_vfs[VF_MAX_STATIC / 32];
1956         int rc = 0;
1957         u16 i;
1958
1959         memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
1960
1961         /* Since BRB <-> PRS interface can't be tested as part of the flr
1962          * polling due to HW limitations, simply sleep a bit. And since
1963          * there's no need to wait per-vf, do it before looping.
1964          */
1965         msleep(100);
1966
1967         for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++)
1968                 qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
1969
1970         rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
1971         return rc;
1972 }
1973
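/* Mark the VFs indicated in the disabled-VFs bitmap as pending FLR and move
 * them to VF_RESET; returns non-zero if any VF was marked.
 */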
1974 int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
1975 {
1976         u16 i, found = 0;
1977
1978         DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
1979         for (i = 0; i < (VF_MAX_STATIC / 32); i++)
1980                 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1981                            "[%08x,...,%08x]: %08x\n",
1982                            i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
1983
1984         if (!p_hwfn->cdev->p_iov_info) {
1985                 DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
1986                 return 0;
1987         }
1988
1989         /* Mark VFs */
1990         for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) {
1991                 struct qed_vf_info *p_vf;
1992                 u8 vfid;
1993
1994                 p_vf = qed_iov_get_vf_info(p_hwfn, i, false);
1995                 if (!p_vf)
1996                         continue;
1997
1998                 vfid = p_vf->abs_vf_id;
1999                 if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
2000                         u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
2001                         u16 rel_vf_id = p_vf->relative_vf_id;
2002
2003                         DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2004                                    "VF[%d] [rel %d] got FLR-ed\n",
2005                                    vfid, rel_vf_id);
2006
2007                         p_vf->state = VF_RESET;
2008
2009                         /* No need to lock here, since pending_flr should
2010                          * only change between here and the FLR ACK to the
2011                          * MFW. Since the MFW will not trigger an additional
2012                          * attention for VF FLR until we ACK, we're safe.
2013                          */
2014                         p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
2015                         found = 1;
2016                 }
2017         }
2018
2019         return found;
2020 }
2021
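/* Dispatch a single VF mailbox request: read the first TLV from the copied
 * request buffer and invoke the matching CHANNEL_TLV_* handler; unknown TLV
 * types are logged together with the start of the mailbox buffer.
 */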
2022 static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
2023                                     struct qed_ptt *p_ptt, int vfid)
2024 {
2025         struct qed_iov_vf_mbx *mbx;
2026         struct qed_vf_info *p_vf;
2027         int i;
2028
2029         p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
2030         if (!p_vf)
2031                 return;
2032
2033         mbx = &p_vf->vf_mbx;
2034
2035         /* qed_iov_process_mbx_request */
2036         DP_VERBOSE(p_hwfn,
2037                    QED_MSG_IOV,
2038                    "qed_iov_process_mbx_req vfid %d\n", p_vf->abs_vf_id);
2039
2040         mbx->first_tlv = mbx->req_virt->first_tlv;
2041
2042         /* check if tlv type is known */
2043         if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
2044                 switch (mbx->first_tlv.tl.type) {
2045                 case CHANNEL_TLV_ACQUIRE:
2046                         qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
2047                         break;
2048                 case CHANNEL_TLV_VPORT_START:
2049                         qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
2050                         break;
2051                 case CHANNEL_TLV_VPORT_TEARDOWN:
2052                         qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
2053                         break;
2054                 case CHANNEL_TLV_START_RXQ:
2055                         qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
2056                         break;
2057                 case CHANNEL_TLV_START_TXQ:
2058                         qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
2059                         break;
2060                 case CHANNEL_TLV_STOP_RXQS:
2061                         qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
2062                         break;
2063                 case CHANNEL_TLV_STOP_TXQS:
2064                         qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
2065                         break;
2066                 case CHANNEL_TLV_VPORT_UPDATE:
2067                         qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
2068                         break;
2069                 case CHANNEL_TLV_UCAST_FILTER:
2070                         qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
2071                         break;
2072                 case CHANNEL_TLV_CLOSE:
2073                         qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
2074                         break;
2075                 case CHANNEL_TLV_INT_CLEANUP:
2076                         qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
2077                         break;
2078                 case CHANNEL_TLV_RELEASE:
2079                         qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
2080                         break;
2081                 }
2082         } else {
2083                 /* unknown TLV - this may belong to a VF driver from the future
2084                  * - a version written after this PF driver was written, which
2085                  * supports features unknown as of yet. Too bad since we don't
2086                  * support them. Or this may be because someone wrote a crappy
2087                  * VF driver and is sending garbage over the channel.
2088                  */
2089                 DP_ERR(p_hwfn,
2090                        "unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n",
2091                        mbx->first_tlv.tl.type, mbx->first_tlv.tl.length);
2092
2093                 for (i = 0; i < 20; i++) {
2094                         DP_VERBOSE(p_hwfn,
2095                                    QED_MSG_IOV,
2096                                    "%x ",
2097                                    mbx->req_virt->tlv_buf_size.tlv_buffer[i]);
2098                 }
2099         }
2100 }
2101
2102 void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid)
2103 {
2104         u64 add_bit = 1ULL << (vfid % 64);
2105
2106         p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
2107 }
2108
2109 static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn,
2110                                                     u64 *events)
2111 {
2112         u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
2113
2114         memcpy(events, p_pending_events, sizeof(u64) * QED_VF_ARRAY_LENGTH);
2115         memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
2116 }
2117
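/* Handle a VF->PF channel event: sanity-check the VF index, record the
 * physical address of the VF's request and schedule the IOV workqueue so
 * the message can be copied and processed later.
 */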
2118 static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
2119                               u16 abs_vfid, struct regpair *vf_msg)
2120 {
2121         u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf;
2122         struct qed_vf_info *p_vf;
2123
2124         if (!qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min)) {
2125                 DP_VERBOSE(p_hwfn,
2126                            QED_MSG_IOV,
2127                            "Got a message from VF [abs 0x%08x] that cannot be handled by PF\n",
2128                            abs_vfid);
2129                 return 0;
2130         }
2131         p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
2132
2133         /* Store the physical address of the request so that the handler
2134          * can later copy the message from it.
2135          */
2136         p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
2137
2138         /* Mark the event and schedule the workqueue */
2139         qed_iov_pf_add_pending_events(p_hwfn, p_vf->relative_vf_id);
2140         qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);
2141
2142         return 0;
2143 }
2144
2145 int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
2146                         u8 opcode, __le16 echo, union event_ring_data *data)
2147 {
2148         switch (opcode) {
2149         case COMMON_EVENT_VF_PF_CHANNEL:
2150                 return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
2151                                           &data->vf_pf_channel.msg_addr);
2152         default:
2153                 DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
2154                         opcode);
2155                 return -EINVAL;
2156         }
2157 }
2158
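/* Return the index of the first enabled VF at or after rel_vf_id, or
 * MAX_NUM_VFS if there is none.
 */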
2159 u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
2160 {
2161         struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
2162         u16 i;
2163
2164         if (!p_iov)
2165                 goto out;
2166
2167         for (i = rel_vf_id; i < p_iov->total_vfs; i++)
2168                 if (qed_iov_is_valid_vfid(p_hwfn, i, true))
2169                         return i;
2170
2171 out:
2172         return MAX_NUM_VFS;
2173 }
2174
2175 static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
2176                                int vfid)
2177 {
2178         struct qed_dmae_params params;
2179         struct qed_vf_info *vf_info;
2180
2181         vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
2182         if (!vf_info)
2183                 return -EINVAL;
2184
2185         memset(&params, 0, sizeof(struct qed_dmae_params));
2186         params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST;
2187         params.src_vfid = vf_info->abs_vf_id;
2188
2189         if (qed_dmae_host2host(p_hwfn, ptt,
2190                                vf_info->vf_mbx.pending_req,
2191                                vf_info->vf_mbx.req_phys,
2192                                sizeof(union vfpf_tlvs) / 4, &params)) {
2193                 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2194                            "Failed to copy message from VF 0x%02x\n", vfid);
2195
2196                 return -EIO;
2197         }
2198
2199         return 0;
2200 }
2201
2202 bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
2203 {
2204         struct qed_vf_info *p_vf_info;
2205
2206         p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
2207         if (!p_vf_info)
2208                 return true;
2209
2210         return p_vf_info->state == VF_STOPPED;
2211 }
2212
2213 /**
2214  * qed_schedule_iov - schedules IOV task for VF and PF
2215  * @hwfn: hardware function pointer
2216  * @flag: IOV flag for VF/PF
2217  */
2218 void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
2219 {
2220         smp_mb__before_atomic();
2221         set_bit(flag, &hwfn->iov_task_flags);
2222         smp_mb__after_atomic();
2223         DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
2224         queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
2225 }
2226
2227 void qed_vf_start_iov_wq(struct qed_dev *cdev)
2228 {
2229         int i;
2230
2231         for_each_hwfn(cdev, i)
2232             queue_delayed_work(cdev->hwfns[i].iov_wq,
2233                                &cdev->hwfns[i].iov_task, 0);
2234 }
2235
2236 int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
2237 {
2238         int i, j;
2239
2240         for_each_hwfn(cdev, i)
2241             if (cdev->hwfns[i].iov_wq)
2242                 flush_workqueue(cdev->hwfns[i].iov_wq);
2243
2244         /* Mark VFs for disablement */
2245         qed_iov_set_vfs_to_disable(cdev, true);
2246
2247         if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
2248                 pci_disable_sriov(cdev->pdev);
2249
2250         for_each_hwfn(cdev, i) {
2251                 struct qed_hwfn *hwfn = &cdev->hwfns[i];
2252                 struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
2253
2254                 /* Failure to acquire the ptt in 100g creates an odd error
2255                  * where the first engine has already released IOV.
2256                  */
2257                 if (!ptt) {
2258                         DP_ERR(hwfn, "Failed to acquire ptt\n");
2259                         return -EBUSY;
2260                 }
2261
2262                 qed_for_each_vf(hwfn, j) {
2263                         int k;
2264
2265                         if (!qed_iov_is_valid_vfid(hwfn, j, true))
2266                                 continue;
2267
2268                         /* Wait until VF is disabled before releasing */
2269                         for (k = 0; k < 100; k++) {
2270                                 if (!qed_iov_is_vf_stopped(hwfn, j))
2271                                         msleep(20);
2272                                 else
2273                                         break;
2274                         }
2275
2276                         if (k < 100)
2277                                 qed_iov_release_hw_for_vf(&cdev->hwfns[i],
2278                                                           ptt, j);
2279                         else
2280                                 DP_ERR(hwfn,
2281                                        "Timeout waiting for VF's FLR to end\n");
2282                 }
2283
2284                 qed_ptt_release(hwfn, ptt);
2285         }
2286
2287         qed_iov_set_vfs_to_disable(cdev, false);
2288
2289         return 0;
2290 }
2291
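/* Enable 'num' VFs: for each hwfn, divide the free status blocks between the
 * VFs and initialize the HW for every valid VF, then enable the SR-IOV PCIe
 * capability; any failure rolls back via qed_sriov_disable().
 */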
2292 static int qed_sriov_enable(struct qed_dev *cdev, int num)
2293 {
2294         struct qed_sb_cnt_info sb_cnt_info;
2295         int i, j, rc;
2296
2297         if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
2298                 DP_NOTICE(cdev, "Can start at most %d VFs\n",
2299                           RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
2300                 return -EINVAL;
2301         }
2302
2303         /* Initialize HW for VF access */
2304         for_each_hwfn(cdev, j) {
2305                 struct qed_hwfn *hwfn = &cdev->hwfns[j];
2306                 struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
2307                 int num_sbs = 0, limit = 16;
2308
2309                 if (!ptt) {
2310                         DP_ERR(hwfn, "Failed to acquire ptt\n");
2311                         rc = -EBUSY;
2312                         goto err;
2313                 }
2314
2315                 memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
2316                 qed_int_get_num_sbs(hwfn, &sb_cnt_info);
2317                 num_sbs = min_t(int, sb_cnt_info.sb_free_blk, limit);
2318
2319                 for (i = 0; i < num; i++) {
2320                         if (!qed_iov_is_valid_vfid(hwfn, i, false))
2321                                 continue;
2322
2323                         rc = qed_iov_init_hw_for_vf(hwfn,
2324                                                     ptt, i, num_sbs / num);
2325                         if (rc) {
2326                                 DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
2327                                 qed_ptt_release(hwfn, ptt);
2328                                 goto err;
2329                         }
2330                 }
2331
2332                 qed_ptt_release(hwfn, ptt);
2333         }
2334
2335         /* Enable SRIOV PCIe functions */
2336         rc = pci_enable_sriov(cdev->pdev, num);
2337         if (rc) {
2338                 DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
2339                 goto err;
2340         }
2341
2342         return num;
2343
2344 err:
2345         qed_sriov_disable(cdev, false);
2346         return rc;
2347 }
2348
2349 static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
2350 {
2351         if (!IS_QED_SRIOV(cdev)) {
2352                 DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
2353                 return -EOPNOTSUPP;
2354         }
2355
2356         if (num_vfs_param)
2357                 return qed_sriov_enable(cdev, num_vfs_param);
2358         else
2359                 return qed_sriov_disable(cdev, true);
2360 }
2361
2362 static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
2363 {
2364         u64 events[QED_VF_ARRAY_LENGTH];
2365         struct qed_ptt *ptt;
2366         int i;
2367
2368         ptt = qed_ptt_acquire(hwfn);
2369         if (!ptt) {
2370                 DP_VERBOSE(hwfn, QED_MSG_IOV,
2371                            "Can't acquire PTT; re-scheduling\n");
2372                 qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
2373                 return;
2374         }
2375
2376         qed_iov_pf_get_and_clear_pending_events(hwfn, events);
2377
2378         DP_VERBOSE(hwfn, QED_MSG_IOV,
2379                    "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
2380                    events[0], events[1], events[2]);
2381
2382         qed_for_each_vf(hwfn, i) {
2383                 /* Skip VFs with no pending messages */
2384                 if (!(events[i / 64] & (1ULL << (i % 64))))
2385                         continue;
2386
2387                 DP_VERBOSE(hwfn, QED_MSG_IOV,
2388                            "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
2389                            i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);
2390
2391                 /* Copy VF's message to PF's request buffer for that VF */
2392                 if (qed_iov_copy_vf_msg(hwfn, ptt, i))
2393                         continue;
2394
2395                 qed_iov_process_mbx_req(hwfn, ptt, i);
2396         }
2397
2398         qed_ptt_release(hwfn, ptt);
2399 }
2400
2401 void qed_iov_pf_task(struct work_struct *work)
2402 {
2403         struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
2404                                              iov_task.work);
2405         int rc;
2406
2407         if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
2408                 return;
2409
2410         if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {
2411                 struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
2412
2413                 if (!ptt) {
2414                         qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
2415                         return;
2416                 }
2417
2418                 rc = qed_iov_vf_flr_cleanup(hwfn, ptt);
2419                 if (rc)
2420                         qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
2421
2422                 qed_ptt_release(hwfn, ptt);
2423         }
2424
2425         if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
2426                 qed_handle_vf_msg(hwfn);
2427 }
2428
2429 void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
2430 {
2431         int i;
2432
2433         for_each_hwfn(cdev, i) {
2434                 if (!cdev->hwfns[i].iov_wq)
2435                         continue;
2436
2437                 if (schedule_first) {
2438                         qed_schedule_iov(&cdev->hwfns[i],
2439                                          QED_IOV_WQ_STOP_WQ_FLAG);
2440                         cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
2441                 }
2442
2443                 flush_workqueue(cdev->hwfns[i].iov_wq);
2444                 destroy_workqueue(cdev->hwfns[i].iov_wq);
2445         }
2446 }
2447
2448 int qed_iov_wq_start(struct qed_dev *cdev)
2449 {
2450         char name[NAME_SIZE];
2451         int i;
2452
2453         for_each_hwfn(cdev, i) {
2454                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2455
2456                 /* PFs need a dedicated workqueue only if they support IOV. */
2457                 if (!IS_PF_SRIOV(p_hwfn))
2458                         continue;
2459
2460                 snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
2461                          cdev->pdev->bus->number,
2462                          PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);
2463
2464                 p_hwfn->iov_wq = create_singlethread_workqueue(name);
2465                 if (!p_hwfn->iov_wq) {
2466                         DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
2467                         return -ENOMEM;
2468                 }
2469
2470                 INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
2471         }
2472
2473         return 0;
2474 }
2475
2476 const struct qed_iov_hv_ops qed_iov_ops_pass = {
2477         .configure = &qed_sriov_configure,
2478 };