/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/etherdevice.h>
#include <linux/qed/qed_iov_if.h>
#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"
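
/* The two helpers below wrap the COMMON_RAMROD_VF_START/VF_STOP slowpath
 * ramrods: the PF posts them on its own slowpath queue on behalf of a VF,
 * passing the VF's opaque FID so firmware attributes the work to the right
 * function.
 */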
static int qed_sp_vf_start(struct qed_hwfn *p_hwfn,
                           u32 concrete_vfid, u16 opaque_vfid)
{
        struct vf_start_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc = -EINVAL;

        /* Get an SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = opaque_vfid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_VF_START,
                                 PROTOCOLID_COMMON, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.vf_start;

        p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
        p_ramrod->opaque_fid = cpu_to_le16(opaque_vfid);

        p_ramrod->personality = PERSONALITY_ETH;

        return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
                          u32 concrete_vfid, u16 opaque_vfid)
{
        struct vf_stop_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc = -EINVAL;

        /* Get an SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = opaque_vfid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_VF_STOP,
                                 PROTOCOLID_COMMON, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.vf_stop;

        p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);

        return qed_spq_post(p_hwfn, p_ent, NULL);
}
bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
                           int rel_vf_id, bool b_enabled_only)
{
        if (!p_hwfn->pf_iov_info) {
                DP_NOTICE(p_hwfn->cdev, "No iov info\n");
                return false;
        }

        if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
            (rel_vf_id < 0))
                return false;

        if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
            b_enabled_only)
                return false;

        return true;
}
static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
                                               u16 relative_vf_id,
                                               bool b_enabled_only)
{
        struct qed_vf_info *vf = NULL;

        if (!p_hwfn->pf_iov_info) {
                DP_NOTICE(p_hwfn->cdev, "No iov info\n");
                return NULL;
        }

        if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only))
                vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
        else
                DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
                       relative_vf_id);

        return vf;
}
static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
{
        struct qed_hw_sriov_info *iov = cdev->p_iov_info;
        int pos = iov->pos;

        DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
        pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);

        pci_read_config_word(cdev->pdev,
                             pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
        pci_read_config_word(cdev->pdev,
                             pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);

        pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
        if (iov->num_vfs) {
                DP_VERBOSE(cdev,
                           QED_MSG_IOV,
                           "Number of VFs are already set to non-zero value. Ignoring PCI configuration value\n");
                iov->num_vfs = 0;
        }

        pci_read_config_word(cdev->pdev,
                             pos + PCI_SRIOV_VF_OFFSET, &iov->offset);

        pci_read_config_word(cdev->pdev,
                             pos + PCI_SRIOV_VF_STRIDE, &iov->stride);

        pci_read_config_word(cdev->pdev,
                             pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);

        pci_read_config_dword(cdev->pdev,
                              pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);

        pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);

        pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

        DP_VERBOSE(cdev,
                   QED_MSG_IOV,
                   "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
                   iov->nres,
                   iov->cap,
                   iov->ctrl,
                   iov->total_vfs,
                   iov->initial_vfs,
                   iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

        /* Some sanity checks */
        if (iov->num_vfs > NUM_OF_VFS(cdev) ||
            iov->total_vfs > NUM_OF_VFS(cdev)) {
                /* This can happen only due to a bug. In this case we set
                 * num_vfs to zero to avoid memory corruption in the code that
                 * assumes max number of vfs
                 */
                DP_NOTICE(cdev,
                          "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n",
                          iov->num_vfs);

                iov->num_vfs = 0;
                iov->total_vfs = 0;
        }

        return 0;
}
static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn,
                                        struct qed_ptt *p_ptt)
{
        struct qed_igu_block *p_sb;
        u16 sb_id;
        u32 val;

        if (!p_hwfn->hw_info.p_igu_info) {
                DP_ERR(p_hwfn,
                       "qed_iov_clear_vf_igu_blocks IGU Info not initialized\n");
                return;
        }

        for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
             sb_id++) {
                p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
                if ((p_sb->status & QED_IGU_STATUS_FREE) &&
                    !(p_sb->status & QED_IGU_STATUS_PF)) {
                        val = qed_rd(p_hwfn, p_ptt,
                                     IGU_REG_MAPPING_MEMORY + sb_id * 4);
                        SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
                        qed_wr(p_hwfn, p_ptt,
                               IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
                }
        }
}
static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
{
        struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
        struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
        struct qed_bulletin_content *p_bulletin_virt;
        dma_addr_t req_p, rply_p, bulletin_p;
        union pfvf_tlvs *p_reply_virt_addr;
        union vfpf_tlvs *p_req_virt_addr;
        u8 idx = 0;

        memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));

        p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
        req_p = p_iov_info->mbx_msg_phys_addr;
        p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
        rply_p = p_iov_info->mbx_reply_phys_addr;
        p_bulletin_virt = p_iov_info->p_bulletins;
        bulletin_p = p_iov_info->bulletins_phys;
        if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
                DP_ERR(p_hwfn,
                       "qed_iov_setup_vfdb called without allocating mem first\n");
                return;
        }

        for (idx = 0; idx < p_iov->total_vfs; idx++) {
                struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
                u32 concrete;

                vf->vf_mbx.req_virt = p_req_virt_addr + idx;
                vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
                vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
                vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);

                vf->state = VF_STOPPED;

                vf->bulletin.phys = idx *
                                    sizeof(struct qed_bulletin_content) +
                                    bulletin_p;
                vf->bulletin.p_virt = p_bulletin_virt + idx;
                vf->bulletin.size = sizeof(struct qed_bulletin_content);

                vf->relative_vf_id = idx;
                vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
                concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
                vf->concrete_fid = concrete;
                vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
                                 (vf->abs_vf_id << 8);
                vf->vport_id = idx + 1;
        }
}
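
/* A note on the FID math in qed_iov_setup_vfdb() above: the opaque FID keeps
 * the PF's low byte and places the absolute VF id in bits 8-15, while the
 * concrete FID comes from qed_vfid_to_concrete(). The opaque form is what
 * the VF<->PF channel and ramrods use; the concrete form feeds the pretend
 * registers used elsewhere in this file.
 */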
static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
{
        struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
        void **p_v_addr;
        u16 num_vfs = 0;

        num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;

        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);

        /* Allocate PF Mailbox buffer (per-VF) */
        p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
        p_v_addr = &p_iov_info->mbx_msg_virt_addr;
        *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                       p_iov_info->mbx_msg_size,
                                       &p_iov_info->mbx_msg_phys_addr,
                                       GFP_KERNEL);
        if (!*p_v_addr)
                return -ENOMEM;

        /* Allocate PF Mailbox Reply buffer (per-VF) */
        p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
        p_v_addr = &p_iov_info->mbx_reply_virt_addr;
        *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                       p_iov_info->mbx_reply_size,
                                       &p_iov_info->mbx_reply_phys_addr,
                                       GFP_KERNEL);
        if (!*p_v_addr)
                return -ENOMEM;

        p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
                                     num_vfs;
        p_v_addr = &p_iov_info->p_bulletins;
        *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                       p_iov_info->bulletins_size,
                                       &p_iov_info->bulletins_phys,
                                       GFP_KERNEL);
        if (!*p_v_addr)
                return -ENOMEM;

        DP_VERBOSE(p_hwfn,
                   QED_MSG_IOV,
                   "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
                   p_iov_info->mbx_msg_virt_addr,
                   (u64) p_iov_info->mbx_msg_phys_addr,
                   p_iov_info->mbx_reply_virt_addr,
                   (u64) p_iov_info->mbx_reply_phys_addr,
                   p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);

        return 0;
}
static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
{
        struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

        if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  p_iov_info->mbx_msg_size,
                                  p_iov_info->mbx_msg_virt_addr,
                                  p_iov_info->mbx_msg_phys_addr);

        if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  p_iov_info->mbx_reply_size,
                                  p_iov_info->mbx_reply_virt_addr,
                                  p_iov_info->mbx_reply_phys_addr);

        if (p_iov_info->p_bulletins)
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  p_iov_info->bulletins_size,
                                  p_iov_info->p_bulletins,
                                  p_iov_info->bulletins_phys);
}
int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_pf_iov *p_sriov;

        if (!IS_PF_SRIOV(p_hwfn)) {
                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "No SR-IOV - no need for IOV db\n");
                return 0;
        }

        p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
        if (!p_sriov) {
                DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n");
                return -ENOMEM;
        }

        p_hwfn->pf_iov_info = p_sriov;

        return qed_iov_allocate_vfdb(p_hwfn);
}
void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
                return;

        qed_iov_setup_vfdb(p_hwfn);
        qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
}
void qed_iov_free(struct qed_hwfn *p_hwfn)
{
        if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
                qed_iov_free_vfdb(p_hwfn);
                kfree(p_hwfn->pf_iov_info);
        }
}
void qed_iov_free_hw_info(struct qed_dev *cdev)
{
        kfree(cdev->p_iov_info);
        cdev->p_iov_info = NULL;
}
int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
        struct qed_dev *cdev = p_hwfn->cdev;
        int pos;
        int rc;

        if (IS_VF(p_hwfn->cdev))
                return 0;

        /* Learn the PCI configuration */
        pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
                                      PCI_EXT_CAP_ID_SRIOV);
        if (!pos) {
                DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
                return 0;
        }

        /* Allocate a new struct for IOV information */
        cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
        if (!cdev->p_iov_info) {
                DP_NOTICE(p_hwfn, "Can't support IOV due to lack of memory\n");
                return -ENOMEM;
        }
        cdev->p_iov_info->pos = pos;

        rc = qed_iov_pci_cfg_info(cdev);
        if (rc)
                return rc;

        /* We want PF IOV to be synonymous with the existence of p_iov_info;
         * In case the capability is published but there are no VFs, simply
         * de-allocate the struct.
         */
        if (!cdev->p_iov_info->total_vfs) {
                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "IOV capabilities, but no VFs are published\n");
                kfree(cdev->p_iov_info);
                cdev->p_iov_info = NULL;
                return 0;
        }

        /* Calculate the first VF index - this is a bit tricky; Basically,
         * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
         * after the first engine's VFs.
         */
        cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset +
                                           p_hwfn->abs_pf_id - 16;
        if (QED_PATH_ID(p_hwfn))
                cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;

        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "First VF in hwfn 0x%08x\n",
                   cdev->p_iov_info->first_vf_in_pf);

        return 0;
}
static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
{
        /* Check PF supports sriov */
        if (!IS_QED_SRIOV(p_hwfn->cdev) || !IS_PF_SRIOV_ALLOC(p_hwfn))
                return false;

        /* Check VF validity */
        if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
            !IS_PF_SRIOV_ALLOC(p_hwfn))
                return false;

        return true;
}
static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
                                      u16 rel_vf_id, u8 to_disable)
{
        struct qed_vf_info *vf;
        int i;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
                if (!vf)
                        continue;

                vf->to_disable = to_disable;
        }
}
void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
{
        u16 i;

        if (!IS_QED_SRIOV(cdev))
                return;

        for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
                qed_iov_set_vf_to_disable(cdev, i, to_disable);
}
static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
                                       struct qed_ptt *p_ptt, u8 abs_vfid)
{
        qed_wr(p_hwfn, p_ptt,
               PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
               1 << (abs_vfid & 0x1f));
}
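
/* Each WAS_ERROR_VF register covers 32 VFs, so (abs_vfid >> 5) selects the
 * register and (abs_vfid & 0x1f) the bit to clear within it.
 */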
static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
                                 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
{
        u16 igu_sb_id;
        int i;

        /* Set VF masks and configuration - pretend */
        qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

        qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);

        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "value in VF_CONFIGURATION of vf %d after write %x\n",
                   vf->abs_vf_id,
                   qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION));

        /* unpretend */
        qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

        /* iterate over all queues, clear sb consumer */
        for (i = 0; i < vf->num_sbs; i++) {
                igu_sb_id = vf->igu_sbs[i];
                /* Set then clear... */
                qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1,
                                       vf->opaque_fid);
                qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0,
                                       vf->opaque_fid);
        }
}
static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
                                   struct qed_ptt *p_ptt,
                                   struct qed_vf_info *vf, bool enable)
{
        u32 igu_vf_conf;

        qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

        igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);

        if (enable)
                igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
        else
                igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;

        qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);

        /* unpretend */
        qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
}
static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
                                    struct qed_ptt *p_ptt,
                                    struct qed_vf_info *vf)
{
        u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
        int rc;

        if (vf->to_disable)
                return 0;

        DP_VERBOSE(p_hwfn,
                   QED_MSG_IOV,
                   "Enable internal access for vf %x [abs %x]\n",
                   vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));

        qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));

        rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs);
        if (rc)
                return rc;

        qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

        SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
        STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);

        qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
                     p_hwfn->hw_info.hw_mode);

        /* unpretend */
        qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

        if (vf->state != VF_STOPPED) {
                DP_NOTICE(p_hwfn, "VF[%02x] is already started\n",
                          vf->abs_vf_id);
                return -EINVAL;
        }

        /* Start VF */
        rc = qed_sp_vf_start(p_hwfn, vf->concrete_fid, vf->opaque_fid);
        if (rc)
                DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);

        vf->state = VF_FREE;

        return rc;
}
/**
 * @brief qed_iov_config_perm_table - configure the permission
 *      zone table.
 *      In E4, queue zone permission table size is 320x9. There
 *      are 320 VF queues for single engine device (256 for dual
 *      engine device), and each entry has the following format:
 *      {Valid, VF[7:0]}
 * @param p_hwfn
 * @param p_ptt
 * @param vf
 * @param enable
 */
static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
                                      struct qed_ptt *p_ptt,
                                      struct qed_vf_info *vf, u8 enable)
{
        u32 reg_addr, val;
        u16 qzone_id = 0;
        int qid;

        for (qid = 0; qid < vf->num_rxqs; qid++) {
                qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
                                &qzone_id);

                reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
                val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
                qed_wr(p_hwfn, p_ptt, reg_addr, val);
        }
}
static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn,
                                      struct qed_ptt *p_ptt,
                                      struct qed_vf_info *vf)
{
        /* Reset vf in IGU - interrupts are still disabled */
        qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

        qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);

        /* Permission Table */
        qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
}
static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
                                   struct qed_ptt *p_ptt,
                                   struct qed_vf_info *vf, u16 num_rx_queues)
{
        struct qed_igu_block *igu_blocks;
        int qid = 0, igu_id = 0;
        u32 val = 0;

        igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;

        if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
                num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
        p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;

        SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
        SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
        SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);

        while ((qid < num_rx_queues) &&
               (igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev))) {
                if (igu_blocks[igu_id].status & QED_IGU_STATUS_FREE) {
                        struct cau_sb_entry sb_entry;

                        vf->igu_sbs[qid] = (u16)igu_id;
                        igu_blocks[igu_id].status &= ~QED_IGU_STATUS_FREE;

                        SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);

                        qed_wr(p_hwfn, p_ptt,
                               IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
                               val);

                        /* Configure igu sb in CAU which were marked valid */
                        qed_init_cau_sb_entry(p_hwfn, &sb_entry,
                                              p_hwfn->rel_pf_id,
                                              vf->abs_vf_id, 1);
                        qed_dmae_host2grc(p_hwfn, p_ptt,
                                          (u64)(uintptr_t)&sb_entry,
                                          CAU_REG_SB_VAR_MEMORY +
                                          igu_id * sizeof(u64), 2, 0);
                        qid++;
                }
                igu_id++;
        }

        vf->num_sbs = (u8) num_rx_queues;

        return vf->num_sbs;
}
static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
                                    struct qed_ptt *p_ptt,
                                    struct qed_vf_info *vf)
{
        struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
        int idx, igu_id;
        u32 addr, val;

        /* Invalidate igu CAM lines and mark them as free */
        for (idx = 0; idx < vf->num_sbs; idx++) {
                igu_id = vf->igu_sbs[idx];
                addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;

                val = qed_rd(p_hwfn, p_ptt, addr);
                SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
                qed_wr(p_hwfn, p_ptt, addr, val);

                p_info->igu_map.igu_blocks[igu_id].status |=
                    QED_IGU_STATUS_FREE;

                p_hwfn->hw_info.p_igu_info->free_blks++;
        }

        vf->num_sbs = 0;
}
static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
                                  struct qed_ptt *p_ptt,
                                  u16 rel_vf_id, u16 num_rx_queues)
{
        u8 num_of_vf_avaiable_chains = 0;
        struct qed_vf_info *vf = NULL;
        int rc = 0;
        u32 cids;
        u8 i;

        vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
        if (!vf) {
                DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
                return -EINVAL;
        }

        if (vf->b_init) {
                DP_NOTICE(p_hwfn, "VF[%d] is already active.\n", rel_vf_id);
                return -EINVAL;
        }

        /* Limit number of queues according to number of CIDs */
        qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
        DP_VERBOSE(p_hwfn,
                   QED_MSG_IOV,
                   "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
                   vf->relative_vf_id, num_rx_queues, (u16) cids);
        num_rx_queues = min_t(u16, num_rx_queues, ((u16) cids));

        num_of_vf_avaiable_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
                                                             p_ptt,
                                                             vf,
                                                             num_rx_queues);
        if (!num_of_vf_avaiable_chains) {
                DP_ERR(p_hwfn, "no available igu sbs\n");
                return -ENOMEM;
        }

        /* Choose queue number and index ranges */
        vf->num_rxqs = num_of_vf_avaiable_chains;
        vf->num_txqs = num_of_vf_avaiable_chains;

        for (i = 0; i < vf->num_rxqs; i++) {
                u16 queue_id = qed_int_queue_id_from_sb_id(p_hwfn,
                                                           vf->igu_sbs[i]);

                if (queue_id > RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
                        DP_NOTICE(p_hwfn,
                                  "VF[%d] will require utilizing of out-of-bounds queues - %04x\n",
                                  vf->relative_vf_id, queue_id);
                        return -EINVAL;
                }

                /* CIDs are per-VF, so no problem having them 0-based. */
                vf->vf_queues[i].fw_rx_qid = queue_id;
                vf->vf_queues[i].fw_tx_qid = queue_id;
                vf->vf_queues[i].fw_cid = i;

                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n",
                           vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);
        }
        rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
        if (!rc) {
                vf->b_init = true;

                if (IS_LEAD_HWFN(p_hwfn))
                        p_hwfn->cdev->p_iov_info->num_vfs++;
        }

        return rc;
}
static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
                                     struct qed_ptt *p_ptt, u16 rel_vf_id)
{
        struct qed_vf_info *vf = NULL;
        int rc = 0;

        vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
        if (!vf) {
                DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
                return -EINVAL;
        }

        if (vf->state != VF_STOPPED) {
                /* Stopping the VF */
                rc = qed_sp_vf_stop(p_hwfn, vf->concrete_fid, vf->opaque_fid);

                if (rc != 0) {
                        DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
                               rc);
                        return rc;
                }

                vf->state = VF_STOPPED;
        }

        /* disabling interrupts and resetting permission table was done during
         * vf-close, however, we could get here without going through vf_close
         */
        /* Disable Interrupts for VF */
        qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

        /* Reset Permission table */
        qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

        vf->num_rxqs = 0;
        vf->num_txqs = 0;
        qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);

        if (vf->b_init) {
                vf->b_init = false;

                if (IS_LEAD_HWFN(p_hwfn))
                        p_hwfn->cdev->p_iov_info->num_vfs--;
        }

        return 0;
}
static bool qed_iov_tlv_supported(u16 tlvtype)
{
        return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}
/* place a given tlv on the tlv buffer, continuing current tlv list */
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
{
        struct channel_tlv *tl = (struct channel_tlv *)*offset;

        tl->type = type;
        tl->length = length;

        /* Offset should keep pointing to next TLV (the end of the last) */
        *offset += length;

        /* Return a pointer to the start of the added tlv */
        return *offset - length;
}
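
/* Illustrative TLV-chain usage (see qed_iov_prepare_resp() below for the
 * real pattern) - a reply is built by appending TLVs and closing the list:
 *
 *      mbx->offset = (u8 *)mbx->reply_virt;
 *      qed_add_tlv(p_hwfn, &mbx->offset, type, length);
 *      qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
 *                  sizeof(struct channel_list_end_tlv));
 */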
/* list the types and lengths of the tlvs on the buffer */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
{
        u16 i = 1, total_length = 0;
        struct channel_tlv *tlv;

        do {
                tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);

                /* output tlv */
                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "TLV number %d: type %d, length %d\n",
                           i, tlv->type, tlv->length);

                if (tlv->type == CHANNEL_TLV_LIST_END)
                        return;

                /* Validate entry - protect against malicious VFs */
                if (!tlv->length) {
                        DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
                        return;
                }

                total_length += tlv->length;

                if (total_length >= sizeof(struct tlv_buffer_size)) {
                        DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
                        return;
                }

                i++;
        } while (1);
}
static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
                                  struct qed_ptt *p_ptt,
                                  struct qed_vf_info *p_vf,
                                  u16 length, u8 status)
{
        struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
        struct qed_dmae_params params;
        u8 eng_vf_id;

        mbx->reply_virt->default_resp.hdr.status = status;

        qed_dp_tlv_list(p_hwfn, mbx->reply_virt);

        eng_vf_id = p_vf->abs_vf_id;

        memset(&params, 0, sizeof(struct qed_dmae_params));
        params.flags = QED_DMAE_FLAG_VF_DST;
        params.dst_vfid = eng_vf_id;

        qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
                           mbx->req_virt->first_tlv.reply_address +
                           sizeof(u64),
                           (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
                           &params);

        qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
                           mbx->req_virt->first_tlv.reply_address,
                           sizeof(u64) / 4, &params);

        REG_WR(p_hwfn,
               GTT_BAR0_MAP_REG_USDM_RAM +
               USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
}
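
/* Note the two-step DMA above: everything past the first 8 bytes of the
 * reply is copied first, and the leading 8 bytes - which contain the status
 * header the VF polls on - are copied last, so the VF cannot observe a
 * completed status before the rest of the reply has landed.
 */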
static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
                                enum qed_iov_vport_update_flag flag)
{
        switch (flag) {
        case QED_IOV_VP_UPDATE_ACTIVATE:
                return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
        case QED_IOV_VP_UPDATE_MCAST:
                return CHANNEL_TLV_VPORT_UPDATE_MCAST;
        case QED_IOV_VP_UPDATE_ACCEPT_PARAM:
                return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
        case QED_IOV_VP_UPDATE_RSS:
                return CHANNEL_TLV_VPORT_UPDATE_RSS;
        default:
                return 0;
        }
}
static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
                                            struct qed_vf_info *p_vf,
                                            struct qed_iov_vf_mbx *p_mbx,
                                            u8 status,
                                            u16 tlvs_mask, u16 tlvs_accepted)
{
        struct pfvf_def_resp_tlv *resp;
        u16 size, total_len, i;

        memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
        p_mbx->offset = (u8 *)p_mbx->reply_virt;
        size = sizeof(struct pfvf_def_resp_tlv);
        total_len = size;

        qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);

        /* Prepare response for all extended tlvs if they are found by PF */
        for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
                if (!(tlvs_mask & (1 << i)))
                        continue;

                resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
                                   qed_iov_vport_to_tlv(p_hwfn, i), size);

                if (tlvs_accepted & (1 << i))
                        resp->hdr.status = status;
                else
                        resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;

                DP_VERBOSE(p_hwfn,
                           QED_MSG_IOV,
                           "VF[%d] - vport_update response: TLV %d, status %02x\n",
                           p_vf->relative_vf_id,
                           qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);

                total_len += size;
        }

        qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
                    sizeof(struct channel_list_end_tlv));

        return total_len;
}
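
/* Note: the only caller currently passes tlvs_mask for both 'tlvs_mask' and
 * 'tlvs_accepted', so every recognized extended TLV gets the same status;
 * the split presumably allows a future caller to accept only a subset.
 */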
static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
                                 struct qed_ptt *p_ptt,
                                 struct qed_vf_info *vf_info,
                                 u16 type, u16 length, u8 status)
{
        struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;

        mbx->offset = (u8 *)mbx->reply_virt;

        qed_add_tlv(p_hwfn, &mbx->offset, type, length);
        qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
                    sizeof(struct channel_list_end_tlv));

        qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
}
struct qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
                                                      u16 relative_vf_id,
                                                      bool b_enabled_only)
{
        struct qed_vf_info *vf = NULL;

        vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
        if (!vf)
                return NULL;

        return &vf->p_vf_info;
}
void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
{
        struct qed_public_vf_info *vf_info;

        vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);

        if (!vf_info)
                return;

        /* Clear the VF mac */
        memset(vf_info->mac, 0, ETH_ALEN);
}
static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
                               struct qed_vf_info *p_vf)
{
        u32 i;

        p_vf->vf_bulletin = 0;
        p_vf->vport_instance = 0;
        p_vf->num_mac_filters = 0;
        p_vf->num_vlan_filters = 0;

        /* If VF previously requested less resources, go back to default */
        p_vf->num_rxqs = p_vf->num_sbs;
        p_vf->num_txqs = p_vf->num_sbs;

        p_vf->num_active_rxqs = 0;

        for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++)
                p_vf->vf_queues[i].rxq_active = 0;

        qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
}
static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
                                   struct qed_ptt *p_ptt,
                                   struct qed_vf_info *vf)
{
        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
        struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
        struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
        struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
        u8 i, vfpf_status = PFVF_STATUS_SUCCESS;
        struct pf_vf_resc *resc = &resp->resc;

        /* Validate FW compatibility */
        if (req->vfdev_info.fw_major != FW_MAJOR_VERSION ||
            req->vfdev_info.fw_minor != FW_MINOR_VERSION ||
            req->vfdev_info.fw_revision != FW_REVISION_VERSION ||
            req->vfdev_info.fw_engineering != FW_ENGINEERING_VERSION) {
                DP_INFO(p_hwfn,
                        "VF[%d] is running an incompatible driver [VF needs FW %02x:%02x:%02x:%02x but Hypervisor is using %02x:%02x:%02x:%02x]\n",
                        vf->abs_vf_id,
                        req->vfdev_info.fw_major,
                        req->vfdev_info.fw_minor,
                        req->vfdev_info.fw_revision,
                        req->vfdev_info.fw_engineering,
                        FW_MAJOR_VERSION,
                        FW_MINOR_VERSION,
                        FW_REVISION_VERSION, FW_ENGINEERING_VERSION);
                vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
                goto out;
        }

        /* On 100g PFs, prevent old VFs from loading */
        if ((p_hwfn->cdev->num_hwfns > 1) &&
            !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
                DP_INFO(p_hwfn,
                        "VF[%d] is running an old driver that doesn't support 100g\n",
                        vf->abs_vf_id);
                vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
                goto out;
        }

        memset(resp, 0, sizeof(*resp));

        /* Fill in vf info stuff */
        vf->opaque_fid = req->vfdev_info.opaque_fid;
        vf->num_mac_filters = 1;
        vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;

        vf->vf_bulletin = req->bulletin_addr;
        vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
                            vf->bulletin.size : req->bulletin_size;

        /* fill in pfdev info */
        pfdev_info->chip_num = p_hwfn->cdev->chip_num;
        pfdev_info->db_size = 0;
        pfdev_info->indices_per_sb = PIS_PER_SB;

        pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
                                   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
        if (p_hwfn->cdev->num_hwfns > 1)
                pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;

        pfdev_info->stats_info.mstats.address =
            PXP_VF_BAR0_START_MSDM_ZONE_B +
            offsetof(struct mstorm_vf_zone, non_trigger.eth_queue_stat);
        pfdev_info->stats_info.mstats.len =
            sizeof(struct eth_mstorm_per_queue_stat);

        pfdev_info->stats_info.ustats.address =
            PXP_VF_BAR0_START_USDM_ZONE_B +
            offsetof(struct ustorm_vf_zone, non_trigger.eth_queue_stat);
        pfdev_info->stats_info.ustats.len =
            sizeof(struct eth_ustorm_per_queue_stat);

        pfdev_info->stats_info.pstats.address =
            PXP_VF_BAR0_START_PSDM_ZONE_B +
            offsetof(struct pstorm_vf_zone, non_trigger.eth_queue_stat);
        pfdev_info->stats_info.pstats.len =
            sizeof(struct eth_pstorm_per_queue_stat);

        pfdev_info->stats_info.tstats.address = 0;
        pfdev_info->stats_info.tstats.len = 0;

        memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);

        pfdev_info->fw_major = FW_MAJOR_VERSION;
        pfdev_info->fw_minor = FW_MINOR_VERSION;
        pfdev_info->fw_rev = FW_REVISION_VERSION;
        pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
        pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
        qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);

        pfdev_info->dev_type = p_hwfn->cdev->type;
        pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;

        resc->num_rxqs = vf->num_rxqs;
        resc->num_txqs = vf->num_txqs;
        resc->num_sbs = vf->num_sbs;
        for (i = 0; i < resc->num_sbs; i++) {
                resc->hw_sbs[i].hw_sb_id = vf->igu_sbs[i];
                resc->hw_sbs[i].sb_qid = 0;
        }

        for (i = 0; i < resc->num_rxqs; i++) {
                qed_fw_l2_queue(p_hwfn, vf->vf_queues[i].fw_rx_qid,
                                (u16 *)&resc->hw_qid[i]);
                resc->cid[i] = vf->vf_queues[i].fw_cid;
        }

        resc->num_mac_filters = min_t(u8, vf->num_mac_filters,
                                      req->resc_request.num_mac_filters);
        resc->num_vlan_filters = min_t(u8, vf->num_vlan_filters,
                                       req->resc_request.num_vlan_filters);

        /* This isn't really required as VF isn't limited, but some VFs might
         * actually test this value, so need to provide it.
         */
        resc->num_mc_filters = req->resc_request.num_mc_filters;

        /* Fill agreed size of bulletin board in response */
        resp->bulletin_size = vf->bulletin.size;

        DP_VERBOSE(p_hwfn,
                   QED_MSG_IOV,
                   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
                   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
                   vf->abs_vf_id,
                   resp->pfdev_info.chip_num,
                   resp->pfdev_info.db_size,
                   resp->pfdev_info.indices_per_sb,
                   resp->pfdev_info.capabilities,
                   resc->num_rxqs,
                   resc->num_txqs,
                   resc->num_sbs,
                   resc->num_mac_filters,
                   resc->num_vlan_filters);
        vf->state = VF_ACQUIRED;

        /* Prepare Response */
out:
        qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
                             sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
}
static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
                                       struct qed_ptt *p_ptt,
                                       struct qed_vf_info *vf)
{
        struct qed_sp_vport_start_params params = { 0 };
        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
        struct vfpf_vport_start_tlv *start;
        u8 status = PFVF_STATUS_SUCCESS;
        struct qed_vf_info *vf_info;
        int sb_id;
        int rc;

        vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
        if (!vf_info) {
                DP_NOTICE(p_hwfn->cdev,
                          "Failed to get VF info, invalid vfid [%d]\n",
                          vf->relative_vf_id);
                return;
        }

        vf->state = VF_ENABLED;
        start = &mbx->req_virt->start_vport;

        /* Initialize Status block in CAU */
        for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
                if (!start->sb_addr[sb_id]) {
                        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                                   "VF[%d] did not fill the address of SB %d\n",
                                   vf->relative_vf_id, sb_id);
                        break;
                }

                qed_int_cau_conf_sb(p_hwfn, p_ptt,
                                    start->sb_addr[sb_id],
                                    vf->igu_sbs[sb_id],
                                    vf->abs_vf_id, 1);
        }
        qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);

        vf->mtu = start->mtu;

        params.tpa_mode = start->tpa_mode;
        params.remove_inner_vlan = start->inner_vlan_removal;

        params.drop_ttl0 = false;
        params.concrete_fid = vf->concrete_fid;
        params.opaque_fid = vf->opaque_fid;
        params.vport_id = vf->vport_id;
        params.max_buffers_per_cqe = start->max_buffers_per_cqe;
        params.mtu = vf->mtu;

        rc = qed_sp_eth_vport_start(p_hwfn, &params);
        if (rc != 0) {
                DP_ERR(p_hwfn,
                       "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
                status = PFVF_STATUS_FAILURE;
        } else
                vf->vport_instance++;
        qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
                             sizeof(struct pfvf_def_resp_tlv), status);
}
static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
                                      struct qed_ptt *p_ptt,
                                      struct qed_vf_info *vf)
{
        u8 status = PFVF_STATUS_SUCCESS;
        int rc;

        vf->vport_instance--;

        rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
        if (rc != 0) {
                DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
                       rc);
                status = PFVF_STATUS_FAILURE;
        }

        qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
                             sizeof(struct pfvf_def_resp_tlv), status);
}
#define TSTORM_QZONE_START	PXP_VF_BAR0_START_SDM_ZONE_A
#define MSTORM_QZONE_START(dev)	(TSTORM_QZONE_START + \
				 (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
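
/* These defines place the mstorm queue zones right after the tstorm zones
 * inside the VF BAR; qed_iov_vf_mbx_start_rxq_resp() below uses them to hand
 * the VF the offset of its Rx producers within that zone.
 */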
static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
                                          struct qed_ptt *p_ptt,
                                          struct qed_vf_info *vf, u8 status)
{
        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
        struct pfvf_start_queue_resp_tlv *p_tlv;
        struct vfpf_start_rxq_tlv *req;

        mbx->offset = (u8 *)mbx->reply_virt;

        p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
                            sizeof(*p_tlv));
        qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
                    sizeof(struct channel_list_end_tlv));

        /* Update the TLV with the response */
        if (status == PFVF_STATUS_SUCCESS) {
                u16 hw_qid = 0;

                req = &mbx->req_virt->start_rxq;
                qed_fw_l2_queue(p_hwfn, vf->vf_queues[req->rx_qid].fw_rx_qid,
                                &hw_qid);

                p_tlv->offset = MSTORM_QZONE_START(p_hwfn->cdev) +
                                hw_qid * MSTORM_QZONE_SIZE +
                                offsetof(struct mstorm_eth_queue_zone,
                                         rx_producers);
        }

        qed_iov_send_response(p_hwfn, p_ptt, vf, sizeof(*p_tlv), status);
}
static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
                                     struct qed_ptt *p_ptt,
                                     struct qed_vf_info *vf)
{
        struct qed_queue_start_common_params params;
        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
        u8 status = PFVF_STATUS_SUCCESS;
        struct vfpf_start_rxq_tlv *req;
        int rc;

        memset(&params, 0, sizeof(params));
        req = &mbx->req_virt->start_rxq;
        params.queue_id = vf->vf_queues[req->rx_qid].fw_rx_qid;
        params.vport_id = vf->vport_id;
        params.sb = req->hw_sb;
        params.sb_idx = req->sb_index;

        rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
                                         vf->vf_queues[req->rx_qid].fw_cid,
                                         &params,
                                         vf->abs_vf_id + 0x10,
                                         req->bd_max_bytes,
                                         req->rxq_addr,
                                         req->cqe_pbl_addr, req->cqe_pbl_size);

        if (rc) {
                status = PFVF_STATUS_FAILURE;
        } else {
                vf->vf_queues[req->rx_qid].rxq_active = true;
                vf->num_active_rxqs++;
        }

        qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status);
}
static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
                                     struct qed_ptt *p_ptt,
                                     struct qed_vf_info *vf)
{
        u16 length = sizeof(struct pfvf_def_resp_tlv);
        struct qed_queue_start_common_params params;
        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
        union qed_qm_pq_params pq_params;
        u8 status = PFVF_STATUS_SUCCESS;
        struct vfpf_start_txq_tlv *req;
        int rc;

        /* Prepare the parameters which would choose the right PQ */
        memset(&pq_params, 0, sizeof(pq_params));
        pq_params.eth.is_vf = 1;
        pq_params.eth.vf_id = vf->relative_vf_id;

        memset(&params, 0, sizeof(params));
        req = &mbx->req_virt->start_txq;
        params.queue_id = vf->vf_queues[req->tx_qid].fw_tx_qid;
        params.vport_id = vf->vport_id;
        params.sb = req->hw_sb;
        params.sb_idx = req->sb_index;

        rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
                                         vf->opaque_fid,
                                         vf->vf_queues[req->tx_qid].fw_cid,
                                         &params,
                                         vf->abs_vf_id + 0x10,
                                         req->pbl_addr,
                                         req->pbl_size, &pq_params);

        if (rc)
                status = PFVF_STATUS_FAILURE;
        else
                vf->vf_queues[req->tx_qid].txq_active = true;

        qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_START_TXQ,
                             length, status);
}
static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
                                struct qed_vf_info *vf,
                                u16 rxq_id, u8 num_rxqs, bool cqe_completion)
{
        int rc = 0;
        int qid;

        if (rxq_id + num_rxqs > ARRAY_SIZE(vf->vf_queues))
                return -EINVAL;

        for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
                if (vf->vf_queues[qid].rxq_active) {
                        rc = qed_sp_eth_rx_queue_stop(p_hwfn,
                                                      vf->vf_queues[qid].
                                                      fw_rx_qid, false,
                                                      cqe_completion);

                        if (rc)
                                return rc;
                }
                vf->vf_queues[qid].rxq_active = false;
                vf->num_active_rxqs--;
        }

        return rc;
}
static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
                                struct qed_vf_info *vf, u16 txq_id, u8 num_txqs)
{
        int rc = 0;
        int qid;

        if (txq_id + num_txqs > ARRAY_SIZE(vf->vf_queues))
                return -EINVAL;

        for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
                if (vf->vf_queues[qid].txq_active) {
                        rc = qed_sp_eth_tx_queue_stop(p_hwfn,
                                                      vf->vf_queues[qid].
                                                      fw_tx_qid);

                        if (rc)
                                return rc;
                }
                vf->vf_queues[qid].txq_active = false;
        }
        return rc;
}
static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
                                     struct qed_ptt *p_ptt,
                                     struct qed_vf_info *vf)
{
        u16 length = sizeof(struct pfvf_def_resp_tlv);
        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
        u8 status = PFVF_STATUS_SUCCESS;
        struct vfpf_stop_rxqs_tlv *req;
        int rc;

        /* We give the option of starting from qid != 0, in this case we
         * need to make sure that qid + num_qs doesn't exceed the actual
         * amount of queues that exist.
         */
        req = &mbx->req_virt->stop_rxqs;
        rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
                                  req->num_rxqs, req->cqe_completion);
        if (rc)
                status = PFVF_STATUS_FAILURE;

        qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
                             length, status);
}
static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
                                     struct qed_ptt *p_ptt,
                                     struct qed_vf_info *vf)
{
        u16 length = sizeof(struct pfvf_def_resp_tlv);
        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
        u8 status = PFVF_STATUS_SUCCESS;
        struct vfpf_stop_txqs_tlv *req;
        int rc;

        /* We give the option of starting from qid != 0, in this case we
         * need to make sure that qid + num_qs doesn't exceed the actual
         * amount of queues that exist.
         */
        req = &mbx->req_virt->stop_txqs;
        rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
        if (rc)
                status = PFVF_STATUS_FAILURE;

        qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
                             length, status);
}
void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
                               void *p_tlvs_list, u16 req_type)
{
        struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
        int len = 0;

        do {
                if (!p_tlv->length) {
                        DP_NOTICE(p_hwfn, "Zero length TLV found\n");
                        return NULL;
                }

                if (p_tlv->type == req_type) {
                        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                                   "Extended tlv type %d, length %d found\n",
                                   p_tlv->type, p_tlv->length);
                        return p_tlv;
                }

                len += p_tlv->length;
                p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);

                if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
                        DP_NOTICE(p_hwfn, "TLVs has overrun the buffer size\n");
                        return NULL;
                }
        } while (p_tlv->type != CHANNEL_TLV_LIST_END);

        return NULL;
}
static void
qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn,
                            struct qed_sp_vport_update_params *p_data,
                            struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
        struct vfpf_vport_update_activate_tlv *p_act_tlv;
        u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;

        p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
                    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
        if (!p_act_tlv)
                return;

        p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
        p_data->vport_active_rx_flg = p_act_tlv->active_rx;
        p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
        p_data->vport_active_tx_flg = p_act_tlv->active_tx;
        *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE;
}
static void
qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
                                  struct qed_sp_vport_update_params *p_data,
                                  struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
        struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
        u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;

        p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
                      qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
        if (!p_mcast_tlv)
                return;

        p_data->update_approx_mcast_flg = 1;
        memcpy(p_data->bins, p_mcast_tlv->bins,
               sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
        *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
}
static void
qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn,
                              struct qed_sp_vport_update_params *p_data,
                              struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
        struct qed_filter_accept_flags *p_flags = &p_data->accept_flags;
        struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
        u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;

        p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
                       qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
        if (!p_accept_tlv)
                return;

        p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
        p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
        p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
        p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
        *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM;
}
static void
qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
                            struct qed_vf_info *vf,
                            struct qed_sp_vport_update_params *p_data,
                            struct qed_rss_params *p_rss,
                            struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
        struct vfpf_vport_update_rss_tlv *p_rss_tlv;
        u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
        u16 i, q_idx, max_q_idx;
        u16 table_size;

        p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
                    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
        if (!p_rss_tlv) {
                p_data->rss_params = NULL;
                return;
        }

        memset(p_rss, 0, sizeof(struct qed_rss_params));

        p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags &
                                      VFPF_UPDATE_RSS_CONFIG_FLAG);
        p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags &
                                            VFPF_UPDATE_RSS_CAPS_FLAG);
        p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags &
                                         VFPF_UPDATE_RSS_IND_TABLE_FLAG);
        p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &
                                   VFPF_UPDATE_RSS_KEY_FLAG);

        p_rss->rss_enable = p_rss_tlv->rss_enable;
        p_rss->rss_eng_id = vf->relative_vf_id + 1;
        p_rss->rss_caps = p_rss_tlv->rss_caps;
        p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
        memcpy(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
               sizeof(p_rss->rss_ind_table));
        memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));

        table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
                           (1 << p_rss_tlv->rss_table_size_log));

        max_q_idx = ARRAY_SIZE(vf->vf_queues);

        for (i = 0; i < table_size; i++) {
                u16 index = vf->vf_queues[0].fw_rx_qid;

                q_idx = p_rss->rss_ind_table[i];
                if (q_idx >= max_q_idx)
                        DP_NOTICE(p_hwfn,
                                  "rss_ind_table[%d] = %d, rxq is out of range\n",
                                  i, q_idx);
                else if (!vf->vf_queues[q_idx].rxq_active)
                        DP_NOTICE(p_hwfn,
                                  "rss_ind_table[%d] = %d, rxq is not active\n",
                                  i, q_idx);
                else
                        index = vf->vf_queues[q_idx].fw_rx_qid;
                p_rss->rss_ind_table[i] = index;
        }

        p_data->rss_params = p_rss;
        *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
}
static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
                                        struct qed_ptt *p_ptt,
                                        struct qed_vf_info *vf)
{
        struct qed_sp_vport_update_params params;
        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
        struct qed_rss_params rss_params;
        u8 status = PFVF_STATUS_SUCCESS;
        u16 tlvs_mask = 0;
        u16 length;
        int rc;

        memset(&params, 0, sizeof(params));
        params.opaque_fid = vf->opaque_fid;
        params.vport_id = vf->vport_id;
        params.rss_params = NULL;

        /* Search for extended tlvs list and update values
         * from VF in struct qed_sp_vport_update_params.
         */
        qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
        qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
        qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
        qed_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,
                                    mbx, &tlvs_mask);

        /* Just log a message if there is no single extended tlv in buffer.
         * When all features of vport update ramrod would be requested by VF
         * as extended TLVs in buffer then an error can be returned in response
         * if there is no extended TLV present in buffer.
         */
        if (!tlvs_mask) {
                DP_NOTICE(p_hwfn,
                          "No feature tlvs found for vport update\n");
                status = PFVF_STATUS_NOT_SUPPORTED;
                goto out;
        }

        rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);

        if (rc)
                status = PFVF_STATUS_FAILURE;

out:
        length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
                                                  tlvs_mask, tlvs_mask);
        qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}
int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
                      int vfid, struct qed_filter_ucast *params)
{
        struct qed_public_vf_info *vf;

        vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
        if (!vf)
                return -EINVAL;

        /* No real decision to make; Store the configured MAC */
        if (params->type == QED_FILTER_MAC ||
            params->type == QED_FILTER_MAC_VLAN)
                ether_addr_copy(vf->mac, params->mac);

        return 0;
}
static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
                                        struct qed_ptt *p_ptt,
                                        struct qed_vf_info *vf)
{
        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
        struct vfpf_ucast_filter_tlv *req;
        u8 status = PFVF_STATUS_SUCCESS;
        struct qed_filter_ucast params;
        int rc;

        /* Prepare the unicast filter params */
        memset(&params, 0, sizeof(struct qed_filter_ucast));
        req = &mbx->req_virt->ucast_filter;
        params.opcode = (enum qed_filter_opcode)req->opcode;
        params.type = (enum qed_filter_ucast_type)req->type;

        params.is_rx_filter = 1;
        params.is_tx_filter = 1;
        params.vport_to_remove_from = vf->vport_id;
        params.vport_to_add_to = vf->vport_id;
        memcpy(params.mac, req->mac, ETH_ALEN);
        params.vlan = req->vlan;

        DP_VERBOSE(p_hwfn,
                   QED_MSG_IOV,
                   "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
                   vf->abs_vf_id, params.opcode, params.type,
                   params.is_rx_filter ? "RX" : "",
                   params.is_tx_filter ? "TX" : "",
                   params.vport_to_add_to,
                   params.mac[0], params.mac[1],
                   params.mac[2], params.mac[3],
                   params.mac[4], params.mac[5], params.vlan);

        if (!vf->vport_instance) {
                DP_VERBOSE(p_hwfn,
                           QED_MSG_IOV,
                           "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
                           vf->abs_vf_id);
                status = PFVF_STATUS_FAILURE;
                goto out;
        }

        rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, &params);
        if (rc) {
                status = PFVF_STATUS_FAILURE;
                goto out;
        }

        rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
                                     QED_SPQ_MODE_CB, NULL);
        if (rc)
                status = PFVF_STATUS_FAILURE;

out:
        qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
                             sizeof(struct pfvf_def_resp_tlv), status);
}
static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn,
                                       struct qed_ptt *p_ptt,
                                       struct qed_vf_info *vf)
{
        int i;

        /* Reset the SBs */
        for (i = 0; i < vf->num_sbs; i++)
                qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
                                                vf->igu_sbs[i],
                                                vf->opaque_fid, false);

        qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
                             sizeof(struct pfvf_def_resp_tlv),
                             PFVF_STATUS_SUCCESS);
}
static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn,
                                 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
{
        u16 length = sizeof(struct pfvf_def_resp_tlv);
        u8 status = PFVF_STATUS_SUCCESS;

        /* Disable Interrupts for VF */
        qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

        /* Reset Permission table */
        qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

        qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
                             length, status);
}
static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
                                   struct qed_ptt *p_ptt,
                                   struct qed_vf_info *p_vf)
{
        u16 length = sizeof(struct pfvf_def_resp_tlv);

        qed_iov_vf_cleanup(p_hwfn, p_vf);

        qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
                             length, PFVF_STATUS_SUCCESS);
}
static int
qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
                         struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
{
        int cnt;
        u32 val;

        qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);

        for (cnt = 0; cnt < 50; cnt++) {
                val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
                if (!val)
                        break;
                msleep(20);
        }
        qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

        if (cnt == 50) {
                DP_ERR(p_hwfn,
                       "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
                       p_vf->abs_vf_id, val);
                return -EBUSY;
        }

        return 0;
}
static int
qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
                        struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
{
        u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
        int i, cnt;

        /* Read initial consumers & producers */
        for (i = 0; i < MAX_NUM_VOQS; i++) {
                u32 prod;

                cons[i] = qed_rd(p_hwfn, p_ptt,
                                 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
                                 i * 0x40);
                prod = qed_rd(p_hwfn, p_ptt,
                              PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
                              i * 0x40);
                distance[i] = prod - cons[i];
        }

        /* Wait for consumers to pass the producers */
        i = 0;
        for (cnt = 0; cnt < 50; cnt++) {
                for (; i < MAX_NUM_VOQS; i++) {
                        u32 tmp;

                        tmp = qed_rd(p_hwfn, p_ptt,
                                     PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
                                     i * 0x40);
                        if (distance[i] > tmp - cons[i])
                                break;
                }

                if (i == MAX_NUM_VOQS)
                        break;

                msleep(20);
        }

        if (cnt == 50) {
                DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
                       p_vf->abs_vf_id, i);
                return -EBUSY;
        }

        return 0;
}
static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn,
                               struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
{
        int rc;

        rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
        if (rc)
                return rc;

        rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
        if (rc)
                return rc;

        return 0;
}
static int
qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
                               struct qed_ptt *p_ptt,
                               u16 rel_vf_id, u32 *ack_vfs)
{
        struct qed_vf_info *p_vf;
        int rc = 0;

        p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
        if (!p_vf)
                return 0;

        if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
            (1ULL << (rel_vf_id % 64))) {
                u16 vfid = p_vf->abs_vf_id;

                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "VF[%d] - Handling FLR\n", vfid);

                qed_iov_vf_cleanup(p_hwfn, p_vf);

                /* If VF isn't active, no need for anything but SW */
                if (!p_vf->b_init)
                        goto cleanup;

                rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
                if (rc)
                        goto cleanup;

                rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true);
                if (rc) {
                        DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", vfid);
                        return rc;
                }

                /* VF_STOPPED has to be set only after final cleanup
                 * but prior to re-enabling the VF.
                 */
                p_vf->state = VF_STOPPED;

                rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
                if (rc) {
                        DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
                               vfid);
                        return rc;
                }
cleanup:
                /* Mark VF for ack and clean pending state */
                if (p_vf->state == VF_RESET)
                        p_vf->state = VF_STOPPED;
                ack_vfs[vfid / 32] |= (1 << (vfid % 32));
                p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
                    ~(1ULL << (rel_vf_id % 64));
                p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
                    ~(1ULL << (rel_vf_id % 64));
        }

        return rc;
}
int qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        u32 ack_vfs[VF_MAX_STATIC / 32];
        int rc = 0;
        u16 i;

        memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));

        /* Since BRB <-> PRS interface can't be tested as part of the flr
         * polling due to HW limitations, simply sleep a bit. And since
         * there's no need to wait per-vf, do it before looping.
         */
        msleep(100);

        for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++)
                qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);

        rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
        return rc;
}
int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
{
        u16 i, found = 0;

        DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
        for (i = 0; i < (VF_MAX_STATIC / 32); i++)
                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "[%08x,...,%08x]: %08x\n",
                           i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);

        if (!p_hwfn->cdev->p_iov_info) {
                DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
                return 0;
        }

        /* Mark VFs */
        for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) {
                struct qed_vf_info *p_vf;
                u8 vfid;

                p_vf = qed_iov_get_vf_info(p_hwfn, i, false);
                if (!p_vf)
                        continue;

                vfid = p_vf->abs_vf_id;
                if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
                        u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
                        u16 rel_vf_id = p_vf->relative_vf_id;

                        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                                   "VF[%d] [rel %d] got FLR-ed\n",
                                   vfid, rel_vf_id);

                        p_vf->state = VF_RESET;

                        /* No need to lock here, since pending_flr should
                         * only change here and before ACKing MFw. Since
                         * MFW will not trigger an additional attention for
                         * VF flr until ACKs, we're safe.
                         */
                        p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
                        found = 1;
                }
        }

        return found;
}
static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
                                    struct qed_ptt *p_ptt, int vfid)
{
        struct qed_iov_vf_mbx *mbx;
        struct qed_vf_info *p_vf;
        int i;

        p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
        if (!p_vf)
                return;

        mbx = &p_vf->vf_mbx;

        /* qed_iov_process_mbx_request */
        DP_VERBOSE(p_hwfn,
                   QED_MSG_IOV,
                   "qed_iov_process_mbx_req vfid %d\n", p_vf->abs_vf_id);

        mbx->first_tlv = mbx->req_virt->first_tlv;

        /* check if tlv type is known */
        if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
                switch (mbx->first_tlv.tl.type) {
                case CHANNEL_TLV_ACQUIRE:
                        qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
                        break;
                case CHANNEL_TLV_VPORT_START:
                        qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
                        break;
                case CHANNEL_TLV_VPORT_TEARDOWN:
                        qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
                        break;
                case CHANNEL_TLV_START_RXQ:
                        qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
                        break;
                case CHANNEL_TLV_START_TXQ:
                        qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
                        break;
                case CHANNEL_TLV_STOP_RXQS:
                        qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
                        break;
                case CHANNEL_TLV_STOP_TXQS:
                        qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
                        break;
                case CHANNEL_TLV_VPORT_UPDATE:
                        qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
                        break;
                case CHANNEL_TLV_UCAST_FILTER:
                        qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
                        break;
                case CHANNEL_TLV_CLOSE:
                        qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
                        break;
                case CHANNEL_TLV_INT_CLEANUP:
                        qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
                        break;
                case CHANNEL_TLV_RELEASE:
                        qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
                        break;
                }
        } else {
                /* unknown TLV - this may belong to a VF driver from the future
                 * - a version written after this PF driver was written, which
                 * supports features unknown as of yet. Too bad since we don't
                 * support them. Or this may be because someone wrote a crappy
                 * VF driver and is sending garbage over the channel.
                 */
                DP_ERR(p_hwfn,
                       "unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n",
                       mbx->first_tlv.tl.type, mbx->first_tlv.tl.length);

                for (i = 0; i < 20; i++) {
                        DP_VERBOSE(p_hwfn,
                                   QED_MSG_IOV,
                                   "%x ",
                                   mbx->req_virt->tlv_buf_size.tlv_buffer[i]);
                }
        }
}
void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid)
{
        u64 add_bit = 1ULL << (vfid % 64);

        p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
}
static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn,
                                                    u64 *events)
{
        u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;

        memcpy(events, p_pending_events, sizeof(u64) * QED_VF_ARRAY_LENGTH);
        memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
}
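
/* Pending events are tracked one bit per relative VF id, 64 VFs per u64,
 * across QED_VF_ARRAY_LENGTH words; consumers copy-and-clear the whole
 * bitmap in one go via the helper above.
 */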
static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
                              u16 abs_vfid, struct regpair *vf_msg)
{
        u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf;
        struct qed_vf_info *p_vf;

        if (!qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min)) {
                DP_VERBOSE(p_hwfn,
                           QED_MSG_IOV,
                           "Got a message from VF [abs 0x%08x] that cannot be handled by PF\n",
                           abs_vfid);
                return 0;
        }
        p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];

        /* List the physical address of the request so that handler
         * could later on copy the message from it.
         */
        p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;

        /* Mark the event and schedule the workqueue */
        qed_iov_pf_add_pending_events(p_hwfn, p_vf->relative_vf_id);
        qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);

        return 0;
}
int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
                        u8 opcode, __le16 echo, union event_ring_data *data)
{
        switch (opcode) {
        case COMMON_EVENT_VF_PF_CHANNEL:
                return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
                                          &data->vf_pf_channel.msg_addr);
        default:
                DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
                        opcode);
                return -EINVAL;
        }
}
u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
{
        struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
        u16 i;

        if (!p_iov)
                goto out;

        for (i = rel_vf_id; i < p_iov->total_vfs; i++)
                if (qed_iov_is_valid_vfid(p_hwfn, i, true))
                        return i;

out:
        return MAX_NUM_VFS;
}
static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
                               int vfid)
{
        struct qed_dmae_params params;
        struct qed_vf_info *vf_info;

        vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
        if (!vf_info)
                return -EINVAL;

        memset(&params, 0, sizeof(struct qed_dmae_params));
        params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST;
        params.src_vfid = vf_info->abs_vf_id;

        if (qed_dmae_host2host(p_hwfn, ptt,
                               vf_info->vf_mbx.pending_req,
                               vf_info->vf_mbx.req_phys,
                               sizeof(union vfpf_tlvs) / 4, &params)) {
                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "Failed to copy message from VF 0x%02x\n", vfid);
                return -EIO;
        }

        return 0;
}
bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
{
        struct qed_vf_info *p_vf_info;

        p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
        if (!p_vf_info)
                return true;

        return p_vf_info->state == VF_STOPPED;
}
/**
 * qed_schedule_iov - schedules IOV task for VF and PF
 * @hwfn: hardware function pointer
 * @flag: IOV flag for VF/PF
 */
void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
{
        smp_mb__before_atomic();
        set_bit(flag, &hwfn->iov_task_flags);
        smp_mb__after_atomic();
        DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
        queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
}
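
/* The barriers around set_bit() above pair with the test_and_clear_bit()
 * calls in qed_iov_pf_task(), ensuring the flag is visible to the worker
 * that queue_delayed_work() may immediately run.
 */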
void qed_vf_start_iov_wq(struct qed_dev *cdev)
{
        int i;

        for_each_hwfn(cdev, i)
            queue_delayed_work(cdev->hwfns[i].iov_wq,
                               &cdev->hwfns[i].iov_task, 0);
}
int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
{
        int i, j;

        for_each_hwfn(cdev, i)
            if (cdev->hwfns[i].iov_wq)
                flush_workqueue(cdev->hwfns[i].iov_wq);

        /* Mark VFs for disablement */
        qed_iov_set_vfs_to_disable(cdev, true);

        if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
                pci_disable_sriov(cdev->pdev);

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *hwfn = &cdev->hwfns[i];
                struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

                /* Failure to acquire the ptt in 100g creates an odd error
                 * where the first engine has already released IOV.
                 */
                if (!ptt) {
                        DP_ERR(hwfn, "Failed to acquire ptt\n");
                        return -EBUSY;
                }

                qed_for_each_vf(hwfn, j) {
                        int k;

                        if (!qed_iov_is_valid_vfid(hwfn, j, true))
                                continue;

                        /* Wait until VF is disabled before releasing */
                        for (k = 0; k < 100; k++) {
                                if (!qed_iov_is_vf_stopped(hwfn, j))
                                        msleep(20);
                                else
                                        break;
                        }

                        if (k < 100)
                                qed_iov_release_hw_for_vf(&cdev->hwfns[i],
                                                          ptt, (u16)j);
                        else
                                DP_ERR(hwfn,
                                       "Timeout waiting for VF's FLR to end\n");
                }

                qed_ptt_release(hwfn, ptt);
        }

        qed_iov_set_vfs_to_disable(cdev, false);

        return 0;
}
static int qed_sriov_enable(struct qed_dev *cdev, int num)
{
        struct qed_sb_cnt_info sb_cnt_info;
        int i, j, rc;

        if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
                DP_NOTICE(cdev, "Can start at most %d VFs\n",
                          RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
                return -EINVAL;
        }

        /* Initialize HW for VF access */
        for_each_hwfn(cdev, j) {
                struct qed_hwfn *hwfn = &cdev->hwfns[j];
                struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
                int num_sbs = 0, limit = 16;

                if (!ptt) {
                        DP_ERR(hwfn, "Failed to acquire ptt\n");
                        rc = -EBUSY;
                        goto err;
                }

                memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
                qed_int_get_num_sbs(hwfn, &sb_cnt_info);
                num_sbs = min_t(int, sb_cnt_info.sb_free_blk, limit);

                for (i = 0; i < num; i++) {
                        if (!qed_iov_is_valid_vfid(hwfn, i, false))
                                continue;

                        rc = qed_iov_init_hw_for_vf(hwfn,
                                                    ptt, i, num_sbs / num);
                        if (rc) {
                                DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
                                qed_ptt_release(hwfn, ptt);
                                goto err;
                        }
                }

                qed_ptt_release(hwfn, ptt);
        }

        /* Enable SRIOV PCIe functions */
        rc = pci_enable_sriov(cdev->pdev, num);
        if (rc) {
                DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
                goto err;
        }

        return num;

err:
        qed_sriov_disable(cdev, false);
        return rc;
}
static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
{
        if (!IS_QED_SRIOV(cdev)) {
                DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
                return -EOPNOTSUPP;
        }

        if (num_vfs_param)
                return qed_sriov_enable(cdev, num_vfs_param);
        else
                return qed_sriov_disable(cdev, true);
}
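
/* qed_sriov_configure() is exported through qed_iov_ops_pass.configure at
 * the bottom of this file; the expectation (an assumption about the
 * consumer, e.g. qede) is that it is reached from the PCI sriov_numvfs
 * sysfs path, so writing N lands in qed_sriov_enable(cdev, N) and writing 0
 * in qed_sriov_disable(cdev, true).
 */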
static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
{
        u64 events[QED_VF_ARRAY_LENGTH];
        struct qed_ptt *ptt;
        int i;

        ptt = qed_ptt_acquire(hwfn);
        if (!ptt) {
                DP_VERBOSE(hwfn, QED_MSG_IOV,
                           "Can't acquire PTT; re-scheduling\n");
                qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
                return;
        }

        qed_iov_pf_get_and_clear_pending_events(hwfn, events);

        DP_VERBOSE(hwfn, QED_MSG_IOV,
                   "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
                   events[0], events[1], events[2]);

        qed_for_each_vf(hwfn, i) {
                /* Skip VFs with no pending messages */
                if (!(events[i / 64] & (1ULL << (i % 64))))
                        continue;

                DP_VERBOSE(hwfn, QED_MSG_IOV,
                           "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
                           i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);

                /* Copy VF's message to PF's request buffer for that VF */
                if (qed_iov_copy_vf_msg(hwfn, ptt, i))
                        continue;

                qed_iov_process_mbx_req(hwfn, ptt, i);
        }

        qed_ptt_release(hwfn, ptt);
}
void qed_iov_pf_task(struct work_struct *work)
{
        struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
                                             iov_task.work);
        int rc;

        if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
                return;

        if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {
                struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

                if (!ptt) {
                        qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
                        return;
                }

                rc = qed_iov_vf_flr_cleanup(hwfn, ptt);
                if (rc)
                        qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);

                qed_ptt_release(hwfn, ptt);
        }

        if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
                qed_handle_vf_msg(hwfn);
}
void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
{
        int i;

        for_each_hwfn(cdev, i) {
                if (!cdev->hwfns[i].iov_wq)
                        continue;

                if (schedule_first) {
                        qed_schedule_iov(&cdev->hwfns[i],
                                         QED_IOV_WQ_STOP_WQ_FLAG);
                        cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
                }

                flush_workqueue(cdev->hwfns[i].iov_wq);
                destroy_workqueue(cdev->hwfns[i].iov_wq);
        }
}
int qed_iov_wq_start(struct qed_dev *cdev)
{
        char name[NAME_SIZE];
        int i;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                /* PFs needs a dedicated workqueue only if they support IOV. */
                if (!IS_PF_SRIOV(p_hwfn))
                        continue;

                snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
                         cdev->pdev->bus->number,
                         PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);

                p_hwfn->iov_wq = create_singlethread_workqueue(name);
                if (!p_hwfn->iov_wq) {
                        DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
                        return -ENOMEM;
                }

                INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
        }

        return 0;
}
const struct qed_iov_hv_ops qed_iov_ops_pass = {
        .configure = &qed_sriov_configure,
};