ce8aec3928c2275da2a1e96b5dd3f7cb4f0d41c0
[cascardo/linux.git] / drivers / net / ethernet / qlogic / qed / qed_vf.c
1 /* QLogic qed NIC Driver
2  * Copyright (c) 2015 QLogic Corporation
3  *
4  * This software is available under the terms of the GNU General Public License
5  * (GPL) Version 2, available from the file COPYING in the main directory of
6  * this source tree.
7  */
8
9 #include <linux/crc32.h>
10 #include <linux/etherdevice.h>
11 #include "qed.h"
12 #include "qed_sriov.h"
13 #include "qed_vf.h"
14
15 static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length)
16 {
17         struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
18         void *p_tlv;
19
20         /* This lock is released when we receive PF's response
21          * in qed_send_msg2pf().
22          * So, qed_vf_pf_prep() and qed_send_msg2pf()
23          * must come in sequence.
24          */
25         mutex_lock(&(p_iov->mutex));
26
27         DP_VERBOSE(p_hwfn,
28                    QED_MSG_IOV,
29                    "preparing to send 0x%04x tlv over vf pf channel\n",
30                    type);
31
32         /* Reset Requst offset */
33         p_iov->offset = (u8 *)p_iov->vf2pf_request;
34
35         /* Clear mailbox - both request and reply */
36         memset(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));
37         memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
38
39         /* Init type and length */
40         p_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, type, length);
41
42         /* Init first tlv header */
43         ((struct vfpf_first_tlv *)p_tlv)->reply_address =
44             (u64)p_iov->pf2vf_reply_phys;
45
46         return p_tlv;
47 }
48
49 static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
50 {
51         union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
52         struct ustorm_trigger_vf_zone trigger;
53         struct ustorm_vf_zone *zone_data;
54         int rc = 0, time = 100;
55
56         zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;
57
58         /* output tlvs list */
59         qed_dp_tlv_list(p_hwfn, p_req);
60
61         /* need to add the END TLV to the message size */
62         resp_size += sizeof(struct channel_list_end_tlv);
63
64         /* Send TLVs over HW channel */
65         memset(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
66         trigger.vf_pf_msg_valid = 1;
67
68         DP_VERBOSE(p_hwfn,
69                    QED_MSG_IOV,
70                    "VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> %p\n",
71                    GET_FIELD(p_hwfn->hw_info.concrete_fid,
72                              PXP_CONCRETE_FID_PFID),
73                    upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
74                    lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
75                    &zone_data->non_trigger.vf_pf_msg_addr,
76                    *((u32 *)&trigger), &zone_data->trigger);
77
78         REG_WR(p_hwfn,
79                (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo,
80                lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));
81
82         REG_WR(p_hwfn,
83                (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi,
84                upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));
85
86         /* The message data must be written first, to prevent trigger before
87          * data is written.
88          */
89         wmb();
90
91         REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger));
92
93         /* When PF would be done with the response, it would write back to the
94          * `done' address. Poll until then.
95          */
96         while ((!*done) && time) {
97                 msleep(25);
98                 time--;
99         }
100
101         if (!*done) {
102                 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
103                            "VF <-- PF Timeout [Type %d]\n",
104                            p_req->first_tlv.tl.type);
105                 rc = -EBUSY;
106                 goto exit;
107         } else {
108                 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
109                            "PF response: %d [Type %d]\n",
110                            *done, p_req->first_tlv.tl.type);
111         }
112
113 exit:
114         mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
115
116         return rc;
117 }
118
119 #define VF_ACQUIRE_THRESH 3
120 #define VF_ACQUIRE_MAC_FILTERS 1
121
122 static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
123 {
124         struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
125         struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
126         struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
127         u8 rx_count = 1, tx_count = 1, num_sbs = 1;
128         u8 num_mac = VF_ACQUIRE_MAC_FILTERS;
129         bool resources_acquired = false;
130         struct vfpf_acquire_tlv *req;
131         int rc = 0, attempts = 0;
132
133         /* clear mailbox and prep first tlv */
134         req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
135
136         /* starting filling the request */
137         req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;
138
139         req->resc_request.num_rxqs = rx_count;
140         req->resc_request.num_txqs = tx_count;
141         req->resc_request.num_sbs = num_sbs;
142         req->resc_request.num_mac_filters = num_mac;
143         req->resc_request.num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
144
145         req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
146         req->vfdev_info.fw_major = FW_MAJOR_VERSION;
147         req->vfdev_info.fw_minor = FW_MINOR_VERSION;
148         req->vfdev_info.fw_revision = FW_REVISION_VERSION;
149         req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
150         req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
151         req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR;
152
153         /* Fill capability field with any non-deprecated config we support */
154         req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;
155
156         /* pf 2 vf bulletin board address */
157         req->bulletin_addr = p_iov->bulletin.phys;
158         req->bulletin_size = p_iov->bulletin.size;
159
160         /* add list termination tlv */
161         qed_add_tlv(p_hwfn, &p_iov->offset,
162                     CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
163
164         while (!resources_acquired) {
165                 DP_VERBOSE(p_hwfn,
166                            QED_MSG_IOV, "attempting to acquire resources\n");
167
168                 /* send acquire request */
169                 rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
170                 if (rc)
171                         return rc;
172
173                 /* copy acquire response from buffer to p_hwfn */
174                 memcpy(&p_iov->acquire_resp, resp, sizeof(p_iov->acquire_resp));
175
176                 attempts++;
177
178                 if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
179                         /* PF agrees to allocate our resources */
180                         if (!(resp->pfdev_info.capabilities &
181                               PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
182                                 DP_INFO(p_hwfn,
183                                         "PF is using old incompatible driver; Either downgrade driver or request provider to update hypervisor version\n");
184                                 return -EINVAL;
185                         }
186                         DP_VERBOSE(p_hwfn, QED_MSG_IOV, "resources acquired\n");
187                         resources_acquired = true;
188                 } else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
189                            attempts < VF_ACQUIRE_THRESH) {
190                         DP_VERBOSE(p_hwfn,
191                                    QED_MSG_IOV,
192                                    "PF unwilling to fullfill resource request. Try PF recommended amount\n");
193
194                         /* humble our request */
195                         req->resc_request.num_txqs = resp->resc.num_txqs;
196                         req->resc_request.num_rxqs = resp->resc.num_rxqs;
197                         req->resc_request.num_sbs = resp->resc.num_sbs;
198                         req->resc_request.num_mac_filters =
199                             resp->resc.num_mac_filters;
200                         req->resc_request.num_vlan_filters =
201                             resp->resc.num_vlan_filters;
202
203                         /* Clear response buffer */
204                         memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
205                 } else if ((resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) &&
206                            pfdev_info->major_fp_hsi &&
207                            (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
208                         DP_NOTICE(p_hwfn,
209                                   "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n",
210                                   pfdev_info->major_fp_hsi,
211                                   pfdev_info->minor_fp_hsi,
212                                   ETH_HSI_VER_MAJOR,
213                                   ETH_HSI_VER_MINOR, pfdev_info->major_fp_hsi);
214                         return -EINVAL;
215                 } else {
216                         DP_ERR(p_hwfn,
217                                "PF returned error %d to VF acquisition request\n",
218                                resp->hdr.status);
219                         return -EAGAIN;
220                 }
221         }
222
223         /* Update bulletin board size with response from PF */
224         p_iov->bulletin.size = resp->bulletin_size;
225
226         /* get HW info */
227         p_hwfn->cdev->type = resp->pfdev_info.dev_type;
228         p_hwfn->cdev->chip_rev = resp->pfdev_info.chip_rev;
229
230         p_hwfn->cdev->chip_num = pfdev_info->chip_num & 0xffff;
231
232         /* Learn of the possibility of CMT */
233         if (IS_LEAD_HWFN(p_hwfn)) {
234                 if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
235                         DP_NOTICE(p_hwfn, "100g VF\n");
236                         p_hwfn->cdev->num_hwfns = 2;
237                 }
238         }
239
240         if (ETH_HSI_VER_MINOR &&
241             (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
242                 DP_INFO(p_hwfn,
243                         "PF is using older fastpath HSI; %02x.%02x is configured\n",
244                         ETH_HSI_VER_MAJOR, resp->pfdev_info.minor_fp_hsi);
245         }
246
247         return 0;
248 }
249
250 int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
251 {
252         struct qed_vf_iov *p_iov;
253         u32 reg;
254
255         /* Set number of hwfns - might be overriden once leading hwfn learns
256          * actual configuration from PF.
257          */
258         if (IS_LEAD_HWFN(p_hwfn))
259                 p_hwfn->cdev->num_hwfns = 1;
260
261         /* Set the doorbell bar. Assumption: regview is set */
262         p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
263                                           PXP_VF_BAR0_START_DQ;
264
265         reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
266         p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);
267
268         reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS;
269         p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg);
270
271         /* Allocate vf sriov info */
272         p_iov = kzalloc(sizeof(*p_iov), GFP_KERNEL);
273         if (!p_iov) {
274                 DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n");
275                 return -ENOMEM;
276         }
277
278         /* Allocate vf2pf msg */
279         p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
280                                                   sizeof(union vfpf_tlvs),
281                                                   &p_iov->vf2pf_request_phys,
282                                                   GFP_KERNEL);
283         if (!p_iov->vf2pf_request) {
284                 DP_NOTICE(p_hwfn,
285                           "Failed to allocate `vf2pf_request' DMA memory\n");
286                 goto free_p_iov;
287         }
288
289         p_iov->pf2vf_reply = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
290                                                 sizeof(union pfvf_tlvs),
291                                                 &p_iov->pf2vf_reply_phys,
292                                                 GFP_KERNEL);
293         if (!p_iov->pf2vf_reply) {
294                 DP_NOTICE(p_hwfn,
295                           "Failed to allocate `pf2vf_reply' DMA memory\n");
296                 goto free_vf2pf_request;
297         }
298
299         DP_VERBOSE(p_hwfn,
300                    QED_MSG_IOV,
301                    "VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n",
302                    p_iov->vf2pf_request,
303                    (u64) p_iov->vf2pf_request_phys,
304                    p_iov->pf2vf_reply, (u64)p_iov->pf2vf_reply_phys);
305
306         /* Allocate Bulletin board */
307         p_iov->bulletin.size = sizeof(struct qed_bulletin_content);
308         p_iov->bulletin.p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
309                                                     p_iov->bulletin.size,
310                                                     &p_iov->bulletin.phys,
311                                                     GFP_KERNEL);
312         DP_VERBOSE(p_hwfn, QED_MSG_IOV,
313                    "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n",
314                    p_iov->bulletin.p_virt,
315                    (u64)p_iov->bulletin.phys, p_iov->bulletin.size);
316
317         mutex_init(&p_iov->mutex);
318
319         p_hwfn->vf_iov_info = p_iov;
320
321         p_hwfn->hw_info.personality = QED_PCI_ETH;
322
323         return qed_vf_pf_acquire(p_hwfn);
324
325 free_vf2pf_request:
326         dma_free_coherent(&p_hwfn->cdev->pdev->dev,
327                           sizeof(union vfpf_tlvs),
328                           p_iov->vf2pf_request, p_iov->vf2pf_request_phys);
329 free_p_iov:
330         kfree(p_iov);
331
332         return -ENOMEM;
333 }
334
335 int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
336                         u8 rx_qid,
337                         u16 sb,
338                         u8 sb_index,
339                         u16 bd_max_bytes,
340                         dma_addr_t bd_chain_phys_addr,
341                         dma_addr_t cqe_pbl_addr,
342                         u16 cqe_pbl_size, void __iomem **pp_prod)
343 {
344         struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
345         struct pfvf_start_queue_resp_tlv *resp;
346         struct vfpf_start_rxq_tlv *req;
347         int rc;
348
349         /* clear mailbox and prep first tlv */
350         req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));
351
352         req->rx_qid = rx_qid;
353         req->cqe_pbl_addr = cqe_pbl_addr;
354         req->cqe_pbl_size = cqe_pbl_size;
355         req->rxq_addr = bd_chain_phys_addr;
356         req->hw_sb = sb;
357         req->sb_index = sb_index;
358         req->bd_max_bytes = bd_max_bytes;
359         req->stat_id = -1;
360
361         /* add list termination tlv */
362         qed_add_tlv(p_hwfn, &p_iov->offset,
363                     CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
364
365         resp = &p_iov->pf2vf_reply->queue_start;
366         rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
367         if (rc)
368                 return rc;
369
370         if (resp->hdr.status != PFVF_STATUS_SUCCESS)
371                 return -EINVAL;
372
373         /* Learn the address of the producer from the response */
374         if (pp_prod) {
375                 u64 init_prod_val = 0;
376
377                 *pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset;
378                 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
379                            "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n",
380                            rx_qid, *pp_prod, resp->offset);
381
382                 /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
383                 __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
384                                   (u32 *)&init_prod_val);
385         }
386
387         return rc;
388 }
389
390 int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion)
391 {
392         struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
393         struct vfpf_stop_rxqs_tlv *req;
394         struct pfvf_def_resp_tlv *resp;
395         int rc;
396
397         /* clear mailbox and prep first tlv */
398         req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));
399
400         req->rx_qid = rx_qid;
401         req->num_rxqs = 1;
402         req->cqe_completion = cqe_completion;
403
404         /* add list termination tlv */
405         qed_add_tlv(p_hwfn, &p_iov->offset,
406                     CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
407
408         resp = &p_iov->pf2vf_reply->default_resp;
409         rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
410         if (rc)
411                 return rc;
412
413         if (resp->hdr.status != PFVF_STATUS_SUCCESS)
414                 return -EINVAL;
415
416         return rc;
417 }
418
419 int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
420                         u16 tx_queue_id,
421                         u16 sb,
422                         u8 sb_index,
423                         dma_addr_t pbl_addr,
424                         u16 pbl_size, void __iomem **pp_doorbell)
425 {
426         struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
427         struct vfpf_start_txq_tlv *req;
428         struct pfvf_def_resp_tlv *resp;
429         int rc;
430
431         /* clear mailbox and prep first tlv */
432         req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));
433
434         req->tx_qid = tx_queue_id;
435
436         /* Tx */
437         req->pbl_addr = pbl_addr;
438         req->pbl_size = pbl_size;
439         req->hw_sb = sb;
440         req->sb_index = sb_index;
441
442         /* add list termination tlv */
443         qed_add_tlv(p_hwfn, &p_iov->offset,
444                     CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
445
446         resp = &p_iov->pf2vf_reply->default_resp;
447         rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
448         if (rc)
449                 return rc;
450
451         if (resp->hdr.status != PFVF_STATUS_SUCCESS)
452                 return -EINVAL;
453
454         if (pp_doorbell) {
455                 u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id];
456
457                 *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
458                                              qed_db_addr(cid, DQ_DEMS_LEGACY);
459         }
460
461         return rc;
462 }
463
464 int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
465 {
466         struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
467         struct vfpf_stop_txqs_tlv *req;
468         struct pfvf_def_resp_tlv *resp;
469         int rc;
470
471         /* clear mailbox and prep first tlv */
472         req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));
473
474         req->tx_qid = tx_qid;
475         req->num_txqs = 1;
476
477         /* add list termination tlv */
478         qed_add_tlv(p_hwfn, &p_iov->offset,
479                     CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
480
481         resp = &p_iov->pf2vf_reply->default_resp;
482         rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
483         if (rc)
484                 return rc;
485
486         if (resp->hdr.status != PFVF_STATUS_SUCCESS)
487                 return -EINVAL;
488
489         return rc;
490 }
491
492 int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
493                           u8 vport_id,
494                           u16 mtu,
495                           u8 inner_vlan_removal,
496                           enum qed_tpa_mode tpa_mode,
497                           u8 max_buffers_per_cqe, u8 only_untagged)
498 {
499         struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
500         struct vfpf_vport_start_tlv *req;
501         struct pfvf_def_resp_tlv *resp;
502         int rc, i;
503
504         /* clear mailbox and prep first tlv */
505         req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));
506
507         req->mtu = mtu;
508         req->vport_id = vport_id;
509         req->inner_vlan_removal = inner_vlan_removal;
510         req->tpa_mode = tpa_mode;
511         req->max_buffers_per_cqe = max_buffers_per_cqe;
512         req->only_untagged = only_untagged;
513
514         /* status blocks */
515         for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++)
516                 if (p_hwfn->sbs_info[i])
517                         req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys;
518
519         /* add list termination tlv */
520         qed_add_tlv(p_hwfn, &p_iov->offset,
521                     CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
522
523         resp = &p_iov->pf2vf_reply->default_resp;
524         rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
525         if (rc)
526                 return rc;
527
528         if (resp->hdr.status != PFVF_STATUS_SUCCESS)
529                 return -EINVAL;
530
531         return rc;
532 }
533
534 int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
535 {
536         struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
537         struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
538         int rc;
539
540         /* clear mailbox and prep first tlv */
541         qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
542                        sizeof(struct vfpf_first_tlv));
543
544         /* add list termination tlv */
545         qed_add_tlv(p_hwfn, &p_iov->offset,
546                     CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
547
548         rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
549         if (rc)
550                 return rc;
551
552         if (resp->hdr.status != PFVF_STATUS_SUCCESS)
553                 return -EINVAL;
554
555         return rc;
556 }
557
558 static bool
559 qed_vf_handle_vp_update_is_needed(struct qed_hwfn *p_hwfn,
560                                   struct qed_sp_vport_update_params *p_data,
561                                   u16 tlv)
562 {
563         switch (tlv) {
564         case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE:
565                 return !!(p_data->update_vport_active_rx_flg ||
566                           p_data->update_vport_active_tx_flg);
567         case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH:
568                 return !!p_data->update_tx_switching_flg;
569         case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP:
570                 return !!p_data->update_inner_vlan_removal_flg;
571         case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN:
572                 return !!p_data->update_accept_any_vlan_flg;
573         case CHANNEL_TLV_VPORT_UPDATE_MCAST:
574                 return !!p_data->update_approx_mcast_flg;
575         case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM:
576                 return !!(p_data->accept_flags.update_rx_mode_config ||
577                           p_data->accept_flags.update_tx_mode_config);
578         case CHANNEL_TLV_VPORT_UPDATE_RSS:
579                 return !!p_data->rss_params;
580         case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA:
581                 return !!p_data->sge_tpa_params;
582         default:
583                 DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d]\n",
584                         tlv);
585                 return false;
586         }
587 }
588
589 static void
590 qed_vf_handle_vp_update_tlvs_resp(struct qed_hwfn *p_hwfn,
591                                   struct qed_sp_vport_update_params *p_data)
592 {
593         struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
594         struct pfvf_def_resp_tlv *p_resp;
595         u16 tlv;
596
597         for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
598              tlv < CHANNEL_TLV_VPORT_UPDATE_MAX; tlv++) {
599                 if (!qed_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv))
600                         continue;
601
602                 p_resp = (struct pfvf_def_resp_tlv *)
603                          qed_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply,
604                                                   tlv);
605                 if (p_resp && p_resp->hdr.status)
606                         DP_VERBOSE(p_hwfn, QED_MSG_IOV,
607                                    "TLV[%d] Configuration %s\n",
608                                    tlv,
609                                    (p_resp && p_resp->hdr.status) ? "succeeded"
610                                                                   : "failed");
611         }
612 }
613
/* Build and send a VPORT_UPDATE message composed of one extended TLV per
 * requested configuration aspect (activate, tx-switching, mcast, accept
 * params, RSS, accept-any-vlan), then log per-TLV results from the PF.
 * Returns 0 on success, -EINVAL if the PF rejects, or a channel error.
 */
int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
                           struct qed_sp_vport_update_params *p_params)
{
        struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
        struct vfpf_vport_update_tlv *req;
        struct pfvf_def_resp_tlv *resp;
        u8 update_rx, update_tx;
        u32 resp_size = 0;
        u16 size, tlv;
        int rc;

        /* Each extended TLV added below also grows the expected response by
         * one pfvf_def_resp_tlv; resp_size tracks that running total.
         */
        resp = &p_iov->pf2vf_reply->default_resp;
        resp_size = sizeof(*resp);

        update_rx = p_params->update_vport_active_rx_flg;
        update_tx = p_params->update_vport_active_tx_flg;

        /* clear mailbox and prep header tlv (also locks the channel mutex,
         * released by qed_send_msg2pf() below)
         */
        qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req));

        /* Prepare extended tlvs */
        if (update_rx || update_tx) {
                struct vfpf_vport_update_activate_tlv *p_act_tlv;

                size = sizeof(struct vfpf_vport_update_activate_tlv);
                p_act_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
                                        CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
                                        size);
                resp_size += sizeof(struct pfvf_def_resp_tlv);

                if (update_rx) {
                        p_act_tlv->update_rx = update_rx;
                        p_act_tlv->active_rx = p_params->vport_active_rx_flg;
                }

                if (update_tx) {
                        p_act_tlv->update_tx = update_tx;
                        p_act_tlv->active_tx = p_params->vport_active_tx_flg;
                }
        }

        if (p_params->update_tx_switching_flg) {
                struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;

                size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
                tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
                p_tx_switch_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
                                              tlv, size);
                resp_size += sizeof(struct pfvf_def_resp_tlv);

                p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg;
        }

        if (p_params->update_approx_mcast_flg) {
                struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;

                size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
                p_mcast_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
                                          CHANNEL_TLV_VPORT_UPDATE_MCAST, size);
                resp_size += sizeof(struct pfvf_def_resp_tlv);

                /* Copy the approximate-multicast bin bitmap wholesale */
                memcpy(p_mcast_tlv->bins, p_params->bins,
                       sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
        }

        /* update_rx/update_tx are reused here for the accept-mode flags */
        update_rx = p_params->accept_flags.update_rx_mode_config;
        update_tx = p_params->accept_flags.update_tx_mode_config;

        if (update_rx || update_tx) {
                struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;

                tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
                size = sizeof(struct vfpf_vport_update_accept_param_tlv);
                p_accept_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
                resp_size += sizeof(struct pfvf_def_resp_tlv);

                if (update_rx) {
                        p_accept_tlv->update_rx_mode = update_rx;
                        p_accept_tlv->rx_accept_filter =
                            p_params->accept_flags.rx_accept_filter;
                }

                if (update_tx) {
                        p_accept_tlv->update_tx_mode = update_tx;
                        p_accept_tlv->tx_accept_filter =
                            p_params->accept_flags.tx_accept_filter;
                }
        }

        if (p_params->rss_params) {
                struct qed_rss_params *rss_params = p_params->rss_params;
                struct vfpf_vport_update_rss_tlv *p_rss_tlv;

                size = sizeof(struct vfpf_vport_update_rss_tlv);
                p_rss_tlv = qed_add_tlv(p_hwfn,
                                        &p_iov->offset,
                                        CHANNEL_TLV_VPORT_UPDATE_RSS, size);
                resp_size += sizeof(struct pfvf_def_resp_tlv);

                /* Translate the individual update flags into the wire format */
                if (rss_params->update_rss_config)
                        p_rss_tlv->update_rss_flags |=
                            VFPF_UPDATE_RSS_CONFIG_FLAG;
                if (rss_params->update_rss_capabilities)
                        p_rss_tlv->update_rss_flags |=
                            VFPF_UPDATE_RSS_CAPS_FLAG;
                if (rss_params->update_rss_ind_table)
                        p_rss_tlv->update_rss_flags |=
                            VFPF_UPDATE_RSS_IND_TABLE_FLAG;
                if (rss_params->update_rss_key)
                        p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG;

                p_rss_tlv->rss_enable = rss_params->rss_enable;
                p_rss_tlv->rss_caps = rss_params->rss_caps;
                p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;
                memcpy(p_rss_tlv->rss_ind_table, rss_params->rss_ind_table,
                       sizeof(rss_params->rss_ind_table));
                memcpy(p_rss_tlv->rss_key, rss_params->rss_key,
                       sizeof(rss_params->rss_key));
        }

        if (p_params->update_accept_any_vlan_flg) {
                struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;

                size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
                tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
                p_any_vlan_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);

                resp_size += sizeof(struct pfvf_def_resp_tlv);
                p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
                p_any_vlan_tlv->update_accept_any_vlan_flg =
                    p_params->update_accept_any_vlan_flg;
        }

        /* add list termination tlv */
        qed_add_tlv(p_hwfn, &p_iov->offset,
                    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

        rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
        if (rc)
                return rc;

        if (resp->hdr.status != PFVF_STATUS_SUCCESS)
                return -EINVAL;

        /* Log each expected TLV response's status (informational only) */
        qed_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);

        return rc;
}
762
763 int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
764 {
765         struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
766         struct pfvf_def_resp_tlv *resp;
767         struct vfpf_first_tlv *req;
768         int rc;
769
770         /* clear mailbox and prep first tlv */
771         req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));
772
773         /* add list termination tlv */
774         qed_add_tlv(p_hwfn, &p_iov->offset,
775                     CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
776
777         resp = &p_iov->pf2vf_reply->default_resp;
778         rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
779         if (rc)
780                 return rc;
781
782         if (resp->hdr.status != PFVF_STATUS_SUCCESS)
783                 return -EAGAIN;
784
785         p_hwfn->b_int_enabled = 0;
786
787         return 0;
788 }
789
790 int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
791 {
792         struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
793         struct pfvf_def_resp_tlv *resp;
794         struct vfpf_first_tlv *req;
795         u32 size;
796         int rc;
797
798         /* clear mailbox and prep first tlv */
799         req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));
800
801         /* add list termination tlv */
802         qed_add_tlv(p_hwfn, &p_iov->offset,
803                     CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
804
805         resp = &p_iov->pf2vf_reply->default_resp;
806         rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
807
808         if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
809                 rc = -EAGAIN;
810
811         p_hwfn->b_int_enabled = 0;
812
813         if (p_iov->vf2pf_request)
814                 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
815                                   sizeof(union vfpf_tlvs),
816                                   p_iov->vf2pf_request,
817                                   p_iov->vf2pf_request_phys);
818         if (p_iov->pf2vf_reply)
819                 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
820                                   sizeof(union pfvf_tlvs),
821                                   p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);
822
823         if (p_iov->bulletin.p_virt) {
824                 size = sizeof(struct qed_bulletin_content);
825                 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
826                                   size,
827                                   p_iov->bulletin.p_virt, p_iov->bulletin.phys);
828         }
829
830         kfree(p_hwfn->vf_iov_info);
831         p_hwfn->vf_iov_info = NULL;
832
833         return rc;
834 }
835
836 void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
837                             struct qed_filter_mcast *p_filter_cmd)
838 {
839         struct qed_sp_vport_update_params sp_params;
840         int i;
841
842         memset(&sp_params, 0, sizeof(sp_params));
843         sp_params.update_approx_mcast_flg = 1;
844
845         if (p_filter_cmd->opcode == QED_FILTER_ADD) {
846                 for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
847                         u32 bit;
848
849                         bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
850                         __set_bit(bit, sp_params.bins);
851                 }
852         }
853
854         qed_vf_pf_vport_update(p_hwfn, &sp_params);
855 }
856
857 int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
858                            struct qed_filter_ucast *p_ucast)
859 {
860         struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
861         struct vfpf_ucast_filter_tlv *req;
862         struct pfvf_def_resp_tlv *resp;
863         int rc;
864
865         /* clear mailbox and prep first tlv */
866         req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
867         req->opcode = (u8) p_ucast->opcode;
868         req->type = (u8) p_ucast->type;
869         memcpy(req->mac, p_ucast->mac, ETH_ALEN);
870         req->vlan = p_ucast->vlan;
871
872         /* add list termination tlv */
873         qed_add_tlv(p_hwfn, &p_iov->offset,
874                     CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
875
876         resp = &p_iov->pf2vf_reply->default_resp;
877         rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
878         if (rc)
879                 return rc;
880
881         if (resp->hdr.status != PFVF_STATUS_SUCCESS)
882                 return -EAGAIN;
883
884         return 0;
885 }
886
887 int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
888 {
889         struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
890         struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
891         int rc;
892
893         /* clear mailbox and prep first tlv */
894         qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
895                        sizeof(struct vfpf_first_tlv));
896
897         /* add list termination tlv */
898         qed_add_tlv(p_hwfn, &p_iov->offset,
899                     CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
900
901         rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
902         if (rc)
903                 return rc;
904
905         if (resp->hdr.status != PFVF_STATUS_SUCCESS)
906                 return -EINVAL;
907
908         return 0;
909 }
910
911 u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
912 {
913         struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
914
915         if (!p_iov) {
916                 DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n");
917                 return 0;
918         }
919
920         return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
921 }
922
/* qed_vf_read_bulletin() - Snapshot the PF-posted bulletin board and,
 * if a new valid version is present, latch it into the local shadow.
 *
 * @p_change: set to 1 when a new bulletin was accepted, 0 otherwise.
 *
 * Return: 0 on success or when no update is pending; -EAGAIN when the
 *         snapshot's CRC does not match (PF may be mid-write; the caller
 *         polls, so it will retry on the next pass).
 */
int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct qed_bulletin_content shadow;
	u32 crc, crc_size;

	crc_size = sizeof(p_iov->bulletin.p_virt->crc);
	*p_change = 0;

	/* Need to guarantee PF is not in the middle of writing it */
	memcpy(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);

	/* If version did not update, no need to do anything */
	if (shadow.version == p_iov->bulletin_shadow.version)
		return 0;

	/* Verify the bulletin we see is valid. The CRC covers everything
	 * past the leading crc field; assumes crc is the first member of
	 * struct qed_bulletin_content - TODO confirm against its layout.
	 */
	crc = crc32(0, (u8 *)&shadow + crc_size,
		    p_iov->bulletin.size - crc_size);
	if (crc != shadow.crc)
		return -EAGAIN;

	/* Set the shadow bulletin and process it */
	memcpy(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Read a bulletin update %08x\n", shadow.version);

	*p_change = 1;

	return 0;
}
955
956 void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
957                               struct qed_mcp_link_params *p_params,
958                               struct qed_bulletin_content *p_bulletin)
959 {
960         memset(p_params, 0, sizeof(*p_params));
961
962         p_params->speed.autoneg = p_bulletin->req_autoneg;
963         p_params->speed.advertised_speeds = p_bulletin->req_adv_speed;
964         p_params->speed.forced_speed = p_bulletin->req_forced_speed;
965         p_params->pause.autoneg = p_bulletin->req_autoneg_pause;
966         p_params->pause.forced_rx = p_bulletin->req_forced_rx;
967         p_params->pause.forced_tx = p_bulletin->req_forced_tx;
968         p_params->loopback_mode = p_bulletin->req_loopback;
969 }
970
971 void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
972                             struct qed_mcp_link_params *params)
973 {
974         __qed_vf_get_link_params(p_hwfn, params,
975                                  &(p_hwfn->vf_iov_info->bulletin_shadow));
976 }
977
978 void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
979                              struct qed_mcp_link_state *p_link,
980                              struct qed_bulletin_content *p_bulletin)
981 {
982         memset(p_link, 0, sizeof(*p_link));
983
984         p_link->link_up = p_bulletin->link_up;
985         p_link->speed = p_bulletin->speed;
986         p_link->full_duplex = p_bulletin->full_duplex;
987         p_link->an = p_bulletin->autoneg;
988         p_link->an_complete = p_bulletin->autoneg_complete;
989         p_link->parallel_detection = p_bulletin->parallel_detection;
990         p_link->pfc_enabled = p_bulletin->pfc_enabled;
991         p_link->partner_adv_speed = p_bulletin->partner_adv_speed;
992         p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en;
993         p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en;
994         p_link->partner_adv_pause = p_bulletin->partner_adv_pause;
995         p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault;
996 }
997
998 void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
999                            struct qed_mcp_link_state *link)
1000 {
1001         __qed_vf_get_link_state(p_hwfn, link,
1002                                 &(p_hwfn->vf_iov_info->bulletin_shadow));
1003 }
1004
1005 void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
1006                             struct qed_mcp_link_capabilities *p_link_caps,
1007                             struct qed_bulletin_content *p_bulletin)
1008 {
1009         memset(p_link_caps, 0, sizeof(*p_link_caps));
1010         p_link_caps->speed_capabilities = p_bulletin->capability_speed;
1011 }
1012
1013 void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
1014                           struct qed_mcp_link_capabilities *p_link_caps)
1015 {
1016         __qed_vf_get_link_caps(p_hwfn, p_link_caps,
1017                                &(p_hwfn->vf_iov_info->bulletin_shadow));
1018 }
1019
1020 void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
1021 {
1022         *num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
1023 }
1024
1025 void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
1026 {
1027         memcpy(port_mac,
1028                p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac, ETH_ALEN);
1029 }
1030
1031 void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters)
1032 {
1033         struct qed_vf_iov *p_vf;
1034
1035         p_vf = p_hwfn->vf_iov_info;
1036         *num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
1037 }
1038
1039 bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
1040 {
1041         struct qed_bulletin_content *bulletin;
1042
1043         bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
1044         if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)))
1045                 return true;
1046
1047         /* Forbid VF from changing a MAC enforced by PF */
1048         if (ether_addr_equal(bulletin->mac, mac))
1049                 return false;
1050
1051         return false;
1052 }
1053
1054 bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
1055                                     u8 *dst_mac, u8 *p_is_forced)
1056 {
1057         struct qed_bulletin_content *bulletin;
1058
1059         bulletin = &hwfn->vf_iov_info->bulletin_shadow;
1060
1061         if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
1062                 if (p_is_forced)
1063                         *p_is_forced = 1;
1064         } else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) {
1065                 if (p_is_forced)
1066                         *p_is_forced = 0;
1067         } else {
1068                 return false;
1069         }
1070
1071         ether_addr_copy(dst_mac, bulletin->mac);
1072
1073         return true;
1074 }
1075
1076 void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
1077                            u16 *fw_major, u16 *fw_minor,
1078                            u16 *fw_rev, u16 *fw_eng)
1079 {
1080         struct pf_vf_pfdev_info *info;
1081
1082         info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;
1083
1084         *fw_major = info->fw_major;
1085         *fw_minor = info->fw_minor;
1086         *fw_rev = info->fw_rev;
1087         *fw_eng = info->fw_eng;
1088 }
1089
/* qed_handle_bulletin_change() - Act on a freshly latched bulletin:
 * propagate a PF-forced MAC to the protocol driver and refresh the
 * link configuration.
 */
static void qed_handle_bulletin_change(struct qed_hwfn *hwfn)
{
	struct qed_eth_cb_ops *ops = hwfn->cdev->protocol_ops.eth;
	u8 mac[ETH_ALEN], is_mac_exist, is_mac_forced;
	void *cookie = hwfn->cdev->ops_cookie;

	is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac,
						      &is_mac_forced);
	/* NOTE(review): assumes ops (and ops->force_mac) is valid whenever
	 * cookie is set - confirm against the protocol-ops registration
	 * path before relying on it.
	 */
	if (is_mac_exist && is_mac_forced && cookie)
		ops->force_mac(cookie, mac);

	/* Always update link configuration according to bulletin */
	qed_link_update(hwfn);
}
1104
/* qed_iov_vf_task() - Periodic VF worker: polls the bulletin board for
 * PF updates and re-arms itself once per second until told to stop.
 */
void qed_iov_vf_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     iov_task.work);
	u8 change = 0;

	/* Stop flag wins over any pending work; return without re-arming */
	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
		return;

	/* Handle bulletin board changes; read errors (e.g. CRC mismatch)
	 * leave 'change' at 0 and are retried on the next poll.
	 */
	qed_vf_read_bulletin(hwfn, &change);
	if (change)
		qed_handle_bulletin_change(hwfn);

	/* As VF is polling bulletin board, need to constantly re-schedule */
	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, HZ);
}