drivers/net/ethernet/qlogic/qed/qed_l2.c
1 /* QLogic qed NIC Driver
2  * Copyright (c) 2015 QLogic Corporation
3  *
4  * This software is available under the terms of the GNU General Public License
5  * (GPL) Version 2, available from the file COPYING in the main directory of
6  * this source tree.
7  */
8
9 #include <linux/types.h>
10 #include <asm/byteorder.h>
11 #include <asm/param.h>
12 #include <linux/delay.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/etherdevice.h>
15 #include <linux/interrupt.h>
16 #include <linux/kernel.h>
17 #include <linux/module.h>
18 #include <linux/pci.h>
19 #include <linux/slab.h>
20 #include <linux/stddef.h>
21 #include <linux/string.h>
22 #include <linux/version.h>
23 #include <linux/workqueue.h>
24 #include <linux/bitops.h>
25 #include <linux/bug.h>
26 #include "qed.h"
27 #include <linux/qed/qed_chain.h>
28 #include "qed_cxt.h"
29 #include "qed_dev_api.h"
30 #include <linux/qed/qed_eth_if.h>
31 #include "qed_hsi.h"
32 #include "qed_hw.h"
33 #include "qed_int.h"
34 #include "qed_reg_addr.h"
35 #include "qed_sp.h"
36
37 enum qed_rss_caps {
38         QED_RSS_IPV4            = 0x1,
39         QED_RSS_IPV6            = 0x2,
40         QED_RSS_IPV4_TCP        = 0x4,
41         QED_RSS_IPV6_TCP        = 0x8,
42         QED_RSS_IPV4_UDP        = 0x10,
43         QED_RSS_IPV6_UDP        = 0x20,
44 };
45
46 /* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */
47 #define QED_RSS_IND_TABLE_SIZE 128
48 #define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */
49
50 struct qed_rss_params {
51         u8      update_rss_config;
52         u8      rss_enable;
53         u8      rss_eng_id;
54         u8      update_rss_capabilities;
55         u8      update_rss_ind_table;
56         u8      update_rss_key;
57         u8      rss_caps;
58         u8      rss_table_size_log;
59         u16     rss_ind_table[QED_RSS_IND_TABLE_SIZE];
60         u32     rss_key[QED_RSS_KEY_SIZE];
61 };
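
/* A minimal sketch (illustrative values only, not taken from this driver) of
 * how a caller might populate qed_rss_params to spread TCP/IP traffic over
 * four relative Rx queues; a rss_table_size_log of 7 selects all
 * 2^7 == QED_RSS_IND_TABLE_SIZE entries, and qed_sp_vport_update_rss() below
 * translates the relative queue ids into absolute L2 queue ids.
 * qed_update_vport() further down builds a very similar configuration:
 *
 *	struct qed_rss_params rss = { 0 };
 *	int i;
 *
 *	rss.update_rss_config = 1;
 *	rss.rss_enable = 1;
 *	rss.update_rss_capabilities = 1;
 *	rss.update_rss_ind_table = 1;
 *	rss.update_rss_key = 1;
 *	rss.rss_caps = QED_RSS_IPV4 | QED_RSS_IPV6 |
 *		       QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
 *	rss.rss_table_size_log = 7;
 *	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
 *		rss.rss_ind_table[i] = i % 4;
 */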
62
63 enum qed_filter_opcode {
64         QED_FILTER_ADD,
65         QED_FILTER_REMOVE,
66         QED_FILTER_MOVE,
67         QED_FILTER_REPLACE,     /* Delete all MACs and add new one instead */
68         QED_FILTER_FLUSH,       /* Removes all filters */
69 };
70
71 enum qed_filter_ucast_type {
72         QED_FILTER_MAC,
73         QED_FILTER_VLAN,
74         QED_FILTER_MAC_VLAN,
75         QED_FILTER_INNER_MAC,
76         QED_FILTER_INNER_VLAN,
77         QED_FILTER_INNER_PAIR,
78         QED_FILTER_INNER_MAC_VNI_PAIR,
79         QED_FILTER_MAC_VNI_PAIR,
80         QED_FILTER_VNI,
81 };
82
83 struct qed_filter_ucast {
84         enum qed_filter_opcode          opcode;
85         enum qed_filter_ucast_type      type;
86         u8                              is_rx_filter;
87         u8                              is_tx_filter;
88         u8                              vport_to_add_to;
89         u8                              vport_to_remove_from;
90         unsigned char                   mac[ETH_ALEN];
91         u8                              assert_on_error;
92         u16                             vlan;
93         u32                             vni;
94 };
95
96 struct qed_filter_mcast {
97         /* MOVE is not supported for multicast */
98         enum qed_filter_opcode  opcode;
99         u8                      vport_to_add_to;
100         u8                      vport_to_remove_from;
101         u8                      num_mc_addrs;
102 #define QED_MAX_MC_ADDRS        64
103         unsigned char           mac[QED_MAX_MC_ADDRS][ETH_ALEN];
104 };
105
106 struct qed_filter_accept_flags {
107         u8      update_rx_mode_config;
108         u8      update_tx_mode_config;
109         u8      rx_accept_filter;
110         u8      tx_accept_filter;
111 #define QED_ACCEPT_NONE         0x01
112 #define QED_ACCEPT_UCAST_MATCHED        0x02
113 #define QED_ACCEPT_UCAST_UNMATCHED      0x04
114 #define QED_ACCEPT_MCAST_MATCHED        0x08
115 #define QED_ACCEPT_MCAST_UNMATCHED      0x10
116 #define QED_ACCEPT_BCAST                0x20
117 };
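
/* rx_accept_filter and tx_accept_filter are bitmasks of the QED_ACCEPT_*
 * values above, while the update_*_mode_config flags control whether a
 * vport-update ramrod touches the Rx/Tx mode at all.  For example, a mask of
 * (QED_ACCEPT_UCAST_MATCHED | QED_ACCEPT_MCAST_MATCHED | QED_ACCEPT_BCAST)
 * accepts traffic matching configured unicast/multicast filters plus all
 * broadcast; see qed_sp_update_accept_mode() for the exact translation into
 * the firmware rx_mode/tx_mode state bits.
 */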
118
119 struct qed_sp_vport_update_params {
120         u16                             opaque_fid;
121         u8                              vport_id;
122         u8                              update_vport_active_rx_flg;
123         u8                              vport_active_rx_flg;
124         u8                              update_vport_active_tx_flg;
125         u8                              vport_active_tx_flg;
126         u8                              update_approx_mcast_flg;
127         u8                              update_accept_any_vlan_flg;
128         u8                              accept_any_vlan;
129         unsigned long                   bins[8];
130         struct qed_rss_params           *rss_params;
131         struct qed_filter_accept_flags  accept_flags;
132 };
133
134 #define QED_MAX_SGES_NUM 16
135 #define CRC32_POLY 0x1edc6f41
136
137 static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
138                               u32 concrete_fid,
139                               u16 opaque_fid,
140                               u8 vport_id,
141                               u16 mtu,
142                               u8 drop_ttl0_flg,
143                               u8 inner_vlan_removal_en_flg)
144 {
145         struct vport_start_ramrod_data *p_ramrod = NULL;
146         struct qed_spq_entry *p_ent =  NULL;
147         struct qed_sp_init_data init_data;
148         int rc = -EINVAL;
149         u16 rx_mode = 0;
150         u8 abs_vport_id = 0;
151
152         rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
153         if (rc != 0)
154                 return rc;
155
156         memset(&init_data, 0, sizeof(init_data));
157         init_data.cid = qed_spq_get_cid(p_hwfn);
158         init_data.opaque_fid = opaque_fid;
159         init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
160
161         rc = qed_sp_init_request(p_hwfn, &p_ent,
162                                  ETH_RAMROD_VPORT_START,
163                                  PROTOCOLID_ETH, &init_data);
164         if (rc)
165                 return rc;
166
167         p_ramrod                = &p_ent->ramrod.vport_start;
168         p_ramrod->vport_id      = abs_vport_id;
169
170         p_ramrod->mtu                   = cpu_to_le16(mtu);
171         p_ramrod->inner_vlan_removal_en = inner_vlan_removal_en_flg;
172         p_ramrod->drop_ttl0_en          = drop_ttl0_flg;
173
174         SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
175         SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
176
177         p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);
178
179         /* TPA related fields */
180         memset(&p_ramrod->tpa_param, 0,
181                sizeof(struct eth_vport_tpa_param));
182
183         /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
184         p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
185                                                   concrete_fid);
186
187         return qed_spq_post(p_hwfn, p_ent, NULL);
188 }
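
/* Note that the vport is started with UCAST_DROP_ALL and MCAST_DROP_ALL set,
 * so unicast and multicast are dropped until a later rx-mode update (see
 * qed_filter_accept_cmd() / qed_sp_update_accept_mode() below) opens the Rx
 * mode.  qed_start_vport() further down shows the typical per-hwfn call.
 */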
189
190 static int
191 qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
192                         struct vport_update_ramrod_data *p_ramrod,
193                         struct qed_rss_params *p_params)
194 {
195         struct eth_vport_rss_config *rss = &p_ramrod->rss_config;
196         u16 abs_l2_queue = 0, capabilities = 0;
197         int rc = 0, i;
198
199         if (!p_params) {
200                 p_ramrod->common.update_rss_flg = 0;
201                 return rc;
202         }
203
204         BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE !=
205                      ETH_RSS_IND_TABLE_ENTRIES_NUM);
206
207         rc = qed_fw_rss_eng(p_hwfn, p_params->rss_eng_id, &rss->rss_id);
208         if (rc)
209                 return rc;
210
211         p_ramrod->common.update_rss_flg = p_params->update_rss_config;
212         rss->update_rss_capabilities = p_params->update_rss_capabilities;
213         rss->update_rss_ind_table = p_params->update_rss_ind_table;
214         rss->update_rss_key = p_params->update_rss_key;
215
216         rss->rss_mode = p_params->rss_enable ?
217                         ETH_VPORT_RSS_MODE_REGULAR :
218                         ETH_VPORT_RSS_MODE_DISABLED;
219
220         SET_FIELD(capabilities,
221                   ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
222                   !!(p_params->rss_caps & QED_RSS_IPV4));
223         SET_FIELD(capabilities,
224                   ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
225                   !!(p_params->rss_caps & QED_RSS_IPV6));
226         SET_FIELD(capabilities,
227                   ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
228                   !!(p_params->rss_caps & QED_RSS_IPV4_TCP));
229         SET_FIELD(capabilities,
230                   ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
231                   !!(p_params->rss_caps & QED_RSS_IPV6_TCP));
232         SET_FIELD(capabilities,
233                   ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
234                   !!(p_params->rss_caps & QED_RSS_IPV4_UDP));
235         SET_FIELD(capabilities,
236                   ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
237                   !!(p_params->rss_caps & QED_RSS_IPV6_UDP));
238         rss->tbl_size = p_params->rss_table_size_log;
239
240         rss->capabilities = cpu_to_le16(capabilities);
241
242         DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
243                    "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
244                    p_ramrod->common.update_rss_flg,
245                    rss->rss_mode, rss->update_rss_capabilities,
246                    capabilities, rss->update_rss_ind_table,
247                    rss->update_rss_key);
248
249         for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
250                 rc = qed_fw_l2_queue(p_hwfn,
251                                      (u8)p_params->rss_ind_table[i],
252                                      &abs_l2_queue);
253                 if (rc)
254                         return rc;
255
256                 rss->indirection_table[i] = cpu_to_le16(abs_l2_queue);
257                 DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "i= %d, queue = %d\n",
258                            i, rss->indirection_table[i]);
259         }
260
261         for (i = 0; i < 10; i++)
262                 rss->rss_key[i] = cpu_to_le32(p_params->rss_key[i]);
263
264         return rc;
265 }
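
/* Sizing notes for the function above: rss_table_size_log of 7 selects
 * 2^7 == 128 indirection entries, i.e. the full QED_RSS_IND_TABLE_SIZE
 * table, and the key copied above is QED_RSS_KEY_SIZE * 4 == 40 bytes, the
 * conventional RSS hash-key length.  Each indirection entry is a queue id
 * relative to this function and is converted to an absolute L2 queue id via
 * qed_fw_l2_queue() before being written to the ramrod.
 */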
266
267 static void
268 qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
269                           struct vport_update_ramrod_data *p_ramrod,
270                           struct qed_filter_accept_flags accept_flags)
271 {
272         p_ramrod->common.update_rx_mode_flg =
273                 accept_flags.update_rx_mode_config;
274
275         p_ramrod->common.update_tx_mode_flg =
276                 accept_flags.update_tx_mode_config;
277
278         /* Set Rx mode accept flags */
279         if (p_ramrod->common.update_rx_mode_flg) {
280                 u8 accept_filter = accept_flags.rx_accept_filter;
281                 u16 state = 0;
282
283                 SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
284                           !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) ||
285                             !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
286
287                 SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
288                           !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));
289
290                 SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
291                           !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
292                             !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
293
294                 SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
295                           (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
296                            !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
297
298                 SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
299                           !!(accept_filter & QED_ACCEPT_BCAST));
300
301                 p_ramrod->rx_mode.state = cpu_to_le16(state);
302                 DP_VERBOSE(p_hwfn, QED_MSG_SP,
303                            "p_ramrod->rx_mode.state = 0x%x\n", state);
304         }
305
306         /* Set Tx mode accept flags */
307         if (p_ramrod->common.update_tx_mode_flg) {
308                 u8 accept_filter = accept_flags.tx_accept_filter;
309                 u16 state = 0;
310
311                 SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
312                           !!(accept_filter & QED_ACCEPT_NONE));
313
314                 SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
315                           (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
316                            !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
317
318                 SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
319                           !!(accept_filter & QED_ACCEPT_NONE));
320
321                 SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
322                           (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
323                            !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
324
325                 SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
326                           !!(accept_filter & QED_ACCEPT_BCAST));
327
328                 p_ramrod->tx_mode.state = cpu_to_le16(state);
329                 DP_VERBOSE(p_hwfn, QED_MSG_SP,
330                            "p_ramrod->tx_mode.state = 0x%x\n", state);
331         }
332 }
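
/* Worked example of the Rx translation above, assuming an accept filter of
 * (QED_ACCEPT_UCAST_MATCHED | QED_ACCEPT_MCAST_MATCHED | QED_ACCEPT_BCAST):
 *
 *	UCAST_DROP_ALL          = 0  (some unicast is accepted)
 *	UCAST_ACCEPT_UNMATCHED  = 0
 *	MCAST_DROP_ALL          = 0
 *	MCAST_ACCEPT_ALL        = 0  (only matched multicast is wanted)
 *	BCAST_ACCEPT_ALL        = 1
 *
 * i.e. only traffic matching configured filters plus broadcast gets through.
 * Promiscuous Rx would instead pass both UCAST_MATCHED and UCAST_UNMATCHED,
 * clearing UCAST_DROP_ALL and setting UCAST_ACCEPT_UNMATCHED.
 */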
333
334 static void
335 qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
336                         struct vport_update_ramrod_data *p_ramrod,
337                         struct qed_sp_vport_update_params *p_params)
338 {
339         int i;
340
341         memset(&p_ramrod->approx_mcast.bins, 0,
342                sizeof(p_ramrod->approx_mcast.bins));
343
344         if (p_params->update_approx_mcast_flg) {
345                 p_ramrod->common.update_approx_mcast_flg = 1;
346                 for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
347                         u32 *p_bins = (u32 *)p_params->bins;
348                         __le32 val = cpu_to_le32(p_bins[i]);
349
350                         p_ramrod->approx_mcast.bins[i] = val;
351                 }
352         }
353 }
354
355 static int
356 qed_sp_vport_update(struct qed_hwfn *p_hwfn,
357                     struct qed_sp_vport_update_params *p_params,
358                     enum spq_mode comp_mode,
359                     struct qed_spq_comp_cb *p_comp_data)
360 {
361         struct qed_rss_params *p_rss_params = p_params->rss_params;
362         struct vport_update_ramrod_data_cmn *p_cmn;
363         struct qed_sp_init_data init_data;
364         struct vport_update_ramrod_data *p_ramrod = NULL;
365         struct qed_spq_entry *p_ent = NULL;
366         u8 abs_vport_id = 0;
367         int rc = -EINVAL;
368
369         rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
370         if (rc != 0)
371                 return rc;
372
373         memset(&init_data, 0, sizeof(init_data));
374         init_data.cid = qed_spq_get_cid(p_hwfn);
375         init_data.opaque_fid = p_params->opaque_fid;
376         init_data.comp_mode = comp_mode;
377         init_data.p_comp_data = p_comp_data;
378
379         rc = qed_sp_init_request(p_hwfn, &p_ent,
380                                  ETH_RAMROD_VPORT_UPDATE,
381                                  PROTOCOLID_ETH, &init_data);
382         if (rc)
383                 return rc;
384
385         /* Copy input params to ramrod according to FW struct */
386         p_ramrod = &p_ent->ramrod.vport_update;
387         p_cmn = &p_ramrod->common;
388
389         p_cmn->vport_id = abs_vport_id;
390         p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
391         p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
392         p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
393         p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
394         p_cmn->accept_any_vlan = p_params->accept_any_vlan;
395         p_cmn->update_accept_any_vlan_flg =
396                         p_params->update_accept_any_vlan_flg;
397         rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
398         if (rc) {
399                 /* Return spq entry which is taken in qed_sp_init_request()*/
400                 qed_spq_return_entry(p_hwfn, p_ent);
401                 return rc;
402         }
403
404         /* Update mcast bins for VFs; the PF doesn't use this functionality */
405         qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
406
407         qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
408         return qed_spq_post(p_hwfn, p_ent, NULL);
409 }
410
411 static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
412                              u16 opaque_fid,
413                              u8 vport_id)
414 {
415         struct vport_stop_ramrod_data *p_ramrod;
416         struct qed_sp_init_data init_data;
417         struct qed_spq_entry *p_ent;
418         u8 abs_vport_id = 0;
419         int rc;
420
421         rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
422         if (rc != 0)
423                 return rc;
424
425         memset(&init_data, 0, sizeof(init_data));
426         init_data.cid = qed_spq_get_cid(p_hwfn);
427         init_data.opaque_fid = opaque_fid;
428         init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
429
430         rc = qed_sp_init_request(p_hwfn, &p_ent,
431                                  ETH_RAMROD_VPORT_STOP,
432                                  PROTOCOLID_ETH, &init_data);
433         if (rc)
434                 return rc;
435
436         p_ramrod = &p_ent->ramrod.vport_stop;
437         p_ramrod->vport_id = abs_vport_id;
438
439         return qed_spq_post(p_hwfn, p_ent, NULL);
440 }
441
442 static int qed_filter_accept_cmd(struct qed_dev *cdev,
443                                  u8 vport,
444                                  struct qed_filter_accept_flags accept_flags,
445                                  u8 update_accept_any_vlan,
446                                  u8 accept_any_vlan,
447                                 enum spq_mode comp_mode,
448                                 struct qed_spq_comp_cb *p_comp_data)
449 {
450         struct qed_sp_vport_update_params vport_update_params;
451         int i, rc;
452
453         /* Prepare and send the vport rx_mode change */
454         memset(&vport_update_params, 0, sizeof(vport_update_params));
455         vport_update_params.vport_id = vport;
456         vport_update_params.accept_flags = accept_flags;
457         vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
458         vport_update_params.accept_any_vlan = accept_any_vlan;
459
460         for_each_hwfn(cdev, i) {
461                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
462
463                 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
464
465                 rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
466                                          comp_mode, p_comp_data);
467                 if (rc != 0) {
468                         DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
469                         return rc;
470                 }
471
472                 DP_VERBOSE(p_hwfn, QED_MSG_SP,
473                            "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
474                            accept_flags.rx_accept_filter,
475                            accept_flags.tx_accept_filter);
476                 if (update_accept_any_vlan)
477                         DP_VERBOSE(p_hwfn, QED_MSG_SP,
478                                    "accept_any_vlan=%d configured\n",
479                                    accept_any_vlan);
480         }
481
482         return 0;
483 }
484
485 static int qed_sp_release_queue_cid(
486         struct qed_hwfn *p_hwfn,
487         struct qed_hw_cid_data *p_cid_data)
488 {
489         if (!p_cid_data->b_cid_allocated)
490                 return 0;
491
492         qed_cxt_release_cid(p_hwfn, p_cid_data->cid);
493
494         p_cid_data->b_cid_allocated = false;
495
496         return 0;
497 }
498
499 static int
500 qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
501                             u16 opaque_fid,
502                             u32 cid,
503                             struct qed_queue_start_common_params *params,
504                             u8 stats_id,
505                             u16 bd_max_bytes,
506                             dma_addr_t bd_chain_phys_addr,
507                             dma_addr_t cqe_pbl_addr,
508                             u16 cqe_pbl_size)
509 {
510         struct rx_queue_start_ramrod_data *p_ramrod = NULL;
511         struct qed_spq_entry *p_ent = NULL;
512         struct qed_sp_init_data init_data;
513         struct qed_hw_cid_data *p_rx_cid;
514         u16 abs_rx_q_id = 0;
515         u8 abs_vport_id = 0;
516         int rc = -EINVAL;
517
518         /* Store information for the stop */
519         p_rx_cid                = &p_hwfn->p_rx_cids[params->queue_id];
520         p_rx_cid->cid           = cid;
521         p_rx_cid->opaque_fid    = opaque_fid;
522         p_rx_cid->vport_id      = params->vport_id;
523
524         rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_vport_id);
525         if (rc != 0)
526                 return rc;
527
528         rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_rx_q_id);
529         if (rc != 0)
530                 return rc;
531
532         DP_VERBOSE(p_hwfn, QED_MSG_SP,
533                    "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
534                    opaque_fid, cid, params->queue_id, params->vport_id,
535                    params->sb);
536
537         /* Get SPQ entry */
538         memset(&init_data, 0, sizeof(init_data));
539         init_data.cid = cid;
540         init_data.opaque_fid = opaque_fid;
541         init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
542
543         rc = qed_sp_init_request(p_hwfn, &p_ent,
544                                  ETH_RAMROD_RX_QUEUE_START,
545                                  PROTOCOLID_ETH, &init_data);
546         if (rc)
547                 return rc;
548
549         p_ramrod = &p_ent->ramrod.rx_queue_start;
550
551         p_ramrod->sb_id                 = cpu_to_le16(params->sb);
552         p_ramrod->sb_index              = params->sb_idx;
553         p_ramrod->vport_id              = abs_vport_id;
554         p_ramrod->stats_counter_id      = stats_id;
555         p_ramrod->rx_queue_id           = cpu_to_le16(abs_rx_q_id);
556         p_ramrod->complete_cqe_flg      = 0;
557         p_ramrod->complete_event_flg    = 1;
558
559         p_ramrod->bd_max_bytes  = cpu_to_le16(bd_max_bytes);
560         DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);
561
562         p_ramrod->num_of_pbl_pages      = cpu_to_le16(cqe_pbl_size);
563         DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
564
565         rc = qed_spq_post(p_hwfn, p_ent, NULL);
566
567         return rc;
568 }
569
570 static int
571 qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
572                           u16 opaque_fid,
573                           struct qed_queue_start_common_params *params,
574                           u16 bd_max_bytes,
575                           dma_addr_t bd_chain_phys_addr,
576                           dma_addr_t cqe_pbl_addr,
577                           u16 cqe_pbl_size,
578                           void __iomem **pp_prod)
579 {
580         struct qed_hw_cid_data *p_rx_cid;
581         u64 init_prod_val = 0;
582         u16 abs_l2_queue = 0;
583         u8 abs_stats_id = 0;
584         int rc;
585
586         rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue);
587         if (rc != 0)
588                 return rc;
589
590         rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_stats_id);
591         if (rc != 0)
592                 return rc;
593
594         *pp_prod = (u8 __iomem *)p_hwfn->regview +
595                                  GTT_BAR0_MAP_REG_MSDM_RAM +
596                                  MSTORM_PRODS_OFFSET(abs_l2_queue);
597
598         /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
599         __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
600                           (u32 *)(&init_prod_val));
601
602         /* Allocate a CID for the queue */
603         p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id];
604         rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
605                                  &p_rx_cid->cid);
606         if (rc) {
607                 DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
608                 return rc;
609         }
610         p_rx_cid->b_cid_allocated = true;
611
612         rc = qed_sp_eth_rxq_start_ramrod(p_hwfn,
613                                          opaque_fid,
614                                          p_rx_cid->cid,
615                                          params,
616                                          abs_stats_id,
617                                          bd_max_bytes,
618                                          bd_chain_phys_addr,
619                                          cqe_pbl_addr,
620                                          cqe_pbl_size);
621
622         if (rc != 0)
623                 qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
624
625         return rc;
626 }
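
/* The sequence above is: map the relative queue/vport ids, zero the 64-bit
 * Mstorm producer block for this queue through the BAR0 GTT window (so the
 * rcq/bd producers start at 0), acquire a CID for the connection, and only
 * then post the start ramrod.  If the ramrod cannot be posted, the CID is
 * handed back via qed_sp_release_queue_cid() so the CID pool stays balanced.
 */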
627
628 static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
629                                     u16 rx_queue_id,
630                                     bool eq_completion_only,
631                                     bool cqe_completion)
632 {
633         struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
634         struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
635         struct qed_spq_entry *p_ent = NULL;
636         struct qed_sp_init_data init_data;
637         u16 abs_rx_q_id = 0;
638         int rc = -EINVAL;
639
640         /* Get SPQ entry */
641         memset(&init_data, 0, sizeof(init_data));
642         init_data.cid = p_rx_cid->cid;
643         init_data.opaque_fid = p_rx_cid->opaque_fid;
644         init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
645
646         rc = qed_sp_init_request(p_hwfn, &p_ent,
647                                  ETH_RAMROD_RX_QUEUE_STOP,
648                                  PROTOCOLID_ETH, &init_data);
649         if (rc)
650                 return rc;
651
652         p_ramrod = &p_ent->ramrod.rx_queue_stop;
653
654         qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
655         qed_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
656         p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
657
658         /* Cleaning the queue requires the completion to arrive there.
659          * In addition, VFs require the answer to arrive as an EQE to the PF.
660          */
661         p_ramrod->complete_cqe_flg =
662                 (!!(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) &&
663                  !eq_completion_only) || cqe_completion;
664         p_ramrod->complete_event_flg =
665                 !(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) ||
666                 eq_completion_only;
667
668         rc = qed_spq_post(p_hwfn, p_ent, NULL);
669         if (rc)
670                 return rc;
671
672         return qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
673 }
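
/* Completion-flag combinations produced by the logic above:
 * - Queue owned by this PF and eq_completion_only == false:
 *   complete_cqe_flg = 1, complete_event_flg = 0, i.e. the completion
 *   arrives on the CQE ring.
 * - Queue owned by another function (opaque_fid differs) or
 *   eq_completion_only == true: complete_event_flg = 1 and complete_cqe_flg
 *   follows the cqe_completion argument, so the answer arrives as an
 *   event-queue entry to the PF, as the in-function comment notes for VFs.
 */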
674
675 static int
676 qed_sp_eth_txq_start_ramrod(struct qed_hwfn  *p_hwfn,
677                             u16  opaque_fid,
678                             u32  cid,
679                             struct qed_queue_start_common_params *p_params,
680                             u8  stats_id,
681                             dma_addr_t pbl_addr,
682                             u16 pbl_size,
683                             union qed_qm_pq_params *p_pq_params)
684 {
685         struct tx_queue_start_ramrod_data *p_ramrod = NULL;
686         struct qed_spq_entry *p_ent = NULL;
687         struct qed_sp_init_data init_data;
688         struct qed_hw_cid_data *p_tx_cid;
689         u8 abs_vport_id;
690         int rc = -EINVAL;
691         u16 pq_id;
692
693         /* Store information for the stop */
694         p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
695         p_tx_cid->cid           = cid;
696         p_tx_cid->opaque_fid    = opaque_fid;
697
698         rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
699         if (rc)
700                 return rc;
701
702         /* Get SPQ entry */
703         memset(&init_data, 0, sizeof(init_data));
704         init_data.cid = cid;
705         init_data.opaque_fid = opaque_fid;
706         init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
707
708         rc = qed_sp_init_request(p_hwfn, &p_ent,
709                                  ETH_RAMROD_TX_QUEUE_START,
710                                  PROTOCOLID_ETH, &init_data);
711         if (rc)
712                 return rc;
713
714         p_ramrod                = &p_ent->ramrod.tx_queue_start;
715         p_ramrod->vport_id      = abs_vport_id;
716
717         p_ramrod->sb_id                 = cpu_to_le16(p_params->sb);
718         p_ramrod->sb_index              = p_params->sb_idx;
719         p_ramrod->stats_counter_id      = stats_id;
720
721         p_ramrod->pbl_size              = cpu_to_le16(pbl_size);
722         DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
723
724         pq_id                   = qed_get_qm_pq(p_hwfn,
725                                                 PROTOCOLID_ETH,
726                                                 p_pq_params);
727         p_ramrod->qm_pq_id      = cpu_to_le16(pq_id);
728
729         return qed_spq_post(p_hwfn, p_ent, NULL);
730 }
731
732 static int
733 qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
734                           u16 opaque_fid,
735                           struct qed_queue_start_common_params *p_params,
736                           dma_addr_t pbl_addr,
737                           u16 pbl_size,
738                           void __iomem **pp_doorbell)
739 {
740         struct qed_hw_cid_data *p_tx_cid;
741         union qed_qm_pq_params pq_params;
742         u8 abs_stats_id = 0;
743         int rc;
744
745         rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id);
746         if (rc)
747                 return rc;
748
749         p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
750         memset(p_tx_cid, 0, sizeof(*p_tx_cid));
751         memset(&pq_params, 0, sizeof(pq_params));
752
753         /* Allocate a CID for the queue */
754         rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
755                                  &p_tx_cid->cid);
756         if (rc) {
757                 DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
758                 return rc;
759         }
760         p_tx_cid->b_cid_allocated = true;
761
762         DP_VERBOSE(p_hwfn, QED_MSG_SP,
763                    "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
764                    opaque_fid, p_tx_cid->cid,
765                    p_params->queue_id, p_params->vport_id, p_params->sb);
766
767         rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
768                                          opaque_fid,
769                                          p_tx_cid->cid,
770                                          p_params,
771                                          abs_stats_id,
772                                          pbl_addr,
773                                          pbl_size,
774                                          &pq_params);
775
776         *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
777                                      qed_db_addr(p_tx_cid->cid, DQ_DEMS_LEGACY);
778
779         if (rc)
780                 qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
781
782         return rc;
783 }
784
785 static int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn,
786                                     u16 tx_queue_id)
787 {
788         struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
789         struct qed_spq_entry *p_ent = NULL;
790         struct qed_sp_init_data init_data;
791         int rc = -EINVAL;
792
793         /* Get SPQ entry */
794         memset(&init_data, 0, sizeof(init_data));
795         init_data.cid = p_tx_cid->cid;
796         init_data.opaque_fid = p_tx_cid->opaque_fid;
797         init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
798
799         rc = qed_sp_init_request(p_hwfn, &p_ent,
800                                  ETH_RAMROD_TX_QUEUE_STOP,
801                                  PROTOCOLID_ETH, &init_data);
802         if (rc)
803                 return rc;
804
805         rc = qed_spq_post(p_hwfn, p_ent, NULL);
806         if (rc)
807                 return rc;
808
809         return qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
810 }
811
812 static enum eth_filter_action
813 qed_filter_action(enum qed_filter_opcode opcode)
814 {
815         enum eth_filter_action action = MAX_ETH_FILTER_ACTION;
816
817         switch (opcode) {
818         case QED_FILTER_ADD:
819                 action = ETH_FILTER_ACTION_ADD;
820                 break;
821         case QED_FILTER_REMOVE:
822                 action = ETH_FILTER_ACTION_REMOVE;
823                 break;
824         case QED_FILTER_FLUSH:
825                 action = ETH_FILTER_ACTION_REMOVE_ALL;
826                 break;
827         default:
828                 action = MAX_ETH_FILTER_ACTION;
829         }
830
831         return action;
832 }
833
834 static void qed_set_fw_mac_addr(__le16 *fw_msb,
835                                 __le16 *fw_mid,
836                                 __le16 *fw_lsb,
837                                 u8 *mac)
838 {
839         ((u8 *)fw_msb)[0] = mac[1];
840         ((u8 *)fw_msb)[1] = mac[0];
841         ((u8 *)fw_mid)[0] = mac[3];
842         ((u8 *)fw_mid)[1] = mac[2];
843         ((u8 *)fw_lsb)[0] = mac[5];
844         ((u8 *)fw_lsb)[1] = mac[4];
845 }
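
/* The MAC is stored as three 16-bit words with the bytes of each pair
 * swapped, so that le16_to_cpu() of fw_msb equals (mac[0] << 8) | mac[1] and
 * so on.  For example (address chosen purely for illustration),
 * mac = 00:11:22:33:44:55 yields:
 *
 *	fw_msb = 0x0011, fw_mid = 0x2233, fw_lsb = 0x4455
 *
 * when interpreted via le16_to_cpu().
 */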
846
847 static int
848 qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
849                         u16 opaque_fid,
850                         struct qed_filter_ucast *p_filter_cmd,
851                         struct vport_filter_update_ramrod_data **pp_ramrod,
852                         struct qed_spq_entry **pp_ent,
853                         enum spq_mode comp_mode,
854                         struct qed_spq_comp_cb *p_comp_data)
855 {
856         u8 vport_to_add_to = 0, vport_to_remove_from = 0;
857         struct vport_filter_update_ramrod_data *p_ramrod;
858         struct eth_filter_cmd *p_first_filter;
859         struct eth_filter_cmd *p_second_filter;
860         struct qed_sp_init_data init_data;
861         enum eth_filter_action action;
862         int rc;
863
864         rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
865                           &vport_to_remove_from);
866         if (rc)
867                 return rc;
868
869         rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
870                           &vport_to_add_to);
871         if (rc)
872                 return rc;
873
874         /* Get SPQ entry */
875         memset(&init_data, 0, sizeof(init_data));
876         init_data.cid = qed_spq_get_cid(p_hwfn);
877         init_data.opaque_fid = opaque_fid;
878         init_data.comp_mode = comp_mode;
879         init_data.p_comp_data = p_comp_data;
880
881         rc = qed_sp_init_request(p_hwfn, pp_ent,
882                                  ETH_RAMROD_FILTERS_UPDATE,
883                                  PROTOCOLID_ETH, &init_data);
884         if (rc)
885                 return rc;
886
887         *pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
888         p_ramrod = *pp_ramrod;
889         p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
890         p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;
891
892         switch (p_filter_cmd->opcode) {
893         case QED_FILTER_REPLACE:
894         case QED_FILTER_MOVE:
895                 p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
896         default:
897                 p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
898         }
899
900         p_first_filter  = &p_ramrod->filter_cmds[0];
901         p_second_filter = &p_ramrod->filter_cmds[1];
902
903         switch (p_filter_cmd->type) {
904         case QED_FILTER_MAC:
905                 p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
906         case QED_FILTER_VLAN:
907                 p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
908         case QED_FILTER_MAC_VLAN:
909                 p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
910         case QED_FILTER_INNER_MAC:
911                 p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
912         case QED_FILTER_INNER_VLAN:
913                 p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
914         case QED_FILTER_INNER_PAIR:
915                 p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
916         case QED_FILTER_INNER_MAC_VNI_PAIR:
917                 p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
918                 break;
919         case QED_FILTER_MAC_VNI_PAIR:
920                 p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
921         case QED_FILTER_VNI:
922                 p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
923         }
924
925         if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
926             (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
927             (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
928             (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
929             (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
930             (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) {
931                 qed_set_fw_mac_addr(&p_first_filter->mac_msb,
932                                     &p_first_filter->mac_mid,
933                                     &p_first_filter->mac_lsb,
934                                     (u8 *)p_filter_cmd->mac);
935         }
936
937         if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
938             (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
939             (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
940             (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
941                 p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan);
942
943         if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
944             (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
945             (p_first_filter->type == ETH_FILTER_TYPE_VNI))
946                 p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);
947
948         if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
949                 p_second_filter->type           = p_first_filter->type;
950                 p_second_filter->mac_msb        = p_first_filter->mac_msb;
951                 p_second_filter->mac_mid        = p_first_filter->mac_mid;
952                 p_second_filter->mac_lsb        = p_first_filter->mac_lsb;
953                 p_second_filter->vlan_id        = p_first_filter->vlan_id;
954                 p_second_filter->vni            = p_first_filter->vni;
955
956                 p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
957
958                 p_first_filter->vport_id = vport_to_remove_from;
959
960                 p_second_filter->action         = ETH_FILTER_ACTION_ADD;
961                 p_second_filter->vport_id       = vport_to_add_to;
962         } else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
963                 p_first_filter->vport_id = vport_to_add_to;
964                 memcpy(p_second_filter, p_first_filter,
965                        sizeof(*p_second_filter));
966                 p_first_filter->action  = ETH_FILTER_ACTION_REMOVE_ALL;
967                 p_second_filter->action = ETH_FILTER_ACTION_ADD;
968         } else {
969                 action = qed_filter_action(p_filter_cmd->opcode);
970
971                 if (action == MAX_ETH_FILTER_ACTION) {
972                         DP_NOTICE(p_hwfn,
973                                   "%d is not supported yet\n",
974                                   p_filter_cmd->opcode);
975                         return -EINVAL;
976                 }
977
978                 p_first_filter->action = action;
979                 p_first_filter->vport_id = (p_filter_cmd->opcode ==
980                                             QED_FILTER_REMOVE) ?
981                                            vport_to_remove_from :
982                                            vport_to_add_to;
983         }
984
985         return 0;
986 }
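
/* Command layout produced above: ADD/REMOVE/FLUSH use a single filter
 * command, while MOVE and REPLACE use two.  MOVE issues a REMOVE on
 * vport_to_remove_from followed by an ADD on vport_to_add_to with the same
 * MAC/VLAN/VNI, and REPLACE issues REMOVE_ALL followed by an ADD, both on
 * vport_to_add_to.  The SPQ entry is returned via pp_ent and is not posted
 * here; qed_sp_eth_filter_ucast() posts it.
 */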
987
988 static int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
989                                    u16 opaque_fid,
990                                    struct qed_filter_ucast *p_filter_cmd,
991                                    enum spq_mode comp_mode,
992                                    struct qed_spq_comp_cb *p_comp_data)
993 {
994         struct vport_filter_update_ramrod_data  *p_ramrod       = NULL;
995         struct qed_spq_entry                    *p_ent          = NULL;
996         struct eth_filter_cmd_header            *p_header;
997         int                                     rc;
998
999         rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
1000                                      &p_ramrod, &p_ent,
1001                                      comp_mode, p_comp_data);
1002         if (rc != 0) {
1003                 DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
1004                 return rc;
1005         }
1006         p_header = &p_ramrod->filter_cmd_hdr;
1007         p_header->assert_on_error = p_filter_cmd->assert_on_error;
1008
1009         rc = qed_spq_post(p_hwfn, p_ent, NULL);
1010         if (rc != 0) {
1011                 DP_ERR(p_hwfn,
1012                        "Unicast filter ADD command failed %d\n",
1013                        rc);
1014                 return rc;
1015         }
1016
1017         DP_VERBOSE(p_hwfn, QED_MSG_SP,
1018                    "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
1019                    (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" :
1020                    ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ?
1021                    "REMOVE" :
1022                    ((p_filter_cmd->opcode == QED_FILTER_MOVE) ?
1023                     "MOVE" : "REPLACE")),
1024                    (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" :
1025                    ((p_filter_cmd->type == QED_FILTER_VLAN) ?
1026                     "VLAN" : "MAC & VLAN"),
1027                    p_ramrod->filter_cmd_hdr.cmd_cnt,
1028                    p_filter_cmd->is_rx_filter,
1029                    p_filter_cmd->is_tx_filter);
1030         DP_VERBOSE(p_hwfn, QED_MSG_SP,
1031                    "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
1032                    p_filter_cmd->vport_to_add_to,
1033                    p_filter_cmd->vport_to_remove_from,
1034                    p_filter_cmd->mac[0],
1035                    p_filter_cmd->mac[1],
1036                    p_filter_cmd->mac[2],
1037                    p_filter_cmd->mac[3],
1038                    p_filter_cmd->mac[4],
1039                    p_filter_cmd->mac[5],
1040                    p_filter_cmd->vlan);
1041
1042         return 0;
1043 }
1044
1045 /*******************************************************************************
1046  * Description:
1047  *         Calculates CRC32c on a buffer
1048  *         Note: crc32_length MUST be a multiple of 8 bytes
1049  * Return: the computed CRC32c value (crc32_seed is returned on invalid input)
1050  ******************************************************************************/
1051 static u32 qed_calc_crc32c(u8 *crc32_packet,
1052                            u32 crc32_length,
1053                            u32 crc32_seed,
1054                            u8 complement)
1055 {
1056         u32 byte = 0;
1057         u32 bit = 0;
1058         u8 msb = 0;
1059         u8 current_byte = 0;
1060         u32 crc32_result = crc32_seed;
1061
1062         if ((!crc32_packet) ||
1063             (crc32_length == 0) ||
1064             ((crc32_length % 8) != 0))
1065                 return crc32_result;
1066         for (byte = 0; byte < crc32_length; byte++) {
1067                 current_byte = crc32_packet[byte];
1068                 for (bit = 0; bit < 8; bit++) {
1069                         msb = (u8)(crc32_result >> 31);
1070                         crc32_result = crc32_result << 1;
1071                         if (msb != (0x1 & (current_byte >> bit))) {
1072                                 crc32_result = crc32_result ^ CRC32_POLY;
1073                                 crc32_result |= 1; /*crc32_result[0] = 1;*/
1074                         }
1075                 }
1076         }
1077         return crc32_result;
1078 }
1079
1080 static inline u32 qed_crc32c_le(u32 seed,
1081                                 u8 *mac,
1082                                 u32 len)
1083 {
1084         u32 packet_buf[2] = { 0 };
1085
1086         memcpy((u8 *)(&packet_buf[0]), &mac[0], 6);
1087         return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
1088 }
1089
1090 static u8 qed_mcast_bin_from_mac(u8 *mac)
1091 {
1092         u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
1093                                 mac, ETH_ALEN);
1094
1095         return crc & 0xff;
1096 }
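
/* Multicast filtering is approximate: each MAC is hashed with CRC32c
 * (CRC32_POLY above is the Castagnoli polynomial) over the 8-byte
 * zero-padded address, and the low 8 bits select one of 256 bins, so
 * unrelated addresses that collide into the same bin are accepted as well.
 * A sketch of how a bin is marked, mirroring qed_sp_eth_filter_mcast()
 * below:
 *
 *	u32 bit = qed_mcast_bin_from_mac(mac);
 *	__set_bit(bit, bins);
 */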
1097
1098 static int
1099 qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
1100                         u16 opaque_fid,
1101                         struct qed_filter_mcast *p_filter_cmd,
1102                         enum spq_mode comp_mode,
1103                         struct qed_spq_comp_cb *p_comp_data)
1104 {
1105         unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
1106         struct vport_update_ramrod_data *p_ramrod = NULL;
1107         struct qed_spq_entry *p_ent = NULL;
1108         struct qed_sp_init_data init_data;
1109         u8 abs_vport_id = 0;
1110         int rc, i;
1111
1112         if (p_filter_cmd->opcode == QED_FILTER_ADD) {
1113                 rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
1114                                   &abs_vport_id);
1115                 if (rc)
1116                         return rc;
1117         } else {
1118                 rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
1119                                   &abs_vport_id);
1120                 if (rc)
1121                         return rc;
1122         }
1123
1124         /* Get SPQ entry */
1125         memset(&init_data, 0, sizeof(init_data));
1126         init_data.cid = qed_spq_get_cid(p_hwfn);
1127         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1128         init_data.comp_mode = comp_mode;
1129         init_data.p_comp_data = p_comp_data;
1130
1131         rc = qed_sp_init_request(p_hwfn, &p_ent,
1132                                  ETH_RAMROD_VPORT_UPDATE,
1133                                  PROTOCOLID_ETH, &init_data);
1134         if (rc) {
1135                 DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
1136                 return rc;
1137         }
1138
1139         p_ramrod = &p_ent->ramrod.vport_update;
1140         p_ramrod->common.update_approx_mcast_flg = 1;
1141
1142         /* explicitly clear out the entire vector */
1143         memset(&p_ramrod->approx_mcast.bins, 0,
1144                sizeof(p_ramrod->approx_mcast.bins));
1145         memset(bins, 0, sizeof(unsigned long) *
1146                ETH_MULTICAST_MAC_BINS_IN_REGS);
1147         /* A filter ADD op is an explicit set op: it removes
1148          * any multicast filters previously configured for the vport.
1149          */
1150         if (p_filter_cmd->opcode == QED_FILTER_ADD) {
1151                 for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
1152                         u32 bit;
1153
1154                         bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
1155                         __set_bit(bit, bins);
1156                 }
1157
1158                 /* Convert to the correct endianness */
1159                 for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
1160                         u32 *p_bins = (u32 *)bins;
1161                         struct vport_update_ramrod_mcast *approx_mcast;
1162
1163                         approx_mcast = &p_ramrod->approx_mcast;
1164                         approx_mcast->bins[i] = cpu_to_le32(p_bins[i]);
1165                 }
1166         }
1167
1168         p_ramrod->common.vport_id = abs_vport_id;
1169
1170         return qed_spq_post(p_hwfn, p_ent, NULL);
1171 }
1172
1173 static int
1174 qed_filter_mcast_cmd(struct qed_dev *cdev,
1175                      struct qed_filter_mcast *p_filter_cmd,
1176                      enum spq_mode comp_mode,
1177                      struct qed_spq_comp_cb *p_comp_data)
1178 {
1179         int rc = 0;
1180         int i;
1181
1182         /* only ADD and REMOVE operations are supported for multi-cast */
1183         if ((p_filter_cmd->opcode != QED_FILTER_ADD &&
1184              (p_filter_cmd->opcode != QED_FILTER_REMOVE)) ||
1185             (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS))
1186                 return -EINVAL;
1187
1188         for_each_hwfn(cdev, i) {
1189                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1190
1191                 u16 opaque_fid;
1192
1193                 if (rc != 0)
1194                         break;
1195
1196                 opaque_fid = p_hwfn->hw_info.opaque_fid;
1197
1198                 rc = qed_sp_eth_filter_mcast(p_hwfn,
1199                                              opaque_fid,
1200                                              p_filter_cmd,
1201                                              comp_mode,
1202                                              p_comp_data);
1203         }
1204         return rc;
1205 }
1206
1207 static int qed_filter_ucast_cmd(struct qed_dev *cdev,
1208                                 struct qed_filter_ucast *p_filter_cmd,
1209                                 enum spq_mode comp_mode,
1210                                 struct qed_spq_comp_cb *p_comp_data)
1211 {
1212         int rc = 0;
1213         int i;
1214
1215         for_each_hwfn(cdev, i) {
1216                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1217                 u16 opaque_fid;
1218
1219                 if (rc != 0)
1220                         break;
1221
1222                 opaque_fid = p_hwfn->hw_info.opaque_fid;
1223
1224                 rc = qed_sp_eth_filter_ucast(p_hwfn,
1225                                              opaque_fid,
1226                                              p_filter_cmd,
1227                                              comp_mode,
1228                                              p_comp_data);
1229         }
1230
1231         return rc;
1232 }
1233
1234 static int qed_fill_eth_dev_info(struct qed_dev *cdev,
1235                                  struct qed_dev_eth_info *info)
1236 {
1237         int i;
1238
1239         memset(info, 0, sizeof(*info));
1240
1241         info->num_tc = 1;
1242
1243         if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
1244                 for_each_hwfn(cdev, i)
1245                         info->num_queues += FEAT_NUM(&cdev->hwfns[i],
1246                                                      QED_PF_L2_QUE);
1247                 if (cdev->int_params.fp_msix_cnt)
1248                         info->num_queues = min_t(u8, info->num_queues,
1249                                                  cdev->int_params.fp_msix_cnt);
1250         } else {
1251                 info->num_queues = cdev->num_hwfns;
1252         }
1253
1254         info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN);
1255         ether_addr_copy(info->port_mac,
1256                         cdev->hwfns[0].hw_info.hw_mac_addr);
1257
1258         qed_fill_dev_info(cdev, &info->common);
1259
1260         return 0;
1261 }
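
/* Queue accounting above: under MSI-X the reported num_queues is the sum of
 * each hwfn's L2-queue feature count, capped by the number of fast-path
 * MSI-X vectors actually obtained; in any other interrupt mode it falls back
 * to one queue per hwfn.  The VLAN filter count and port MAC are taken from
 * the leading hwfn (hwfns[0]).
 */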
1262
1263 static void qed_register_eth_ops(struct qed_dev *cdev,
1264                                  struct qed_eth_cb_ops *ops,
1265                                  void *cookie)
1266 {
1267         cdev->protocol_ops.eth  = ops;
1268         cdev->ops_cookie        = cookie;
1269 }
1270
1271 static int qed_start_vport(struct qed_dev *cdev,
1272                            u8 vport_id,
1273                            u16 mtu,
1274                            u8 drop_ttl0_flg,
1275                            u8 inner_vlan_removal_en_flg)
1276 {
1277         int rc, i;
1278
1279         for_each_hwfn(cdev, i) {
1280                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1281
1282                 rc = qed_sp_vport_start(p_hwfn,
1283                                         p_hwfn->hw_info.concrete_fid,
1284                                         p_hwfn->hw_info.opaque_fid,
1285                                         vport_id,
1286                                         mtu,
1287                                         drop_ttl0_flg,
1288                                         inner_vlan_removal_en_flg);
1289
1290                 if (rc) {
1291                         DP_ERR(cdev, "Failed to start VPORT\n");
1292                         return rc;
1293                 }
1294
1295                 qed_hw_start_fastpath(p_hwfn);
1296
1297                 DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
1298                            "Started V-PORT %d with MTU %d\n",
1299                            vport_id, mtu);
1300         }
1301
1302         qed_reset_vport_stats(cdev);
1303
1304         return 0;
1305 }
1306
1307 static int qed_stop_vport(struct qed_dev *cdev,
1308                           u8 vport_id)
1309 {
1310         int rc, i;
1311
1312         for_each_hwfn(cdev, i) {
1313                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1314
1315                 rc = qed_sp_vport_stop(p_hwfn,
1316                                        p_hwfn->hw_info.opaque_fid,
1317                                        vport_id);
1318
1319                 if (rc) {
1320                         DP_ERR(cdev, "Failed to stop VPORT\n");
1321                         return rc;
1322                 }
1323         }
1324         return 0;
1325 }
1326
1327 static int qed_update_vport(struct qed_dev *cdev,
1328                             struct qed_update_vport_params *params)
1329 {
1330         struct qed_sp_vport_update_params sp_params;
1331         struct qed_rss_params sp_rss_params;
1332         int rc, i;
1333
1334         if (!cdev)
1335                 return -ENODEV;
1336
1337         memset(&sp_params, 0, sizeof(sp_params));
1338         memset(&sp_rss_params, 0, sizeof(sp_rss_params));
1339
1340         /* Translate protocol params into sp params */
1341         sp_params.vport_id = params->vport_id;
1342         sp_params.update_vport_active_rx_flg =
1343                 params->update_vport_active_flg;
1344         sp_params.update_vport_active_tx_flg =
1345                 params->update_vport_active_flg;
1346         sp_params.vport_active_rx_flg = params->vport_active_flg;
1347         sp_params.vport_active_tx_flg = params->vport_active_flg;
1348         sp_params.accept_any_vlan = params->accept_any_vlan;
1349         sp_params.update_accept_any_vlan_flg =
1350                 params->update_accept_any_vlan_flg;
1351
1352         /* RSS is a bit tricky, since the upper layer isn't familiar with
1353          * hwfns; for CMT we need to re-fix the RSS values per engine.
1354          */
1355         if (cdev->num_hwfns > 1 && params->update_rss_flg) {
1356                 struct qed_update_vport_rss_params *rss =
1357                         &params->rss_params;
1358                 int k, max = 0;
1359
1360                 /* Find largest entry, since it's possible RSS needs to
1361                  * be disabled [in case only 1 queue per-hwfn]
1362                  */
1363                 for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
1364                         max = (max > rss->rss_ind_table[k]) ?
1365                                 max : rss->rss_ind_table[k];
1366
1367                 /* Either fix RSS values or disable RSS */
1368                 if (cdev->num_hwfns < max + 1) {
1369                         int divisor = (max + cdev->num_hwfns - 1) /
1370                                 cdev->num_hwfns;
1371
1372                         DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
1373                                    "CMT - fixing RSS values (modulo %02x)\n",
1374                                    divisor);
1375
1376                         for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
1377                                 rss->rss_ind_table[k] =
1378                                         rss->rss_ind_table[k] % divisor;
1379                 } else {
1380                         DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
1381                                    "CMT - 1 queue per-hwfn; Disabling RSS\n");
1382                         params->update_rss_flg = 0;
1383                 }
1384         }
1385
1386         /* Now fill in the actual RSS configuration to be applied */
1387         if (params->update_rss_flg) {
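                     /* Request a full RSS update: hash on IPv4/IPv6 headers and
                      * TCP 4-tuples, using the 128-entry indirection table and
                      * the hash key supplied by the caller.
                      */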
1388                 sp_rss_params.update_rss_config = 1;
1389                 sp_rss_params.rss_enable = 1;
1390                 sp_rss_params.update_rss_capabilities = 1;
1391                 sp_rss_params.update_rss_ind_table = 1;
1392                 sp_rss_params.update_rss_key = 1;
1393                 sp_rss_params.rss_caps = QED_RSS_IPV4 |
1394                                          QED_RSS_IPV6 |
1395                                          QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
1396                 sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
1397                 memcpy(sp_rss_params.rss_ind_table,
1398                        params->rss_params.rss_ind_table,
1399                        QED_RSS_IND_TABLE_SIZE * sizeof(u16));
1400                 memcpy(sp_rss_params.rss_key, params->rss_params.rss_key,
1401                        QED_RSS_KEY_SIZE * sizeof(u32));
1402         }
1403         sp_params.rss_params = &sp_rss_params;
1404
1405         for_each_hwfn(cdev, i) {
1406                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1407
1408                 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
1409                 rc = qed_sp_vport_update(p_hwfn, &sp_params,
1410                                          QED_SPQ_MODE_EBLOCK,
1411                                          NULL);
1412                 if (rc) {
1413                         DP_ERR(cdev, "Failed to update VPORT\n");
1414                         return rc;
1415                 }
1416
1417                 DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
1418                            "Updated V-PORT %d: active_flag %d [update %d]\n",
1419                            params->vport_id, params->vport_active_flg,
1420                            params->update_vport_active_flg);
1421         }
1422
1423         return 0;
1424 }
1425
1426 static int qed_start_rxq(struct qed_dev *cdev,
1427                          struct qed_queue_start_common_params *params,
1428                          u16 bd_max_bytes,
1429                          dma_addr_t bd_chain_phys_addr,
1430                          dma_addr_t cqe_pbl_addr,
1431                          u16 cqe_pbl_size,
1432                          void __iomem **pp_prod)
1433 {
1434         int rc, hwfn_index;
1435         struct qed_hwfn *p_hwfn;
1436
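             /* In CMT (100g) mode queues are spread across the hwfns; the
              * owning hwfn is selected from the queue's rss_id.
              */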
1437         hwfn_index = params->rss_id % cdev->num_hwfns;
1438         p_hwfn = &cdev->hwfns[hwfn_index];
1439
1440         /* Fix queue ID in 100g mode */
1441         params->queue_id /= cdev->num_hwfns;
1442
1443         rc = qed_sp_eth_rx_queue_start(p_hwfn,
1444                                        p_hwfn->hw_info.opaque_fid,
1445                                        params,
1446                                        bd_max_bytes,
1447                                        bd_chain_phys_addr,
1448                                        cqe_pbl_addr,
1449                                        cqe_pbl_size,
1450                                        pp_prod);
1451
1452         if (rc) {
1453                 DP_ERR(cdev, "Failed to start RXQ#%d\n", params->queue_id);
1454                 return rc;
1455         }
1456
1457         DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
1458                    "Started RX-Q %d [rss %d] on V-PORT %d and SB %d\n",
1459                    params->queue_id, params->rss_id, params->vport_id,
1460                    params->sb);
1461
1462         return 0;
1463 }
1464
1465 static int qed_stop_rxq(struct qed_dev *cdev,
1466                         struct qed_stop_rxq_params *params)
1467 {
1468         int rc, hwfn_index;
1469         struct qed_hwfn *p_hwfn;
1470
1471         hwfn_index      = params->rss_id % cdev->num_hwfns;
1472         p_hwfn          = &cdev->hwfns[hwfn_index];
1473
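             /* Convert the global queue ID back into a hwfn-relative one,
              * mirroring the division done when the queue was started.
              */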
1474         rc = qed_sp_eth_rx_queue_stop(p_hwfn,
1475                                       params->rx_queue_id / cdev->num_hwfns,
1476                                       params->eq_completion_only,
1477                                       false);
1478         if (rc) {
1479                 DP_ERR(cdev, "Failed to stop RXQ#%d\n", params->rx_queue_id);
1480                 return rc;
1481         }
1482
1483         return 0;
1484 }
1485
1486 static int qed_start_txq(struct qed_dev *cdev,
1487                          struct qed_queue_start_common_params *p_params,
1488                          dma_addr_t pbl_addr,
1489                          u16 pbl_size,
1490                          void __iomem **pp_doorbell)
1491 {
1492         struct qed_hwfn *p_hwfn;
1493         int rc, hwfn_index;
1494
1495         hwfn_index      = p_params->rss_id % cdev->num_hwfns;
1496         p_hwfn          = &cdev->hwfns[hwfn_index];
1497
1498         /* Fix queue ID in 100g mode */
1499         p_params->queue_id /= cdev->num_hwfns;
1500
1501         rc = qed_sp_eth_tx_queue_start(p_hwfn,
1502                                        p_hwfn->hw_info.opaque_fid,
1503                                        p_params,
1504                                        pbl_addr,
1505                                        pbl_size,
1506                                        pp_doorbell);
1507
1508         if (rc) {
1509                 DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
1510                 return rc;
1511         }
1512
1513         DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
1514                    "Started TX-Q %d [rss %d] on V-PORT %d and SB %d\n",
1515                    p_params->queue_id, p_params->rss_id, p_params->vport_id,
1516                    p_params->sb);
1517
1518         return 0;
1519 }
1520
1521 #define QED_HW_STOP_RETRY_LIMIT (10)
1522 static int qed_fastpath_stop(struct qed_dev *cdev)
1523 {
1524         qed_hw_stop_fastpath(cdev);
1525
1526         return 0;
1527 }
1528
1529 static int qed_stop_txq(struct qed_dev *cdev,
1530                         struct qed_stop_txq_params *params)
1531 {
1532         struct qed_hwfn *p_hwfn;
1533         int rc, hwfn_index;
1534
1535         hwfn_index      = params->rss_id % cdev->num_hwfns;
1536         p_hwfn          = &cdev->hwfns[hwfn_index];
1537
1538         rc = qed_sp_eth_tx_queue_stop(p_hwfn,
1539                                       params->tx_queue_id / cdev->num_hwfns);
1540         if (rc) {
1541                 DP_ERR(cdev, "Failed to stop TXQ#%d\n", params->tx_queue_id);
1542                 return rc;
1543         }
1544
1545         return 0;
1546 }
1547
1548 static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
1549                                         enum qed_filter_rx_mode_type type)
1550 {
1551         struct qed_filter_accept_flags accept_flags;
1552
1553         memset(&accept_flags, 0, sizeof(accept_flags));
1554
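             /* Always accept matched unicast/multicast and broadcast on both
              * Rx and Tx; the promiscuous modes below additionally accept
              * unmatched frames on Rx.
              */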
1555         accept_flags.update_rx_mode_config      = 1;
1556         accept_flags.update_tx_mode_config      = 1;
1557         accept_flags.rx_accept_filter           = QED_ACCEPT_UCAST_MATCHED |
1558                                                   QED_ACCEPT_MCAST_MATCHED |
1559                                                   QED_ACCEPT_BCAST;
1560         accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
1561                                         QED_ACCEPT_MCAST_MATCHED |
1562                                         QED_ACCEPT_BCAST;
1563
1564         if (type == QED_FILTER_RX_MODE_TYPE_PROMISC)
1565                 accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
1566                                                  QED_ACCEPT_MCAST_UNMATCHED;
1567         else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC)
1568                 accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
1569
1570         return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false,
1571                                      QED_SPQ_MODE_CB, NULL);
1572 }
1573
1574 static int qed_configure_filter_ucast(struct qed_dev *cdev,
1575                                       struct qed_filter_ucast_params *params)
1576 {
1577         struct qed_filter_ucast ucast;
1578
1579         if (!params->vlan_valid && !params->mac_valid) {
1580                 DP_NOTICE(cdev,
1581                           "Tried configuring a unicast filter, but both MAC and VLAN are not set\n");
1583                 return -EINVAL;
1584         }
1585
1586         memset(&ucast, 0, sizeof(ucast));
1587         switch (params->type) {
1588         case QED_FILTER_XCAST_TYPE_ADD:
1589                 ucast.opcode = QED_FILTER_ADD;
1590                 break;
1591         case QED_FILTER_XCAST_TYPE_DEL:
1592                 ucast.opcode = QED_FILTER_REMOVE;
1593                 break;
1594         case QED_FILTER_XCAST_TYPE_REPLACE:
1595                 ucast.opcode = QED_FILTER_REPLACE;
1596                 break;
1597         default:
1598                 DP_NOTICE(cdev, "Unknown unicast filter type %d\n",
1599                           params->type);
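                     /* No error return here - 'ucast' was zeroed above, so an
                      * unrecognized type proceeds as QED_FILTER_ADD.
                      */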
1600         }
1601
1602         if (params->vlan_valid && params->mac_valid) {
1603                 ucast.type = QED_FILTER_MAC_VLAN;
1604                 ether_addr_copy(ucast.mac, params->mac);
1605                 ucast.vlan = params->vlan;
1606         } else if (params->mac_valid) {
1607                 ucast.type = QED_FILTER_MAC;
1608                 ether_addr_copy(ucast.mac, params->mac);
1609         } else {
1610                 ucast.type = QED_FILTER_VLAN;
1611                 ucast.vlan = params->vlan;
1612         }
1613
1614         ucast.is_rx_filter = true;
1615         ucast.is_tx_filter = true;
1616
1617         return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL);
1618 }
1619
1620 static int qed_configure_filter_mcast(struct qed_dev *cdev,
1621                                       struct qed_filter_mcast_params *params)
1622 {
1623         struct qed_filter_mcast mcast;
1624         int i;
1625
1626         memset(&mcast, 0, sizeof(mcast));
1627         switch (params->type) {
1628         case QED_FILTER_XCAST_TYPE_ADD:
1629                 mcast.opcode = QED_FILTER_ADD;
1630                 break;
1631         case QED_FILTER_XCAST_TYPE_DEL:
1632                 mcast.opcode = QED_FILTER_REMOVE;
1633                 break;
1634         default:
1635                 DP_NOTICE(cdev, "Unknown multicast filter type %d\n",
1636                           params->type);
1637         }
1638
1639         mcast.num_mc_addrs = params->num;
1640         for (i = 0; i < mcast.num_mc_addrs; i++)
1641                 ether_addr_copy(mcast.mac[i], params->mac[i]);
1642
1643         return qed_filter_mcast_cmd(cdev, &mcast,
1644                                     QED_SPQ_MODE_CB, NULL);
1645 }
1646
1647 static int qed_configure_filter(struct qed_dev *cdev,
1648                                 struct qed_filter_params *params)
1649 {
1650         enum qed_filter_rx_mode_type accept_flags;
1651
1652         switch (params->type) {
1653         case QED_FILTER_TYPE_UCAST:
1654                 return qed_configure_filter_ucast(cdev, &params->filter.ucast);
1655         case QED_FILTER_TYPE_MCAST:
1656                 return qed_configure_filter_mcast(cdev, &params->filter.mcast);
1657         case QED_FILTER_TYPE_RX_MODE:
1658                 accept_flags = params->filter.accept_flags;
1659                 return qed_configure_filter_rx_mode(cdev, accept_flags);
1660         default:
1661                 DP_NOTICE(cdev, "Unknown filter type %d\n",
1662                           (int)params->type);
1663                 return -EINVAL;
1664         }
1665 }
1666
1667 static int qed_fp_cqe_completion(struct qed_dev *dev,
1668                                  u8 rss_id,
1669                                  struct eth_slow_path_rx_cqe *cqe)
1670 {
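             /* Route the slow-path CQE to the hwfn owning the queue, which is
              * identified by rss_id modulo the number of hwfns.
              */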
1671         return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
1672                                       cqe);
1673 }
1674
1675 static const struct qed_eth_ops qed_eth_ops_pass = {
1676         .common = &qed_common_ops_pass,
1677         .fill_dev_info = &qed_fill_eth_dev_info,
1678         .register_ops = &qed_register_eth_ops,
1679         .vport_start = &qed_start_vport,
1680         .vport_stop = &qed_stop_vport,
1681         .vport_update = &qed_update_vport,
1682         .q_rx_start = &qed_start_rxq,
1683         .q_rx_stop = &qed_stop_rxq,
1684         .q_tx_start = &qed_start_txq,
1685         .q_tx_stop = &qed_stop_txq,
1686         .filter_config = &qed_configure_filter,
1687         .fastpath_stop = &qed_fastpath_stop,
1688         .eth_cqe_completion = &qed_fp_cqe_completion,
1689         .get_vport_stats = &qed_get_vport_stats,
1690 };
1691
1692 const struct qed_eth_ops *qed_get_eth_ops(u32 version)
1693 {
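             /* Refuse to hand out the ops when the caller was built against a
              * different L2 interface version.
              */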
1694         if (version != QED_ETH_INTERFACE_VERSION) {
1695                 pr_notice("Cannot supply ethtool operations [%08x != %08x]\n",
1696                           version, QED_ETH_INTERFACE_VERSION);
1697                 return NULL;
1698         }
1699
1700         return &qed_eth_ops_pass;
1701 }
1702 EXPORT_SYMBOL(qed_get_eth_ops);
1703
1704 void qed_put_eth_ops(void)
1705 {
1706         /* TODO - reference count for module? */
1707 }
1708 EXPORT_SYMBOL(qed_put_eth_ops);