/*
 * Copyright (c) 2011 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
34 #include <linux/dcbnl.h>
35 #include <linux/math64.h>
/* CEE DCB operational state as reported via dcbnl getstate/setstate. */
enum {
	MLX4_CEE_STATE_DOWN   = 0,
	MLX4_CEE_STATE_UP     = 1,
};
45 /* Definitions for QCN
48 struct mlx4_congestion_control_mb_prio_802_1_qau_params {
49 __be32 modify_enable_high;
50 __be32 modify_enable_low;
52 __be32 extended_enable;
54 __be32 rpg_time_reset;
55 __be32 rpg_byte_reset;
61 __be32 rpg_min_dec_fac;
67 __be32 gd_coefficient;
69 __be32 cp_sample_base;
73 struct mlx4_congestion_control_mb_prio_802_1_qau_statistics {
74 __be64 rppp_rp_centiseconds;
77 __be32 rppp_created_rps;
78 __be32 estimated_total_rate;
79 __be32 max_active_rate_limiter_index;
80 __be32 dropped_cnms_busy_fw;
82 __be32 cnms_handled_successfully;
83 __be32 min_total_limiters_rate;
84 __be32 max_total_limiters_rate;
88 static u8 mlx4_en_dcbnl_getcap(struct net_device *dev, int capid, u8 *cap)
90 struct mlx4_en_priv *priv = netdev_priv(dev);
93 case DCB_CAP_ATTR_PFC:
96 case DCB_CAP_ATTR_DCBX:
97 *cap = priv->cee_params.dcbx_cap;
99 case DCB_CAP_ATTR_PFC_TCS:
100 *cap = 1 << mlx4_max_tc(priv->mdev->dev);
110 static u8 mlx4_en_dcbnl_getpfcstate(struct net_device *netdev)
112 struct mlx4_en_priv *priv = netdev_priv(netdev);
114 return priv->cee_params.dcb_cfg.pfc_state;
117 static void mlx4_en_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
119 struct mlx4_en_priv *priv = netdev_priv(netdev);
121 priv->cee_params.dcb_cfg.pfc_state = state;
124 static void mlx4_en_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
127 struct mlx4_en_priv *priv = netdev_priv(netdev);
129 *setting = priv->cee_params.dcb_cfg.tc_config[priority].dcb_pfc;
132 static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
135 struct mlx4_en_priv *priv = netdev_priv(netdev);
137 priv->cee_params.dcb_cfg.tc_config[priority].dcb_pfc = setting;
138 priv->cee_params.dcb_cfg.pfc_state = true;
141 static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
143 struct mlx4_en_priv *priv = netdev_priv(netdev);
145 if (!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED))
148 if (tcid == DCB_NUMTCS_ATTR_PFC)
149 *num = mlx4_max_tc(priv->mdev->dev);
156 static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
158 struct mlx4_en_priv *priv = netdev_priv(netdev);
159 struct mlx4_en_dev *mdev = priv->mdev;
160 struct mlx4_en_cee_config *dcb_cfg = &priv->cee_params.dcb_cfg;
163 if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
166 if (dcb_cfg->pfc_state) {
169 priv->prof->rx_pause = 0;
170 priv->prof->tx_pause = 0;
171 for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) {
172 u8 tc_mask = 1 << tc;
174 switch (dcb_cfg->tc_config[tc].dcb_pfc) {
176 priv->prof->tx_ppp &= ~tc_mask;
177 priv->prof->rx_ppp &= ~tc_mask;
179 case pfc_enabled_full:
180 priv->prof->tx_ppp |= tc_mask;
181 priv->prof->rx_ppp |= tc_mask;
184 priv->prof->tx_ppp |= tc_mask;
185 priv->prof->rx_ppp &= ~tc_mask;
188 priv->prof->tx_ppp &= ~tc_mask;
189 priv->prof->rx_ppp |= tc_mask;
195 en_dbg(DRV, priv, "Set pfc on\n");
197 priv->prof->rx_pause = 1;
198 priv->prof->tx_pause = 1;
199 en_dbg(DRV, priv, "Set pfc off\n");
202 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
203 priv->rx_skb_size + ETH_FCS_LEN,
204 priv->prof->tx_pause,
206 priv->prof->rx_pause,
209 en_err(priv, "Failed setting pause params\n");
213 static u8 mlx4_en_dcbnl_get_state(struct net_device *dev)
215 struct mlx4_en_priv *priv = netdev_priv(dev);
217 if (priv->flags & MLX4_EN_FLAG_DCB_ENABLED)
218 return MLX4_CEE_STATE_UP;
220 return MLX4_CEE_STATE_DOWN;
223 static u8 mlx4_en_dcbnl_set_state(struct net_device *dev, u8 state)
225 struct mlx4_en_priv *priv = netdev_priv(dev);
228 if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
231 if (!!(state) == !!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED))
235 priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
236 num_tcs = IEEE_8021QAZ_MAX_TCS;
238 priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
241 return mlx4_en_setup_tc(dev, num_tcs);
244 /* On success returns a non-zero 802.1p user priority bitmap
245 * otherwise returns 0 as the invalid user priority bitmap to
248 static int mlx4_en_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
250 struct mlx4_en_priv *priv = netdev_priv(netdev);
251 struct dcb_app app = {
255 if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
258 return dcb_getapp(netdev, &app);
261 static int mlx4_en_dcbnl_setapp(struct net_device *netdev, u8 idtype,
264 struct mlx4_en_priv *priv = netdev_priv(netdev);
267 if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
270 memset(&app, 0, sizeof(struct dcb_app));
271 app.selector = idtype;
275 return dcb_setapp(netdev, &app);
278 static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev,
279 struct ieee_ets *ets)
281 struct mlx4_en_priv *priv = netdev_priv(dev);
282 struct ieee_ets *my_ets = &priv->ets;
287 ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
288 ets->cbs = my_ets->cbs;
289 memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
290 memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
291 memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
296 static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets)
299 int total_ets_bw = 0;
302 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
303 if (ets->prio_tc[i] >= MLX4_EN_NUM_UP) {
304 en_err(priv, "Bad priority in UP <=> TC mapping. TC: %d, UP: %d\n",
309 switch (ets->tc_tsa[i]) {
310 case IEEE_8021QAZ_TSA_STRICT:
312 case IEEE_8021QAZ_TSA_ETS:
314 total_ets_bw += ets->tc_tx_bw[i];
317 en_err(priv, "TC[%d]: Not supported TSA: %d\n",
323 if (has_ets_tc && total_ets_bw != MLX4_EN_BW_MAX) {
324 en_err(priv, "Bad ETS BW sum: %d. Should be exactly 100%%\n",
332 static int mlx4_en_config_port_scheduler(struct mlx4_en_priv *priv,
333 struct ieee_ets *ets, u16 *ratelimit)
335 struct mlx4_en_dev *mdev = priv->mdev;
338 __u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS] = { 0 };
339 __u8 pg[IEEE_8021QAZ_MAX_TCS] = { 0 };
341 ets = ets ?: &priv->ets;
342 ratelimit = ratelimit ?: priv->maxrate;
344 /* higher TC means higher priority => lower pg */
345 for (i = IEEE_8021QAZ_MAX_TCS - 1; i >= 0; i--) {
346 switch (ets->tc_tsa[i]) {
347 case IEEE_8021QAZ_TSA_STRICT:
348 pg[i] = num_strict++;
349 tc_tx_bw[i] = MLX4_EN_BW_MAX;
351 case IEEE_8021QAZ_TSA_ETS:
352 pg[i] = MLX4_EN_TC_ETS;
353 tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX4_EN_BW_MIN;
358 return mlx4_SET_PORT_SCHEDULER(mdev->dev, priv->port, tc_tx_bw, pg,
363 mlx4_en_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
365 struct mlx4_en_priv *priv = netdev_priv(dev);
366 struct mlx4_en_dev *mdev = priv->mdev;
369 err = mlx4_en_ets_validate(priv, ets);
373 err = mlx4_SET_PORT_PRIO2TC(mdev->dev, priv->port, ets->prio_tc);
377 err = mlx4_en_config_port_scheduler(priv, ets, NULL);
381 memcpy(&priv->ets, ets, sizeof(priv->ets));
386 static int mlx4_en_dcbnl_ieee_getpfc(struct net_device *dev,
387 struct ieee_pfc *pfc)
389 struct mlx4_en_priv *priv = netdev_priv(dev);
391 pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
392 pfc->pfc_en = priv->prof->tx_ppp;
397 static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
398 struct ieee_pfc *pfc)
400 struct mlx4_en_priv *priv = netdev_priv(dev);
401 struct mlx4_en_port_profile *prof = priv->prof;
402 struct mlx4_en_dev *mdev = priv->mdev;
405 en_dbg(DRV, priv, "cap: 0x%x en: 0x%x mbc: 0x%x delay: %d\n",
411 prof->rx_pause = !pfc->pfc_en;
412 prof->tx_pause = !pfc->pfc_en;
413 prof->rx_ppp = pfc->pfc_en;
414 prof->tx_ppp = pfc->pfc_en;
416 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
417 priv->rx_skb_size + ETH_FCS_LEN,
423 en_err(priv, "Failed setting pause params\n");
425 mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
426 prof->rx_ppp, prof->rx_pause,
427 prof->tx_ppp, prof->tx_pause);
432 static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev)
434 struct mlx4_en_priv *priv = netdev_priv(dev);
436 return priv->cee_params.dcbx_cap;
439 static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
441 struct mlx4_en_priv *priv = netdev_priv(dev);
442 struct ieee_ets ets = {0};
443 struct ieee_pfc pfc = {0};
445 if (mode == priv->cee_params.dcbx_cap)
448 if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
449 ((mode & DCB_CAP_DCBX_VER_IEEE) &&
450 (mode & DCB_CAP_DCBX_VER_CEE)) ||
451 !(mode & DCB_CAP_DCBX_HOST))
454 priv->cee_params.dcbx_cap = mode;
456 ets.ets_cap = IEEE_8021QAZ_MAX_TCS;
457 pfc.pfc_cap = IEEE_8021QAZ_MAX_TCS;
459 if (mode & DCB_CAP_DCBX_VER_IEEE) {
460 if (mlx4_en_dcbnl_ieee_setets(dev, &ets))
462 if (mlx4_en_dcbnl_ieee_setpfc(dev, &pfc))
464 } else if (mode & DCB_CAP_DCBX_VER_CEE) {
465 if (mlx4_en_dcbnl_set_all(dev))
468 if (mlx4_en_dcbnl_ieee_setets(dev, &ets))
470 if (mlx4_en_dcbnl_ieee_setpfc(dev, &pfc))
472 if (mlx4_en_setup_tc(dev, 0))
481 #define MLX4_RATELIMIT_UNITS_IN_KB 100000 /* rate-limit HW unit in Kbps */
482 static int mlx4_en_dcbnl_ieee_getmaxrate(struct net_device *dev,
483 struct ieee_maxrate *maxrate)
485 struct mlx4_en_priv *priv = netdev_priv(dev);
488 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
489 maxrate->tc_maxrate[i] =
490 priv->maxrate[i] * MLX4_RATELIMIT_UNITS_IN_KB;
495 static int mlx4_en_dcbnl_ieee_setmaxrate(struct net_device *dev,
496 struct ieee_maxrate *maxrate)
498 struct mlx4_en_priv *priv = netdev_priv(dev);
499 u16 tmp[IEEE_8021QAZ_MAX_TCS];
502 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
503 /* Convert from Kbps into HW units, rounding result up.
504 * Setting to 0, means unlimited BW.
506 tmp[i] = div_u64(maxrate->tc_maxrate[i] +
507 MLX4_RATELIMIT_UNITS_IN_KB - 1,
508 MLX4_RATELIMIT_UNITS_IN_KB);
511 err = mlx4_en_config_port_scheduler(priv, NULL, tmp);
515 memcpy(priv->maxrate, tmp, sizeof(priv->maxrate));
520 #define RPG_ENABLE_BIT 31
521 #define CN_TAG_BIT 30
523 static int mlx4_en_dcbnl_ieee_getqcn(struct net_device *dev,
524 struct ieee_qcn *qcn)
526 struct mlx4_en_priv *priv = netdev_priv(dev);
527 struct mlx4_congestion_control_mb_prio_802_1_qau_params *hw_qcn;
528 struct mlx4_cmd_mailbox *mailbox_out = NULL;
529 u64 mailbox_in_dma = 0;
533 if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
536 mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
537 if (IS_ERR(mailbox_out))
540 (struct mlx4_congestion_control_mb_prio_802_1_qau_params *)
543 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
544 inmod = priv->port | ((1 << i) << 8) |
545 (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);
546 err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma,
548 inmod, MLX4_CONGESTION_CONTROL_GET_PARAMS,
549 MLX4_CMD_CONGESTION_CTRL_OPCODE,
550 MLX4_CMD_TIME_CLASS_C,
553 mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
558 be32_to_cpu(hw_qcn->extended_enable) >> RPG_ENABLE_BIT;
559 qcn->rppp_max_rps[i] =
560 be32_to_cpu(hw_qcn->rppp_max_rps);
561 qcn->rpg_time_reset[i] =
562 be32_to_cpu(hw_qcn->rpg_time_reset);
563 qcn->rpg_byte_reset[i] =
564 be32_to_cpu(hw_qcn->rpg_byte_reset);
565 qcn->rpg_threshold[i] =
566 be32_to_cpu(hw_qcn->rpg_threshold);
567 qcn->rpg_max_rate[i] =
568 be32_to_cpu(hw_qcn->rpg_max_rate);
569 qcn->rpg_ai_rate[i] =
570 be32_to_cpu(hw_qcn->rpg_ai_rate);
571 qcn->rpg_hai_rate[i] =
572 be32_to_cpu(hw_qcn->rpg_hai_rate);
574 be32_to_cpu(hw_qcn->rpg_gd);
575 qcn->rpg_min_dec_fac[i] =
576 be32_to_cpu(hw_qcn->rpg_min_dec_fac);
577 qcn->rpg_min_rate[i] =
578 be32_to_cpu(hw_qcn->rpg_min_rate);
579 qcn->cndd_state_machine[i] =
582 mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
586 static int mlx4_en_dcbnl_ieee_setqcn(struct net_device *dev,
587 struct ieee_qcn *qcn)
589 struct mlx4_en_priv *priv = netdev_priv(dev);
590 struct mlx4_congestion_control_mb_prio_802_1_qau_params *hw_qcn;
591 struct mlx4_cmd_mailbox *mailbox_in = NULL;
592 u64 mailbox_in_dma = 0;
595 #define MODIFY_ENABLE_HIGH_MASK 0xc0000000
596 #define MODIFY_ENABLE_LOW_MASK 0xffc00000
598 if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
601 mailbox_in = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
602 if (IS_ERR(mailbox_in))
605 mailbox_in_dma = mailbox_in->dma;
607 (struct mlx4_congestion_control_mb_prio_802_1_qau_params *)mailbox_in->buf;
608 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
609 inmod = priv->port | ((1 << i) << 8) |
610 (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);
612 /* Before updating QCN parameter,
613 * need to set it's modify enable bit to 1
616 hw_qcn->modify_enable_high = cpu_to_be32(
617 MODIFY_ENABLE_HIGH_MASK);
618 hw_qcn->modify_enable_low = cpu_to_be32(MODIFY_ENABLE_LOW_MASK);
620 hw_qcn->extended_enable = cpu_to_be32(qcn->rpg_enable[i] << RPG_ENABLE_BIT);
621 hw_qcn->rppp_max_rps = cpu_to_be32(qcn->rppp_max_rps[i]);
622 hw_qcn->rpg_time_reset = cpu_to_be32(qcn->rpg_time_reset[i]);
623 hw_qcn->rpg_byte_reset = cpu_to_be32(qcn->rpg_byte_reset[i]);
624 hw_qcn->rpg_threshold = cpu_to_be32(qcn->rpg_threshold[i]);
625 hw_qcn->rpg_max_rate = cpu_to_be32(qcn->rpg_max_rate[i]);
626 hw_qcn->rpg_ai_rate = cpu_to_be32(qcn->rpg_ai_rate[i]);
627 hw_qcn->rpg_hai_rate = cpu_to_be32(qcn->rpg_hai_rate[i]);
628 hw_qcn->rpg_gd = cpu_to_be32(qcn->rpg_gd[i]);
629 hw_qcn->rpg_min_dec_fac = cpu_to_be32(qcn->rpg_min_dec_fac[i]);
630 hw_qcn->rpg_min_rate = cpu_to_be32(qcn->rpg_min_rate[i]);
631 priv->cndd_state[i] = qcn->cndd_state_machine[i];
632 if (qcn->cndd_state_machine[i] == DCB_CNDD_INTERIOR_READY)
633 hw_qcn->extended_enable |= cpu_to_be32(1 << CN_TAG_BIT);
635 err = mlx4_cmd(priv->mdev->dev, mailbox_in_dma, inmod,
636 MLX4_CONGESTION_CONTROL_SET_PARAMS,
637 MLX4_CMD_CONGESTION_CTRL_OPCODE,
638 MLX4_CMD_TIME_CLASS_C,
641 mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in);
645 mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in);
649 static int mlx4_en_dcbnl_ieee_getqcnstats(struct net_device *dev,
650 struct ieee_qcn_stats *qcn_stats)
652 struct mlx4_en_priv *priv = netdev_priv(dev);
653 struct mlx4_congestion_control_mb_prio_802_1_qau_statistics *hw_qcn_stats;
654 struct mlx4_cmd_mailbox *mailbox_out = NULL;
655 u64 mailbox_in_dma = 0;
659 if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
662 mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
663 if (IS_ERR(mailbox_out))
667 (struct mlx4_congestion_control_mb_prio_802_1_qau_statistics *)
670 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
671 inmod = priv->port | ((1 << i) << 8) |
672 (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);
673 err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma,
674 mailbox_out->dma, inmod,
675 MLX4_CONGESTION_CONTROL_GET_STATISTICS,
676 MLX4_CMD_CONGESTION_CTRL_OPCODE,
677 MLX4_CMD_TIME_CLASS_C,
680 mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
683 qcn_stats->rppp_rp_centiseconds[i] =
684 be64_to_cpu(hw_qcn_stats->rppp_rp_centiseconds);
685 qcn_stats->rppp_created_rps[i] =
686 be32_to_cpu(hw_qcn_stats->rppp_created_rps);
688 mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
692 const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
693 .ieee_getets = mlx4_en_dcbnl_ieee_getets,
694 .ieee_setets = mlx4_en_dcbnl_ieee_setets,
695 .ieee_getmaxrate = mlx4_en_dcbnl_ieee_getmaxrate,
696 .ieee_setmaxrate = mlx4_en_dcbnl_ieee_setmaxrate,
697 .ieee_getqcn = mlx4_en_dcbnl_ieee_getqcn,
698 .ieee_setqcn = mlx4_en_dcbnl_ieee_setqcn,
699 .ieee_getqcnstats = mlx4_en_dcbnl_ieee_getqcnstats,
700 .ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc,
701 .ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc,
703 .getstate = mlx4_en_dcbnl_get_state,
704 .setstate = mlx4_en_dcbnl_set_state,
705 .getpfccfg = mlx4_en_dcbnl_get_pfc_cfg,
706 .setpfccfg = mlx4_en_dcbnl_set_pfc_cfg,
707 .setall = mlx4_en_dcbnl_set_all,
708 .getcap = mlx4_en_dcbnl_getcap,
709 .getnumtcs = mlx4_en_dcbnl_getnumtcs,
710 .getpfcstate = mlx4_en_dcbnl_getpfcstate,
711 .setpfcstate = mlx4_en_dcbnl_setpfcstate,
712 .getapp = mlx4_en_dcbnl_getapp,
713 .setapp = mlx4_en_dcbnl_setapp,
715 .getdcbx = mlx4_en_dcbnl_getdcbx,
716 .setdcbx = mlx4_en_dcbnl_setdcbx,
719 const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops = {
720 .ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc,
721 .ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc,
723 .setstate = mlx4_en_dcbnl_set_state,
724 .getpfccfg = mlx4_en_dcbnl_get_pfc_cfg,
725 .setpfccfg = mlx4_en_dcbnl_set_pfc_cfg,
726 .setall = mlx4_en_dcbnl_set_all,
727 .getnumtcs = mlx4_en_dcbnl_getnumtcs,
728 .getpfcstate = mlx4_en_dcbnl_getpfcstate,
729 .setpfcstate = mlx4_en_dcbnl_setpfcstate,
730 .getapp = mlx4_en_dcbnl_getapp,
731 .setapp = mlx4_en_dcbnl_setapp,
733 .getdcbx = mlx4_en_dcbnl_getdcbx,
734 .setdcbx = mlx4_en_dcbnl_setdcbx,