net/mlx5e: Single flow order-0 pages for Striding RQ
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 98fb0d8..136554b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
 #include "eswitch.h"
 #include "vxlan.h"
 
-enum {
-       MLX5_EN_QP_FLUSH_TIMEOUT_MS     = 5000,
-       MLX5_EN_QP_FLUSH_MSLEEP_QUANT   = 20,
-       MLX5_EN_QP_FLUSH_MAX_ITER       = MLX5_EN_QP_FLUSH_TIMEOUT_MS /
-                                         MLX5_EN_QP_FLUSH_MSLEEP_QUANT,
-};
-
 struct mlx5e_rq_param {
        u32                     rqc[MLX5_ST_SZ_DW(rqc)];
        struct mlx5_wq_param    wq;
@@ -145,7 +138,6 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
                s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
                s->rx_wqe_err   += rq_stats->wqe_err;
                s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
-               s->rx_mpwqe_frag   += rq_stats->mpwqe_frag;
                s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
                s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
                s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
@@ -162,6 +154,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
                        s->tx_queue_stopped     += sq_stats->stopped;
                        s->tx_queue_wake        += sq_stats->wake;
                        s->tx_queue_dropped     += sq_stats->dropped;
+                       s->tx_xmit_more         += sq_stats->xmit_more;
                        s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
                        tx_offload_none         += sq_stats->csum_none;
                }
@@ -180,18 +173,15 @@ static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
 {
        int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
        u32 *out = (u32 *)priv->stats.vport.query_vport_out;
-       u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
+       u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
        struct mlx5_core_dev *mdev = priv->mdev;
 
-       memset(in, 0, sizeof(in));
-
        MLX5_SET(query_vport_counter_in, in, opcode,
                 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
        MLX5_SET(query_vport_counter_in, in, op_mod, 0);
        MLX5_SET(query_vport_counter_in, in, other_vport, 0);
 
        memset(out, 0, outlen);
-
        mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
 }
 
@@ -304,6 +294,107 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
 #define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
 #define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
 
+static inline int mlx5e_get_wqe_mtt_sz(void)
+{
+       /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
+        * To avoid copying garbage after the mtt array, we allocate
+        * a little more.
+        */
+       return ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(__be64),
+                    MLX5_UMR_MTT_ALIGNMENT);
+}
+
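+/* Build the UMR WQE that maps this RQ WQE's pages into the UMR mkey:
+ * the ctrl segment targets the mkey (cseg->imm), the UMR ctrl segment
+ * selects this WQE's MTT window via the translation offset, and the
+ * data segment points at the DMA-mapped MTT array itself.
+ */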
+static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, struct mlx5e_sq *sq,
+                                      struct mlx5e_umr_wqe *wqe, u16 ix)
+{
+       struct mlx5_wqe_ctrl_seg      *cseg = &wqe->ctrl;
+       struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
+       struct mlx5_wqe_data_seg      *dseg = &wqe->data;
+       struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+       u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
+       u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix);
+
+       cseg->qpn_ds    = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
+                                     ds_cnt);
+       cseg->fm_ce_se  = MLX5_WQE_CTRL_CQ_UPDATE;
+       cseg->imm       = rq->mkey_be;
+
+       ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
+       ucseg->klm_octowords =
+               cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
+       ucseg->bsf_octowords =
+               cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
+       ucseg->mkey_mask     = cpu_to_be64(MLX5_MKEY_MASK_FREE);
+
+       dseg->lkey = sq->mkey_be;
+       dseg->addr = cpu_to_be64(wi->umr.mtt_addr);
+}
+
+static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
+                                    struct mlx5e_channel *c)
+{
+       int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
+       int mtt_sz = mlx5e_get_wqe_mtt_sz();
+       int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1;
+       int i;
+
+       rq->wqe_info = kzalloc_node(wq_sz * sizeof(*rq->wqe_info),
+                                   GFP_KERNEL, cpu_to_node(c->cpu));
+       if (!rq->wqe_info)
+               goto err_out;
+
+       /* We allocate more than mtt_sz as we will align the pointer */
+       rq->mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL,
+                                       cpu_to_node(c->cpu));
+       if (unlikely(!rq->mtt_no_align))
+               goto err_free_wqe_info;
+
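+       /* For each WQE: align its MTT array within the over-allocated
+        * buffer, DMA-map it, and pre-build the UMR WQE that will map it.
+        */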
+       for (i = 0; i < wq_sz; i++) {
+               struct mlx5e_mpw_info *wi = &rq->wqe_info[i];
+
+               wi->umr.mtt = PTR_ALIGN(rq->mtt_no_align + i * mtt_alloc,
+                                       MLX5_UMR_ALIGN);
+               wi->umr.mtt_addr = dma_map_single(c->pdev, wi->umr.mtt, mtt_sz,
+                                                 PCI_DMA_TODEVICE);
+               if (unlikely(dma_mapping_error(c->pdev, wi->umr.mtt_addr)))
+                       goto err_unmap_mtts;
+
+               mlx5e_build_umr_wqe(rq, &c->icosq, &wi->umr.wqe, i);
+       }
+
+       return 0;
+
+err_unmap_mtts:
+       while (--i >= 0) {
+               struct mlx5e_mpw_info *wi = &rq->wqe_info[i];
+
+               dma_unmap_single(c->pdev, wi->umr.mtt_addr, mtt_sz,
+                                PCI_DMA_TODEVICE);
+       }
+       kfree(rq->mtt_no_align);
+err_free_wqe_info:
+       kfree(rq->wqe_info);
+
+err_out:
+       return -ENOMEM;
+}
+
+static void mlx5e_rq_free_mpwqe_info(struct mlx5e_rq *rq)
+{
+       int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
+       int mtt_sz = mlx5e_get_wqe_mtt_sz();
+       int i;
+
+       for (i = 0; i < wq_sz; i++) {
+               struct mlx5e_mpw_info *wi = &rq->wqe_info[i];
+
+               dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz,
+                                PCI_DMA_TODEVICE);
+       }
+       kfree(rq->mtt_no_align);
+       kfree(rq->wqe_info);
+}
+
 static int mlx5e_create_rq(struct mlx5e_channel *c,
                           struct mlx5e_rq_param *param,
                           struct mlx5e_rq *rq)
@@ -328,22 +419,31 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
 
        wq_sz = mlx5_wq_ll_get_size(&rq->wq);
 
+       rq->wq_type = priv->params.rq_wq_type;
+       rq->pdev    = c->pdev;
+       rq->netdev  = c->netdev;
+       rq->tstamp  = &priv->tstamp;
+       rq->channel = c;
+       rq->ix      = c->ix;
+       rq->priv    = c->priv;
+
        switch (priv->params.rq_wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-               rq->wqe_info = kzalloc_node(wq_sz * sizeof(*rq->wqe_info),
-                                           GFP_KERNEL, cpu_to_node(c->cpu));
-               if (!rq->wqe_info) {
-                       err = -ENOMEM;
-                       goto err_rq_wq_destroy;
-               }
                rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
                rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
                rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
 
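+               /* each channel owns a disjoint, RQ-sized slice of the
+                * single UMR mkey's MTT space
+                */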
+               rq->mpwqe_mtt_offset = c->ix *
+                       MLX5E_REQUIRED_MTTS(1, BIT(priv->params.log_rq_size));
+
                rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
                rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
                rq->wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
                byte_count = rq->wqe_sz;
+               rq->mkey_be = cpu_to_be32(c->priv->umr_mkey.key);
+               err = mlx5e_rq_alloc_mpwqe_info(rq, c);
+               if (err)
+                       goto err_rq_wq_destroy;
                break;
        default: /* MLX5_WQ_TYPE_LINKED_LIST */
                rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
@@ -362,27 +462,19 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
                rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz);
                byte_count = rq->wqe_sz;
                byte_count |= MLX5_HW_START_PADDING;
+               rq->mkey_be = c->mkey_be;
        }
 
        for (i = 0; i < wq_sz; i++) {
                struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
 
                wqe->data.byte_count = cpu_to_be32(byte_count);
+               wqe->data.lkey = rq->mkey_be;
        }
 
        INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
        rq->am.mode = priv->params.rx_cq_period_mode;
 
-       rq->wq_type = priv->params.rq_wq_type;
-       rq->pdev    = c->pdev;
-       rq->netdev  = c->netdev;
-       rq->tstamp  = &priv->tstamp;
-       rq->channel = c;
-       rq->ix      = c->ix;
-       rq->priv    = c->priv;
-       rq->mkey_be = c->mkey_be;
-       rq->umr_mkey_be = cpu_to_be32(c->priv->umr_mkey.key);
-
        return 0;
 
 err_rq_wq_destroy:
@@ -395,7 +487,7 @@ static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
 {
        switch (rq->wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-               kfree(rq->wqe_info);
+               mlx5e_rq_free_mpwqe_info(rq);
                break;
        default: /* MLX5_WQ_TYPE_LINKED_LIST */
                kfree(rq->skb);
@@ -428,7 +520,6 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
 
        MLX5_SET(rqc,  rqc, cqn,                rq->cq.mcq.cqn);
        MLX5_SET(rqc,  rqc, state,              MLX5_RQC_STATE_RST);
-       MLX5_SET(rqc,  rqc, flush_in_error_en,  1);
        MLX5_SET(rqc,  rqc, vsd, priv->params.vlan_strip_disable);
        MLX5_SET(wq,   wq,  log_wq_pg_sz,       rq->wq_ctrl.buf.page_shift -
                                                MLX5_ADAPTER_PAGE_SHIFT);
@@ -492,7 +583,8 @@ static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
        rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
 
        MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
-       MLX5_SET64(modify_rq_in, in, modify_bitmask, MLX5_RQ_BITMASK_VSD);
+       MLX5_SET64(modify_rq_in, in, modify_bitmask,
+                  MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
        MLX5_SET(rqc, rqc, vsd, vsd);
        MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
 
@@ -525,6 +617,27 @@ static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
        return -ETIMEDOUT;
 }
 
+static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
+{
+       struct mlx5_wq_ll *wq = &rq->wq;
+       struct mlx5e_rx_wqe *wqe;
+       __be16 wqe_ix_be;
+       u16 wqe_ix;
+
+       /* UMR WQE (if in progress) is always at wq->head */
+       if (test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
+               mlx5e_free_rx_mpwqe(rq, &rq->wqe_info[wq->head]);
+
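+       /* walk the RQ's linked list from tail to head, releasing the
+        * pages of every outstanding WQE
+        */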
+       while (!mlx5_wq_ll_is_empty(wq)) {
+               wqe_ix_be = *wq->tail_next;
+               wqe_ix    = be16_to_cpu(wqe_ix_be);
+               wqe       = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
+               rq->dealloc_wqe(rq, wqe_ix);
+               mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
+                              &wqe->next.next_wqe_index);
+       }
+}
+
 static int mlx5e_open_rq(struct mlx5e_channel *c,
                         struct mlx5e_rq_param *param,
                         struct mlx5e_rq *rq)
@@ -548,8 +661,6 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
        if (param->am_enabled)
                set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
 
-       set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
-
        sq->ico_wqe_info[pi].opcode     = MLX5_OPCODE_NOP;
        sq->ico_wqe_info[pi].num_wqebbs = 1;
        mlx5e_send_nop(sq, true); /* trigger mlx5e_post_rx_wqes() */
@@ -566,23 +677,8 @@ err_destroy_rq:
 
 static void mlx5e_close_rq(struct mlx5e_rq *rq)
 {
-       int tout = 0;
-       int err;
-
-       clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
+       set_bit(MLX5E_RQ_STATE_FLUSH, &rq->state);
        napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
-
-       err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
-       while (!mlx5_wq_ll_is_empty(&rq->wq) && !err &&
-              tout++ < MLX5_EN_QP_FLUSH_MAX_ITER)
-               msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
-
-       if (err || tout == MLX5_EN_QP_FLUSH_MAX_ITER)
-               set_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state);
-
-       /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
-       napi_synchronize(&rq->channel->napi);
-
        cancel_work_sync(&rq->am.work);
 
        mlx5e_disable_rq(rq);
@@ -821,7 +917,6 @@ static int mlx5e_open_sq(struct mlx5e_channel *c,
                goto err_disable_sq;
 
        if (sq->txq) {
-               set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
                netdev_tx_reset_queue(sq->txq);
                netif_tx_start_queue(sq->txq);
        }
@@ -845,38 +940,20 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq)
 
 static void mlx5e_close_sq(struct mlx5e_sq *sq)
 {
-       int tout = 0;
-       int err;
+       set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);
+       /* prevent netif_tx_wake_queue */
+       napi_synchronize(&sq->channel->napi);
 
        if (sq->txq) {
-               clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
-               /* prevent netif_tx_wake_queue */
-               napi_synchronize(&sq->channel->napi);
                netif_tx_disable_queue(sq->txq);
 
-               /* ensure hw is notified of all pending wqes */
+               /* last doorbell out, godspeed .. */
                if (mlx5e_sq_has_room_for(sq, 1))
                        mlx5e_send_nop(sq, true);
-
-               err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
-                                     MLX5_SQC_STATE_ERR, false, 0);
-               if (err)
-                       set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
-       }
-
-       /* wait till sq is empty, unless a TX timeout occurred on this SQ */
-       while (sq->cc != sq->pc &&
-              !test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)) {
-               msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
-               if (tout++ > MLX5_EN_QP_FLUSH_MAX_ITER)
-                       set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
        }
 
-       /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
-       napi_synchronize(&sq->channel->napi);
-
-       mlx5e_free_tx_descs(sq);
        mlx5e_disable_sq(sq);
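+       /* HW no longer owns any descriptor; reclaim what was left in flight */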
+       mlx5e_free_tx_descs(sq);
        mlx5e_destroy_sq(sq);
 }
 
@@ -1826,10 +1903,6 @@ int mlx5e_open_locked(struct net_device *netdev)
        netif_set_real_num_tx_queues(netdev, num_txqs);
        netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
 
-       err = mlx5e_set_dev_port_mtu(netdev);
-       if (err)
-               goto err_clear_state_opened_flag;
-
        err = mlx5e_open_channels(priv);
        if (err) {
                netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
@@ -1908,6 +1981,9 @@ int mlx5e_close(struct net_device *netdev)
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int err;
 
+       if (!netif_device_present(netdev))
+               return -ENODEV;
+
        mutex_lock(&priv->state_lock);
        err = mlx5e_close_locked(netdev);
        mutex_unlock(&priv->state_lock);
@@ -2022,14 +2098,15 @@ static void mlx5e_close_drop_rq(struct mlx5e_priv *priv)
 static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
 {
        struct mlx5_core_dev *mdev = priv->mdev;
-       u32 in[MLX5_ST_SZ_DW(create_tis_in)];
+       u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
        void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
 
-       memset(in, 0, sizeof(in));
-
        MLX5_SET(tisc, tisc, prio, tc << 1);
        MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);
 
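+       /* assumption: strict_lag_tx_port_affinity keeps this TIS's
+        * traffic pinned to its own port while the LAG is active
+        */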
+       if (mlx5_lag_is_lacp_owner(mdev))
+               MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
+
        return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
 }
 
@@ -2573,6 +2650,7 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
        u16 max_mtu;
        u16 min_mtu;
        int err = 0;
+       bool reset;
 
        mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
 
@@ -2588,13 +2666,18 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
 
        mutex_lock(&priv->state_lock);
 
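+       /* only the legacy (non-striding) RQ without LRO derives its RX
+        * buffer size from the MTU, so only then do the channels need a
+        * full close/reopen
+        */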
+       reset = !priv->params.lro_en &&
+               (priv->params.rq_wq_type !=
+                MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
+
        was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-       if (was_opened)
+       if (was_opened && reset)
                mlx5e_close_locked(netdev);
 
        netdev->mtu = new_mtu;
+       mlx5e_set_dev_port_mtu(netdev);
 
-       if (was_opened)
+       if (was_opened && reset)
                err = mlx5e_open_locked(netdev);
 
        mutex_unlock(&priv->state_lock);
@@ -2794,7 +2877,7 @@ static void mlx5e_tx_timeout(struct net_device *dev)
                if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
                        continue;
                sched_work = true;
-               set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
+               set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);
                netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
                           i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
        }
@@ -3228,7 +3311,8 @@ static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
 static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
 {
        struct mlx5_core_dev *mdev = priv->mdev;
-       u64 npages = priv->profile->max_nch(mdev) * MLX5_CHANNEL_MAX_NUM_MTTS;
+       u64 npages = MLX5E_REQUIRED_MTTS(priv->profile->max_nch(mdev),
+                                        BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW));
        int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
        void *mkc;
        u32 *in;
@@ -3240,6 +3324,8 @@ static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
 
        mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
 
+       npages = min_t(u32, ALIGN(U16_MAX, 4) * 2, npages);
+
        MLX5_SET(mkc, mkc, free, 1);
        MLX5_SET(mkc, mkc, umr_en, 1);
        MLX5_SET(mkc, mkc, lw, 1);
@@ -3250,7 +3336,7 @@ static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
        MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
        MLX5_SET64(mkc, mkc, len, npages << PAGE_SHIFT);
        MLX5_SET(mkc, mkc, translations_octword_size,
-                mlx5e_get_mtt_octw(npages));
+                MLX5_MTT_OCTW(npages));
        MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
 
        err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen);
@@ -3374,6 +3460,8 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
        struct mlx5_eswitch *esw = mdev->priv.eswitch;
        struct mlx5_eswitch_rep rep;
 
+       mlx5_lag_add(mdev, netdev);
+
        if (mlx5e_vxlan_allowed(mdev)) {
                rtnl_lock();
                udp_tunnel_get_rx_info(netdev);
@@ -3384,6 +3472,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
        queue_work(priv->wq, &priv->set_rx_mode_work);
 
        if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
+               mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
                rep.load = mlx5e_nic_rep_load;
                rep.unload = mlx5e_nic_rep_unload;
                rep.vport = 0;
@@ -3396,6 +3485,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
 {
        queue_work(priv->wq, &priv->set_rx_mode_work);
        mlx5e_disable_async_events(priv);
+       mlx5_lag_remove(priv->mdev);
 }
 
 static const struct mlx5e_profile mlx5e_nic_profile = {
@@ -3412,13 +3502,13 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
        .max_tc            = MLX5E_MAX_NUM_TC,
 };
 
-void *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
-                         const struct mlx5e_profile *profile, void *ppriv)
+struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
+                                      const struct mlx5e_profile *profile,
+                                      void *ppriv)
 {
+       int nch = profile->max_nch(mdev);
        struct net_device *netdev;
        struct mlx5e_priv *priv;
-       int nch = profile->max_nch(mdev);
-       int err;
 
        netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
                                    nch * profile->max_tc,
@@ -3436,12 +3526,31 @@ void *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
 
        priv->wq = create_singlethread_workqueue("mlx5e");
        if (!priv->wq)
-               goto err_free_netdev;
+               goto err_cleanup_nic;
+
+       return netdev;
+
+err_cleanup_nic:
+       profile->cleanup(priv);
+       free_netdev(netdev);
+
+       return NULL;
+}
+
+int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
+{
+       const struct mlx5e_profile *profile;
+       struct mlx5e_priv *priv;
+       int err;
+
+       priv = netdev_priv(netdev);
+       profile = priv->profile;
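+
+       /* clear the DESTROYING bit set by mlx5e_detach_netdev() so async
+        * events and deferred work are processed again
+        */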
+       clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
 
        err = mlx5e_create_umr_mkey(priv);
        if (err) {
                mlx5_core_err(mdev, "create umr mkey failed, %d\n", err);
-               goto err_destroy_wq;
+               goto out;
        }
 
        err = profile->init_tx(priv);
@@ -3462,20 +3571,18 @@ void *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
 
        mlx5e_init_l2_addr(priv);
 
-       err = register_netdev(netdev);
-       if (err) {
-               mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
-               goto err_dealloc_q_counters;
-       }
+       mlx5e_set_dev_port_mtu(netdev);
 
        if (profile->enable)
                profile->enable(priv);
 
-       return priv;
+       rtnl_lock();
+       if (netif_running(netdev))
+               mlx5e_open(netdev);
+       netif_device_attach(netdev);
+       rtnl_unlock();
 
-err_dealloc_q_counters:
-       mlx5e_destroy_q_counter(priv);
-       profile->cleanup_rx(priv);
+       return 0;
 
 err_close_drop_rq:
        mlx5e_close_drop_rq(priv);
@@ -3486,13 +3593,8 @@ err_cleanup_tx:
 err_destroy_umr_mkey:
        mlx5_core_destroy_mkey(mdev, &priv->umr_mkey);
 
-err_destroy_wq:
-       destroy_workqueue(priv->wq);
-
-err_free_netdev:
-       free_netdev(netdev);
-
-       return NULL;
+out:
+       return err;
 }
 
 static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
@@ -3500,30 +3602,98 @@ static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
        struct mlx5_eswitch *esw = mdev->priv.eswitch;
        int total_vfs = MLX5_TOTAL_VPORTS(mdev);
        int vport;
+       u8 mac[ETH_ALEN];
 
        if (!MLX5_CAP_GEN(mdev, vport_group_manager))
                return;
 
+       mlx5_query_nic_vport_mac_address(mdev, 0, mac);
+
        for (vport = 1; vport < total_vfs; vport++) {
                struct mlx5_eswitch_rep rep;
 
                rep.load = mlx5e_vport_rep_load;
                rep.unload = mlx5e_vport_rep_unload;
                rep.vport = vport;
+               ether_addr_copy(rep.hw_id, mac);
                mlx5_eswitch_register_vport_rep(esw, &rep);
        }
 }
 
+void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       const struct mlx5e_profile *profile = priv->profile;
+
+       set_bit(MLX5E_STATE_DESTROYING, &priv->state);
+       if (profile->disable)
+               profile->disable(priv);
+
+       flush_workqueue(priv->wq);
+
+       rtnl_lock();
+       if (netif_running(netdev))
+               mlx5e_close(netdev);
+       netif_device_detach(netdev);
+       rtnl_unlock();
+
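+       /* the netdev is quiesced; release its hardware resources */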
+       mlx5e_destroy_q_counter(priv);
+       profile->cleanup_rx(priv);
+       mlx5e_close_drop_rq(priv);
+       profile->cleanup_tx(priv);
+       mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey);
+       cancel_delayed_work_sync(&priv->update_stats_work);
+}
+
+/* The scope of mlx5e_attach and mlx5e_detach should be limited to
+ * creating/destroying hardware contexts and connecting them to the
+ * current netdev.
+ */
+static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
+{
+       struct mlx5e_priv *priv = vpriv;
+       struct net_device *netdev = priv->netdev;
+       int err;
+
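+       /* nothing to do if the netdev is already attached */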
+       if (netif_device_present(netdev))
+               return 0;
+
+       err = mlx5e_create_mdev_resources(mdev);
+       if (err)
+               return err;
+
+       err = mlx5e_attach_netdev(mdev, netdev);
+       if (err) {
+               mlx5e_destroy_mdev_resources(mdev);
+               return err;
+       }
+
+       return 0;
+}
+
+static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
+{
+       struct mlx5e_priv *priv = vpriv;
+       struct net_device *netdev = priv->netdev;
+
+       if (!netif_device_present(netdev))
+               return;
+
+       mlx5e_detach_netdev(mdev, netdev);
+       mlx5e_destroy_mdev_resources(mdev);
+}
+
 static void *mlx5e_add(struct mlx5_core_dev *mdev)
 {
        struct mlx5_eswitch *esw = mdev->priv.eswitch;
+       int total_vfs = MLX5_TOTAL_VPORTS(mdev);
        void *ppriv = NULL;
-       void *ret;
-
-       if (mlx5e_check_required_hca_cap(mdev))
-               return NULL;
+       void *priv;
+       int vport;
+       int err;
+       struct net_device *netdev;
 
-       if (mlx5e_create_mdev_resources(mdev))
+       err = mlx5e_check_required_hca_cap(mdev);
+       if (err)
                return NULL;
 
        mlx5e_register_vport_rep(mdev);
@@ -3531,12 +3701,39 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
        if (MLX5_CAP_GEN(mdev, vport_group_manager))
                ppriv = &esw->offloads.vport_reps[0];
 
-       ret = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, ppriv);
-       if (!ret) {
-               mlx5e_destroy_mdev_resources(mdev);
-               return NULL;
+       netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, ppriv);
+       if (!netdev) {
+               mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
+               goto err_unregister_reps;
+       }
+
+       priv = netdev_priv(netdev);
+
+       err = mlx5e_attach(mdev, priv);
+       if (err) {
+               mlx5_core_err(mdev, "mlx5e_attach failed, %d\n", err);
+               goto err_destroy_netdev;
+       }
+
+       err = register_netdev(netdev);
+       if (err) {
+               mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
+               goto err_detach;
        }
-       return ret;
+
+       return priv;
+
+err_detach:
+       mlx5e_detach(mdev, priv);
+
+err_destroy_netdev:
+       mlx5e_destroy_netdev(mdev, priv);
+
+err_unregister_reps:
+       for (vport = 1; vport < total_vfs; vport++)
+               mlx5_eswitch_unregister_vport_rep(esw, vport);
+
+       return NULL;
 }
 
 void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv)
@@ -3544,30 +3741,11 @@ void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv)
        const struct mlx5e_profile *profile = priv->profile;
        struct net_device *netdev = priv->netdev;
 
-       set_bit(MLX5E_STATE_DESTROYING, &priv->state);
-       if (profile->disable)
-               profile->disable(priv);
-
-       flush_workqueue(priv->wq);
-       if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
-               netif_device_detach(netdev);
-               mlx5e_close(netdev);
-       } else {
-               unregister_netdev(netdev);
-       }
-
-       mlx5e_destroy_q_counter(priv);
-       profile->cleanup_rx(priv);
-       mlx5e_close_drop_rq(priv);
-       profile->cleanup_tx(priv);
-       mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey);
-       cancel_delayed_work_sync(&priv->update_stats_work);
+       unregister_netdev(netdev);
        destroy_workqueue(priv->wq);
        if (profile->cleanup)
                profile->cleanup(priv);
-
-       if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state))
-               free_netdev(netdev);
+       free_netdev(netdev);
 }
 
 static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
@@ -3577,12 +3755,11 @@ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
        struct mlx5e_priv *priv = vpriv;
        int vport;
 
-       mlx5e_destroy_netdev(mdev, priv);
-
        for (vport = 1; vport < total_vfs; vport++)
                mlx5_eswitch_unregister_vport_rep(esw, vport);
 
-       mlx5e_destroy_mdev_resources(mdev);
+       mlx5e_detach(mdev, vpriv);
+       mlx5e_destroy_netdev(mdev, priv);
 }
 
 static void *mlx5e_get_netdev(void *vpriv)
@@ -3595,6 +3772,8 @@ static void *mlx5e_get_netdev(void *vpriv)
 static struct mlx5_interface mlx5e_interface = {
        .add       = mlx5e_add,
        .remove    = mlx5e_remove,
+       .attach    = mlx5e_attach,
+       .detach    = mlx5e_detach,
        .event     = mlx5e_async_event,
        .protocol  = MLX5_INTERFACE_PROTOCOL_ETH,
        .get_dev   = mlx5e_get_netdev,