net/mlx5e: Direct TIR per RQ
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 0c53236..04ad659 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -109,6 +109,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
                s->lro_bytes    += rq_stats->lro_bytes;
                s->rx_csum_none += rq_stats->csum_none;
                s->rx_csum_sw   += rq_stats->csum_sw;
+               s->rx_csum_inner += rq_stats->csum_inner;
                s->rx_wqe_err   += rq_stats->wqe_err;
                s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
                s->rx_mpwqe_frag   += rq_stats->mpwqe_frag;
@@ -135,6 +136,10 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
        s->tx_csum_offload = s->tx_packets - tx_offload_none - s->tx_csum_inner;
        s->rx_csum_good    = s->rx_packets - s->rx_csum_none -
                             s->rx_csum_sw;
+
+       s->link_down_events = MLX5_GET(ppcnt_reg,
+                               priv->stats.pport.phy_counters,
+                               counter_set.phys_layer_cntrs.link_down_events);
 }
 
 static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
@@ -161,6 +166,7 @@ static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct mlx5_core_dev *mdev = priv->mdev;
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+       int prio;
        void *out;
        u32 *in;
 
@@ -182,6 +188,18 @@ static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
        MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
 
+       out = pstats->phy_counters;
+       MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
+       mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+
+       MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
+       for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
+               out = pstats->per_prio_counters[prio];
+               MLX5_SET(ppcnt_reg, in, prio_tc, prio);
+               mlx5_core_access_reg(mdev, in, sz, out, sz,
+                                    MLX5_REG_PPCNT, 0, 0);
+       }
+
 free_out:
        kvfree(in);
 }
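
The two new PPCNT queries fill pstats->phy_counters and one raw buffer per priority; individual fields are pulled out later with the MLX5_GET accessors, as the link_down_events read in mlx5e_update_sw_counters() above shows. A minimal sketch of reading a single per-priority field, assuming the usual eth_per_prio_grp_data_layout names from mlx5_ifc.h (rx_pause is only illustrative):

        /* sketch: extract one per-priority counter from the buffer queried above */
        u64 rx_pause = MLX5_GET64(ppcnt_reg, pstats->per_prio_counters[prio],
                                  counter_set.eth_per_prio_grp_data_layout.rx_pause);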
@@ -199,10 +217,10 @@ static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
 
 void mlx5e_update_stats(struct mlx5e_priv *priv)
 {
-       mlx5e_update_sw_counters(priv);
        mlx5e_update_q_counter(priv);
        mlx5e_update_vport_counters(priv);
        mlx5e_update_pport_counters(priv);
+       mlx5e_update_sw_counters(priv);
 }
 
 static void mlx5e_update_stats_work(struct work_struct *work)
@@ -371,6 +389,7 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
        MLX5_SET(rqc,  rqc, cqn,                rq->cq.mcq.cqn);
        MLX5_SET(rqc,  rqc, state,              MLX5_RQC_STATE_RST);
        MLX5_SET(rqc,  rqc, flush_in_error_en,  1);
+       MLX5_SET(rqc,  rqc, vsd, priv->params.vlan_strip_disable);
        MLX5_SET(wq,   wq,  log_wq_pg_sz,       rq->wq_ctrl.buf.page_shift -
                                                MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(wq, wq,  dbr_addr,           rq->wq_ctrl.db.dma);
@@ -385,7 +404,8 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
        return err;
 }
 
-static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
+static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
+                                int next_state)
 {
        struct mlx5e_channel *c = rq->channel;
        struct mlx5e_priv *priv = c->priv;
@@ -413,6 +433,36 @@ static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
        return err;
 }
 
+static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
+{
+       struct mlx5e_channel *c = rq->channel;
+       struct mlx5e_priv *priv = c->priv;
+       struct mlx5_core_dev *mdev = priv->mdev;
+
+       void *in;
+       void *rqc;
+       int inlen;
+       int err;
+
+       inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
+       in = mlx5_vzalloc(inlen);
+       if (!in)
+               return -ENOMEM;
+
+       rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+
+       MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
+       MLX5_SET64(modify_rq_in, in, modify_bitmask, MLX5_RQ_BITMASK_VSD);
+       MLX5_SET(rqc, rqc, vsd, vsd);
+       MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
+
+       err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
+
+       kvfree(in);
+
+       return err;
+}
+
 static void mlx5e_disable_rq(struct mlx5e_rq *rq)
 {
        mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn);
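
mlx5e_modify_rq_vsd() issues a MODIFY_RQ command that changes only the VSD (VLAN Strip Disable) bit: the RQ stays in the RDY state and MLX5_RQ_BITMASK_VSD (assumed to be defined alongside the other RQ modify bitmask bits) selects the single field being updated. A minimal usage sketch, mirroring what mlx5e_modify_rqs_vsd() further down does for every channel:

        /* sketch: disable VLAN stripping on one RQ that is already RDY;
         * vsd == true means "VLAN Strip Disable" in the RQ context
         */
        err = mlx5e_modify_rq_vsd(&priv->channel[i]->rq, true);
        if (err)
                netdev_err(priv->netdev, "failed to modify RQ vsd\n");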
@@ -451,7 +501,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
        if (err)
                goto err_destroy_rq;
 
-       err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
+       err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
        if (err)
                goto err_disable_rq;
 
@@ -476,7 +526,7 @@ static void mlx5e_close_rq(struct mlx5e_rq *rq)
        clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
        napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
 
-       mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
+       mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
        while (!mlx5_wq_ll_is_empty(&rq->wq))
                msleep(20);
 
@@ -1195,13 +1245,10 @@ static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
        param->icosq = true;
 }
 
-static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
-                                     struct mlx5e_channel_param *cparam)
+static void mlx5e_build_channel_param(struct mlx5e_priv *priv, struct mlx5e_channel_param *cparam)
 {
        u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
 
-       memset(cparam, 0, sizeof(*cparam));
-
        mlx5e_build_rq_param(priv, &cparam->rq);
        mlx5e_build_sq_param(priv, &cparam->sq);
        mlx5e_build_icosq_param(priv, &cparam->icosq, icosq_log_wq_sz);
@@ -1212,7 +1259,7 @@ static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
 
 static int mlx5e_open_channels(struct mlx5e_priv *priv)
 {
-       struct mlx5e_channel_param cparam;
+       struct mlx5e_channel_param *cparam;
        int nch = priv->params.num_channels;
        int err = -ENOMEM;
        int i;
@@ -1224,12 +1271,15 @@ static int mlx5e_open_channels(struct mlx5e_priv *priv)
        priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
                                      sizeof(struct mlx5e_sq *), GFP_KERNEL);
 
-       if (!priv->channel || !priv->txq_to_sq_map)
+       cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
+
+       if (!priv->channel || !priv->txq_to_sq_map || !cparam)
                goto err_free_txq_to_sq_map;
 
-       mlx5e_build_channel_param(priv, &cparam);
+       mlx5e_build_channel_param(priv, cparam);
+
        for (i = 0; i < nch; i++) {
-               err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
+               err = mlx5e_open_channel(priv, i, cparam, &priv->channel[i]);
                if (err)
                        goto err_close_channels;
        }
@@ -1240,6 +1290,7 @@ static int mlx5e_open_channels(struct mlx5e_priv *priv)
                        goto err_close_channels;
        }
 
+       kfree(cparam);
        return 0;
 
 err_close_channels:
@@ -1249,6 +1300,7 @@ err_close_channels:
 err_free_txq_to_sq_map:
        kfree(priv->txq_to_sq_map);
        kfree(priv->channel);
+       kfree(cparam);
 
        return err;
 }
@@ -1288,48 +1340,36 @@ static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
 
        for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
                int ix = i;
+               u32 rqn;
 
                if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
                        ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);
 
                ix = priv->params.indirection_rqt[ix];
-               MLX5_SET(rqtc, rqtc, rq_num[i],
-                        test_bit(MLX5E_STATE_OPENED, &priv->state) ?
-                        priv->channel[ix]->rq.rqn :
-                        priv->drop_rq.rqn);
+               rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
+                               priv->channel[ix]->rq.rqn :
+                               priv->drop_rq.rqn;
+               MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
        }
 }
 
-static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, void *rqtc,
-                               enum mlx5e_rqt_ix rqt_ix)
+static void mlx5e_fill_direct_rqt_rqn(struct mlx5e_priv *priv, void *rqtc,
+                                     int ix)
 {
+       u32 rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
+                       priv->channel[ix]->rq.rqn :
+                       priv->drop_rq.rqn;
 
-       switch (rqt_ix) {
-       case MLX5E_INDIRECTION_RQT:
-               mlx5e_fill_indir_rqt_rqns(priv, rqtc);
-
-               break;
-
-       default: /* MLX5E_SINGLE_RQ_RQT */
-               MLX5_SET(rqtc, rqtc, rq_num[0],
-                        test_bit(MLX5E_STATE_OPENED, &priv->state) ?
-                        priv->channel[0]->rq.rqn :
-                        priv->drop_rq.rqn);
-
-               break;
-       }
+       MLX5_SET(rqtc, rqtc, rq_num[0], rqn);
 }
 
-static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
+static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, int ix, u32 *rqtn)
 {
        struct mlx5_core_dev *mdev = priv->mdev;
-       u32 *in;
        void *rqtc;
        int inlen;
-       int sz;
        int err;
-
-       sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;
+       u32 *in;
 
        inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
        in = mlx5_vzalloc(inlen);
@@ -1341,26 +1381,73 @@ static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
        MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
        MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
 
-       mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);
+       if (sz > 1) /* RSS */
+               mlx5e_fill_indir_rqt_rqns(priv, rqtc);
+       else
+               mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);
 
-       err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn[rqt_ix]);
+       err = mlx5_core_create_rqt(mdev, in, inlen, rqtn);
 
        kvfree(in);
+       return err;
+}
+
+static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, u32 rqtn)
+{
+       mlx5_core_destroy_rqt(priv->mdev, rqtn);
+}
+
+static int mlx5e_create_rqts(struct mlx5e_priv *priv)
+{
+       int nch = mlx5e_get_max_num_channels(priv->mdev);
+       u32 *rqtn;
+       int err;
+       int ix;
+
+       /* Indirect RQT */
+       rqtn = &priv->indir_rqtn;
+       err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0, rqtn);
+       if (err)
+               return err;
+
+       /* Direct RQTs */
+       for (ix = 0; ix < nch; ix++) {
+               rqtn = &priv->direct_tir[ix].rqtn;
+               err = mlx5e_create_rqt(priv, 1 /*size */, ix, rqtn);
+               if (err)
+                       goto err_destroy_rqts;
+       }
+
+       return 0;
+
+err_destroy_rqts:
+       for (ix--; ix >= 0; ix--)
+               mlx5e_destroy_rqt(priv, priv->direct_tir[ix].rqtn);
+
+       mlx5e_destroy_rqt(priv, priv->indir_rqtn);
 
        return err;
 }
 
-int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
+static void mlx5e_destroy_rqts(struct mlx5e_priv *priv)
+{
+       int nch = mlx5e_get_max_num_channels(priv->mdev);
+       int i;
+
+       for (i = 0; i < nch; i++)
+               mlx5e_destroy_rqt(priv, priv->direct_tir[i].rqtn);
+
+       mlx5e_destroy_rqt(priv, priv->indir_rqtn);
+}
+
+int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix)
 {
        struct mlx5_core_dev *mdev = priv->mdev;
-       u32 *in;
        void *rqtc;
        int inlen;
-       int sz;
+       u32 *in;
        int err;
 
-       sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;
-
        inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
        in = mlx5_vzalloc(inlen);
        if (!in)
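
The create/destroy helpers above rely on per-channel bookkeeping that this patch presumably adds to struct mlx5e_priv in en.h; a sketch of the assumed layout, with field names taken from their uses in this file:

        /* assumed en.h additions (sketch) */
        struct mlx5e_direct_tir {
                u32 tirn;  /* direct TIR, one per channel */
                u32 rqtn;  /* single-entry RQT feeding that TIR */
        };

        /* inside struct mlx5e_priv */
        u32                     indir_rqtn;                        /* RSS RQT, MLX5E_INDIR_RQT_SIZE entries */
        u32                     indir_tirn[MLX5E_NUM_INDIR_TIRS];
        struct mlx5e_direct_tir direct_tir[MLX5E_MAX_NUM_CHANNELS]; /* sized by the max channel count */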
@@ -1369,27 +1456,31 @@ int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
        rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
 
        MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
-
-       mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);
+       if (sz > 1) /* RSS */
+               mlx5e_fill_indir_rqt_rqns(priv, rqtc);
+       else
+               mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);
 
        MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
 
-       err = mlx5_core_modify_rqt(mdev, priv->rqtn[rqt_ix], in, inlen);
+       err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);
 
        kvfree(in);
 
        return err;
 }
 
-static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
-{
-       mlx5_core_destroy_rqt(priv->mdev, priv->rqtn[rqt_ix]);
-}
-
 static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
 {
-       mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
-       mlx5e_redirect_rqt(priv, MLX5E_SINGLE_RQ_RQT);
+       u32 rqtn;
+       int ix;
+
+       rqtn = priv->indir_rqtn;
+       mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
+       for (ix = 0; ix < priv->params.num_channels; ix++) {
+               rqtn = priv->direct_tir[ix].rqtn;
+               mlx5e_redirect_rqt(priv, rqtn, 1, ix);
+       }
 }
 
 static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
@@ -1434,6 +1525,7 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
        int inlen;
        int err;
        int tt;
+       int ix;
 
        inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
        in = mlx5_vzalloc(inlen);
@@ -1445,23 +1537,32 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
 
        mlx5e_build_tir_ctx_lro(tirc, priv);
 
-       for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
-               err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen);
+       for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+               err = mlx5_core_modify_tir(mdev, priv->indir_tirn[tt], in,
+                                          inlen);
+               if (err)
+                       goto free_in;
+       }
+
+       for (ix = 0; ix < mlx5e_get_max_num_channels(mdev); ix++) {
+               err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
+                                          in, inlen);
                if (err)
-                       break;
+                       goto free_in;
        }
 
+free_in:
        kvfree(in);
 
        return err;
 }
 
-static int mlx5e_refresh_tir_self_loopback_enable(struct mlx5_core_dev *mdev,
-                                                 u32 tirn)
+static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
 {
        void *in;
        int inlen;
        int err;
+       int i;
 
        inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
        in = mlx5_vzalloc(inlen);
@@ -1470,46 +1571,70 @@ static int mlx5e_refresh_tir_self_loopback_enable(struct mlx5_core_dev *mdev,
 
        MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
 
-       err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
+       for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) {
+               err = mlx5_core_modify_tir(priv->mdev, priv->indir_tirn[i], in,
+                                          inlen);
+               if (err)
+                       return err;
+       }
+
+       for (i = 0; i < priv->params.num_channels; i++) {
+               err = mlx5_core_modify_tir(priv->mdev,
+                                          priv->direct_tir[i].tirn, in,
+                                          inlen);
+               if (err)
+                       return err;
+       }
 
        kvfree(in);
 
-       return err;
+       return 0;
 }
 
-static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
+static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
 {
+       struct mlx5_core_dev *mdev = priv->mdev;
+       u16 hw_mtu = MLX5E_SW2HW_MTU(mtu);
        int err;
-       int i;
 
-       for (i = 0; i < MLX5E_NUM_TT; i++) {
-               err = mlx5e_refresh_tir_self_loopback_enable(priv->mdev,
-                                                            priv->tirn[i]);
-               if (err)
-                       return err;
-       }
+       err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
+       if (err)
+               return err;
 
+       /* Update vport context MTU */
+       mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
        return 0;
 }
 
+static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
+{
+       struct mlx5_core_dev *mdev = priv->mdev;
+       u16 hw_mtu = 0;
+       int err;
+
+       err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
+       if (err || !hw_mtu) /* fallback to port oper mtu */
+               mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
+
+       *mtu = MLX5E_HW2SW_MTU(hw_mtu);
+}
+
 static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
-       struct mlx5_core_dev *mdev = priv->mdev;
-       int hw_mtu;
+       u16 mtu;
        int err;
 
-       err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
+       err = mlx5e_set_mtu(priv, netdev->mtu);
        if (err)
                return err;
 
-       mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
-
-       if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu)
-               netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n",
-                           __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu);
+       mlx5e_query_mtu(priv, &mtu);
+       if (mtu != netdev->mtu)
+               netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
+                           __func__, mtu, netdev->mtu);
 
-       netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu);
+       netdev->mtu = mtu;
        return 0;
 }
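
mlx5e_set_mtu() and mlx5e_query_mtu() work in hardware MTU units, which include the L2 overhead. A worked example, assuming the usual en.h conversion macros (not the literal en.h text):

        /* assumed definitions:
         *   MLX5E_SW2HW_MTU(swmtu) == swmtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN
         *   MLX5E_HW2SW_MTU(hwmtu) == hwmtu - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN
         * so netdev->mtu == 1500 programs a hardware MTU of 1522
         * (14 + 4 + 4 bytes of Ethernet header, VLAN tag and FCS),
         * and mlx5e_query_mtu() converts back before mlx5e_set_dev_port_mtu()
         * compares the result against netdev->mtu.
         */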
 
@@ -1773,7 +1898,8 @@ static void mlx5e_destroy_tises(struct mlx5e_priv *priv)
                mlx5e_destroy_tis(priv, tc);
 }
 
-static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
+static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
+                                     enum mlx5e_traffic_types tt)
 {
        void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
 
@@ -1794,19 +1920,8 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
        mlx5e_build_tir_ctx_lro(tirc, priv);
 
        MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
-
-       switch (tt) {
-       case MLX5E_TT_ANY:
-               MLX5_SET(tirc, tirc, indirect_table,
-                        priv->rqtn[MLX5E_SINGLE_RQ_RQT]);
-               MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
-               break;
-       default:
-               MLX5_SET(tirc, tirc, indirect_table,
-                        priv->rqtn[MLX5E_INDIRECTION_RQT]);
-               mlx5e_build_tir_ctx_hash(tirc, priv);
-               break;
-       }
+       MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqtn);
+       mlx5e_build_tir_ctx_hash(tirc, priv);
 
        switch (tt) {
        case MLX5E_TT_IPV4_TCP:
@@ -1886,64 +2001,107 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP);
                break;
+       default:
+               WARN_ONCE(true,
+                         "mlx5e_build_indir_tir_ctx: bad traffic type!\n");
        }
 }
 
-static int mlx5e_create_tir(struct mlx5e_priv *priv, int tt)
+static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
+                                      u32 rqtn)
 {
-       struct mlx5_core_dev *mdev = priv->mdev;
-       u32 *in;
+       MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
+
+       mlx5e_build_tir_ctx_lro(tirc, priv);
+
+       MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
+       MLX5_SET(tirc, tirc, indirect_table, rqtn);
+       MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
+}
+
+static int mlx5e_create_tirs(struct mlx5e_priv *priv)
+{
+       int nch = mlx5e_get_max_num_channels(priv->mdev);
        void *tirc;
        int inlen;
+       u32 *tirn;
        int err;
+       u32 *in;
+       int ix;
+       int tt;
 
        inlen = MLX5_ST_SZ_BYTES(create_tir_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;
 
-       tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+       /* indirect tirs */
+       for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+               memset(in, 0, inlen);
+               tirn = &priv->indir_tirn[tt];
+               tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+               mlx5e_build_indir_tir_ctx(priv, tirc, tt);
+               err = mlx5_core_create_tir(priv->mdev, in, inlen, tirn);
+               if (err)
+                       goto err_destroy_tirs;
+       }
 
-       mlx5e_build_tir_ctx(priv, tirc, tt);
+       /* direct tirs */
+       for (ix = 0; ix < nch; ix++) {
+               memset(in, 0, inlen);
+               tirn = &priv->direct_tir[ix].tirn;
+               tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+               mlx5e_build_direct_tir_ctx(priv, tirc,
+                                          priv->direct_tir[ix].rqtn);
+               err = mlx5_core_create_tir(priv->mdev, in, inlen, tirn);
+               if (err)
+                       goto err_destroy_ch_tirs;
+       }
 
-       err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
+       kvfree(in);
+
+       return 0;
+
+err_destroy_ch_tirs:
+       for (ix--; ix >= 0; ix--)
+               mlx5_core_destroy_tir(priv->mdev, priv->direct_tir[ix].tirn);
+
+err_destroy_tirs:
+       for (tt--; tt >= 0; tt--)
+               mlx5_core_destroy_tir(priv->mdev, priv->indir_tirn[tt]);
 
        kvfree(in);
 
        return err;
 }
 
-static void mlx5e_destroy_tir(struct mlx5e_priv *priv, int tt)
+static void mlx5e_destroy_tirs(struct mlx5e_priv *priv)
 {
-       mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
+       int nch = mlx5e_get_max_num_channels(priv->mdev);
+       int i;
+
+       for (i = 0; i < nch; i++)
+               mlx5_core_destroy_tir(priv->mdev, priv->direct_tir[i].tirn);
+
+       for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
+               mlx5_core_destroy_tir(priv->mdev, priv->indir_tirn[i]);
 }
 
-static int mlx5e_create_tirs(struct mlx5e_priv *priv)
+int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd)
 {
-       int err;
+       int err = 0;
        int i;
 
-       for (i = 0; i < MLX5E_NUM_TT; i++) {
-               err = mlx5e_create_tir(priv, i);
+       if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+               return 0;
+
+       for (i = 0; i < priv->params.num_channels; i++) {
+               err = mlx5e_modify_rq_vsd(&priv->channel[i]->rq, vsd);
                if (err)
-                       goto err_destroy_tirs;
+                       return err;
        }
 
        return 0;
-
-err_destroy_tirs:
-       for (i--; i >= 0; i--)
-               mlx5e_destroy_tir(priv, i);
-
-       return err;
-}
-
-static void mlx5e_destroy_tirs(struct mlx5e_priv *priv)
-{
-       int i;
-
-       for (i = 0; i < MLX5E_NUM_TT; i++)
-               mlx5e_destroy_tir(priv, i);
 }
 
 static int mlx5e_setup_tc(struct net_device *netdev, u8 tc)
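
Each direct TIR dispatches through a single-entry RQT with the INVERTED_XOR8 hash function, so it always resolves to exactly one RQ; together these replace the old MLX5E_TT_ANY single-RQ TIR and give flow steering a per-ring destination. A minimal sketch of targeting a specific ring, assuming the standard mlx5_flow_destination API (the surrounding rule setup and ix are illustrative):

        /* sketch: steer a matched flow to channel ix's RQ via its direct TIR */
        struct mlx5_flow_destination dest = {
                .type    = MLX5_FLOW_DESTINATION_TYPE_TIR,
                .tir_num = priv->direct_tir[ix].tirn,
        };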
@@ -2061,68 +2219,156 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr)
        return 0;
 }
 
-static int mlx5e_set_features(struct net_device *netdev,
-                             netdev_features_t features)
+#define MLX5E_SET_FEATURE(netdev, feature, enable)     \
+       do {                                            \
+               if (enable)                             \
+                       netdev->features |= feature;    \
+               else                                    \
+                       netdev->features &= ~feature;   \
+       } while (0)
+
+typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
+
+static int set_feature_lro(struct net_device *netdev, bool enable)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
-       int err = 0;
-       netdev_features_t changes = features ^ netdev->features;
+       bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+       int err;
 
        mutex_lock(&priv->state_lock);
 
-       if (changes & NETIF_F_LRO) {
-               bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-
-               if (was_opened && (priv->params.rq_wq_type ==
-                                  MLX5_WQ_TYPE_LINKED_LIST))
-                       mlx5e_close_locked(priv->netdev);
-
-               priv->params.lro_en = !!(features & NETIF_F_LRO);
-               err = mlx5e_modify_tirs_lro(priv);
-               if (err)
-                       mlx5_core_warn(priv->mdev, "lro modify failed, %d\n",
-                                      err);
+       if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
+               mlx5e_close_locked(priv->netdev);
 
-               if (was_opened && (priv->params.rq_wq_type ==
-                                  MLX5_WQ_TYPE_LINKED_LIST))
-                       err = mlx5e_open_locked(priv->netdev);
+       priv->params.lro_en = enable;
+       err = mlx5e_modify_tirs_lro(priv);
+       if (err) {
+               netdev_err(netdev, "lro modify failed, %d\n", err);
+               priv->params.lro_en = !enable;
        }
 
+       if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
+               mlx5e_open_locked(priv->netdev);
+
        mutex_unlock(&priv->state_lock);
 
-       if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) {
-               if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
-                       mlx5e_enable_vlan_filter(priv);
-               else
-                       mlx5e_disable_vlan_filter(priv);
-       }
+       return err;
+}
+
+static int set_feature_vlan_filter(struct net_device *netdev, bool enable)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
 
-       if ((changes & NETIF_F_HW_TC) && !(features & NETIF_F_HW_TC) &&
-           mlx5e_tc_num_filters(priv)) {
+       if (enable)
+               mlx5e_enable_vlan_filter(priv);
+       else
+               mlx5e_disable_vlan_filter(priv);
+
+       return 0;
+}
+
+static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+
+       if (!enable && mlx5e_tc_num_filters(priv)) {
                netdev_err(netdev,
                           "Active offloaded tc filters, can't turn hw_tc_offload off\n");
                return -EINVAL;
        }
 
+       return 0;
+}
+
+static int set_feature_rx_all(struct net_device *netdev, bool enable)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       struct mlx5_core_dev *mdev = priv->mdev;
+
+       return mlx5_set_port_fcs(mdev, !enable);
+}
+
+static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       int err;
+
+       mutex_lock(&priv->state_lock);
+
+       priv->params.vlan_strip_disable = !enable;
+       err = mlx5e_modify_rqs_vsd(priv, !enable);
+       if (err)
+               priv->params.vlan_strip_disable = enable;
+
+       mutex_unlock(&priv->state_lock);
+
        return err;
 }
 
+static int mlx5e_handle_feature(struct net_device *netdev,
+                               netdev_features_t wanted_features,
+                               netdev_features_t feature,
+                               mlx5e_feature_handler feature_handler)
+{
+       netdev_features_t changes = wanted_features ^ netdev->features;
+       bool enable = !!(wanted_features & feature);
+       int err;
+
+       if (!(changes & feature))
+               return 0;
+
+       err = feature_handler(netdev, enable);
+       if (err) {
+               netdev_err(netdev, "%s feature 0x%llx failed err %d\n",
+                          enable ? "Enable" : "Disable", feature, err);
+               return err;
+       }
+
+       MLX5E_SET_FEATURE(netdev, feature, enable);
+       return 0;
+}
+
+static int mlx5e_set_features(struct net_device *netdev,
+                             netdev_features_t features)
+{
+       int err;
+
+       err  = mlx5e_handle_feature(netdev, features, NETIF_F_LRO,
+                                   set_feature_lro);
+       err |= mlx5e_handle_feature(netdev, features,
+                                   NETIF_F_HW_VLAN_CTAG_FILTER,
+                                   set_feature_vlan_filter);
+       err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC,
+                                   set_feature_tc_num_filters);
+       err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL,
+                                   set_feature_rx_all);
+       err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX,
+                                   set_feature_rx_vlan);
+
+       return err ? -EINVAL : 0;
+}
+
+#define MXL5_HW_MIN_MTU 64
+#define MXL5E_MIN_MTU (MXL5_HW_MIN_MTU + ETH_FCS_LEN)
+
 static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
        bool was_opened;
-       int max_mtu;
+       u16 max_mtu;
+       u16 min_mtu;
        int err = 0;
 
        mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
 
        max_mtu = MLX5E_HW2SW_MTU(max_mtu);
+       min_mtu = MLX5E_HW2SW_MTU(MXL5E_MIN_MTU);
 
-       if (new_mtu > max_mtu) {
+       if (new_mtu > max_mtu || new_mtu < min_mtu) {
                netdev_err(netdev,
-                          "%s: Bad MTU (%d) > (%d) Max\n",
-                          __func__, new_mtu, max_mtu);
+                          "%s: Bad MTU (%d), valid range is: [%d..%d]\n",
+                          __func__, new_mtu, min_mtu, max_mtu);
                return -EINVAL;
        }
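
mlx5e_handle_feature() only invokes a handler when the corresponding feature bit actually changes, and only updates netdev->features when the handler succeeds. A sketch of wiring one more feature bit through the same path (set_feature_foo and NETIF_F_EXAMPLE are hypothetical placeholders):

static int set_feature_foo(struct net_device *netdev, bool enable)
{
        /* program the device for the new state; return 0 or -errno */
        return 0;
}

        /* then, in mlx5e_set_features(): */
        err |= mlx5e_handle_feature(netdev, features, NETIF_F_EXAMPLE,
                                    set_feature_foo);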
 
@@ -2493,6 +2739,8 @@ static void mlx5e_build_netdev(struct net_device *netdev)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
+       bool fcs_supported;
+       bool fcs_enabled;
 
        SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
 
@@ -2536,10 +2784,18 @@ static void mlx5e_build_netdev(struct net_device *netdev)
                netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
        }
 
+       mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
+
+       if (fcs_supported)
+               netdev->hw_features |= NETIF_F_RXALL;
+
        netdev->features          = netdev->hw_features;
        if (!priv->params.lro_en)
                netdev->features  &= ~NETIF_F_LRO;
 
+       if (fcs_enabled)
+               netdev->features  &= ~NETIF_F_RXALL;
+
 #define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
        if (FT_CAP(flow_modify_en) &&
            FT_CAP(modify_root) &&
@@ -2701,22 +2957,16 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
                goto err_destroy_tises;
        }
 
-       err = mlx5e_create_rqt(priv, MLX5E_INDIRECTION_RQT);
+       err = mlx5e_create_rqts(priv);
        if (err) {
-               mlx5_core_warn(mdev, "create rqt(INDIR) failed, %d\n", err);
+               mlx5_core_warn(mdev, "create rqts failed, %d\n", err);
                goto err_close_drop_rq;
        }
 
-       err = mlx5e_create_rqt(priv, MLX5E_SINGLE_RQ_RQT);
-       if (err) {
-               mlx5_core_warn(mdev, "create rqt(SINGLE) failed, %d\n", err);
-               goto err_destroy_rqt_indir;
-       }
-
        err = mlx5e_create_tirs(priv);
        if (err) {
                mlx5_core_warn(mdev, "create tirs failed, %d\n", err);
-               goto err_destroy_rqt_single;
+               goto err_destroy_rqts;
        }
 
        err = mlx5e_create_flow_tables(priv);
@@ -2745,8 +2995,11 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
                goto err_tc_cleanup;
        }
 
-       if (mlx5e_vxlan_allowed(mdev))
+       if (mlx5e_vxlan_allowed(mdev)) {
+               rtnl_lock();
                vxlan_get_rx_port(netdev);
+               rtnl_unlock();
+       }
 
        mlx5e_enable_async_events(priv);
        schedule_work(&priv->set_rx_mode_work);
@@ -2763,11 +3016,8 @@ err_dealloc_q_counters:
 err_destroy_tirs:
        mlx5e_destroy_tirs(priv);
 
-err_destroy_rqt_single:
-       mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
-
-err_destroy_rqt_indir:
-       mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
+err_destroy_rqts:
+       mlx5e_destroy_rqts(priv);
 
 err_close_drop_rq:
        mlx5e_close_drop_rq(priv);
@@ -2806,14 +3056,22 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
        schedule_work(&priv->set_rx_mode_work);
        mlx5e_disable_async_events(priv);
        flush_scheduled_work();
-       unregister_netdev(netdev);
+       if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
+               netif_device_detach(netdev);
+               mutex_lock(&priv->state_lock);
+               if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+                       mlx5e_close_locked(netdev);
+               mutex_unlock(&priv->state_lock);
+       } else {
+               unregister_netdev(netdev);
+       }
+
        mlx5e_tc_cleanup(priv);
        mlx5e_vxlan_cleanup(priv);
        mlx5e_destroy_q_counter(priv);
        mlx5e_destroy_flow_tables(priv);
        mlx5e_destroy_tirs(priv);
-       mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
-       mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
+       mlx5e_destroy_rqts(priv);
        mlx5e_close_drop_rq(priv);
        mlx5e_destroy_tises(priv);
        mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey);
@@ -2821,7 +3079,9 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
        mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn);
        mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
        mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
-       free_netdev(netdev);
+
+       if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state))
+               free_netdev(netdev);
 }
 
 static void *mlx5e_get_netdev(void *vpriv)