net/mlx5e: Timeout if SQ doesn't flush during close
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index f5c8d5d..b94c84b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
 #include "eswitch.h"
 #include "vxlan.h"
 
+enum {
+       MLX5_EN_QP_FLUSH_TIMEOUT_MS     = 5000,
+       MLX5_EN_QP_FLUSH_MSLEEP_QUANT   = 20,
+       MLX5_EN_QP_FLUSH_MAX_ITER       = MLX5_EN_QP_FLUSH_TIMEOUT_MS /
+                                         MLX5_EN_QP_FLUSH_MSLEEP_QUANT,
+};
+
 struct mlx5e_rq_param {
        u32                        rqc[MLX5_ST_SZ_DW(rqc)];
        struct mlx5_wq_param       wq;
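The enum introduced above encodes the flush budget as compile-time arithmetic: a 5000 ms total timeout polled in 20 ms quanta yields a limit of 250 iterations. A minimal user-space sketch of the same bounded-poll pattern (all names hypothetical, not part of the driver):

    #include <stdbool.h>
    #include <stdio.h>

    /* Same arithmetic as the enum above: 5000 / 20 = 250 iterations. */
    #define FLUSH_TIMEOUT_MS    5000
    #define FLUSH_QUANT_MS      20
    #define FLUSH_MAX_ITER      (FLUSH_TIMEOUT_MS / FLUSH_QUANT_MS)

    /* Hypothetical drain check; stands in for sq->cc == sq->pc. */
    static bool queue_drained(void)
    {
            return false;
    }

    int main(void)
    {
            int tout = 0;

            while (!queue_drained()) {
                    /* the driver sleeps msleep(FLUSH_QUANT_MS) here */
                    if (tout++ > FLUSH_MAX_ITER) {
                            puts("flush timed out");
                            break;
                    }
            }
            return 0;
    }
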
@@ -105,11 +112,11 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
 
                s->rx_packets   += rq_stats->packets;
                s->rx_bytes     += rq_stats->bytes;
-               s->lro_packets  += rq_stats->lro_packets;
-               s->lro_bytes    += rq_stats->lro_bytes;
+               s->rx_lro_packets += rq_stats->lro_packets;
+               s->rx_lro_bytes += rq_stats->lro_bytes;
                s->rx_csum_none += rq_stats->csum_none;
-               s->rx_csum_sw   += rq_stats->csum_sw;
-               s->rx_csum_inner += rq_stats->csum_inner;
+               s->rx_csum_complete += rq_stats->csum_complete;
+               s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
                s->rx_wqe_err   += rq_stats->wqe_err;
                s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
                s->rx_mpwqe_frag   += rq_stats->mpwqe_frag;
@@ -122,24 +129,23 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
 
                        s->tx_packets           += sq_stats->packets;
                        s->tx_bytes             += sq_stats->bytes;
-                       s->tso_packets          += sq_stats->tso_packets;
-                       s->tso_bytes            += sq_stats->tso_bytes;
-                       s->tso_inner_packets    += sq_stats->tso_inner_packets;
-                       s->tso_inner_bytes      += sq_stats->tso_inner_bytes;
+                       s->tx_tso_packets       += sq_stats->tso_packets;
+                       s->tx_tso_bytes         += sq_stats->tso_bytes;
+                       s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
+                       s->tx_tso_inner_bytes   += sq_stats->tso_inner_bytes;
                        s->tx_queue_stopped     += sq_stats->stopped;
                        s->tx_queue_wake        += sq_stats->wake;
                        s->tx_queue_dropped     += sq_stats->dropped;
-                       s->tx_csum_inner        += sq_stats->csum_offload_inner;
-                       tx_offload_none         += sq_stats->csum_offload_none;
+                       s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
+                       tx_offload_none         += sq_stats->csum_none;
                }
        }
 
        /* Update calculated offload counters */
-       s->tx_csum_offload = s->tx_packets - tx_offload_none - s->tx_csum_inner;
-       s->rx_csum_good    = s->rx_packets - s->rx_csum_none -
-                            s->rx_csum_sw;
+       s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner;
+       s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete;
 
-       s->link_down_events = MLX5_GET(ppcnt_reg,
+       s->link_down_events_phy = MLX5_GET(ppcnt_reg,
                                priv->stats.pport.phy_counters,
                                counter_set.phys_layer_cntrs.link_down_events);
 }
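The derived counters at the end of this hunk are plain subtraction over the aggregated totals: any packet not attributed to an explicitly counted checksum bucket falls into the remaining offload class. A user-space sketch of the same arithmetic with made-up totals (all values hypothetical):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical aggregates, standing in for the per-ring sums. */
            uint64_t rx_packets = 1000, rx_csum_none = 50, rx_csum_complete = 200;
            uint64_t tx_packets = 800, tx_csum_none = 30, tx_csum_partial_inner = 100;

            /* Same derivations as above: the leftovers after removing the
             * explicitly counted buckets. */
            uint64_t rx_csum_unnecessary = rx_packets - rx_csum_none - rx_csum_complete;
            uint64_t tx_csum_partial = tx_packets - tx_csum_none - tx_csum_partial_inner;

            printf("rx_csum_unnecessary=%" PRIu64 " tx_csum_partial=%" PRIu64 "\n",
                   rx_csum_unnecessary, tx_csum_partial);
            return 0;
    }
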
@@ -244,7 +250,7 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
 {
        struct mlx5e_priv *priv = vpriv;
 
-       if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
+       if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
                return;
 
        switch (event) {
@@ -260,12 +266,12 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
 
 static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
 {
-       set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+       set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
 }
 
 static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
 {
-       clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+       clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
        synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
 }
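Note the ordering in mlx5e_disable_async_events(): the bit is cleared first, then synchronize_irq() waits out any handler that already passed the test_bit() check, so no event processing can still be in flight once the function returns. A rough user-space analogue of the same flag-then-drain pattern (hypothetical names, pthreads standing in for IRQ context):

    #include <pthread.h>
    #include <stdatomic.h>

    static atomic_bool events_enabled = true;
    static pthread_mutex_t handler_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Analogue of mlx5e_async_event(): bail out if disabled. */
    static void handle_event(void)
    {
            pthread_mutex_lock(&handler_lock);
            if (atomic_load(&events_enabled)) {
                    /* ... process the event ... */
            }
            pthread_mutex_unlock(&handler_lock);
    }

    /* Analogue of mlx5e_disable_async_events(): clear the flag, then
     * wait for any handler already inside the critical section
     * (synchronize_irq() plays this role in the driver). */
    static void disable_events(void)
    {
            atomic_store(&events_enabled, false);
            pthread_mutex_lock(&handler_lock);
            pthread_mutex_unlock(&handler_lock);
    }

    int main(void)
    {
            handle_event();
            disable_events();
            return 0;
    }
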
 
@@ -580,7 +586,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
        void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
        int err;
 
-       err = mlx5_alloc_map_uar(mdev, &sq->uar, true);
+       err = mlx5_alloc_map_uar(mdev, &sq->uar, !!MLX5_CAP_GEN(mdev, bf));
        if (err)
                return err;
 
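This hunk stops requesting a BlueFlame mapping unconditionally and instead forwards the device capability, normalized with !! so that any non-zero bitfield value becomes exactly true. A trivial sketch of the idiom (hypothetical names):

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical capability read; MLX5_CAP_GEN(mdev, bf) likewise
     * yields a bitfield value, not necessarily 0 or 1. */
    static unsigned int read_bf_cap(void)
    {
            return 0x4;
    }

    static int alloc_map_uar(bool map_blueflame)
    {
            printf("mapping UAR %s BlueFlame\n",
                   map_blueflame ? "with" : "without");
            return 0;
    }

    int main(void)
    {
            /* !! collapses any non-zero value to 1 before the bool parameter. */
            return alloc_map_uar(!!read_bf_cap());
    }
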
@@ -783,6 +789,9 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq)
 
 static void mlx5e_close_sq(struct mlx5e_sq *sq)
 {
+       int tout = 0;
+       int err;
+
        if (sq->txq) {
                clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
                /* prevent netif_tx_wake_queue */
@@ -793,15 +802,24 @@ static void mlx5e_close_sq(struct mlx5e_sq *sq)
                if (mlx5e_sq_has_room_for(sq, 1))
                        mlx5e_send_nop(sq, true);
 
-               mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
+               err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
+                                     MLX5_SQC_STATE_ERR);
+               if (err)
+                       set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
        }
 
-       while (sq->cc != sq->pc) /* wait till sq is empty */
-               msleep(20);
+       /* wait till sq is empty, unless a TX timeout occurred on this SQ */
+       while (sq->cc != sq->pc &&
+              !test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)) {
+               msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
+               if (tout++ > MLX5_EN_QP_FLUSH_MAX_ITER)
+                       set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
+       }
 
        /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
        napi_synchronize(&sq->channel->napi);
 
+       mlx5e_free_tx_descs(sq);
        mlx5e_disable_sq(sq);
        mlx5e_destroy_sq(sq);
 }
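
Taken together, the close path now degrades gracefully instead of looping forever: if moving the SQ to the error state fails, or the queue never drains, the loop gives up after MLX5_EN_QP_FLUSH_MAX_ITER quanta and mlx5e_free_tx_descs() reclaims whatever the hardware never completed. A condensed user-space sketch of that shape (all names hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    #define FLUSH_QUANT_MS      20
    #define FLUSH_MAX_ITER      250     /* 5000 ms / 20 ms, as in the enum above */

    struct sq {
            unsigned int cc;    /* consumer counter: completed work */
            unsigned int pc;    /* producer counter: posted work */
            bool timed_out;
    };

    /* Stand-in for hardware progress between polls. */
    static void poll_completions(struct sq *sq)
    {
            if (sq->cc != sq->pc)
                    sq->cc++;
    }

    /* Analogue of mlx5e_free_tx_descs(): reclaim unfinished entries. */
    static void free_leftover_descs(struct sq *sq)
    {
            printf("freeing %u unfinished descriptors\n", sq->pc - sq->cc);
            sq->cc = sq->pc;
    }

    static void close_sq(struct sq *sq)
    {
            int tout = 0;

            while (sq->cc != sq->pc && !sq->timed_out) {
                    poll_completions(sq);   /* the driver msleep()s here */
                    if (tout++ > FLUSH_MAX_ITER)
                            sq->timed_out = true;
            }
            free_leftover_descs(sq);
    }

    int main(void)
    {
            struct sq sq = { .cc = 0, .pc = 3, .timed_out = false };

            close_sq(&sq);
            return 0;
    }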