/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/fs.h>
#include "en.h"
#include "eswitch.h"

struct mlx5e_rq_param {
        u32                        rqc[MLX5_ST_SZ_DW(rqc)];
        struct mlx5_wq_param       wq;
};

struct mlx5e_sq_param {
        u32                        sqc[MLX5_ST_SZ_DW(sqc)];
        struct mlx5_wq_param       wq;
        u16                        max_inline;
};

struct mlx5e_cq_param {
        u32                        cqc[MLX5_ST_SZ_DW(cqc)];
        struct mlx5_wq_param       wq;
        u16                        eq_ix;
};

struct mlx5e_channel_param {
        struct mlx5e_rq_param      rq;
        struct mlx5e_sq_param      sq;
        struct mlx5e_cq_param      rx_cq;
        struct mlx5e_cq_param      tx_cq;
};

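/* Query the vport state and reflect it in the netdev carrier state */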
static void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u8 port_state;

        port_state = mlx5_query_vport_state(mdev,
                MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);

        if (port_state == VPORT_STATE_UP)
                netif_carrier_on(priv->netdev);
        else
                netif_carrier_off(priv->netdev);
}

static void mlx5e_update_carrier_work(struct work_struct *work)
{
        struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
                                               update_carrier_work);

        mutex_lock(&priv->state_lock);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state))
                mlx5e_update_carrier(priv);
        mutex_unlock(&priv->state_lock);
}

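/* Query the physical port counters (IEEE 802.3, RFC 2863 and RFC 2819
 * groups) through the PPCNT access register, one group per query.
 */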
static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_pport_stats *s = &priv->stats.pport;
        u32 *in;
        u32 *out;
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

        in  = mlx5_vzalloc(sz);
        out = mlx5_vzalloc(sz);
        if (!in || !out)
                goto free_out;

        MLX5_SET(ppcnt_reg, in, local_port, 1);

        MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out,
                             sz, MLX5_REG_PPCNT, 0, 0);
        memcpy(s->IEEE_802_3_counters,
               MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
               sizeof(s->IEEE_802_3_counters));

        MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out,
                             sz, MLX5_REG_PPCNT, 0, 0);
        memcpy(s->RFC_2863_counters,
               MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
               sizeof(s->RFC_2863_counters));

        MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out,
                             sz, MLX5_REG_PPCNT, 0, 0);
        memcpy(s->RFC_2819_counters,
               MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
               sizeof(s->RFC_2819_counters));

free_out:
        kvfree(in);
        kvfree(out);
}

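/* Fold the per-ring SW counters into the vport stats, then refresh the
 * HW counters with QUERY_VPORT_COUNTER. Runs under priv->state_lock.
 */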
void mlx5e_update_stats(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_vport_stats *s = &priv->stats.vport;
        struct mlx5e_rq_stats *rq_stats;
        struct mlx5e_sq_stats *sq_stats;
        u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
        u32 *out;
        int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
        u64 tx_offload_none;
        int i, j;

        out = mlx5_vzalloc(outlen);
        if (!out)
                return;

        /* Collect the SW counters first and then the HW counters, for consistency */
        s->tso_packets          = 0;
        s->tso_bytes            = 0;
        s->tx_queue_stopped     = 0;
        s->tx_queue_wake        = 0;
        s->tx_queue_dropped     = 0;
        tx_offload_none         = 0;
        s->lro_packets          = 0;
        s->lro_bytes            = 0;
        s->rx_csum_none         = 0;
        s->rx_csum_sw           = 0;
        s->rx_wqe_err           = 0;
        for (i = 0; i < priv->params.num_channels; i++) {
                rq_stats = &priv->channel[i]->rq.stats;

                s->lro_packets  += rq_stats->lro_packets;
                s->lro_bytes    += rq_stats->lro_bytes;
                s->rx_csum_none += rq_stats->csum_none;
                s->rx_csum_sw   += rq_stats->csum_sw;
                s->rx_wqe_err   += rq_stats->wqe_err;

                for (j = 0; j < priv->params.num_tc; j++) {
                        sq_stats = &priv->channel[i]->sq[j].stats;

                        s->tso_packets          += sq_stats->tso_packets;
                        s->tso_bytes            += sq_stats->tso_bytes;
                        s->tx_queue_stopped     += sq_stats->stopped;
                        s->tx_queue_wake        += sq_stats->wake;
                        s->tx_queue_dropped     += sq_stats->dropped;
                        tx_offload_none         += sq_stats->csum_offload_none;
                }
        }

        /* HW counters */
        memset(in, 0, sizeof(in));

        MLX5_SET(query_vport_counter_in, in, opcode,
                 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
        MLX5_SET(query_vport_counter_in, in, op_mod, 0);
        MLX5_SET(query_vport_counter_in, in, other_vport, 0);

        memset(out, 0, outlen);

        if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen))
                goto free_out;

#define MLX5_GET_CTR(p, x) \
        MLX5_GET64(query_vport_counter_out, p, x)

        s->rx_error_packets     =
                MLX5_GET_CTR(out, received_errors.packets);
        s->rx_error_bytes       =
                MLX5_GET_CTR(out, received_errors.octets);
        s->tx_error_packets     =
                MLX5_GET_CTR(out, transmit_errors.packets);
        s->tx_error_bytes       =
                MLX5_GET_CTR(out, transmit_errors.octets);

        s->rx_unicast_packets   =
                MLX5_GET_CTR(out, received_eth_unicast.packets);
        s->rx_unicast_bytes     =
                MLX5_GET_CTR(out, received_eth_unicast.octets);
        s->tx_unicast_packets   =
                MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
        s->tx_unicast_bytes     =
                MLX5_GET_CTR(out, transmitted_eth_unicast.octets);

        s->rx_multicast_packets =
                MLX5_GET_CTR(out, received_eth_multicast.packets);
        s->rx_multicast_bytes   =
                MLX5_GET_CTR(out, received_eth_multicast.octets);
        s->tx_multicast_packets =
                MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
        s->tx_multicast_bytes   =
                MLX5_GET_CTR(out, transmitted_eth_multicast.octets);

        s->rx_broadcast_packets =
                MLX5_GET_CTR(out, received_eth_broadcast.packets);
        s->rx_broadcast_bytes   =
                MLX5_GET_CTR(out, received_eth_broadcast.octets);
        s->tx_broadcast_packets =
                MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
        s->tx_broadcast_bytes   =
                MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);

        s->rx_packets =
                s->rx_unicast_packets +
                s->rx_multicast_packets +
                s->rx_broadcast_packets;
        s->rx_bytes =
                s->rx_unicast_bytes +
                s->rx_multicast_bytes +
                s->rx_broadcast_bytes;
        s->tx_packets =
                s->tx_unicast_packets +
                s->tx_multicast_packets +
                s->tx_broadcast_packets;
        s->tx_bytes =
                s->tx_unicast_bytes +
                s->tx_multicast_bytes +
                s->tx_broadcast_bytes;

        /* Update calculated offload counters */
        s->tx_csum_offload = s->tx_packets - tx_offload_none;
        s->rx_csum_good    = s->rx_packets - s->rx_csum_none -
                               s->rx_csum_sw;

        mlx5e_update_pport_counters(priv);
free_out:
        kvfree(out);
}

static void mlx5e_update_stats_work(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
                                               update_stats_work);
        mutex_lock(&priv->state_lock);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
                mlx5e_update_stats(priv);
                schedule_delayed_work(dwork,
                                      msecs_to_jiffies(
                                              MLX5E_UPDATE_STATS_INTERVAL));
        }
        mutex_unlock(&priv->state_lock);
}

static void __mlx5e_async_event(struct mlx5e_priv *priv,
                                enum mlx5_dev_event event)
{
        switch (event) {
        case MLX5_DEV_EVENT_PORT_UP:
        case MLX5_DEV_EVENT_PORT_DOWN:
                schedule_work(&priv->update_carrier_work);
                break;

        default:
                break;
        }
}

static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
                              enum mlx5_dev_event event, unsigned long param)
{
        struct mlx5e_priv *priv = vpriv;

        spin_lock(&priv->async_events_spinlock);
        if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
                __mlx5e_async_event(priv, event);
        spin_unlock(&priv->async_events_spinlock);
}

static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
        set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
}

static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
        spin_lock_irq(&priv->async_events_spinlock);
        clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
        spin_unlock_irq(&priv->async_events_spinlock);
}

#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))

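/* Allocate the RQ work queue and per-WQE skb array, and pre-fill every
 * RX WQE with its buffer size (byte_count) and memory key (lkey).
 */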
static int mlx5e_create_rq(struct mlx5e_channel *c,
                           struct mlx5e_rq_param *param,
                           struct mlx5e_rq *rq)
{
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;
        void *rqc = param->rqc;
        void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
        int wq_sz;
        int err;
        int i;

        param->wq.db_numa_node = cpu_to_node(c->cpu);

        err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
                                &rq->wq_ctrl);
        if (err)
                return err;

        rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

        wq_sz = mlx5_wq_ll_get_size(&rq->wq);
        rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
                               cpu_to_node(c->cpu));
        if (!rq->skb) {
                err = -ENOMEM;
                goto err_rq_wq_destroy;
        }

        rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz :
                                             MLX5E_SW2HW_MTU(priv->netdev->mtu);
        rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz + MLX5E_NET_IP_ALIGN);

        for (i = 0; i < wq_sz; i++) {
                struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
                u32 byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;

                wqe->data.lkey       = c->mkey_be;
                wqe->data.byte_count =
                        cpu_to_be32(byte_count | MLX5_HW_START_PADDING);
        }

        rq->pdev    = c->pdev;
        rq->netdev  = c->netdev;
        rq->tstamp  = &priv->tstamp;
        rq->channel = c;
        rq->ix      = c->ix;
        rq->priv    = c->priv;

        return 0;

err_rq_wq_destroy:
        mlx5_wq_destroy(&rq->wq_ctrl);

        return err;
}

static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
        kfree(rq->skb);
        mlx5_wq_destroy(&rq->wq_ctrl);
}

static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
        struct mlx5e_priv *priv = rq->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *rqc;
        void *wq;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
                sizeof(u64) * rq->wq_ctrl.buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
        wq  = MLX5_ADDR_OF(rqc, rqc, wq);

        memcpy(rqc, param->rqc, sizeof(param->rqc));

        MLX5_SET(rqc,  rqc, cqn,                rq->cq.mcq.cqn);
        MLX5_SET(rqc,  rqc, state,              MLX5_RQC_STATE_RST);
        MLX5_SET(rqc,  rqc, flush_in_error_en,  1);
        MLX5_SET(wq,   wq,  log_wq_pg_sz,       rq->wq_ctrl.buf.page_shift -
                                                MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(wq, wq,  dbr_addr,           rq->wq_ctrl.db.dma);

        mlx5_fill_page_array(&rq->wq_ctrl.buf,
                             (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

        err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

        kvfree(in);

        return err;
}

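/* Move the RQ through the HW state machine, e.g. RST->RDY on open and
 * RDY->ERR on close.
 */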
static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
{
        struct mlx5e_channel *c = rq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *rqc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

        MLX5_SET(modify_rq_in, in, rq_state, curr_state);
        MLX5_SET(rqc, rqc, state, next_state);

        err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

        kvfree(in);

        return err;
}

static void mlx5e_disable_rq(struct mlx5e_rq *rq)
{
        mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn);
}

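/* Poll for up to 20 seconds until at least min_rx_wqes receive WQEs
 * have been posted to the RQ.
 */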
static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
        unsigned long exp_time = jiffies + msecs_to_jiffies(20000);
        struct mlx5e_channel *c = rq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_wq_ll *wq = &rq->wq;

        while (time_before(jiffies, exp_time)) {
                if (wq->cur_sz >= priv->params.min_rx_wqes)
                        return 0;

                msleep(20);
        }

        return -ETIMEDOUT;
}

static int mlx5e_open_rq(struct mlx5e_channel *c,
                         struct mlx5e_rq_param *param,
                         struct mlx5e_rq *rq)
{
        int err;

        err = mlx5e_create_rq(c, param, rq);
        if (err)
                return err;

        err = mlx5e_enable_rq(rq, param);
        if (err)
                goto err_destroy_rq;

        err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
        if (err)
                goto err_disable_rq;

        set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
        mlx5e_send_nop(&c->sq[0], true); /* trigger mlx5e_post_rx_wqes() */

        return 0;

err_disable_rq:
        mlx5e_disable_rq(rq);
err_destroy_rq:
        mlx5e_destroy_rq(rq);

        return err;
}

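/* Stop posting RX WQEs, move the RQ to error state, and wait for it to
 * drain before disabling and destroying it.
 */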
static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
        clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
        napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */

        mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
        while (!mlx5_wq_ll_is_empty(&rq->wq))
                msleep(20);

        /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
        napi_synchronize(&rq->channel->napi);

        mlx5e_disable_rq(rq);
        mlx5e_destroy_rq(rq);
}

static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
{
        kfree(sq->wqe_info);
        kfree(sq->dma_fifo);
        kfree(sq->skb);
}

static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
{
        int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
        int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

        sq->skb = kzalloc_node(wq_sz * sizeof(*sq->skb), GFP_KERNEL, numa);
        sq->dma_fifo = kzalloc_node(df_sz * sizeof(*sq->dma_fifo), GFP_KERNEL,
                                    numa);
        sq->wqe_info = kzalloc_node(wq_sz * sizeof(*sq->wqe_info), GFP_KERNEL,
                                    numa);

        if (!sq->skb || !sq->dma_fifo || !sq->wqe_info) {
                mlx5e_free_sq_db(sq);
                return -ENOMEM;
        }

        sq->dma_fifo_mask = df_sz - 1;

        return 0;
}

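/* Allocate the SQ work queue, its UAR (doorbell/blue-flame) mapping and
 * the SW bookkeeping arrays, and bind the SQ to its netdev TX queue.
 */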
static int mlx5e_create_sq(struct mlx5e_channel *c,
                           int tc,
                           struct mlx5e_sq_param *param,
                           struct mlx5e_sq *sq)
{
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *sqc = param->sqc;
        void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
        int txq_ix;
        int err;

        err = mlx5_alloc_map_uar(mdev, &sq->uar);
        if (err)
                return err;

        param->wq.db_numa_node = cpu_to_node(c->cpu);

        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
                                 &sq->wq_ctrl);
        if (err)
                goto err_unmap_free_uar;

        sq->wq.db       = &sq->wq.db[MLX5_SND_DBR];
        sq->uar_map     = sq->uar.map;
        sq->uar_bf_map  = sq->uar.bf_map;
        sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
        sq->max_inline  = param->max_inline;

        err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
        if (err)
                goto err_sq_wq_destroy;

        txq_ix = c->ix + tc * priv->params.num_channels;
        sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);

        sq->pdev      = c->pdev;
        sq->tstamp    = &priv->tstamp;
        sq->mkey_be   = c->mkey_be;
        sq->channel   = c;
        sq->tc        = tc;
        sq->edge      = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
        sq->bf_budget = MLX5E_SQ_BF_BUDGET;
        priv->txq_to_sq_map[txq_ix] = sq;

        return 0;

err_sq_wq_destroy:
        mlx5_wq_destroy(&sq->wq_ctrl);

err_unmap_free_uar:
        mlx5_unmap_free_uar(mdev, &sq->uar);

        return err;
}

static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
{
        struct mlx5e_channel *c = sq->channel;
        struct mlx5e_priv *priv = c->priv;

        mlx5e_free_sq_db(sq);
        mlx5_wq_destroy(&sq->wq_ctrl);
        mlx5_unmap_free_uar(priv->mdev, &sq->uar);
}

static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
{
        struct mlx5e_channel *c = sq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *sqc;
        void *wq;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
                sizeof(u64) * sq->wq_ctrl.buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
        wq = MLX5_ADDR_OF(sqc, sqc, wq);

        memcpy(sqc, param->sqc, sizeof(param->sqc));

        MLX5_SET(sqc,  sqc, tis_num_0,          priv->tisn[sq->tc]);
        MLX5_SET(sqc,  sqc, cqn,                c->sq[sq->tc].cq.mcq.cqn);
        MLX5_SET(sqc,  sqc, state,              MLX5_SQC_STATE_RST);
        MLX5_SET(sqc,  sqc, tis_lst_sz,         1);
        MLX5_SET(sqc,  sqc, flush_in_error_en,  1);

        MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
        MLX5_SET(wq,   wq, uar_page,      sq->uar.index);
        MLX5_SET(wq,   wq, log_wq_pg_sz,  sq->wq_ctrl.buf.page_shift -
                                          MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(wq, wq, dbr_addr,      sq->wq_ctrl.db.dma);

        mlx5_fill_page_array(&sq->wq_ctrl.buf,
                             (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

        err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);

        kvfree(in);

        return err;
}

static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
{
        struct mlx5e_channel *c = sq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *sqc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

        MLX5_SET(modify_sq_in, in, sq_state, curr_state);
        MLX5_SET(sqc, sqc, state, next_state);

        err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);

        kvfree(in);

        return err;
}

static void mlx5e_disable_sq(struct mlx5e_sq *sq)
{
        struct mlx5e_channel *c = sq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        mlx5_core_destroy_sq(mdev, sq->sqn);
}

static int mlx5e_open_sq(struct mlx5e_channel *c,
                         int tc,
                         struct mlx5e_sq_param *param,
                         struct mlx5e_sq *sq)
{
        int err;

        err = mlx5e_create_sq(c, tc, param, sq);
        if (err)
                return err;

        err = mlx5e_enable_sq(sq, param);
        if (err)
                goto err_destroy_sq;

        err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
        if (err)
                goto err_disable_sq;

        set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
        netdev_tx_reset_queue(sq->txq);
        netif_tx_start_queue(sq->txq);

        return 0;

err_disable_sq:
        mlx5e_disable_sq(sq);
err_destroy_sq:
        mlx5e_destroy_sq(sq);

        return err;
}

static inline void netif_tx_disable_queue(struct netdev_queue *txq)
{
        __netif_tx_lock_bh(txq);
        netif_tx_stop_queue(txq);
        __netif_tx_unlock_bh(txq);
}

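/* Stop the TX queue, post a final NOP if there is room so HW sees all
 * pending WQEs, then wait until the SQ drains (cc == pc) before teardown.
 */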
static void mlx5e_close_sq(struct mlx5e_sq *sq)
{
        clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
        napi_synchronize(&sq->channel->napi); /* prevent netif_tx_wake_queue */
        netif_tx_disable_queue(sq->txq);

        /* ensure hw is notified of all pending wqes */
        if (mlx5e_sq_has_room_for(sq, 1))
                mlx5e_send_nop(sq, true);

        mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
        while (sq->cc != sq->pc) /* wait till sq is empty */
                msleep(20);

        /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
        napi_synchronize(&sq->channel->napi);

        mlx5e_disable_sq(sq);
        mlx5e_destroy_sq(sq);
}

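/* Allocate the CQ buffer and doorbell records. Each CQE is initialized
 * with op_own 0xf1 (invalid opcode, owner bit set) so stale entries are
 * never mistaken for valid completions.
 */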
static int mlx5e_create_cq(struct mlx5e_channel *c,
                           struct mlx5e_cq_param *param,
                           struct mlx5e_cq *cq)
{
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_core_cq *mcq = &cq->mcq;
        int eqn_not_used;
        unsigned int irqn;
        int err;
        u32 i;

        param->wq.buf_numa_node = cpu_to_node(c->cpu);
        param->wq.db_numa_node  = cpu_to_node(c->cpu);
        param->eq_ix   = c->ix;

        err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
                               &cq->wq_ctrl);
        if (err)
                return err;

        mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

        cq->napi        = &c->napi;

        mcq->cqe_sz     = 64;
        mcq->set_ci_db  = cq->wq_ctrl.db.db;
        mcq->arm_db     = cq->wq_ctrl.db.db + 1;
        *mcq->set_ci_db = 0;
        *mcq->arm_db    = 0;
        mcq->vector     = param->eq_ix;
        mcq->comp       = mlx5e_completion_event;
        mcq->event      = mlx5e_cq_error_event;
        mcq->irqn       = irqn;
        mcq->uar        = &priv->cq_uar;

        for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
                struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

                cqe->op_own = 0xf1;
        }

        cq->channel = c;
        cq->priv = priv;

        return 0;
}

static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
        mlx5_wq_destroy(&cq->wq_ctrl);
}

static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
        struct mlx5e_priv *priv = cq->priv;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_core_cq *mcq = &cq->mcq;

        void *in;
        void *cqc;
        int inlen;
        unsigned int irqn_not_used;
        int eqn;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
                sizeof(u64) * cq->wq_ctrl.buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

        memcpy(cqc, param->cqc, sizeof(param->cqc));

        mlx5_fill_page_array(&cq->wq_ctrl.buf,
                             (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

        mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);

        MLX5_SET(cqc,   cqc, c_eqn,         eqn);
        MLX5_SET(cqc,   cqc, uar_page,      mcq->uar->index);
        MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
                                            MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);

        err = mlx5_core_create_cq(mdev, mcq, in, inlen);

        kvfree(in);

        if (err)
                return err;

        mlx5e_cq_arm(cq);

        return 0;
}

static void mlx5e_disable_cq(struct mlx5e_cq *cq)
{
        struct mlx5e_priv *priv = cq->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        mlx5_core_destroy_cq(mdev, &cq->mcq);
}

static int mlx5e_open_cq(struct mlx5e_channel *c,
                         struct mlx5e_cq_param *param,
                         struct mlx5e_cq *cq,
                         u16 moderation_usecs,
                         u16 moderation_frames)
{
        int err;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        err = mlx5e_create_cq(c, param, cq);
        if (err)
                return err;

        err = mlx5e_enable_cq(cq, param);
        if (err)
                goto err_destroy_cq;

        err = mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
                                             moderation_usecs,
                                             moderation_frames);
        if (err)
                goto err_destroy_cq;

        return 0;

err_destroy_cq:
        mlx5e_destroy_cq(cq);

        return err;
}

static void mlx5e_close_cq(struct mlx5e_cq *cq)
{
        mlx5e_disable_cq(cq);
        mlx5e_destroy_cq(cq);
}

static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
{
        return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
}

static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
                             struct mlx5e_channel_param *cparam)
{
        struct mlx5e_priv *priv = c->priv;
        int err;
        int tc;

        for (tc = 0; tc < c->num_tc; tc++) {
                err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
                                    priv->params.tx_cq_moderation_usec,
                                    priv->params.tx_cq_moderation_pkts);
                if (err)
                        goto err_close_tx_cqs;
        }

        return 0;

err_close_tx_cqs:
        for (tc--; tc >= 0; tc--)
                mlx5e_close_cq(&c->sq[tc].cq);

        return err;
}

static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
        int tc;

        for (tc = 0; tc < c->num_tc; tc++)
                mlx5e_close_cq(&c->sq[tc].cq);
}

static int mlx5e_open_sqs(struct mlx5e_channel *c,
                          struct mlx5e_channel_param *cparam)
{
        int err;
        int tc;

        for (tc = 0; tc < c->num_tc; tc++) {
                err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
                if (err)
                        goto err_close_sqs;
        }

        return 0;

err_close_sqs:
        for (tc--; tc >= 0; tc--)
                mlx5e_close_sq(&c->sq[tc]);

        return err;
}

static void mlx5e_close_sqs(struct mlx5e_channel *c)
{
        int tc;

        for (tc = 0; tc < c->num_tc; tc++)
                mlx5e_close_sq(&c->sq[tc]);
}

static void mlx5e_build_channeltc_to_txq_map(struct mlx5e_priv *priv, int ix)
{
        int i;

        for (i = 0; i < MLX5E_MAX_NUM_TC; i++)
                priv->channeltc_to_txq_map[ix][i] =
                        ix + i * priv->params.num_channels;
}

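/* A channel bundles one RQ, one SQ per TC, their CQs and a NAPI
 * context, all allocated on the node of the CPU that serves the
 * channel's IRQ vector.
 */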
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
                              struct mlx5e_channel_param *cparam,
                              struct mlx5e_channel **cp)
{
        struct net_device *netdev = priv->netdev;
        int cpu = mlx5e_get_cpu(priv, ix);
        struct mlx5e_channel *c;
        int err;

        c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
        if (!c)
                return -ENOMEM;

        c->priv     = priv;
        c->ix       = ix;
        c->cpu      = cpu;
        c->pdev     = &priv->mdev->pdev->dev;
        c->netdev   = priv->netdev;
        c->mkey_be  = cpu_to_be32(priv->mr.key);
        c->num_tc   = priv->params.num_tc;

        mlx5e_build_channeltc_to_txq_map(priv, ix);

        netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);

        err = mlx5e_open_tx_cqs(c, cparam);
        if (err)
                goto err_napi_del;

        err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
                            priv->params.rx_cq_moderation_usec,
                            priv->params.rx_cq_moderation_pkts);
        if (err)
                goto err_close_tx_cqs;

        napi_enable(&c->napi);

        err = mlx5e_open_sqs(c, cparam);
        if (err)
                goto err_disable_napi;

        err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
        if (err)
                goto err_close_sqs;

        netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
        *cp = c;

        return 0;

err_close_sqs:
        mlx5e_close_sqs(c);

err_disable_napi:
        napi_disable(&c->napi);
        mlx5e_close_cq(&c->rq.cq);

err_close_tx_cqs:
        mlx5e_close_tx_cqs(c);

err_napi_del:
        netif_napi_del(&c->napi);
        napi_hash_del(&c->napi);
        kfree(c);

        return err;
}

static void mlx5e_close_channel(struct mlx5e_channel *c)
{
        mlx5e_close_rq(&c->rq);
        mlx5e_close_sqs(c);
        napi_disable(&c->napi);
        mlx5e_close_cq(&c->rq.cq);
        mlx5e_close_tx_cqs(c);
        netif_napi_del(&c->napi);

        napi_hash_del(&c->napi);
        synchronize_rcu();

        kfree(c);
}

static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
                                 struct mlx5e_rq_param *param)
{
        void *rqc = param->rqc;
        void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

        MLX5_SET(wq, wq, wq_type,          MLX5_WQ_TYPE_LINKED_LIST);
        MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
        MLX5_SET(wq, wq, log_wq_stride,    ilog2(sizeof(struct mlx5e_rx_wqe)));
        MLX5_SET(wq, wq, log_wq_sz,        priv->params.log_rq_size);
        MLX5_SET(wq, wq, pd,               priv->pdn);

        param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
        param->wq.linear = 1;
}

static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
                                 struct mlx5e_sq_param *param)
{
        void *sqc = param->sqc;
        void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

        MLX5_SET(wq, wq, log_wq_sz,     priv->params.log_sq_size);
        MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
        MLX5_SET(wq, wq, pd,            priv->pdn);

        param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
        param->max_inline = priv->params.tx_max_inline;
}

static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
                                        struct mlx5e_cq_param *param)
{
        void *cqc = param->cqc;

        MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index);
}

static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
                                    struct mlx5e_cq_param *param)
{
        void *cqc = param->cqc;

        MLX5_SET(cqc, cqc, log_cq_size,  priv->params.log_rq_size);

        mlx5e_build_common_cq_param(priv, param);
}

static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
                                    struct mlx5e_cq_param *param)
{
        void *cqc = param->cqc;

        MLX5_SET(cqc, cqc, log_cq_size,  priv->params.log_sq_size);

        mlx5e_build_common_cq_param(priv, param);
}

static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
                                      struct mlx5e_channel_param *cparam)
{
        memset(cparam, 0, sizeof(*cparam));

        mlx5e_build_rq_param(priv, &cparam->rq);
        mlx5e_build_sq_param(priv, &cparam->sq);
        mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
        mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
}

static int mlx5e_open_channels(struct mlx5e_priv *priv)
{
        struct mlx5e_channel_param cparam;
        int nch = priv->params.num_channels;
        int err = -ENOMEM;
        int i;
        int j;

        priv->channel = kcalloc(nch, sizeof(struct mlx5e_channel *),
                                GFP_KERNEL);

        priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
                                      sizeof(struct mlx5e_sq *), GFP_KERNEL);

        if (!priv->channel || !priv->txq_to_sq_map)
                goto err_free_txq_to_sq_map;

        mlx5e_build_channel_param(priv, &cparam);
        for (i = 0; i < nch; i++) {
                err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
                if (err)
                        goto err_close_channels;
        }

        for (j = 0; j < nch; j++) {
                err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
                if (err)
                        goto err_close_channels;
        }

        return 0;

err_close_channels:
        for (i--; i >= 0; i--)
                mlx5e_close_channel(priv->channel[i]);

err_free_txq_to_sq_map:
        kfree(priv->txq_to_sq_map);
        kfree(priv->channel);

        return err;
}

static void mlx5e_close_channels(struct mlx5e_priv *priv)
{
        int i;

        for (i = 0; i < priv->params.num_channels; i++)
                mlx5e_close_channel(priv->channel[i]);

        kfree(priv->txq_to_sq_map);
        kfree(priv->channel);
}

static int mlx5e_rx_hash_fn(int hfunc)
{
        return (hfunc == ETH_RSS_HASH_TOP) ?
               MLX5_RX_HASH_FN_TOEPLITZ :
               MLX5_RX_HASH_FN_INVERTED_XOR8;
}

static int mlx5e_bits_invert(unsigned long a, int size)
{
        int inv = 0;
        int i;

        for (i = 0; i < size; i++)
                inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;

        return inv;
}

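/* Fill the indirection RQT with RQNs. For ETH_RSS_HASH_XOR the table
 * index bits are inverted before the lookup; entries point at the open
 * channels' RQs, or at the drop RQ while the device is closed.
 */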
static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
{
        int i;

        for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
                int ix = i;

                if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
                        ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);

                ix = priv->params.indirection_rqt[ix];
                ix = ix % priv->params.num_channels;
                MLX5_SET(rqtc, rqtc, rq_num[i],
                         test_bit(MLX5E_STATE_OPENED, &priv->state) ?
                         priv->channel[ix]->rq.rqn :
                         priv->drop_rq.rqn);
        }
}

static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, void *rqtc,
                                enum mlx5e_rqt_ix rqt_ix)
{
        switch (rqt_ix) {
        case MLX5E_INDIRECTION_RQT:
                mlx5e_fill_indir_rqt_rqns(priv, rqtc);

                break;

        default: /* MLX5E_SINGLE_RQ_RQT */
                MLX5_SET(rqtc, rqtc, rq_num[0],
                         test_bit(MLX5E_STATE_OPENED, &priv->state) ?
                         priv->channel[0]->rq.rqn :
                         priv->drop_rq.rqn);

                break;
        }
}

static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 *in;
        void *rqtc;
        int inlen;
        int sz;
        int err;

        sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;

        inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

        MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
        MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

        mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);

        err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn[rqt_ix]);

        kvfree(in);

        return err;
}

int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 *in;
        void *rqtc;
        int inlen;
        int sz;
        int err;

        sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;

        inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);

        MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);

        mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);

        MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);

        err = mlx5_core_modify_rqt(mdev, priv->rqtn[rqt_ix], in, inlen);

        kvfree(in);

        return err;
}

static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
        mlx5_core_destroy_rqt(priv->mdev, priv->rqtn[rqt_ix]);
}

static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
{
        mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
        mlx5e_redirect_rqt(priv, MLX5E_SINGLE_RQ_RQT);
}

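/* Enable IPv4/IPv6 LRO in the TIR context and derive the maximal
 * aggregated payload size from the LRO WQE size, reserving rough
 * headroom for L2/L3 headers.
 */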
static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
{
        if (!priv->params.lro_en)
                return;

#define ROUGH_MAX_L2_L3_HDR_SZ 256

        MLX5_SET(tirc, tirc, lro_enable_mask,
                 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
                 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
        MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
                 (priv->params.lro_wqe_sz -
                  ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
        MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
                 MLX5_CAP_ETH(priv->mdev,
                              lro_timer_supported_periods[2]));
}

static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
{
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *tirc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
        tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);

        mlx5e_build_tir_ctx_lro(tirc, priv);

        err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen);

        kvfree(in);

        return err;
}

static int mlx5e_refresh_tir_self_loopback_enable(struct mlx5_core_dev *mdev,
                                                  u32 tirn)
{
        void *in;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);

        err = mlx5_core_modify_tir(mdev, tirn, in, inlen);

        kvfree(in);

        return err;
}

static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
{
        int err;
        int i;

        for (i = 0; i < MLX5E_NUM_TT; i++) {
                err = mlx5e_refresh_tir_self_loopback_enable(priv->mdev,
                                                             priv->tirn[i]);
                if (err)
                        return err;
        }

        return 0;
}

static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
        int hw_mtu;
        int err;

        err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
        if (err)
                return err;

        mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);

        if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu)
                netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n",
                            __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu);

        netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu);
        return 0;
}

int mlx5e_open_locked(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int num_txqs;
        int err;

        set_bit(MLX5E_STATE_OPENED, &priv->state);

        num_txqs = priv->params.num_channels * priv->params.num_tc;
        netif_set_real_num_tx_queues(netdev, num_txqs);
        netif_set_real_num_rx_queues(netdev, priv->params.num_channels);

        err = mlx5e_set_dev_port_mtu(netdev);
        if (err)
                goto err_clear_state_opened_flag;

        err = mlx5e_open_channels(priv);
        if (err) {
                netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
                           __func__, err);
                goto err_clear_state_opened_flag;
        }

        err = mlx5e_refresh_tirs_self_loopback_enable(priv);
        if (err) {
                netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n",
                           __func__, err);
                goto err_close_channels;
        }

        mlx5e_update_carrier(priv);
        mlx5e_redirect_rqts(priv);
        mlx5e_timestamp_init(priv);

        schedule_delayed_work(&priv->update_stats_work, 0);

        return 0;

err_close_channels:
        mlx5e_close_channels(priv);
err_clear_state_opened_flag:
        clear_bit(MLX5E_STATE_OPENED, &priv->state);
        return err;
}

static int mlx5e_open(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int err;

        mutex_lock(&priv->state_lock);
        err = mlx5e_open_locked(netdev);
        mutex_unlock(&priv->state_lock);

        return err;
}

int mlx5e_close_locked(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        /* May already be CLOSED in case a previous configuration operation
         * (e.g. an RX/TX queue size change) that involves close&open failed.
         */
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
                return 0;

        clear_bit(MLX5E_STATE_OPENED, &priv->state);

        mlx5e_timestamp_cleanup(priv);
        mlx5e_redirect_rqts(priv);
        netif_carrier_off(priv->netdev);
        mlx5e_close_channels(priv);

        return 0;
}

static int mlx5e_close(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int err;

        mutex_lock(&priv->state_lock);
        err = mlx5e_close_locked(netdev);
        mutex_unlock(&priv->state_lock);

        return err;
}

static int mlx5e_create_drop_rq(struct mlx5e_priv *priv,
                                struct mlx5e_rq *rq,
                                struct mlx5e_rq_param *param)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        void *rqc = param->rqc;
        void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
        int err;

        param->wq.db_numa_node = param->wq.buf_numa_node;

        err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
                                &rq->wq_ctrl);
        if (err)
                return err;

        rq->priv = priv;

        return 0;
}

static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
                                struct mlx5e_cq *cq,
                                struct mlx5e_cq_param *param)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_core_cq *mcq = &cq->mcq;
        int eqn_not_used;
        unsigned int irqn;
        int err;

        err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
                               &cq->wq_ctrl);
        if (err)
                return err;

        mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

        mcq->cqe_sz     = 64;
        mcq->set_ci_db  = cq->wq_ctrl.db.db;
        mcq->arm_db     = cq->wq_ctrl.db.db + 1;
        *mcq->set_ci_db = 0;
        *mcq->arm_db    = 0;
        mcq->vector     = param->eq_ix;
        mcq->comp       = mlx5e_completion_event;
        mcq->event      = mlx5e_cq_error_event;
        mcq->irqn       = irqn;
        mcq->uar        = &priv->cq_uar;

        cq->priv = priv;

        return 0;
}

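/* The drop RQ backs the RQTs while the netdev is closed; it is never
 * posted to, so any traffic steered to it is dropped by the HW.
 */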
static int mlx5e_open_drop_rq(struct mlx5e_priv *priv)
{
        struct mlx5e_cq_param cq_param;
        struct mlx5e_rq_param rq_param;
        struct mlx5e_rq *rq = &priv->drop_rq;
        struct mlx5e_cq *cq = &priv->drop_rq.cq;
        int err;

        memset(&cq_param, 0, sizeof(cq_param));
        memset(&rq_param, 0, sizeof(rq_param));
        mlx5e_build_rx_cq_param(priv, &cq_param);
        mlx5e_build_rq_param(priv, &rq_param);

        err = mlx5e_create_drop_cq(priv, cq, &cq_param);
        if (err)
                return err;

        err = mlx5e_enable_cq(cq, &cq_param);
        if (err)
                goto err_destroy_cq;

        err = mlx5e_create_drop_rq(priv, rq, &rq_param);
        if (err)
                goto err_disable_cq;

        err = mlx5e_enable_rq(rq, &rq_param);
        if (err)
                goto err_destroy_rq;

        return 0;

err_destroy_rq:
        mlx5e_destroy_rq(&priv->drop_rq);

err_disable_cq:
        mlx5e_disable_cq(&priv->drop_rq.cq);

err_destroy_cq:
        mlx5e_destroy_cq(&priv->drop_rq.cq);

        return err;
}

static void mlx5e_close_drop_rq(struct mlx5e_priv *priv)
{
        mlx5e_disable_rq(&priv->drop_rq);
        mlx5e_destroy_rq(&priv->drop_rq);
        mlx5e_disable_cq(&priv->drop_rq.cq);
        mlx5e_destroy_cq(&priv->drop_rq.cq);
}

static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(create_tis_in)];
        void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

        memset(in, 0, sizeof(in));

        MLX5_SET(tisc, tisc, prio,  tc);
        MLX5_SET(tisc, tisc, transport_domain, priv->tdn);

        return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
}

static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc)
{
        mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
}

static int mlx5e_create_tises(struct mlx5e_priv *priv)
{
        int err;
        int tc;

        for (tc = 0; tc < priv->params.num_tc; tc++) {
                err = mlx5e_create_tis(priv, tc);
                if (err)
                        goto err_close_tises;
        }

        return 0;

err_close_tises:
        for (tc--; tc >= 0; tc--)
                mlx5e_destroy_tis(priv, tc);

        return err;
}

static void mlx5e_destroy_tises(struct mlx5e_priv *priv)
{
        int tc;

        for (tc = 0; tc < priv->params.num_tc; tc++)
                mlx5e_destroy_tis(priv, tc);
}

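/* Build a per-traffic-type TIR context: choose the indirection or
 * single-RQ RQT, the RSS hash function and key, and which L3/L4 header
 * fields feed the hash.
 */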
static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
{
        void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);

        MLX5_SET(tirc, tirc, transport_domain, priv->tdn);

#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
                                 MLX5_HASH_FIELD_SEL_DST_IP)

#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
                                 MLX5_HASH_FIELD_SEL_DST_IP   |\
                                 MLX5_HASH_FIELD_SEL_L4_SPORT |\
                                 MLX5_HASH_FIELD_SEL_L4_DPORT)

#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
                                 MLX5_HASH_FIELD_SEL_DST_IP   |\
                                 MLX5_HASH_FIELD_SEL_IPSEC_SPI)

        mlx5e_build_tir_ctx_lro(tirc, priv);

        MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);

        switch (tt) {
        case MLX5E_TT_ANY:
                MLX5_SET(tirc, tirc, indirect_table,
                         priv->rqtn[MLX5E_SINGLE_RQ_RQT]);
                MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
                break;
        default:
                MLX5_SET(tirc, tirc, indirect_table,
                         priv->rqtn[MLX5E_INDIRECTION_RQT]);
                MLX5_SET(tirc, tirc, rx_hash_fn,
                         mlx5e_rx_hash_fn(priv->params.rss_hfunc));
                if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
                        void *rss_key = MLX5_ADDR_OF(tirc, tirc,
                                                     rx_hash_toeplitz_key);
                        size_t len = MLX5_FLD_SZ_BYTES(tirc,
                                                       rx_hash_toeplitz_key);

                        MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
                        memcpy(rss_key, priv->params.toeplitz_hash_key, len);
                }
                break;
        }

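        /* Select which packet fields feed the RX hash for this traffic type. */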
        switch (tt) {
        case MLX5E_TT_IPV4_TCP:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV4);
                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
                         MLX5_L4_PROT_TYPE_TCP);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_L4PORTS);
                break;

        case MLX5E_TT_IPV6_TCP:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV6);
                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
                         MLX5_L4_PROT_TYPE_TCP);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_L4PORTS);
                break;

        case MLX5E_TT_IPV4_UDP:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV4);
                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
                         MLX5_L4_PROT_TYPE_UDP);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_L4PORTS);
                break;

        case MLX5E_TT_IPV6_UDP:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV6);
                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
                         MLX5_L4_PROT_TYPE_UDP);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_L4PORTS);
                break;

        case MLX5E_TT_IPV4_IPSEC_AH:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV4);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_IPSEC_SPI);
                break;

        case MLX5E_TT_IPV6_IPSEC_AH:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV6);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_IPSEC_SPI);
                break;

        case MLX5E_TT_IPV4_IPSEC_ESP:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV4);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_IPSEC_SPI);
                break;

        case MLX5E_TT_IPV6_IPSEC_ESP:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV6);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_IPSEC_SPI);
                break;

        case MLX5E_TT_IPV4:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV4);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP);
                break;

        case MLX5E_TT_IPV6:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV6);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP);
                break;
        }
}

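/*
 * The create_tir inbox is allocated with mlx5_vzalloc() (kmalloc with a
 * vmalloc fallback) rather than on the stack, hence the kvfree() below.
 */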
static int mlx5e_create_tir(struct mlx5e_priv *priv, int tt)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 *in;
        void *tirc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_tir_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

        mlx5e_build_tir_ctx(priv, tirc, tt);

        err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);

        kvfree(in);

        return err;
}

static void mlx5e_destroy_tir(struct mlx5e_priv *priv, int tt)
{
        mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
}

static int mlx5e_create_tirs(struct mlx5e_priv *priv)
{
        int err;
        int i;

        for (i = 0; i < MLX5E_NUM_TT; i++) {
                err = mlx5e_create_tir(priv, i);
                if (err)
                        goto err_destroy_tirs;
        }

        return 0;

err_destroy_tirs:
        for (i--; i >= 0; i--)
                mlx5e_destroy_tir(priv, i);

        return err;
}

static void mlx5e_destroy_tirs(struct mlx5e_priv *priv)
{
        int i;

        for (i = 0; i < MLX5E_NUM_TT; i++)
                mlx5e_destroy_tir(priv, i);
}

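/*
 * ndo_get_stats64 may be called in atomic context, so report the
 * software counters that mlx5e_update_stats_work refreshes in the
 * background instead of querying firmware here.  Note that "multicast"
 * aggregates both RX and TX multicast packets.
 */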
static struct rtnl_link_stats64 *
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_vport_stats *vstats = &priv->stats.vport;

        stats->rx_packets = vstats->rx_packets;
        stats->rx_bytes   = vstats->rx_bytes;
        stats->tx_packets = vstats->tx_packets;
        stats->tx_bytes   = vstats->tx_bytes;
        stats->multicast  = vstats->rx_multicast_packets +
                            vstats->tx_multicast_packets;
        stats->tx_errors  = vstats->tx_error_packets;
        stats->rx_errors  = vstats->rx_error_packets;
        stats->tx_dropped = vstats->tx_queue_dropped;
        stats->rx_crc_errors = 0;
        stats->rx_length_errors = 0;

        return stats;
}

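/*
 * ndo_set_rx_mode runs under the netdev addr_list lock in BH context,
 * while reprogramming the flow tables issues sleeping firmware
 * commands, so the real work is deferred to a workqueue.
 */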
static void mlx5e_set_rx_mode(struct net_device *dev)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        schedule_work(&priv->set_rx_mode_work);
}

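/* Update dev_addr under the addr lock, then let set_rx_mode_work push
 * the new unicast address into the hardware flow tables.
 */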
static int mlx5e_set_mac(struct net_device *netdev, void *addr)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct sockaddr *saddr = addr;

        if (!is_valid_ether_addr(saddr->sa_data))
                return -EADDRNOTAVAIL;

        netif_addr_lock_bh(netdev);
        ether_addr_copy(netdev->dev_addr, saddr->sa_data);
        netif_addr_unlock_bh(netdev);

        schedule_work(&priv->set_rx_mode_work);

        return 0;
}

static int mlx5e_set_features(struct net_device *netdev,
                              netdev_features_t features)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int err = 0;
        netdev_features_t changes = features ^ netdev->features;

        mutex_lock(&priv->state_lock);

        if (changes & NETIF_F_LRO) {
                bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);

                if (was_opened)
                        mlx5e_close_locked(priv->netdev);

                priv->params.lro_en = !!(features & NETIF_F_LRO);
                mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV4_TCP);
                mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV6_TCP);

                if (was_opened)
                        err = mlx5e_open_locked(priv->netdev);
        }

        mutex_unlock(&priv->state_lock);

        if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) {
                if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
                        mlx5e_enable_vlan_filter(priv);
                else
                        mlx5e_disable_vlan_filter(priv);
        }

        return err;
}

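/*
 * The firmware reports the maximum MTU in hardware terms (including
 * Ethernet overhead), so convert it with MLX5E_HW2SW_MTU before
 * comparing.  An MTU change requires re-creating the channels, hence
 * the close/open cycle under the state lock.
 */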
static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
        bool was_opened;
        int max_mtu;
        int err = 0;

        mlx5_query_port_max_mtu(mdev, &max_mtu, 1);

        max_mtu = MLX5E_HW2SW_MTU(max_mtu);

        if (new_mtu > max_mtu) {
                netdev_err(netdev,
                           "%s: bad MTU %d, larger than max %d\n",
                           __func__, new_mtu, max_mtu);
                return -EINVAL;
        }

        mutex_lock(&priv->state_lock);

        was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
        if (was_opened)
                mlx5e_close_locked(netdev);

        netdev->mtu = new_mtu;

        if (was_opened)
                err = mlx5e_open_locked(netdev);

        mutex_unlock(&priv->state_lock);

        return err;
}

static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        switch (cmd) {
        case SIOCSHWTSTAMP:
                return mlx5e_hwstamp_set(dev, ifr);
        case SIOCGHWTSTAMP:
                return mlx5e_hwstamp_get(dev, ifr);
        default:
                return -EOPNOTSUPP;
        }
}

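/* E-switch vport 0 is the PF itself, so VF n maps to vport n + 1 in all
 * of the ndo_*_vf_* callbacks below.
 */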
static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev = priv->mdev;

        return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
}

static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev = priv->mdev;

        return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
                                           vlan, qos);
}

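/* Translate between the e-switch admin state and the
 * IFLA_VF_LINK_STATE_* encoding; anything unrecognized falls back to
 * AUTO.
 */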
static int mlx5_vport_link2ifla(u8 esw_link)
{
        switch (esw_link) {
        case MLX5_ESW_VPORT_ADMIN_STATE_DOWN:
                return IFLA_VF_LINK_STATE_DISABLE;
        case MLX5_ESW_VPORT_ADMIN_STATE_UP:
                return IFLA_VF_LINK_STATE_ENABLE;
        }
        return IFLA_VF_LINK_STATE_AUTO;
}

static int mlx5_ifla_link2vport(u8 ifla_link)
{
        switch (ifla_link) {
        case IFLA_VF_LINK_STATE_DISABLE:
                return MLX5_ESW_VPORT_ADMIN_STATE_DOWN;
        case IFLA_VF_LINK_STATE_ENABLE:
                return MLX5_ESW_VPORT_ADMIN_STATE_UP;
        }
        return MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
}

static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
                                   int link_state)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev = priv->mdev;

        return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
                                            mlx5_ifla_link2vport(link_state));
}

static int mlx5e_get_vf_config(struct net_device *dev,
                               int vf, struct ifla_vf_info *ivi)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev = priv->mdev;
        int err;

        err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
        if (err)
                return err;
        ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
        return 0;
}

static int mlx5e_get_vf_stats(struct net_device *dev,
                              int vf, struct ifla_vf_stats *vf_stats)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev = priv->mdev;

        return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
                                            vf_stats);
}

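/* Two ndo tables: the SRIOV variant adds the VF management callbacks
 * and is selected when the device is the e-switch vport group manager.
 */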
static const struct net_device_ops mlx5e_netdev_ops_basic = {
        .ndo_open                = mlx5e_open,
        .ndo_stop                = mlx5e_close,
        .ndo_start_xmit          = mlx5e_xmit,
        .ndo_get_stats64         = mlx5e_get_stats,
        .ndo_set_rx_mode         = mlx5e_set_rx_mode,
        .ndo_set_mac_address     = mlx5e_set_mac,
        .ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
        .ndo_set_features        = mlx5e_set_features,
        .ndo_change_mtu          = mlx5e_change_mtu,
        .ndo_do_ioctl            = mlx5e_ioctl,
};

static const struct net_device_ops mlx5e_netdev_ops_sriov = {
        .ndo_open                = mlx5e_open,
        .ndo_stop                = mlx5e_close,
        .ndo_start_xmit          = mlx5e_xmit,
        .ndo_get_stats64         = mlx5e_get_stats,
        .ndo_set_rx_mode         = mlx5e_set_rx_mode,
        .ndo_set_mac_address     = mlx5e_set_mac,
        .ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
        .ndo_set_features        = mlx5e_set_features,
        .ndo_change_mtu          = mlx5e_change_mtu,
        .ndo_do_ioctl            = mlx5e_ioctl,
        .ndo_set_vf_mac          = mlx5e_set_vf_mac,
        .ndo_set_vf_vlan         = mlx5e_set_vf_vlan,
        .ndo_get_vf_config       = mlx5e_get_vf_config,
        .ndo_set_vf_link_state   = mlx5e_set_vf_link_state,
        .ndo_get_vf_stats        = mlx5e_get_vf_stats,
};

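/* Refuse to create a netdev unless the function is an Ethernet port
 * with checksum, LSO, VLAN and RSS offloads and a flow table hierarchy
 * at least three levels deep.
 */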
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
        if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
                return -EOPNOTSUPP;
        if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
            !MLX5_CAP_GEN(mdev, nic_flow_table) ||
            !MLX5_CAP_ETH(mdev, csum_cap) ||
            !MLX5_CAP_ETH(mdev, max_lso_cap) ||
            !MLX5_CAP_ETH(mdev, vlan_cap) ||
            !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
            MLX5_CAP_FLOWTABLE(mdev,
                               flow_table_properties_nic_receive.max_ft_level)
                               < 3) {
                mlx5_core_warn(mdev,
                               "Not creating net device, some required device capabilities are missing\n");
                return -EOPNOTSUPP;
        }
        if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
                mlx5_core_warn(mdev, "Self loopback prevention is not supported\n");

        return 0;
}

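/*
 * A BlueFlame register is split into two halves that are used
 * alternately, so the inline budget of one TX WQE is half the register,
 * minus the fixed WQE segments, plus the two bytes of inline_hdr_start
 * that already live inside struct mlx5e_tx_wqe.
 */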
u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
{
        int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;

        return bf_buf_size -
               sizeof(struct mlx5e_tx_wqe) +
               2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
}

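/* Fill priv with the driver defaults; most of these can be retuned at
 * runtime through ethtool.
 */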
static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
                                    struct net_device *netdev,
                                    int num_channels)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int i;

        priv->params.log_sq_size           =
                MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
        priv->params.log_rq_size           =
                MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
        priv->params.rx_cq_moderation_usec =
                MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
        priv->params.rx_cq_moderation_pkts =
                MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
        priv->params.tx_cq_moderation_usec =
                MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
        priv->params.tx_cq_moderation_pkts =
                MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
        priv->params.tx_max_inline         = mlx5e_get_max_inline_cap(mdev);
        priv->params.min_rx_wqes           =
                MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
        priv->params.num_tc                = 1;
        priv->params.default_vlan_prio     = 0;
        priv->params.rss_hfunc             = ETH_RSS_HASH_XOR;

        netdev_rss_key_fill(priv->params.toeplitz_hash_key,
                            sizeof(priv->params.toeplitz_hash_key));

        for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++)
                priv->params.indirection_rqt[i] = i % num_channels;

        priv->params.lro_wqe_sz            =
                MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;

        priv->mdev                         = mdev;
        priv->netdev                       = netdev;
        priv->params.num_channels          = num_channels;
        priv->default_vlan_prio            = priv->params.default_vlan_prio;

        spin_lock_init(&priv->async_events_spinlock);
        mutex_init(&priv->state_lock);

        INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
        INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
        INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
}

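/* Use the MAC the firmware reports for vport 0.  Functions that are not
 * the e-switch manager (i.e. VFs) may be handed all zeros, in which
 * case fall back to a random address.
 */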
static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
        if (is_zero_ether_addr(netdev->dev_addr) &&
            !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
                eth_hw_addr_random(netdev);
                mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
        }
}

static void mlx5e_build_netdev(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;

        SET_NETDEV_DEV(netdev, &mdev->pdev->dev);

        if (MLX5_CAP_GEN(mdev, vport_group_manager))
                netdev->netdev_ops = &mlx5e_netdev_ops_sriov;
        else
                netdev->netdev_ops = &mlx5e_netdev_ops_basic;

        netdev->watchdog_timeo    = 15 * HZ;

        netdev->ethtool_ops       = &mlx5e_ethtool_ops;

        netdev->vlan_features    |= NETIF_F_SG;
        netdev->vlan_features    |= NETIF_F_IP_CSUM;
        netdev->vlan_features    |= NETIF_F_IPV6_CSUM;
        netdev->vlan_features    |= NETIF_F_GRO;
        netdev->vlan_features    |= NETIF_F_TSO;
        netdev->vlan_features    |= NETIF_F_TSO6;
        netdev->vlan_features    |= NETIF_F_RXCSUM;
        netdev->vlan_features    |= NETIF_F_RXHASH;

        if (MLX5_CAP_ETH(mdev, lro_cap))
                netdev->vlan_features    |= NETIF_F_LRO;

        netdev->hw_features       = netdev->vlan_features;
        netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_TX;
        netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_RX;
        netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;

        netdev->features          = netdev->hw_features;
        if (!priv->params.lro_en)
                netdev->features  &= ~NETIF_F_LRO;

        netdev->features         |= NETIF_F_HIGHDMA;

        netdev->priv_flags       |= IFF_UNICAST_FLT;

        mlx5e_set_netdev_dev_addr(netdev);
}

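/*
 * Create a physical-address memory key that covers the whole address
 * space (MLX5_MKEY_LEN64), so RX/TX buffers can be posted by physical
 * address without per-buffer registration; qpn 0xffffff leaves the key
 * unbound to any particular QP.
 */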
static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
                             struct mlx5_core_mr *mr)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_create_mkey_mbox_in *in;
        int err;

        in = mlx5_vzalloc(sizeof(*in));
        if (!in)
                return -ENOMEM;

        in->seg.flags = MLX5_PERM_LOCAL_WRITE |
                        MLX5_PERM_LOCAL_READ  |
                        MLX5_ACCESS_MODE_PA;
        in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
        in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);

        err = mlx5_core_create_mkey(mdev, mr, in, sizeof(*in), NULL, NULL,
                                    NULL);

        kvfree(in);

        return err;
}

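/*
 * Create and register the netdev together with every resource that
 * outlives an open/close cycle: UAR, PD, transport domain, mkey, TISes,
 * the drop RQ, both RQTs, the TIRs and the flow tables.  The error path
 * unwinds in strict reverse order of creation.
 */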
static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
{
        struct net_device *netdev;
        struct mlx5e_priv *priv;
        int nch = mlx5e_get_max_num_channels(mdev);
        int err;

        if (mlx5e_check_required_hca_cap(mdev))
                return NULL;

        netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), nch, nch);
        if (!netdev) {
                mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
                return NULL;
        }

        mlx5e_build_netdev_priv(mdev, netdev, nch);
        mlx5e_build_netdev(netdev);

        netif_carrier_off(netdev);

        priv = netdev_priv(netdev);

        err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
        if (err) {
                mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
                goto err_free_netdev;
        }

        err = mlx5_core_alloc_pd(mdev, &priv->pdn);
        if (err) {
                mlx5_core_err(mdev, "alloc pd failed, %d\n", err);
                goto err_unmap_free_uar;
        }

        err = mlx5_core_alloc_transport_domain(mdev, &priv->tdn);
        if (err) {
                mlx5_core_err(mdev, "alloc td failed, %d\n", err);
                goto err_dealloc_pd;
        }

        err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
        if (err) {
                mlx5_core_err(mdev, "create mkey failed, %d\n", err);
                goto err_dealloc_transport_domain;
        }

        err = mlx5e_create_tises(priv);
        if (err) {
                mlx5_core_warn(mdev, "create tises failed, %d\n", err);
                goto err_destroy_mkey;
        }

        err = mlx5e_open_drop_rq(priv);
        if (err) {
                mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
                goto err_destroy_tises;
        }

        err = mlx5e_create_rqt(priv, MLX5E_INDIRECTION_RQT);
        if (err) {
                mlx5_core_warn(mdev, "create rqt(INDIR) failed, %d\n", err);
                goto err_close_drop_rq;
        }

        err = mlx5e_create_rqt(priv, MLX5E_SINGLE_RQ_RQT);
        if (err) {
                mlx5_core_warn(mdev, "create rqt(SINGLE) failed, %d\n", err);
                goto err_destroy_rqt_indir;
        }

        err = mlx5e_create_tirs(priv);
        if (err) {
                mlx5_core_warn(mdev, "create tirs failed, %d\n", err);
                goto err_destroy_rqt_single;
        }

        err = mlx5e_create_flow_tables(priv);
        if (err) {
                mlx5_core_warn(mdev, "create flow tables failed, %d\n", err);
                goto err_destroy_tirs;
        }

        mlx5e_init_eth_addr(priv);

        err = register_netdev(netdev);
        if (err) {
                mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
                goto err_destroy_flow_tables;
        }

        mlx5e_enable_async_events(priv);
        schedule_work(&priv->set_rx_mode_work);

        return priv;

err_destroy_flow_tables:
        mlx5e_destroy_flow_tables(priv);

err_destroy_tirs:
        mlx5e_destroy_tirs(priv);

err_destroy_rqt_single:
        mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);

err_destroy_rqt_indir:
        mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);

err_close_drop_rq:
        mlx5e_close_drop_rq(priv);

err_destroy_tises:
        mlx5e_destroy_tises(priv);

err_destroy_mkey:
        mlx5_core_destroy_mkey(mdev, &priv->mr);

err_dealloc_transport_domain:
        mlx5_core_dealloc_transport_domain(mdev, priv->tdn);

err_dealloc_pd:
        mlx5_core_dealloc_pd(mdev, priv->pdn);

err_unmap_free_uar:
        mlx5_unmap_free_uar(mdev, &priv->cq_uar);

err_free_netdev:
        free_netdev(netdev);

        return NULL;
}

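/*
 * Teardown mirrors mlx5e_create_netdev in reverse.  The DESTROYING bit
 * is set before kicking set_rx_mode_work one last time so that the work
 * (which checks this bit) tears down the RX steering state, and
 * flush_scheduled_work() ensures no async work is left running once the
 * resources go away.
 */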
static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
{
        struct mlx5e_priv *priv = vpriv;
        struct net_device *netdev = priv->netdev;

        set_bit(MLX5E_STATE_DESTROYING, &priv->state);

        schedule_work(&priv->set_rx_mode_work);
        mlx5e_disable_async_events(priv);
        flush_scheduled_work();
        unregister_netdev(netdev);
        mlx5e_destroy_flow_tables(priv);
        mlx5e_destroy_tirs(priv);
        mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
        mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
        mlx5e_close_drop_rq(priv);
        mlx5e_destroy_tises(priv);
        mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
        mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn);
        mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
        mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
        free_netdev(netdev);
}

static void *mlx5e_get_netdev(void *vpriv)
{
        struct mlx5e_priv *priv = vpriv;

        return priv->netdev;
}

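/* Glue to the mlx5 core: .add/.remove are invoked once per core device,
 * and .event forwards asynchronous firmware events to the netdev.
 */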
static struct mlx5_interface mlx5e_interface = {
        .add       = mlx5e_create_netdev,
        .remove    = mlx5e_destroy_netdev,
        .event     = mlx5e_async_event,
        .protocol  = MLX5_INTERFACE_PROTOCOL_ETH,
        .get_dev   = mlx5e_get_netdev,
};

void mlx5e_init(void)
{
        mlx5_register_interface(&mlx5e_interface);
}

void mlx5e_cleanup(void)
{
        mlx5_unregister_interface(&mlx5e_interface);
}