mlxsw: spectrum: Expose per-tc counters via ethtool
[cascardo/linux.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum.c
1 /*
2  * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
3  * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4  * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5  * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6  * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/netdevice.h>
41 #include <linux/etherdevice.h>
42 #include <linux/ethtool.h>
43 #include <linux/slab.h>
44 #include <linux/device.h>
45 #include <linux/skbuff.h>
46 #include <linux/if_vlan.h>
47 #include <linux/if_bridge.h>
48 #include <linux/workqueue.h>
49 #include <linux/jiffies.h>
50 #include <linux/bitops.h>
51 #include <linux/list.h>
52 #include <linux/notifier.h>
53 #include <linux/dcbnl.h>
54 #include <linux/inetdevice.h>
55 #include <net/switchdev.h>
56 #include <generated/utsrelease.h>
57
58 #include "spectrum.h"
59 #include "core.h"
60 #include "reg.h"
61 #include "port.h"
62 #include "trap.h"
63 #include "txheader.h"
64
/* Driver identity strings reported via ethtool get_drvinfo. */
static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

/* Tx header field accessors. The header is prepended to every packet
 * handed to the device; field layout is fixed by the hardware.
 */

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
135
136 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
137                                      const struct mlxsw_tx_info *tx_info)
138 {
139         char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
140
141         memset(txhdr, 0, MLXSW_TXHDR_LEN);
142
143         mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
144         mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
145         mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
146         mlxsw_tx_hdr_swid_set(txhdr, 0);
147         mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
148         mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
149         mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
150 }
151
152 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
153 {
154         char spad_pl[MLXSW_REG_SPAD_LEN];
155         int err;
156
157         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
158         if (err)
159                 return err;
160         mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
161         return 0;
162 }
163
164 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
165                                           bool is_up)
166 {
167         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
168         char paos_pl[MLXSW_REG_PAOS_LEN];
169
170         mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
171                             is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
172                             MLXSW_PORT_ADMIN_STATUS_DOWN);
173         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
174 }
175
176 static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
177                                          bool *p_is_up)
178 {
179         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
180         char paos_pl[MLXSW_REG_PAOS_LEN];
181         u8 oper_status;
182         int err;
183
184         mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
185         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
186         if (err)
187                 return err;
188         oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
189         *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
190         return 0;
191 }
192
193 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
194                                       unsigned char *addr)
195 {
196         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
197         char ppad_pl[MLXSW_REG_PPAD_LEN];
198
199         mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
200         mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
201         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
202 }
203
204 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
205 {
206         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
207         unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
208
209         ether_addr_copy(addr, mlxsw_sp->base_mac);
210         addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
211         return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
212 }
213
214 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
215 {
216         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
217         char pmtu_pl[MLXSW_REG_PMTU_LEN];
218         int max_mtu;
219         int err;
220
221         mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
222         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
223         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
224         if (err)
225                 return err;
226         max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
227
228         if (mtu > max_mtu)
229                 return -EINVAL;
230
231         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
232         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
233 }
234
/* Assign @local_port to switch partition @swid via the PSPA register. */
static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}
243
244 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
245 {
246         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
247
248         return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
249                                         swid);
250 }
251
/* Enable or disable Virtual Port (VP) mode on a port via SVPE. In VP
 * mode, FID classification uses explicit {Port, VID} mappings.
 */
static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}
261
/* Create (@valid true) or remove (@valid false) a VID to FID mapping of
 * type @mt for the port, using the SVFA register. Non-static: also used
 * by other parts of the driver.
 */
int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
			    fid, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}
273
274 static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
275                                           u16 vid, bool learn_enable)
276 {
277         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
278         char *spvmlr_pl;
279         int err;
280
281         spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
282         if (!spvmlr_pl)
283                 return -ENOMEM;
284         mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
285                               learn_enable);
286         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
287         kfree(spvmlr_pl);
288         return err;
289 }
290
/* Map the port's system port to its local port via the SSPR register. */
static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}
300
301 static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
302                                          u8 local_port, u8 *p_module,
303                                          u8 *p_width, u8 *p_lane)
304 {
305         char pmlp_pl[MLXSW_REG_PMLP_LEN];
306         int err;
307
308         mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
309         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
310         if (err)
311                 return err;
312         *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
313         *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
314         *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
315         return 0;
316 }
317
318 static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
319                                     u8 module, u8 width, u8 lane)
320 {
321         char pmlp_pl[MLXSW_REG_PMLP_LEN];
322         int i;
323
324         mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
325         mlxsw_reg_pmlp_width_set(pmlp_pl, width);
326         for (i = 0; i < width; i++) {
327                 mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
328                 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
329         }
330
331         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
332 }
333
334 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
335 {
336         char pmlp_pl[MLXSW_REG_PMLP_LEN];
337
338         mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
339         mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
340         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
341 }
342
343 static int mlxsw_sp_port_open(struct net_device *dev)
344 {
345         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
346         int err;
347
348         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
349         if (err)
350                 return err;
351         netif_start_queue(dev);
352         return 0;
353 }
354
355 static int mlxsw_sp_port_stop(struct net_device *dev)
356 {
357         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
358
359         netif_stop_queue(dev);
360         return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
361 }
362
363 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
364                                       struct net_device *dev)
365 {
366         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
367         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
368         struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
369         const struct mlxsw_tx_info tx_info = {
370                 .local_port = mlxsw_sp_port->local_port,
371                 .is_emad = false,
372         };
373         u64 len;
374         int err;
375
376         if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
377                 return NETDEV_TX_BUSY;
378
379         if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
380                 struct sk_buff *skb_orig = skb;
381
382                 skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
383                 if (!skb) {
384                         this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
385                         dev_kfree_skb_any(skb_orig);
386                         return NETDEV_TX_OK;
387                 }
388         }
389
390         if (eth_skb_pad(skb)) {
391                 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
392                 return NETDEV_TX_OK;
393         }
394
395         mlxsw_sp_txhdr_construct(skb, &tx_info);
396         /* TX header is consumed by HW on the way so we shouldn't count its
397          * bytes as being sent.
398          */
399         len = skb->len - MLXSW_TXHDR_LEN;
400
401         /* Due to a race we might fail here because of a full queue. In that
402          * unlikely case we simply drop the packet.
403          */
404         err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);
405
406         if (!err) {
407                 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
408                 u64_stats_update_begin(&pcpu_stats->syncp);
409                 pcpu_stats->tx_packets++;
410                 pcpu_stats->tx_bytes += len;
411                 u64_stats_update_end(&pcpu_stats->syncp);
412         } else {
413                 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
414                 dev_kfree_skb_any(skb);
415         }
416         return NETDEV_TX_OK;
417 }
418
/* netdev .ndo_set_rx_mode: intentionally empty. NOTE(review): presumably
 * the device needs no rx-mode programming here — confirm against the
 * switchdev model before adding logic.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
422
423 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
424 {
425         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
426         struct sockaddr *addr = p;
427         int err;
428
429         if (!is_valid_ether_addr(addr->sa_data))
430                 return -EADDRNOTAVAIL;
431
432         err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
433         if (err)
434                 return err;
435         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
436         return 0;
437 }
438
439 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
440                                  bool pause_en, bool pfc_en, u16 delay)
441 {
442         u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);
443
444         delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
445                          MLXSW_SP_PAUSE_DELAY;
446
447         if (pause_en || pfc_en)
448                 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
449                                                     pg_size + delay, pg_size);
450         else
451                 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
452 }
453
/* Size the port headroom buffers for @mtu. Each priority group (PG)
 * buffer that has at least one priority mapped to it (via @prio_tc) is
 * reconfigured; a PG is lossless if global pause is enabled or if PFC is
 * enabled for any priority mapped to it.
 */
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	/* Read the current buffer configuration so untouched PGs keep
	 * their settings when the register is written back.
	 */
	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;

		/* Only configure PG i if some priority j maps to it. */
		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}
488
489 static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
490                                       int mtu, bool pause_en)
491 {
492         u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
493         bool dcb_en = !!mlxsw_sp_port->dcb.ets;
494         struct ieee_pfc *my_pfc;
495         u8 *prio_tc;
496
497         prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
498         my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
499
500         return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
501                                             pause_en, my_pfc);
502 }
503
504 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
505 {
506         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
507         bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
508         int err;
509
510         err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
511         if (err)
512                 return err;
513         err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
514         if (err)
515                 goto err_port_mtu_set;
516         dev->mtu = mtu;
517         return 0;
518
519 err_port_mtu_set:
520         mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
521         return err;
522 }
523
/* netdev .ndo_get_stats64: sum the per-CPU software counters into
 * @stats. The u64 counters are read under the u64_stats seqcount so a
 * concurrent writer cannot be observed mid-update on 32-bit hosts.
 */
static struct rtnl_link_stats64 *
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		/* Retry the snapshot if the writer raced with us. */
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped	+= p->tx_dropped;
	}
	stats->tx_dropped	= tx_dropped;
	return stats;
}
555
556 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
557                            u16 vid_end, bool is_member, bool untagged)
558 {
559         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
560         char *spvm_pl;
561         int err;
562
563         spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
564         if (!spvm_pl)
565                 return -ENOMEM;
566
567         mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
568                             vid_end, is_member, untagged);
569         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
570         kfree(spvm_pl);
571         return err;
572 }
573
/* Transition the port to Virtual mode: install an explicit {Port, VID}
 * to FID mapping for every active VLAN, then enable VP mode. On failure
 * the mappings installed so far are removed (the rollback loop iterates
 * active VLANs below last_visited_vid, which excludes the VID that
 * failed; VLAN_N_VID is used to roll back everything when enabling VP
 * mode itself fails).
 */
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}
603
604 static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
605 {
606         enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
607         u16 vid;
608         int err;
609
610         err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
611         if (err)
612                 return err;
613
614         for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
615                 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
616                                                    vid, vid);
617                 if (err)
618                         return err;
619         }
620
621         return 0;
622 }
623
/* Allocate a vPort for @vid on @mlxsw_sp_port and link it into the
 * port's vPort list. Returns NULL on allocation failure. The caller
 * owns the vPort and releases it with mlxsw_sp_port_vport_destroy().
 */
static struct mlxsw_sp_port *
mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
	if (!mlxsw_sp_vport)
		return NULL;

	/* dev will be set correctly after the VLAN device is linked
	 * with the real device. In case of bridge SELF invocation, dev
	 * will remain as is.
	 */
	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
	mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
	mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
	mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
	mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
	mlxsw_sp_vport->vport.vid = vid;

	list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);

	return mlxsw_sp_vport;
}
649
/* Unlink a vPort from its parent port's list and free it. */
static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	list_del(&mlxsw_sp_vport->vport.list);
	kfree(mlxsw_sp_vport);
}
655
/* netdev .ndo_vlan_rx_add_vid: create a vPort for @vid and program the
 * port's VLAN membership. VID 1 is installed egress-untagged. Errors
 * unwind in reverse order of the setup steps.
 */
int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
			  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	bool untagged = vid == 1;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
		netdev_warn(dev, "VID=%d already configured\n", vid);
		return 0;
	}

	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
		return -ENOMEM;
	}

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to Virtual mode\n");
			goto err_port_vp_mode_trans;
		}
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
	if (err) {
		netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
		goto err_port_vid_learning_set;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		goto err_port_add_vid;
	}

	return 0;

err_port_add_vid:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
err_port_vid_learning_set:
	/* The vPort being destroyed is still on the list, so a singular
	 * list means it was the only VLAN interface — undo the mode
	 * transition as well.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
	return err;
}
717
/* netdev .ndo_vlan_rx_kill_vid: tear down the vPort created for @vid —
 * remove VLAN membership, re-enable learning, drop the FID reference
 * and destroy the vPort.
 */
static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;
	int err;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		netdev_warn(dev, "VID=%d does not exist\n", vid);
		return 0;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
		return err;
	}

	/* Drop FID reference. If this was the last reference the
	 * resources will be freed.
	 */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set port's mode to VLAN mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to VLAN mode\n");
			return err;
		}
	}

	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);

	return 0;
}
774
775 static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
776                                             size_t len)
777 {
778         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
779         u8 module = mlxsw_sp_port->mapping.module;
780         u8 width = mlxsw_sp_port->mapping.width;
781         u8 lane = mlxsw_sp_port->mapping.lane;
782         int err;
783
784         if (!mlxsw_sp_port->split)
785                 err = snprintf(name, len, "p%d", module + 1);
786         else
787                 err = snprintf(name, len, "p%ds%d", module + 1,
788                                lane / width);
789
790         if (err >= len)
791                 return -EINVAL;
792
793         return 0;
794 }
795
/* Netdev callbacks for Spectrum ports. FDB and bridge operations are
 * delegated to the common switchdev helpers.
 */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_neigh_construct	= mlxsw_sp_router_neigh_construct,
	.ndo_neigh_destroy	= mlxsw_sp_router_neigh_destroy,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
};
816
817 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
818                                       struct ethtool_drvinfo *drvinfo)
819 {
820         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
821         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
822
823         strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
824         strlcpy(drvinfo->version, mlxsw_sp_driver_version,
825                 sizeof(drvinfo->version));
826         snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
827                  "%d.%d.%d",
828                  mlxsw_sp->bus_info->fw_rev.major,
829                  mlxsw_sp->bus_info->fw_rev.minor,
830                  mlxsw_sp->bus_info->fw_rev.subminor);
831         strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
832                 sizeof(drvinfo->bus_info));
833 }
834
835 static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
836                                          struct ethtool_pauseparam *pause)
837 {
838         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
839
840         pause->rx_pause = mlxsw_sp_port->link.rx_pause;
841         pause->tx_pause = mlxsw_sp_port->link.tx_pause;
842 }
843
844 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
845                                    struct ethtool_pauseparam *pause)
846 {
847         char pfcc_pl[MLXSW_REG_PFCC_LEN];
848
849         mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
850         mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
851         mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
852
853         return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
854                                pfcc_pl);
855 }
856
/* ethtool -A: enable/disable global PAUSE on the port.
 * Rejects the request while PFC is enabled, then resizes the port's
 * headroom buffer before touching the PAUSE configuration; on failure
 * the headroom change is rolled back.
 */
static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	/* Global PAUSE and per-priority PFC are mutually exclusive. */
	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	/* Headroom is updated first so buffering matches the requested
	 * PAUSE state before PAUSE admission itself is changed.
	 */
	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	/* Cache the new state for get_pauseparam(). */
	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	/* Restore headroom to match the still-active configuration. */
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}
896
/* Descriptor for one ethtool HW counter: its user-visible name and a
 * getter that extracts the value from a queried PPCNT register payload.
 */
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(char *payload);
};
901
/* IEEE 802.3 counter group of the PPCNT register, exposed once per port. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
982
/* Per-priority counter group of the PPCNT register; each entry is
 * exposed once per priority (0..IEEE_8021QAZ_MAX_TCS-1), with the
 * priority appended to the string by mlxsw_sp_port_get_prio_strings().
 */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
	{
		.str = "rx_octets_prio",
		.getter = mlxsw_reg_ppcnt_rx_octets_get,
	},
	{
		.str = "rx_frames_prio",
		.getter = mlxsw_reg_ppcnt_rx_frames_get,
	},
	{
		.str = "tx_octets_prio",
		.getter = mlxsw_reg_ppcnt_tx_octets_get,
	},
	{
		.str = "tx_frames_prio",
		.getter = mlxsw_reg_ppcnt_tx_frames_get,
	},
	{
		.str = "rx_pause_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_get,
	},
	{
		.str = "rx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
	},
	{
		.str = "tx_pause_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_get,
	},
	{
		.str = "tx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
	},
};

#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
1019
1020 static u64 mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get(char *ppcnt_pl)
1021 {
1022         u64 transmit_queue = mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
1023
1024         return MLXSW_SP_CELLS_TO_BYTES(transmit_queue);
1025 }
1026
/* Per-traffic-class counter group of the PPCNT register; each entry is
 * exposed once per TC, with the TC number appended to the string by
 * mlxsw_sp_port_get_tc_strings().
 */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
	{
		.str = "tc_transmit_queue_tc",
		.getter = mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get,
	},
	{
		.str = "tc_no_buffer_discard_uc_tc",
		.getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
	},
};

#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)

/* Total ethtool stats count: one IEEE group plus a prio group and a TC
 * group replicated for each of the eight priorities/TCs. Must stay in
 * sync with mlxsw_sp_port_get_strings()/mlxsw_sp_port_get_stats().
 */
#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
					 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
					  MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
					 IEEE_8021QAZ_MAX_TCS)
1044
1045 static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
1046 {
1047         int i;
1048
1049         for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
1050                 snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
1051                          mlxsw_sp_port_hw_prio_stats[i].str, prio);
1052                 *p += ETH_GSTRING_LEN;
1053         }
1054 }
1055
1056 static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
1057 {
1058         int i;
1059
1060         for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
1061                 snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
1062                          mlxsw_sp_port_hw_tc_stats[i].str, tc);
1063                 *p += ETH_GSTRING_LEN;
1064         }
1065 }
1066
1067 static void mlxsw_sp_port_get_strings(struct net_device *dev,
1068                                       u32 stringset, u8 *data)
1069 {
1070         u8 *p = data;
1071         int i;
1072
1073         switch (stringset) {
1074         case ETH_SS_STATS:
1075                 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
1076                         memcpy(p, mlxsw_sp_port_hw_stats[i].str,
1077                                ETH_GSTRING_LEN);
1078                         p += ETH_GSTRING_LEN;
1079                 }
1080
1081                 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
1082                         mlxsw_sp_port_get_prio_strings(&p, i);
1083
1084                 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
1085                         mlxsw_sp_port_get_tc_strings(&p, i);
1086
1087                 break;
1088         }
1089 }
1090
1091 static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
1092                                      enum ethtool_phys_id_state state)
1093 {
1094         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1095         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1096         char mlcr_pl[MLXSW_REG_MLCR_LEN];
1097         bool active;
1098
1099         switch (state) {
1100         case ETHTOOL_ID_ACTIVE:
1101                 active = true;
1102                 break;
1103         case ETHTOOL_ID_INACTIVE:
1104                 active = false;
1105                 break;
1106         default:
1107                 return -EOPNOTSUPP;
1108         }
1109
1110         mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
1111         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
1112 }
1113
1114 static int
1115 mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
1116                                int *p_len, enum mlxsw_reg_ppcnt_grp grp)
1117 {
1118         switch (grp) {
1119         case  MLXSW_REG_PPCNT_IEEE_8023_CNT:
1120                 *p_hw_stats = mlxsw_sp_port_hw_stats;
1121                 *p_len = MLXSW_SP_PORT_HW_STATS_LEN;
1122                 break;
1123         case MLXSW_REG_PPCNT_PRIO_CNT:
1124                 *p_hw_stats = mlxsw_sp_port_hw_prio_stats;
1125                 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
1126                 break;
1127         case MLXSW_REG_PPCNT_TC_CNT:
1128                 *p_hw_stats = mlxsw_sp_port_hw_tc_stats;
1129                 *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
1130                 break;
1131         default:
1132                 WARN_ON(1);
1133                 return -ENOTSUPP;
1134         }
1135         return 0;
1136 }
1137
1138 static void __mlxsw_sp_port_get_stats(struct net_device *dev,
1139                                       enum mlxsw_reg_ppcnt_grp grp, int prio,
1140                                       u64 *data, int data_index)
1141 {
1142         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1143         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1144         struct mlxsw_sp_port_hw_stats *hw_stats;
1145         char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
1146         int i, len;
1147         int err;
1148
1149         err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
1150         if (err)
1151                 return;
1152         mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
1153         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
1154         for (i = 0; i < len; i++)
1155                 data[data_index + i] = !err ? hw_stats[i].getter(ppcnt_pl) : 0;
1156 }
1157
1158 static void mlxsw_sp_port_get_stats(struct net_device *dev,
1159                                     struct ethtool_stats *stats, u64 *data)
1160 {
1161         int i, data_index = 0;
1162
1163         /* IEEE 802.3 Counters */
1164         __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
1165                                   data, data_index);
1166         data_index = MLXSW_SP_PORT_HW_STATS_LEN;
1167
1168         /* Per-Priority Counters */
1169         for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1170                 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
1171                                           data, data_index);
1172                 data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
1173         }
1174
1175         /* Per-TC Counters */
1176         for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1177                 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
1178                                           data, data_index);
1179                 data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
1180         }
1181 }
1182
1183 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
1184 {
1185         switch (sset) {
1186         case ETH_SS_STATS:
1187                 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
1188         default:
1189                 return -EOPNOTSUPP;
1190         }
1191 }
1192
/* Mapping between a PTYS register protocol bitmask and the matching
 * ethtool SUPPORTED_*/ADVERTISED_* bits and speed in Mb/s.
 */
struct mlxsw_sp_port_link_mode {
	u32 mask;
	u32 supported;
	u32 advertised;
	u32 speed;
};
1199
/* PTYS <-> ethtool link mode table. Entries with no .supported /
 * .advertised value have no matching legacy ethtool bit and are only
 * used for speed reporting.
 */
static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported	= SUPPORTED_100baseT_Full,
		.advertised	= ADVERTISED_100baseT_Full,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported	= SUPPORTED_1000baseKX_Full,
		.advertised	= ADVERTISED_1000baseKX_Full,
		.speed		= 1000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported	= SUPPORTED_10000baseT_Full,
		.advertised	= ADVERTISED_10000baseT_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported	= SUPPORTED_10000baseKX4_Full,
		.advertised	= ADVERTISED_10000baseKX4_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported	= SUPPORTED_10000baseKR_Full,
		.advertised	= ADVERTISED_10000baseKR_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported	= SUPPORTED_20000baseKR2_Full,
		.advertised	= ADVERTISED_20000baseKR2_Full,
		.speed		= 20000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported	= SUPPORTED_40000baseCR4_Full,
		.advertised	= ADVERTISED_40000baseCR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported	= SUPPORTED_40000baseKR4_Full,
		.advertised	= ADVERTISED_40000baseKR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported	= SUPPORTED_40000baseSR4_Full,
		.advertised	= ADVERTISED_40000baseSR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported	= SUPPORTED_40000baseLR4_Full,
		.advertised	= ADVERTISED_40000baseLR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed		= 25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed		= 50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.supported	= SUPPORTED_56000baseKR4_Full,
		.advertised	= ADVERTISED_56000baseKR4_Full,
		.speed		= 56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed		= 100000,
	},
};

#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
1298
1299 static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
1300 {
1301         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1302                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1303                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1304                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1305                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1306                               MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1307                 return SUPPORTED_FIBRE;
1308
1309         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1310                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1311                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1312                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
1313                               MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
1314                 return SUPPORTED_Backplane;
1315         return 0;
1316 }
1317
1318 static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
1319 {
1320         u32 modes = 0;
1321         int i;
1322
1323         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1324                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1325                         modes |= mlxsw_sp_port_link_mode[i].supported;
1326         }
1327         return modes;
1328 }
1329
1330 static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
1331 {
1332         u32 modes = 0;
1333         int i;
1334
1335         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1336                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1337                         modes |= mlxsw_sp_port_link_mode[i].advertised;
1338         }
1339         return modes;
1340 }
1341
1342 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
1343                                             struct ethtool_cmd *cmd)
1344 {
1345         u32 speed = SPEED_UNKNOWN;
1346         u8 duplex = DUPLEX_UNKNOWN;
1347         int i;
1348
1349         if (!carrier_ok)
1350                 goto out;
1351
1352         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1353                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
1354                         speed = mlxsw_sp_port_link_mode[i].speed;
1355                         duplex = DUPLEX_FULL;
1356                         break;
1357                 }
1358         }
1359 out:
1360         ethtool_cmd_speed_set(cmd, speed);
1361         cmd->duplex = duplex;
1362 }
1363
1364 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
1365 {
1366         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1367                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1368                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1369                               MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1370                 return PORT_FIBRE;
1371
1372         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1373                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1374                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
1375                 return PORT_DA;
1376
1377         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1378                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1379                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1380                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
1381                 return PORT_NONE;
1382
1383         return PORT_OTHER;
1384 }
1385
/* ethtool get_settings: query the PTYS register and translate its
 * capability/admin/operational protocol masks into ethtool fields.
 */
static int mlxsw_sp_port_get_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 eth_proto_oper;
	int err;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
			      &eth_proto_admin, &eth_proto_oper);

	/* Capability mask drives "supported", admin mask "advertising". */
	cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
			 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
			 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
					eth_proto_oper, cmd);

	/* Without link there is no operational mask; fall back to the
	 * capability mask for connector/lp_advertising reporting.
	 */
	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
	cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
	cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);

	cmd->transceiver = XCVR_INTERNAL;
	return 0;
}
1420
1421 static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
1422 {
1423         u32 ptys_proto = 0;
1424         int i;
1425
1426         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1427                 if (advertising & mlxsw_sp_port_link_mode[i].advertised)
1428                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1429         }
1430         return ptys_proto;
1431 }
1432
1433 static u32 mlxsw_sp_to_ptys_speed(u32 speed)
1434 {
1435         u32 ptys_proto = 0;
1436         int i;
1437
1438         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1439                 if (speed == mlxsw_sp_port_link_mode[i].speed)
1440                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1441         }
1442         return ptys_proto;
1443 }
1444
1445 static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
1446 {
1447         u32 ptys_proto = 0;
1448         int i;
1449
1450         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1451                 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
1452                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1453         }
1454         return ptys_proto;
1455 }
1456
/* ethtool set_settings: translate the request into a PTYS admin mask,
 * write it, and if the port is operationally up bounce the admin state
 * (down then up) so the new protocol set takes effect.
 */
static int mlxsw_sp_port_set_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 speed;
	u32 eth_proto_new;
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	bool is_up;
	int err;

	speed = ethtool_cmd_speed(cmd);

	/* With autoneg the advertised modes define the mask; with forced
	 * speed, every mode matching that exact speed is selected.
	 */
	eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
		mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
		mlxsw_sp_to_ptys_speed(speed);

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);

	/* Only modes the device is capable of may be requested. */
	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "Not supported proto admin requested");
		return -EINVAL;
	}
	if (eth_proto_new == eth_proto_admin)
		return 0;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to set proto admin");
		return err;
	}

	/* No need to re-train the link if it is not currently up. */
	err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
	if (err) {
		netdev_err(dev, "Failed to get oper status");
		return err;
	}
	if (!is_up)
		return 0;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	return 0;
}
1521
/* ethtool operations for a Spectrum front-panel port. */
static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= mlxsw_sp_port_get_pauseparam,
	.set_pauseparam		= mlxsw_sp_port_set_pauseparam,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_settings		= mlxsw_sp_port_get_settings,
	.set_settings		= mlxsw_sp_port_set_settings,
};
1534
1535 static int
1536 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
1537 {
1538         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1539         u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
1540         char ptys_pl[MLXSW_REG_PTYS_LEN];
1541         u32 eth_proto_admin;
1542
1543         eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
1544         mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port,
1545                             eth_proto_admin);
1546         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1547 }
1548
/* Configure an ETS scheduling element via the QEEC register: element
 * @index at hierarchy level @hr is linked to @next_index in the next
 * level, with strict-priority or DWRR arbitration and the given weight.
 * Returns the register-write result.
 */
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	/* "de" enables updating the arbitration fields of this element. */
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
1563
/* Set the maximum shaper rate of an ETS element via the QEEC register.
 * Returns the register-write result.
 */
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	/* "mase" enables updating the max shaper fields of this element. */
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
1577
/* Map switch priority @switch_prio to traffic class @tclass on the
 * port via the QTCT register. Returns the register-write result.
 */
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}
1588
/* Bring the port's ETS scheduling hierarchy to a known default state:
 * a single group, one subgroup and one TC element per 802.1Qaz traffic
 * class, all shapers disabled, and every priority mapped to TC 0.
 * Returns 0 on success or the first failing register write's errno.
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Set up the element hierarchy, so that each TC is linked to
	 * one subgroup, which are all members of the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
				    0);
	if (err)
		return err;
	/* One subgroup per TC, all attached to group 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	/* Each TC element is attached to its same-numbered subgroup. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
					    false, 0);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that
	 * support it.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
1650
1651 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1652                                 bool split, u8 module, u8 width, u8 lane)
1653 {
1654         struct mlxsw_sp_port *mlxsw_sp_port;
1655         struct net_device *dev;
1656         size_t bytes;
1657         int err;
1658
1659         dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
1660         if (!dev)
1661                 return -ENOMEM;
1662         mlxsw_sp_port = netdev_priv(dev);
1663         mlxsw_sp_port->dev = dev;
1664         mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1665         mlxsw_sp_port->local_port = local_port;
1666         mlxsw_sp_port->split = split;
1667         mlxsw_sp_port->mapping.module = module;
1668         mlxsw_sp_port->mapping.width = width;
1669         mlxsw_sp_port->mapping.lane = lane;
1670         bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
1671         mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
1672         if (!mlxsw_sp_port->active_vlans) {
1673                 err = -ENOMEM;
1674                 goto err_port_active_vlans_alloc;
1675         }
1676         mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
1677         if (!mlxsw_sp_port->untagged_vlans) {
1678                 err = -ENOMEM;
1679                 goto err_port_untagged_vlans_alloc;
1680         }
1681         INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
1682
1683         mlxsw_sp_port->pcpu_stats =
1684                 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
1685         if (!mlxsw_sp_port->pcpu_stats) {
1686                 err = -ENOMEM;
1687                 goto err_alloc_stats;
1688         }
1689
1690         dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
1691         dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
1692
1693         err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
1694         if (err) {
1695                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
1696                         mlxsw_sp_port->local_port);
1697                 goto err_dev_addr_init;
1698         }
1699
1700         netif_carrier_off(dev);
1701
1702         dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
1703                          NETIF_F_HW_VLAN_CTAG_FILTER;
1704
1705         /* Each packet needs to have a Tx header (metadata) on top all other
1706          * headers.
1707          */
1708         dev->hard_header_len += MLXSW_TXHDR_LEN;
1709
1710         err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
1711         if (err) {
1712                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
1713                         mlxsw_sp_port->local_port);
1714                 goto err_port_system_port_mapping_set;
1715         }
1716
1717         err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
1718         if (err) {
1719                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
1720                         mlxsw_sp_port->local_port);
1721                 goto err_port_swid_set;
1722         }
1723
1724         err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
1725         if (err) {
1726                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
1727                         mlxsw_sp_port->local_port);
1728                 goto err_port_speed_by_width_set;
1729         }
1730
1731         err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
1732         if (err) {
1733                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
1734                         mlxsw_sp_port->local_port);
1735                 goto err_port_mtu_set;
1736         }
1737
1738         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1739         if (err)
1740                 goto err_port_admin_status_set;
1741
1742         err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
1743         if (err) {
1744                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
1745                         mlxsw_sp_port->local_port);
1746                 goto err_port_buffers_init;
1747         }
1748
1749         err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
1750         if (err) {
1751                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
1752                         mlxsw_sp_port->local_port);
1753                 goto err_port_ets_init;
1754         }
1755
1756         /* ETS and buffers must be initialized before DCB. */
1757         err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
1758         if (err) {
1759                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
1760                         mlxsw_sp_port->local_port);
1761                 goto err_port_dcb_init;
1762         }
1763
1764         mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
1765         err = register_netdev(dev);
1766         if (err) {
1767                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
1768                         mlxsw_sp_port->local_port);
1769                 goto err_register_netdev;
1770         }
1771
1772         err = mlxsw_core_port_init(mlxsw_sp->core, &mlxsw_sp_port->core_port,
1773                                    mlxsw_sp_port->local_port, dev,
1774                                    mlxsw_sp_port->split, module);
1775         if (err) {
1776                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
1777                         mlxsw_sp_port->local_port);
1778                 goto err_core_port_init;
1779         }
1780
1781         err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
1782         if (err)
1783                 goto err_port_vlan_init;
1784
1785         mlxsw_sp->ports[local_port] = mlxsw_sp_port;
1786         return 0;
1787
1788 err_port_vlan_init:
1789         mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
1790 err_core_port_init:
1791         unregister_netdev(dev);
1792 err_register_netdev:
1793 err_port_dcb_init:
1794 err_port_ets_init:
1795 err_port_buffers_init:
1796 err_port_admin_status_set:
1797 err_port_mtu_set:
1798 err_port_speed_by_width_set:
1799 err_port_swid_set:
1800 err_port_system_port_mapping_set:
1801 err_dev_addr_init:
1802         free_percpu(mlxsw_sp_port->pcpu_stats);
1803 err_alloc_stats:
1804         kfree(mlxsw_sp_port->untagged_vlans);
1805 err_port_untagged_vlans_alloc:
1806         kfree(mlxsw_sp_port->active_vlans);
1807 err_port_active_vlans_alloc:
1808         free_netdev(dev);
1809         return err;
1810 }
1811
/* Tear down one port, undoing mlxsw_sp_port_create() in reverse order.
 * Safe to call for a slot that was never created (NULL entry).
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	if (!mlxsw_sp_port)
		return;
	/* Clear the slot before tearing the port down.
	 * NOTE(review): presumably so lookups (e.g. the RX path) stop
	 * finding the port while it is being destroyed — confirm against
	 * the driver's locking model.
	 */
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	/* Remove the PVID (VID 1) the port was initialized with. */
	mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	kfree(mlxsw_sp_port->untagged_vlans);
	kfree(mlxsw_sp_port->active_vlans);
	/* All vPorts should have been destroyed by ndo_stop/teardown. */
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
	free_netdev(mlxsw_sp_port->dev);
}
1832
1833 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
1834 {
1835         int i;
1836
1837         for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
1838                 mlxsw_sp_port_remove(mlxsw_sp, i);
1839         kfree(mlxsw_sp->ports);
1840 }
1841
1842 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
1843 {
1844         u8 module, width, lane;
1845         size_t alloc_size;
1846         int i;
1847         int err;
1848
1849         alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
1850         mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
1851         if (!mlxsw_sp->ports)
1852                 return -ENOMEM;
1853
1854         for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
1855                 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
1856                                                     &width, &lane);
1857                 if (err)
1858                         goto err_port_module_info_get;
1859                 if (!width)
1860                         continue;
1861                 mlxsw_sp->port_to_module[i] = module;
1862                 err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width,
1863                                            lane);
1864                 if (err)
1865                         goto err_port_create;
1866         }
1867         return 0;
1868
1869 err_port_create:
1870 err_port_module_info_get:
1871         for (i--; i >= 1; i--)
1872                 mlxsw_sp_port_remove(mlxsw_sp, i);
1873         kfree(mlxsw_sp->ports);
1874         return err;
1875 }
1876
1877 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
1878 {
1879         u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
1880
1881         return local_port - offset;
1882 }
1883
/* Create @count split ports starting at @base_port, sharing @module.
 *
 * Three stages, each over all new ports: map lanes to the module, assign
 * SWID 0, then create the netdevs.  On failure each completed stage is
 * unwound in full (the "i = count" resets restart the reverse loop over
 * the whole range, since earlier stages succeeded for every port).
 */
static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
				      u8 module, unsigned int count)
{
	/* Lanes of the module are shared equally between the new ports. */
	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
	int err, i;

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
					       width, i * width);
		if (err)
			goto err_port_module_map;
	}

	for (i = 0; i < count; i++) {
		err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
		if (err)
			goto err_port_swid_set;
	}

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
					   module, width, i * width);
		if (err)
			goto err_port_create;
	}

	return 0;

err_port_create:
	/* Remove the ports created before the failure... */
	for (i--; i >= 0; i--)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
	/* ...then unwind the two fully-completed stages for all ports. */
	i = count;
err_port_swid_set:
	for (i--; i >= 0; i--)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
					 MLXSW_PORT_SWID_DISABLED_PORT);
	i = count;
err_port_module_map:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
	return err;
}
1926
/* Re-create the original full-width ports after an unsplit (or after a
 * failed split).  Return values are deliberately not checked: this is a
 * best-effort restoration path with no way to report failure.
 */
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port, unsigned int count)
{
	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
	int i;

	/* Split by four means we need to re-create two ports, otherwise
	 * only one.
	 */
	count = count / 2;

	/* Full-width ports occupy every second local port number. */
	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
					 0);
	}

	for (i = 0; i < count; i++)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
				     width, 0);
	}
}
1957
/* devlink port-split handler: split @local_port into @count ports.
 *
 * Validates the request (count of 2 or 4, port at full width, the local
 * ports needed for the new split ports are free), removes the affected
 * ports and re-creates them split.  On failure the original full-width
 * ports are restored best-effort.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 module, cur_width, base_port;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	module = mlxsw_sp_port->mapping.module;
	cur_width = mlxsw_sp_port->mapping.width;

	if (count != 2 && count != 4) {
		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
		return -EINVAL;
	}

	/* Only a port currently using all module lanes can be split. */
	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		return -EINVAL;
	}

	/* Make sure we have enough slave (even) ports for the split. */
	if (count == 2) {
		base_port = local_port;
		if (mlxsw_sp->ports[base_port + 1]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	} else {
		base_port = mlxsw_sp_cluster_base_port_get(local_port);
		if (mlxsw_sp->ports[base_port + 1] ||
		    mlxsw_sp->ports[base_port + 3]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	}

	/* Remove the existing ports occupying the split range.  Note that
	 * mlxsw_sp_port (and its netdev) is freed by this, so it must not
	 * be dereferenced from here on.
	 */
	for (i = 0; i < count; i++)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	/* Best-effort restoration of the original full-width ports. */
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
	return err;
}
2018
/* devlink port-unsplit handler: merge split ports back to full width.
 *
 * The split factor is inferred from the port's current width (width 1
 * means it was split by four, otherwise by two).  The affected split
 * ports are removed and the original port(s) re-created best-effort.
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 cur_width, base_port;
	unsigned int count;
	int i;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
		return -EINVAL;
	}

	cur_width = mlxsw_sp_port->mapping.width;
	count = cur_width == 1 ? 4 : 2;

	base_port = mlxsw_sp_cluster_base_port_get(local_port);

	/* Determine which ports to remove. */
	if (count == 2 && local_port >= base_port + 2)
		base_port = base_port + 2;

	for (i = 0; i < count; i++)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);

	return 0;
}
2055
2056 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2057                                      char *pude_pl, void *priv)
2058 {
2059         struct mlxsw_sp *mlxsw_sp = priv;
2060         struct mlxsw_sp_port *mlxsw_sp_port;
2061         enum mlxsw_reg_pude_oper_status status;
2062         u8 local_port;
2063
2064         local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2065         mlxsw_sp_port = mlxsw_sp->ports[local_port];
2066         if (!mlxsw_sp_port)
2067                 return;
2068
2069         status = mlxsw_reg_pude_oper_status_get(pude_pl);
2070         if (status == MLXSW_PORT_OPER_STATUS_UP) {
2071                 netdev_info(mlxsw_sp_port->dev, "link up\n");
2072                 netif_carrier_on(mlxsw_sp_port->dev);
2073         } else {
2074                 netdev_info(mlxsw_sp_port->dev, "link down\n");
2075                 netif_carrier_off(mlxsw_sp_port->dev);
2076         }
2077 }
2078
/* Listener registered with the core for PUDE (port up/down) events;
 * dispatches to mlxsw_sp_pude_event_func().
 */
static struct mlxsw_event_listener mlxsw_sp_pude_event = {
	.func = mlxsw_sp_pude_event_func,
	.trap_id = MLXSW_TRAP_ID_PUDE,
};
2083
2084 static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
2085                                    enum mlxsw_event_trap_id trap_id)
2086 {
2087         struct mlxsw_event_listener *el;
2088         char hpkt_pl[MLXSW_REG_HPKT_LEN];
2089         int err;
2090
2091         switch (trap_id) {
2092         case MLXSW_TRAP_ID_PUDE:
2093                 el = &mlxsw_sp_pude_event;
2094                 break;
2095         }
2096         err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
2097         if (err)
2098                 return err;
2099
2100         mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
2101         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2102         if (err)
2103                 goto err_event_trap_set;
2104
2105         return 0;
2106
2107 err_event_trap_set:
2108         mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
2109         return err;
2110 }
2111
2112 static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
2113                                       enum mlxsw_event_trap_id trap_id)
2114 {
2115         struct mlxsw_event_listener *el;
2116
2117         switch (trap_id) {
2118         case MLXSW_TRAP_ID_PUDE:
2119                 el = &mlxsw_sp_pude_event;
2120                 break;
2121         }
2122         mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
2123 }
2124
2125 static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
2126                                       void *priv)
2127 {
2128         struct mlxsw_sp *mlxsw_sp = priv;
2129         struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2130         struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
2131
2132         if (unlikely(!mlxsw_sp_port)) {
2133                 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
2134                                      local_port);
2135                 return;
2136         }
2137
2138         skb->dev = mlxsw_sp_port->dev;
2139
2140         pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
2141         u64_stats_update_begin(&pcpu_stats->syncp);
2142         pcpu_stats->rx_packets++;
2143         pcpu_stats->rx_bytes += skb->len;
2144         u64_stats_update_end(&pcpu_stats->syncp);
2145
2146         skb->protocol = eth_type_trans(skb, skb->dev);
2147         netif_receive_skb(skb);
2148 }
2149
2150 static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
2151         {
2152                 .func = mlxsw_sp_rx_listener_func,
2153                 .local_port = MLXSW_PORT_DONT_CARE,
2154                 .trap_id = MLXSW_TRAP_ID_FDB_MC,
2155         },
2156         /* Traps for specific L2 packet types, not trapped as FDB MC */
2157         {
2158                 .func = mlxsw_sp_rx_listener_func,
2159                 .local_port = MLXSW_PORT_DONT_CARE,
2160                 .trap_id = MLXSW_TRAP_ID_STP,
2161         },
2162         {
2163                 .func = mlxsw_sp_rx_listener_func,
2164                 .local_port = MLXSW_PORT_DONT_CARE,
2165                 .trap_id = MLXSW_TRAP_ID_LACP,
2166         },
2167         {
2168                 .func = mlxsw_sp_rx_listener_func,
2169                 .local_port = MLXSW_PORT_DONT_CARE,
2170                 .trap_id = MLXSW_TRAP_ID_EAPOL,
2171         },
2172         {
2173                 .func = mlxsw_sp_rx_listener_func,
2174                 .local_port = MLXSW_PORT_DONT_CARE,
2175                 .trap_id = MLXSW_TRAP_ID_LLDP,
2176         },
2177         {
2178                 .func = mlxsw_sp_rx_listener_func,
2179                 .local_port = MLXSW_PORT_DONT_CARE,
2180                 .trap_id = MLXSW_TRAP_ID_MMRP,
2181         },
2182         {
2183                 .func = mlxsw_sp_rx_listener_func,
2184                 .local_port = MLXSW_PORT_DONT_CARE,
2185                 .trap_id = MLXSW_TRAP_ID_MVRP,
2186         },
2187         {
2188                 .func = mlxsw_sp_rx_listener_func,
2189                 .local_port = MLXSW_PORT_DONT_CARE,
2190                 .trap_id = MLXSW_TRAP_ID_RPVST,
2191         },
2192         {
2193                 .func = mlxsw_sp_rx_listener_func,
2194                 .local_port = MLXSW_PORT_DONT_CARE,
2195                 .trap_id = MLXSW_TRAP_ID_DHCP,
2196         },
2197         {
2198                 .func = mlxsw_sp_rx_listener_func,
2199                 .local_port = MLXSW_PORT_DONT_CARE,
2200                 .trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
2201         },
2202         {
2203                 .func = mlxsw_sp_rx_listener_func,
2204                 .local_port = MLXSW_PORT_DONT_CARE,
2205                 .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
2206         },
2207         {
2208                 .func = mlxsw_sp_rx_listener_func,
2209                 .local_port = MLXSW_PORT_DONT_CARE,
2210                 .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
2211         },
2212         {
2213                 .func = mlxsw_sp_rx_listener_func,
2214                 .local_port = MLXSW_PORT_DONT_CARE,
2215                 .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
2216         },
2217         {
2218                 .func = mlxsw_sp_rx_listener_func,
2219                 .local_port = MLXSW_PORT_DONT_CARE,
2220                 .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
2221         },
2222         {
2223                 .func = mlxsw_sp_rx_listener_func,
2224                 .local_port = MLXSW_PORT_DONT_CARE,
2225                 .trap_id = MLXSW_TRAP_ID_ARPBC,
2226         },
2227         {
2228                 .func = mlxsw_sp_rx_listener_func,
2229                 .local_port = MLXSW_PORT_DONT_CARE,
2230                 .trap_id = MLXSW_TRAP_ID_ARPUC,
2231         },
2232         {
2233                 .func = mlxsw_sp_rx_listener_func,
2234                 .local_port = MLXSW_PORT_DONT_CARE,
2235                 .trap_id = MLXSW_TRAP_ID_IP2ME,
2236         },
2237         {
2238                 .func = mlxsw_sp_rx_listener_func,
2239                 .local_port = MLXSW_PORT_DONT_CARE,
2240                 .trap_id = MLXSW_TRAP_ID_RTR_INGRESS0,
2241         },
2242         {
2243                 .func = mlxsw_sp_rx_listener_func,
2244                 .local_port = MLXSW_PORT_DONT_CARE,
2245                 .trap_id = MLXSW_TRAP_ID_HOST_MISS_IPV4,
2246         },
2247 };
2248
/* Configure trap groups and register all RX listeners, setting each trap
 * to be delivered to the CPU.  On failure all previously-configured traps
 * are reverted to DISCARD and their listeners unregistered.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
						      &mlxsw_sp_rx_listener[i],
						      mlxsw_sp);
		if (err)
			goto err_rx_listener_register;

		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
				    mlxsw_sp_rx_listener[i].trap_id);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
		if (err)
			goto err_rx_trap_set;
	}
	return 0;

err_rx_trap_set:
	/* Listener i was registered but its trap action failed; unregister
	 * it before unwinding the fully-configured entries below.
	 */
	mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
					  &mlxsw_sp_rx_listener[i],
					  mlxsw_sp);
err_rx_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
	return err;
}
2297
2298 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
2299 {
2300         char hpkt_pl[MLXSW_REG_HPKT_LEN];
2301         int i;
2302
2303         for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
2304                 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
2305                                     mlxsw_sp_rx_listener[i].trap_id);
2306                 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2307
2308                 mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
2309                                                   &mlxsw_sp_rx_listener[i],
2310                                                   mlxsw_sp);
2311         }
2312 }
2313
2314 static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
2315                                  enum mlxsw_reg_sfgc_type type,
2316                                  enum mlxsw_reg_sfgc_bridge_type bridge_type)
2317 {
2318         enum mlxsw_flood_table_type table_type;
2319         enum mlxsw_sp_flood_table flood_table;
2320         char sfgc_pl[MLXSW_REG_SFGC_LEN];
2321
2322         if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
2323                 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
2324         else
2325                 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
2326
2327         if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
2328                 flood_table = MLXSW_SP_FLOOD_TABLE_UC;
2329         else
2330                 flood_table = MLXSW_SP_FLOOD_TABLE_BM;
2331
2332         mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
2333                             flood_table);
2334         return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
2335 }
2336
2337 static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
2338 {
2339         int type, err;
2340
2341         for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
2342                 if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
2343                         continue;
2344
2345                 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
2346                                             MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
2347                 if (err)
2348                         return err;
2349
2350                 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
2351                                             MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
2352                 if (err)
2353                         return err;
2354         }
2355
2356         return 0;
2357 }
2358
/* Configure the global LAG hash to consider both L2 (MACs, ethertype,
 * VLAN) and L3/L4 (IPs, ports, protocol) fields.
 */
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];

	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
}
2374
/* Driver init entry point, called by the mlxsw core once the bus is up.
 * Initializes the driver-private state and brings up each subsystem in
 * order: events, traps, flood tables, buffers, LAG, switchdev, router
 * and finally the ports. On failure, every subsystem initialized so far
 * is torn down in reverse order via the goto-cleanup chain below.
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;
	INIT_LIST_HEAD(&mlxsw_sp->fids);
	INIT_LIST_HEAD(&mlxsw_sp->vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	/* Port up/down (PUDE) events must be registered before ports are
	 * created so no link transition is missed.
	 */
	err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
		return err;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
		goto err_rx_listener_register;
	}

	err = mlxsw_sp_flood_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	/* Ports are created last so that every subsystem they depend on
	 * is already functional.
	 */
	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

	/* Unwind in reverse init order. Flood tables and LAG have no
	 * dedicated fini, hence the fall-through labels.
	 */
err_ports_create:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
err_flood_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_rx_listener_register:
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	return err;
}
2457
/* Driver teardown: reverse of mlxsw_sp_init(). Ports are removed first
 * so no further traps or netdev events arrive during the rest of the
 * teardown.
 */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int i;

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	/* All FIDs, vFIDs and RIFs should have been released by now;
	 * anything left behind indicates a reference-counting bug.
	 */
	WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
	WARN_ON(!list_empty(&mlxsw_sp->fids));
	for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
		WARN_ON_ONCE(mlxsw_sp->rifs[i]);
}
2474
/* Resource profile handed to the core/firmware at init time. A field is
 * only consumed by the firmware when the corresponding used_* flag is
 * set to 1.
 */
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels         = 1,
	.max_vepa_channels              = 0,
	.used_max_lag                   = 1,
	.max_lag                        = MLXSW_SP_LAG_MAX,
	.used_max_port_per_lag          = 1,
	.max_port_per_lag               = MLXSW_SP_PORT_PER_LAG_MAX,
	.used_max_mid                   = 1,
	.max_mid                        = MLXSW_SP_MID_MAX,
	.used_max_pgt                   = 1,
	.max_pgt                        = 0,
	.used_max_system_port           = 1,
	.max_system_port                = 64,
	.used_max_vlan_groups           = 1,
	.max_vlan_groups                = 127,
	.used_max_regions               = 1,
	.max_regions                    = 400,
	.used_flood_tables              = 1,
	.used_flood_mode                = 1,
	/* NOTE(review): magic value — flood-mode encoding is defined by
	 * the device PRM; verify the meaning of mode 3 there.
	 */
	.flood_mode                     = 3,
	/* Two flood tables of each kind: one for UC, one for BC/MC. */
	.max_fid_offset_flood_tables    = 2,
	.fid_offset_flood_table_size    = VLAN_N_VID - 1,
	.max_fid_flood_tables           = 2,
	.fid_flood_table_size           = MLXSW_SP_VFID_MAX,
	.used_max_ib_mc                 = 1,
	.max_ib_mc                      = 0,
	.used_max_pkey                  = 1,
	.max_pkey                       = 0,
	/* KVD (key-value database) partitioning for FDB/LPM entries. */
	.used_kvd_sizes                 = 1,
	.kvd_linear_size                = MLXSW_SP_KVD_LINEAR_SIZE,
	.kvd_hash_single_size           = MLXSW_SP_KVD_HASH_SINGLE_SIZE,
	.kvd_hash_double_size           = MLXSW_SP_KVD_HASH_DOUBLE_SIZE,
	.swid_config                    = {
		{
			.used_type      = 1,
			.type           = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
2514
/* Driver ops registered with the mlxsw core: lifecycle (init/fini),
 * port splitting, shared-buffer (sb_*) devlink callbacks and TX header
 * construction.
 */
static struct mlxsw_driver mlxsw_sp_driver = {
	.kind                           = MLXSW_DEVICE_KIND_SPECTRUM,
	.owner                          = THIS_MODULE,
	.priv_size                      = sizeof(struct mlxsw_sp),
	.init                           = mlxsw_sp_init,
	.fini                           = mlxsw_sp_fini,
	.port_split                     = mlxsw_sp_port_split,
	.port_unsplit                   = mlxsw_sp_port_unsplit,
	.sb_pool_get                    = mlxsw_sp_sb_pool_get,
	.sb_pool_set                    = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get               = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set               = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get            = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set            = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot                = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear               = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get           = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get        = mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct                = mlxsw_sp_txhdr_construct,
	.txhdr_len                      = MLXSW_TXHDR_LEN,
	.profile                        = &mlxsw_sp_config_profile,
};
2537
2538 static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
2539 {
2540         return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
2541 }
2542
2543 static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
2544 {
2545         struct net_device *lower_dev;
2546         struct list_head *iter;
2547
2548         if (mlxsw_sp_port_dev_check(dev))
2549                 return netdev_priv(dev);
2550
2551         netdev_for_each_all_lower_dev(dev, lower_dev, iter) {
2552                 if (mlxsw_sp_port_dev_check(lower_dev))
2553                         return netdev_priv(lower_dev);
2554         }
2555         return NULL;
2556 }
2557
2558 static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
2559 {
2560         struct mlxsw_sp_port *mlxsw_sp_port;
2561
2562         mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
2563         return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
2564 }
2565
2566 static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
2567 {
2568         struct net_device *lower_dev;
2569         struct list_head *iter;
2570
2571         if (mlxsw_sp_port_dev_check(dev))
2572                 return netdev_priv(dev);
2573
2574         netdev_for_each_all_lower_dev_rcu(dev, lower_dev, iter) {
2575                 if (mlxsw_sp_port_dev_check(lower_dev))
2576                         return netdev_priv(lower_dev);
2577         }
2578         return NULL;
2579 }
2580
/* Find the Spectrum port below @dev and take a reference on its netdev
 * so it cannot go away. The caller must release the reference with
 * mlxsw_sp_port_dev_put(). Returns NULL if no Spectrum port is found.
 */
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	/* The lookup is done under RCU so this is safe without RTNL. */
	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();
	return mlxsw_sp_port;
}
2592
/* Drop the netdev reference taken by mlxsw_sp_port_lower_dev_hold(). */
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}
2597
2598 static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r,
2599                                        unsigned long event)
2600 {
2601         switch (event) {
2602         case NETDEV_UP:
2603                 if (!r)
2604                         return true;
2605                 r->ref_count++;
2606                 return false;
2607         case NETDEV_DOWN:
2608                 if (r && --r->ref_count == 0)
2609                         return true;
2610                 /* It is possible we already removed the RIF ourselves
2611                  * if it was assigned to a netdev that is now a bridge
2612                  * or LAG slave.
2613                  */
2614                 return false;
2615         }
2616
2617         return false;
2618 }
2619
2620 static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
2621 {
2622         int i;
2623
2624         for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
2625                 if (!mlxsw_sp->rifs[i])
2626                         return i;
2627
2628         return MLXSW_SP_RIF_MAX;
2629 }
2630
2631 static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
2632                                            bool *p_lagged, u16 *p_system_port)
2633 {
2634         u8 local_port = mlxsw_sp_vport->local_port;
2635
2636         *p_lagged = mlxsw_sp_vport->lagged;
2637         *p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
2638 }
2639
2640 static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
2641                                     struct net_device *l3_dev, u16 rif,
2642                                     bool create)
2643 {
2644         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
2645         bool lagged = mlxsw_sp_vport->lagged;
2646         char ritr_pl[MLXSW_REG_RITR_LEN];
2647         u16 system_port;
2648
2649         mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif,
2650                             l3_dev->mtu, l3_dev->dev_addr);
2651
2652         mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
2653         mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
2654                                   mlxsw_sp_vport_vid_get(mlxsw_sp_vport));
2655
2656         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
2657 }
2658
2659 static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
2660
2661 static struct mlxsw_sp_fid *
2662 mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
2663 {
2664         struct mlxsw_sp_fid *f;
2665
2666         f = kzalloc(sizeof(*f), GFP_KERNEL);
2667         if (!f)
2668                 return NULL;
2669
2670         f->leave = mlxsw_sp_vport_rif_sp_leave;
2671         f->ref_count = 0;
2672         f->dev = l3_dev;
2673         f->fid = fid;
2674
2675         return f;
2676 }
2677
2678 static struct mlxsw_sp_rif *
2679 mlxsw_sp_rif_alloc(u16 rif, struct net_device *l3_dev, struct mlxsw_sp_fid *f)
2680 {
2681         struct mlxsw_sp_rif *r;
2682
2683         r = kzalloc(sizeof(*r), GFP_KERNEL);
2684         if (!r)
2685                 return NULL;
2686
2687         ether_addr_copy(r->addr, l3_dev->dev_addr);
2688         r->mtu = l3_dev->mtu;
2689         r->ref_count = 1;
2690         r->dev = l3_dev;
2691         r->rif = rif;
2692         r->f = f;
2693
2694         return r;
2695 }
2696
/* Create a Sub-port RIF for @mlxsw_sp_vport on @l3_dev: allocate a free
 * RIF slot, program the RIF and a matching router FDB entry in hardware,
 * then allocate the software FID and RIF objects. On failure every step
 * already taken is rolled back. Returns the new RIF or an ERR_PTR.
 */
static struct mlxsw_sp_rif *
mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
			     struct net_device *l3_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct mlxsw_sp_fid *f;
	struct mlxsw_sp_rif *r;
	u16 fid, rif;
	int err;

	rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
	if (rif == MLXSW_SP_RIF_MAX)
		return ERR_PTR(-ERANGE);

	err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true);
	if (err)
		return ERR_PTR(err);

	/* Sub-port RIFs use a FID derived from the RIF index. */
	fid = mlxsw_sp_rif_sp_to_fid(rif);
	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
	if (err)
		goto err_rif_fdb_op;

	f = mlxsw_sp_rfid_alloc(fid, l3_dev);
	if (!f) {
		err = -ENOMEM;
		goto err_rfid_alloc;
	}

	r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
	if (!r) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}

	f->r = r;
	mlxsw_sp->rifs[rif] = r;

	return r;

err_rif_alloc:
	kfree(f);
err_rfid_alloc:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
err_rif_fdb_op:
	mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
	return ERR_PTR(err);
}
2745
/* Destroy a Sub-port RIF: unlink and free the software objects, then
 * remove the router FDB entry and the RIF itself from hardware, the
 * reverse of mlxsw_sp_vport_rif_sp_create().
 */
static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
					  struct mlxsw_sp_rif *r)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct net_device *l3_dev = r->dev;
	struct mlxsw_sp_fid *f = r->f;
	/* Snapshot the identifiers before freeing the objects below. */
	u16 fid = f->fid;
	u16 rif = r->rif;

	mlxsw_sp->rifs[rif] = NULL;
	f->r = NULL;

	kfree(r);

	kfree(f);

	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);

	mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
}
2766
/* Attach @mlxsw_sp_vport to the Sub-port RIF of @l3_dev, creating the
 * RIF first if this is the first vPort to join. Takes a reference on
 * the RIF's FID; released by mlxsw_sp_vport_rif_sp_leave().
 */
static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *l3_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct mlxsw_sp_rif *r;

	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (!r) {
		r = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
		if (IS_ERR(r))
			return PTR_ERR(r);
	}

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, r->f);
	r->f->ref_count++;

	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", r->f->fid);

	return 0;
}
2787
/* Detach @mlxsw_sp_vport from its Sub-port RIF's FID; when the last
 * reference is dropped, the RIF itself is destroyed.
 */
static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
	if (--f->ref_count == 0)
		mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->r);
}
2798
/* Handle an inetaddr event for a vPort: join the Sub-port RIF of
 * @l3_dev on NETDEV_UP, leave it on NETDEV_DOWN. The vPort is the one
 * on @port_dev for VLAN @vid; its absence is a driver bug (WARN).
 */
static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
					 struct net_device *port_dev,
					 unsigned long event, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return -EINVAL;

	switch (event) {
	case NETDEV_UP:
		return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
	case NETDEV_DOWN:
		mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
		break;
	}

	return 0;
}
2820
2821 static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
2822                                         unsigned long event)
2823 {
2824         if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev))
2825                 return 0;
2826
2827         return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
2828 }
2829
2830 static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
2831                                          struct net_device *lag_dev,
2832                                          unsigned long event, u16 vid)
2833 {
2834         struct net_device *port_dev;
2835         struct list_head *iter;
2836         int err;
2837
2838         netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
2839                 if (mlxsw_sp_port_dev_check(port_dev)) {
2840                         err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
2841                                                             event, vid);
2842                         if (err)
2843                                 return err;
2844                 }
2845         }
2846
2847         return 0;
2848 }
2849
/* Handle an inetaddr event on a LAG netdev. A bridged LAG is handled
 * via the bridge device instead; otherwise the event is applied to all
 * member ports with the default VID 1.
 */
static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
				       unsigned long event)
{
	int err = 0;

	if (!netif_is_bridge_port(lag_dev))
		err = __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event,
						    1);

	return err;
}
2858
/* Map an L3 device to its FID object: a VLAN device on the VLAN-aware
 * bridge maps to the FID of its VID, the VLAN-aware bridge itself maps
 * to FID 1 (the default PVID), and any other bridge is VLAN-unaware and
 * maps to a vFID. Returns NULL if the FID is not known.
 */
static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
						    struct net_device *l3_dev)
{
	u16 fid;

	if (is_vlan_dev(l3_dev))
		fid = vlan_dev_vlan_id(l3_dev);
	else if (mlxsw_sp->master_bridge.dev == l3_dev)
		fid = 1;
	else
		return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);

	return mlxsw_sp_fid_find(mlxsw_sp, fid);
}
2873
2874 static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
2875 {
2876         if (mlxsw_sp_fid_is_vfid(fid))
2877                 return MLXSW_REG_RITR_FID_IF;
2878         else
2879                 return MLXSW_REG_RITR_VLAN_IF;
2880 }
2881
/* Create (@create == true) or destroy a bridge RIF for @fid on top of
 * @l3_dev via the RITR register, choosing a FID or VLAN interface type
 * based on the FID kind.
 */
static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *l3_dev,
				  u16 fid, u16 rif,
				  bool create)
{
	enum mlxsw_reg_ritr_if_type rif_type;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	rif_type = mlxsw_sp_rif_type_get(fid);
	mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, l3_dev->mtu,
			    l3_dev->dev_addr);
	mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
2897
/* Create a bridge RIF for FID @f on @l3_dev: allocate a free RIF slot,
 * program the RIF and a matching router FDB entry in hardware, then
 * allocate the software RIF object. Each step is rolled back on
 * failure of a later one.
 */
static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
				      struct net_device *l3_dev,
				      struct mlxsw_sp_fid *f)
{
	struct mlxsw_sp_rif *r;
	u16 rif;
	int err;

	rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
	if (rif == MLXSW_SP_RIF_MAX)
		return -ERANGE;

	err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true);
	if (err)
		return err;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
	if (err)
		goto err_rif_fdb_op;

	r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
	if (!r) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}

	f->r = r;
	mlxsw_sp->rifs[rif] = r;

	netdev_dbg(l3_dev, "RIF=%d created\n", rif);

	return 0;

err_rif_alloc:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
err_rif_fdb_op:
	mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
	return err;
}
2937
/* Destroy a bridge RIF: unlink and free the software object, then
 * remove the router FDB entry and the RIF from hardware, the reverse of
 * mlxsw_sp_rif_bridge_create().
 */
void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_rif *r)
{
	struct net_device *l3_dev = r->dev;
	struct mlxsw_sp_fid *f = r->f;
	/* Snapshot the RIF index before freeing the object below. */
	u16 rif = r->rif;

	mlxsw_sp->rifs[rif] = NULL;
	f->r = NULL;

	kfree(r);

	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);

	mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);

	netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif);
}
2956
/* Handle an inetaddr event on a bridge (or a VLAN device on top of the
 * VLAN-aware bridge): create the bridge RIF on NETDEV_UP, destroy it on
 * NETDEV_DOWN.
 */
static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
					  struct net_device *br_dev,
					  unsigned long event)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
	struct mlxsw_sp_fid *f;

	/* FID can either be an actual FID if the L3 device is the
	 * VLAN-aware bridge or a VLAN device on top. Otherwise, the
	 * L3 device is a VLAN-unaware bridge and we get a vFID.
	 */
	f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
	if (WARN_ON(!f))
		return -EINVAL;

	switch (event) {
	case NETDEV_UP:
		return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
	case NETDEV_DOWN:
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
		break;
	}

	return 0;
}
2982
/* Handle an inetaddr event on a VLAN device by dispatching on its real
 * device: a port maps to a vPort event, a LAG to a per-member event,
 * and a VLAN on top of our VLAN-aware bridge to a bridge RIF event.
 * Other topologies are ignored.
 */
static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
					unsigned long event)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
						     vid);
	else if (netif_is_lag_master(real_dev))
		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
						     vid);
	else if (netif_is_bridge_master(real_dev) &&
		 mlxsw_sp->master_bridge.dev == real_dev)
		return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
						      event);

	return 0;
}
3003
3004 static int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
3005                                    unsigned long event, void *ptr)
3006 {
3007         struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
3008         struct net_device *dev = ifa->ifa_dev->dev;
3009         struct mlxsw_sp *mlxsw_sp;
3010         struct mlxsw_sp_rif *r;
3011         int err = 0;
3012
3013         mlxsw_sp = mlxsw_sp_lower_get(dev);
3014         if (!mlxsw_sp)
3015                 goto out;
3016
3017         r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3018         if (!mlxsw_sp_rif_should_config(r, event))
3019                 goto out;
3020
3021         if (mlxsw_sp_port_dev_check(dev))
3022                 err = mlxsw_sp_inetaddr_port_event(dev, event);
3023         else if (netif_is_lag_master(dev))
3024                 err = mlxsw_sp_inetaddr_lag_event(dev, event);
3025         else if (netif_is_bridge_master(dev))
3026                 err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
3027         else if (is_vlan_dev(dev))
3028                 err = mlxsw_sp_inetaddr_vlan_event(dev, event);
3029
3030 out:
3031         return notifier_from_errno(err);
3032 }
3033
/* Update the MAC address and MTU of an existing RIF using a
 * read-modify-write of the RITR register.
 */
static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif,
			     const char *mac, int mtu)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	/* Read the current RIF configuration first so unrelated fields
	 * are preserved by the write-back below.
	 */
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
	/* NOTE(review): the CREATE opcode is reused here on an existing
	 * RIF — presumably it acts as update-in-place; verify against
	 * the device PRM.
	 */
	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
3050
/* React to a MAC or MTU change on a netdev that has a RIF: replace the
 * old router FDB entry, update the RIF in hardware and cache the new
 * values. On failure the previous configuration is restored so hardware
 * and the cached state stay consistent.
 */
static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
{
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *r;
	int err;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return 0;

	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!r)
		return 0;

	/* Remove the FDB entry for the old MAC before editing the RIF. */
	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, false);
	if (err)
		return err;

	err = mlxsw_sp_rif_edit(mlxsw_sp, r->rif, dev->dev_addr, dev->mtu);
	if (err)
		goto err_rif_edit;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, r->f->fid, true);
	if (err)
		goto err_rif_fdb_op;

	/* Commit the new values to the cached RIF state. */
	ether_addr_copy(r->addr, dev->dev_addr);
	r->mtu = dev->mtu;

	netdev_dbg(dev, "Updated RIF=%d\n", r->rif);

	return 0;

	/* Roll back: restore the old RIF settings and FDB entry. */
err_rif_fdb_op:
	mlxsw_sp_rif_edit(mlxsw_sp, r->rif, r->addr, r->mtu);
err_rif_edit:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, true);
	return err;
}
3090
3091 static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
3092                                          u16 fid)
3093 {
3094         if (mlxsw_sp_fid_is_vfid(fid))
3095                 return mlxsw_sp_port_vport_find_by_fid(lag_port, fid);
3096         else
3097                 return test_bit(fid, lag_port->active_vlans);
3098 }
3099
3100 static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
3101                                            u16 fid)
3102 {
3103         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3104         u8 local_port = mlxsw_sp_port->local_port;
3105         u16 lag_id = mlxsw_sp_port->lag_id;
3106         int i, count = 0;
3107
3108         if (!mlxsw_sp_port->lagged)
3109                 return true;
3110
3111         for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
3112                 struct mlxsw_sp_port *lag_port;
3113
3114                 lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
3115                 if (!lag_port || lag_port->local_port == local_port)
3116                         continue;
3117                 if (mlxsw_sp_lag_port_fid_member(lag_port, fid))
3118                         count++;
3119         }
3120
3121         return !count;
3122 }
3123
/* Flush all FDB entries learned on this (non-LAG) port for @fid, via
 * the SFDF register.
 */
static int
mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
						mlxsw_sp_port->local_port);

	netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n",
		   mlxsw_sp_port->local_port, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
3141
/* Flush all FDB entries learned on the port's LAG for @fid, via the
 * SFDF register.
 */
static int
mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);

	netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n",
		   mlxsw_sp_port->lag_id, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
3158
3159 int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
3160 {
3161         if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid))
3162                 return 0;
3163
3164         if (mlxsw_sp_port->lagged)
3165                 return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port,
3166                                                              fid);
3167         else
3168                 return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid);
3169 }
3170
/* Called when the master bridge is gone: drop the remaining reference
 * on each FID and destroy it. At this point each FID is expected to
 * hold exactly one reference (the bridge's); anything else indicates a
 * reference-counting bug, hence the WARN.
 */
static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_fid *f, *tmp;

	list_for_each_entry_safe(f, tmp, &mlxsw_sp->fids, list)
		if (--f->ref_count == 0)
			mlxsw_sp_fid_destroy(mlxsw_sp, f);
		else
			WARN_ON_ONCE(1);
}
3181
3182 static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
3183                                          struct net_device *br_dev)
3184 {
3185         return !mlxsw_sp->master_bridge.dev ||
3186                mlxsw_sp->master_bridge.dev == br_dev;
3187 }
3188
/* Track @br_dev as the device's master bridge and take a reference.
 * Caller must have validated it with mlxsw_sp_master_bridge_check().
 */
static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	mlxsw_sp->master_bridge.dev = br_dev;
	mlxsw_sp->master_bridge.ref_count++;
}
3195
/* Drop a reference on the master bridge; when the last one is dropped,
 * forget the bridge and release the FIDs it was holding.
 */
static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
{
	if (--mlxsw_sp->master_bridge.ref_count == 0) {
		mlxsw_sp->master_bridge.dev = NULL;
		/* It's possible upper VLAN devices are still holding
		 * references to underlying FIDs. Drop the reference
		 * and release the resources if it was the last one.
		 * If it wasn't, then something bad happened.
		 */
		mlxsw_sp_master_bridge_gone_sync(mlxsw_sp);
	}
}
3208
/* Enslave @mlxsw_sp_port to bridge @br_dev: remove the implicit VLAN 1
 * interface, account the bridge reference and enable the bridge-mode
 * port flags (learning, learning sync, unknown-unicast flood).
 */
static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct net_device *br_dev)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* When port is not bridged untagged packets are tagged with
	 * PVID=VID=1, thereby creating an implicit VLAN interface in
	 * the device. Remove it and let bridge code take care of its
	 * own VLANs.
	 */
	err = mlxsw_sp_port_kill_vid(dev, 0, 1);
	if (err)
		return err;

	mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev);

	mlxsw_sp_port->learning = 1;
	mlxsw_sp_port->learning_sync = 1;
	mlxsw_sp_port->uc_flood = 1;
	mlxsw_sp_port->bridged = 1;

	return 0;
}
3233
/* Undo mlxsw_sp_port_bridge_join(): restore PVID=1, drop the master
 * bridge reference and disable learning/flooding on the port.
 */
static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;

	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);

	mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp);

	mlxsw_sp_port->learning = 0;
	mlxsw_sp_port->learning_sync = 0;
	mlxsw_sp_port->uc_flood = 0;
	mlxsw_sp_port->bridged = 0;

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	mlxsw_sp_port_add_vid(dev, 0, 1);
}
3252
3253 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3254 {
3255         char sldr_pl[MLXSW_REG_SLDR_LEN];
3256
3257         mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
3258         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3259 }
3260
3261 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3262 {
3263         char sldr_pl[MLXSW_REG_SLDR_LEN];
3264
3265         mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
3266         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3267 }
3268
3269 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3270                                      u16 lag_id, u8 port_index)
3271 {
3272         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3273         char slcor_pl[MLXSW_REG_SLCOR_LEN];
3274
3275         mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
3276                                       lag_id, port_index);
3277         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3278 }
3279
3280 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3281                                         u16 lag_id)
3282 {
3283         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3284         char slcor_pl[MLXSW_REG_SLCOR_LEN];
3285
3286         mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
3287                                          lag_id);
3288         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3289 }
3290
3291 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
3292                                         u16 lag_id)
3293 {
3294         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3295         char slcor_pl[MLXSW_REG_SLCOR_LEN];
3296
3297         mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
3298                                         lag_id);
3299         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3300 }
3301
3302 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
3303                                          u16 lag_id)
3304 {
3305         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3306         char slcor_pl[MLXSW_REG_SLCOR_LEN];
3307
3308         mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
3309                                          lag_id);
3310         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3311 }
3312
3313 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3314                                   struct net_device *lag_dev,
3315                                   u16 *p_lag_id)
3316 {
3317         struct mlxsw_sp_upper *lag;
3318         int free_lag_id = -1;
3319         int i;
3320
3321         for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
3322                 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
3323                 if (lag->ref_count) {
3324                         if (lag->dev == lag_dev) {
3325                                 *p_lag_id = i;
3326                                 return 0;
3327                         }
3328                 } else if (free_lag_id < 0) {
3329                         free_lag_id = i;
3330                 }
3331         }
3332         if (free_lag_id < 0)
3333                 return -EBUSY;
3334         *p_lag_id = free_lag_id;
3335         return 0;
3336 }
3337
3338 static bool
3339 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
3340                           struct net_device *lag_dev,
3341                           struct netdev_lag_upper_info *lag_upper_info)
3342 {
3343         u16 lag_id;
3344
3345         if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
3346                 return false;
3347         if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
3348                 return false;
3349         return true;
3350 }
3351
3352 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3353                                        u16 lag_id, u8 *p_port_index)
3354 {
3355         int i;
3356
3357         for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
3358                 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
3359                         *p_port_index = i;
3360                         return 0;
3361                 }
3362         }
3363         return -EBUSY;
3364 }
3365
/* Propagate LAG membership to the port's PVID (VID 1) vPort, leaving
 * its FID first since any router interface bound to it is no longer
 * valid once the port is lagged.
 */
static void
mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  u16 lag_id)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	/* If vPort is assigned a RIF, then leave it since it's no
	 * longer valid.
	 */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f)
		f->leave(mlxsw_sp_vport);

	mlxsw_sp_vport->lag_id = lag_id;
	mlxsw_sp_vport->lagged = 1;
}
3387
/* Counterpart of mlxsw_sp_port_pvid_vport_lag_join(): detach the PVID
 * vPort from its FID (the LAG context it was joined under is gone) and
 * clear its lagged state.
 */
static void
mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	/* Leave the FID joined while the vPort was lagged, if any. */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f)
		f->leave(mlxsw_sp_vport);

	mlxsw_sp_vport->lagged = 0;
}
3404
/* Enslave the port to the LAG represented by lag_dev: create the device
 * LAG on first use, add the port to the LAG's collector and enable it,
 * then record the mapping in the core. The error path unwinds in exact
 * reverse order and destroys the LAG only if this port would have been
 * its first member. Returns 0 or a negative errno.
 */
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		/* First member: instantiate the LAG in the device. */
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;
	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
	if (err)
		goto err_col_port_enable;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	/* Reflect the new LAG membership on the PVID vPort. */
	mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_id);

	return 0;

err_col_port_enable:
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}
3452
/* Remove the port from its LAG: disable and remove it from the
 * collector, detach it from any bridge it was in via the LAG, destroy
 * the LAG when this is the last member, and clear the core mapping.
 * No-op if the port is not lagged.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Leaving the LAG also means leaving the bridge it was in. */
	if (mlxsw_sp_port->bridged) {
		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
	}

	/* ref_count is only decremented below, so 1 means last member. */
	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port);
}
3483
3484 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3485                                       u16 lag_id)
3486 {
3487         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3488         char sldr_pl[MLXSW_REG_SLDR_LEN];
3489
3490         mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
3491                                          mlxsw_sp_port->local_port);
3492         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3493 }
3494
3495 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3496                                          u16 lag_id)
3497 {
3498         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3499         char sldr_pl[MLXSW_REG_SLDR_LEN];
3500
3501         mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
3502                                             mlxsw_sp_port->local_port);
3503         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3504 }
3505
3506 static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
3507                                        bool lag_tx_enabled)
3508 {
3509         if (lag_tx_enabled)
3510                 return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
3511                                                   mlxsw_sp_port->lag_id);
3512         else
3513                 return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
3514                                                      mlxsw_sp_port->lag_id);
3515 }
3516
/* Reflect a LAG lower-state change (tx_enabled) into the device's LAG
 * distributor configuration. Returns 0 or a negative errno.
 */
static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}
3522
3523 static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
3524                                    struct net_device *vlan_dev)
3525 {
3526         struct mlxsw_sp_port *mlxsw_sp_vport;
3527         u16 vid = vlan_dev_vlan_id(vlan_dev);
3528
3529         mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
3530         if (WARN_ON(!mlxsw_sp_vport))
3531                 return -EINVAL;
3532
3533         mlxsw_sp_vport->dev = vlan_dev;
3534
3535         return 0;
3536 }
3537
3538 static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
3539                                       struct net_device *vlan_dev)
3540 {
3541         struct mlxsw_sp_port *mlxsw_sp_vport;
3542         u16 vid = vlan_dev_vlan_id(vlan_dev);
3543
3544         mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
3545         if (WARN_ON(!mlxsw_sp_vport))
3546                 return;
3547
3548         mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
3549 }
3550
/* Handle PRECHANGEUPPER/CHANGEUPPER on an mlxsw port netdev.
 *
 * PRECHANGEUPPER vetoes (-EINVAL) topologies the hardware cannot
 * offload; CHANGEUPPER then commits the already-validated change to
 * the device. Returns 0 or a negative errno.
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only VLAN, LAG and bridge uppers are offloadable. */
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* HW limitation forbids to put ports to multiple bridges. */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return -EINVAL;
		/* A port with VLAN uppers cannot be enslaved to a LAG. */
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		/* A LAG member may only carry VLANs on top of the LAG. */
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
							      upper_dev);
			else
				 mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
							   upper_dev);
		} else if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								upper_dev);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			else
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
		} else {
			/* Should have been vetoed in PRECHANGEUPPER. */
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}
3618
3619 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
3620                                                unsigned long event, void *ptr)
3621 {
3622         struct netdev_notifier_changelowerstate_info *info;
3623         struct mlxsw_sp_port *mlxsw_sp_port;
3624         int err;
3625
3626         mlxsw_sp_port = netdev_priv(dev);
3627         info = ptr;
3628
3629         switch (event) {
3630         case NETDEV_CHANGELOWERSTATE:
3631                 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
3632                         err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
3633                                                         info->lower_state_info);
3634                         if (err)
3635                                 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
3636                 }
3637                 break;
3638         }
3639
3640         return 0;
3641 }
3642
3643 static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
3644                                          unsigned long event, void *ptr)
3645 {
3646         switch (event) {
3647         case NETDEV_PRECHANGEUPPER:
3648         case NETDEV_CHANGEUPPER:
3649                 return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
3650         case NETDEV_CHANGELOWERSTATE:
3651                 return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
3652         }
3653
3654         return 0;
3655 }
3656
3657 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
3658                                         unsigned long event, void *ptr)
3659 {
3660         struct net_device *dev;
3661         struct list_head *iter;
3662         int ret;
3663
3664         netdev_for_each_lower_dev(lag_dev, dev, iter) {
3665                 if (mlxsw_sp_port_dev_check(dev)) {
3666                         ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
3667                         if (ret)
3668                                 return ret;
3669                 }
3670         }
3671
3672         return 0;
3673 }
3674
/* A VLAN device was linked on top of the master bridge: take a
 * reference on the FID matching its VID, creating the FID on first
 * use. Returns 0 or a negative errno from FID creation.
 */
static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp *mlxsw_sp,
					    struct net_device *vlan_dev)
{
	u16 fid = vlan_dev_vlan_id(vlan_dev);
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp, fid);
	if (!f) {
		f = mlxsw_sp_fid_create(mlxsw_sp, fid);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	f->ref_count++;

	return 0;
}
3692
3693 static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
3694                                                struct net_device *vlan_dev)
3695 {
3696         u16 fid = vlan_dev_vlan_id(vlan_dev);
3697         struct mlxsw_sp_fid *f;
3698
3699         f = mlxsw_sp_fid_find(mlxsw_sp, fid);
3700         if (f && f->r)
3701                 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
3702         if (f && --f->ref_count == 0)
3703                 mlxsw_sp_fid_destroy(mlxsw_sp, f);
3704 }
3705
/* Handle CHANGEUPPER on a bridge device: track VLAN devices created on
 * top of the master bridge so their FIDs stay referenced. Events for
 * bridges not backed by this device are ignored.
 */
static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err;

	/* Only react when the bridge sits on top of mlxsw ports and is
	 * the tracked master bridge.
	 */
	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		return 0;
	if (br_dev != mlxsw_sp->master_bridge.dev)
		return 0;

	info = ptr;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev))
			break;
		if (info->linking) {
			err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
							       upper_dev);
			if (err)
				return err;
		} else {
			mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp, upper_dev);
		}
		break;
	}

	return 0;
}
3740
/* Return the first free vFID index, or MLXSW_SP_VFID_MAX when the
 * bitmap is fully used.
 */
static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->vfids.mapped,
				   MLXSW_SP_VFID_MAX);
}
3746
3747 static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
3748 {
3749         char sfmr_pl[MLXSW_REG_SFMR_LEN];
3750
3751         mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
3752         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
3753 }
3754
3755 static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
3756
/* Allocate a vFID for br_dev: pick a free vFID index, create the
 * corresponding FID in the device and track it on the vfids list.
 * Returns the new FID or an ERR_PTR (-ERANGE when no vFID is free,
 * -ENOMEM on allocation failure, or the device write error).
 */
static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
						 struct net_device *br_dev)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_fid *f;
	u16 vfid, fid;
	int err;

	vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
	if (vfid == MLXSW_SP_VFID_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	fid = mlxsw_sp_vfid_to_fid(vfid);
	err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
	if (err) {
		dev_err(dev, "Failed to create FID=%d\n", fid);
		return ERR_PTR(err);
	}

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		goto err_allocate_vfid;

	f->leave = mlxsw_sp_vport_vfid_leave;
	f->fid = fid;
	f->dev = br_dev;

	list_add(&f->list, &mlxsw_sp->vfids.list);
	set_bit(vfid, mlxsw_sp->vfids.mapped);

	return f;

err_allocate_vfid:
	/* Undo the device-side FID creation before bailing out. */
	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
	return ERR_PTR(-ENOMEM);
}
3795
/* Release a vFID: clear its bitmap slot, unlink and free the tracking
 * structure (tearing down any bound router interface first), then
 * destroy the FID in the device.
 */
static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fid *f)
{
	u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
	/* f is freed below; keep the FID for the final device op. */
	u16 fid = f->fid;

	clear_bit(vfid, mlxsw_sp->vfids.mapped);
	list_del(&f->list);

	if (f->r)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);

	kfree(f);

	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
}
3812
3813 static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
3814                                   bool valid)
3815 {
3816         enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
3817         u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
3818
3819         return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
3820                                             vid);
3821 }
3822
/* Join the vPort to the vFID backing br_dev, creating the vFID on
 * first use: enable flooding, map {port, VID} to the FID and take a
 * reference. The error path unwinds in reverse order and destroys the
 * vFID if it would have been unused. Returns 0 or a negative errno.
 */
static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				    struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f;
	int err;

	f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
	if (!f) {
		f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_flood_set;

	err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_fid_map;

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
	f->ref_count++;

	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);

	return 0;

err_vport_fid_map:
	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
err_vport_flood_set:
	/* Destroy the vFID only if we just created it above. */
	if (!f->ref_count)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
	return err;
}
3858
/* Detach the vPort from its vFID: undo the FID mapping and flooding,
 * flush learned FDB entries, drop the reference and destroy the vFID
 * when it was the last user.
 */
static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);

	mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
	if (--f->ref_count == 0)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
}
3875
/* Enslave a VLAN interface (vPort) to a bridge: leave any FID it is
 * currently in, join the bridge's vFID and enable learning on the VID.
 * Returns 0 or a negative errno; failures are unwound.
 */
static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	int err;

	/* A vPort joining a bridge must first leave its current FID. */
	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev);
	if (err) {
		netdev_err(dev, "Failed to join vFID\n");
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning\n");
		goto err_port_vid_learning_set;
	}

	mlxsw_sp_vport->learning = 1;
	mlxsw_sp_vport->learning_sync = 1;
	mlxsw_sp_vport->uc_flood = 1;
	mlxsw_sp_vport->bridged = 1;

	return 0;

err_port_vid_learning_set:
	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
	return err;
}
3910
/* Undo mlxsw_sp_vport_bridge_join(): disable learning on the VID,
 * leave the bridge's vFID and clear the vPort's bridged state.
 */
static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);

	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);

	mlxsw_sp_vport->learning = 0;
	mlxsw_sp_vport->learning_sync = 0;
	mlxsw_sp_vport->uc_flood = 0;
	mlxsw_sp_vport->bridged = 0;
}
3924
/* Return false when another VLAN interface of this port is already a
 * member of br_dev: multiple VLAN interfaces of the same port cannot
 * be enslaved to the same bridge.
 */
static bool
mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
				  const struct net_device *br_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport);

		if (dev && dev == br_dev)
			return false;
	}

	return true;
}
3941
/* Handle PRECHANGEUPPER/CHANGEUPPER for the VLAN interface with the
 * given VID on an mlxsw port: veto non-bridge uppers and duplicate
 * bridge membership, then join/leave the bridge on commit. Returns 0
 * or a negative errno.
 */
static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
					  unsigned long event, void *ptr,
					  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct net_device *upper_dev;
	int err = 0;

	/* May be NULL, e.g. when the VLAN was only configured on some
	 * LAG members; handled per-case below.
	 */
	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* We can't have multiple VLAN interfaces configured on
		 * the same port and being members in the same bridge.
		 */
		if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
						       upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking) {
			if (WARN_ON(!mlxsw_sp_vport))
				return -EINVAL;
			err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
							 upper_dev);
		} else {
			if (!mlxsw_sp_vport)
				return 0;
			mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
		}
	}

	return err;
}
3984
3985 static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
3986                                               unsigned long event, void *ptr,
3987                                               u16 vid)
3988 {
3989         struct net_device *dev;
3990         struct list_head *iter;
3991         int ret;
3992
3993         netdev_for_each_lower_dev(lag_dev, dev, iter) {
3994                 if (mlxsw_sp_port_dev_check(dev)) {
3995                         ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
3996                                                              vid);
3997                         if (ret)
3998                                 return ret;
3999                 }
4000         }
4001
4002         return 0;
4003 }
4004
4005 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
4006                                          unsigned long event, void *ptr)
4007 {
4008         struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
4009         u16 vid = vlan_dev_vlan_id(vlan_dev);
4010
4011         if (mlxsw_sp_port_dev_check(real_dev))
4012                 return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
4013                                                       vid);
4014         else if (netif_is_lag_master(real_dev))
4015                 return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
4016                                                           vid);
4017
4018         return 0;
4019 }
4020
4021 static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
4022                                     unsigned long event, void *ptr)
4023 {
4024         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4025         int err = 0;
4026
4027         if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
4028                 err = mlxsw_sp_netdevice_router_port_event(dev);
4029         else if (mlxsw_sp_port_dev_check(dev))
4030                 err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
4031         else if (netif_is_lag_master(dev))
4032                 err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
4033         else if (netif_is_bridge_master(dev))
4034                 err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
4035         else if (is_vlan_dev(dev))
4036                 err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
4037
4038         return notifier_from_errno(err);
4039 }
4040
/* Notifier for netdev topology events (enslavement, VLANs, LAG state,
 * address/MTU changes).
 */
static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};
4044
/* Notifier for IPv4 address events; presumably drives router interface
 * (RIF) lifetime — see mlxsw_sp_inetaddr_event.
 */
static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_event,
	.priority = 10, /* Must be called before FIB notifier block */
};
4049
4050 static int __init mlxsw_sp_module_init(void)
4051 {
4052         int err;
4053
4054         register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
4055         register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
4056         err = mlxsw_core_driver_register(&mlxsw_sp_driver);
4057         if (err)
4058                 goto err_core_driver_register;
4059         return 0;
4060
4061 err_core_driver_register:
4062         unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
4063         return err;
4064 }
4065
/* Module exit: tear down in the reverse order of mlxsw_sp_module_init(). */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}
4072
4073 module_init(mlxsw_sp_module_init);
4074 module_exit(mlxsw_sp_module_exit);
4075
4076 MODULE_LICENSE("Dual BSD/GPL");
4077 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
4078 MODULE_DESCRIPTION("Mellanox Spectrum driver");
4079 MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);