Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next...
[cascardo/linux.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum.c
1 /*
2  * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
3  * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4  * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5  * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6  * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/netdevice.h>
41 #include <linux/etherdevice.h>
42 #include <linux/ethtool.h>
43 #include <linux/slab.h>
44 #include <linux/device.h>
45 #include <linux/skbuff.h>
46 #include <linux/if_vlan.h>
47 #include <linux/if_bridge.h>
48 #include <linux/workqueue.h>
49 #include <linux/jiffies.h>
50 #include <linux/bitops.h>
51 #include <linux/list.h>
52 #include <net/devlink.h>
53 #include <net/switchdev.h>
54 #include <generated/utsrelease.h>
55
56 #include "spectrum.h"
57 #include "core.h"
58 #include "reg.h"
59 #include "port.h"
60 #include "trap.h"
61 #include "txheader.h"
62
/* Driver identification strings. */
static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";
65
/* Tx header field definitions. Each MLXSW_ITEM32(tx, hdr, <field>, off,
 * shift, width) invocation generates mlxsw_tx_hdr_<field>_set()/_get()
 * accessors for one field of the Tx header that is prepended to every
 * packet handed to the device (see mlxsw_sp_txhdr_construct() below).
 */

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
133
/* mlxsw_sp_txhdr_construct - prepend and fill the Tx header on a skb.
 * @skb: packet to transmit; must have MLXSW_TXHDR_LEN bytes of headroom.
 * @tx_info: per-packet transmit metadata (destination local port).
 *
 * The packet is marked as a control packet directed at a specific egress
 * port ('ctl', 'control_tclass' and TYPE_CONTROL below).
 */
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	/* Zero the whole header first; fields not set below stay 0. */
	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
149
/* Read the switch base MAC address from the SPAD register into
 * mlxsw_sp->base_mac. Returns 0 or a negative errno from the query.
 */
static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN];
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}
161
162 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
163                                           bool is_up)
164 {
165         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
166         char paos_pl[MLXSW_REG_PAOS_LEN];
167
168         mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
169                             is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
170                             MLXSW_PORT_ADMIN_STATUS_DOWN);
171         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
172 }
173
174 static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
175                                          bool *p_is_up)
176 {
177         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
178         char paos_pl[MLXSW_REG_PAOS_LEN];
179         u8 oper_status;
180         int err;
181
182         mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
183         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
184         if (err)
185                 return err;
186         oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
187         *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
188         return 0;
189 }
190
/* Program @addr as the port's hardware MAC address via the PPAD register. */
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}
201
/* Derive the port's MAC address from the switch base MAC by adding the
 * local port number to the last byte, then program it into the device.
 * NOTE(review): the addition can wrap within the last byte for large
 * local port numbers - presumably the base MAC leaves enough headroom;
 * confirm against how base_mac is provisioned.
 */
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}
211
/* Set the spanning-tree state of @vid on the port via the SPMS register.
 * The SPMS payload is kmalloc'ed rather than placed on the stack.
 * Returns 0 or a negative errno (-ENOMEM on allocation failure).
 */
static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u16 vid, enum mlxsw_reg_spms_state state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}
228
/* Program the port MTU via the PMTU register.
 * The device MTU accounts for the Tx header and the Ethernet header, so
 * both are added to @mtu first. The device's maximum supported MTU is
 * queried and the request is rejected with -EINVAL if it exceeds it.
 */
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	/* Packing with MTU 0 queries the maximum supported MTU. */
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
249
/* Assign the port to switch partition @swid via the PSPA register. */
static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}
258
/* Enable or disable virtual-port mode for the port via the SVPE register. */
static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}
268
/* Create or invalidate a {Port, VID} to FID mapping via the SVFA register.
 * @mt: mapping table to operate on.
 * @valid: true to install the mapping, false to remove it.
 */
int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
			    fid, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}
280
/* Enable or disable MAC learning for @vid on the port via the SPVMLR
 * register. The payload is kmalloc'ed; returns 0 or a negative errno.
 */
static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  u16 vid, bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	/* Single-VID range: begin == end == vid. */
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}
297
/* Configure the port's system-port mapping via the SSPR register. */
static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}
307
/* Query the PMLP register for the module, width (number of lanes) and
 * first Tx lane of @local_port. Output parameters are only valid when
 * 0 is returned.
 */
static int __mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					   u8 local_port, u8 *p_module,
					   u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	/* Index 0: the first lane's module / Tx lane. */
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}
324
325 static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
326                                          u8 local_port, u8 *p_module,
327                                          u8 *p_width)
328 {
329         u8 lane;
330
331         return __mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, p_module,
332                                                p_width, &lane);
333 }
334
/* Map @local_port to @width consecutive lanes of @module, starting at
 * @lane, via the PMLP register. Only the Tx lane field is written; per
 * the original comment it covers both Rx and Tx.
 */
static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 module, u8 width, u8 lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}
350
/* Unmap @local_port from its module by programming a PMLP width of 0. */
static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}
359
/* ndo_open: administratively bring the port up, then start the Tx queue. */
static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}
371
/* ndo_stop: quiesce the Tx queue before taking the port administratively
 * down, so no new frames are handed to a disabled port.
 */
static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}
379
380 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
381                                       struct net_device *dev)
382 {
383         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
384         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
385         struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
386         const struct mlxsw_tx_info tx_info = {
387                 .local_port = mlxsw_sp_port->local_port,
388                 .is_emad = false,
389         };
390         u64 len;
391         int err;
392
393         if (mlxsw_core_skb_transmit_busy(mlxsw_sp, &tx_info))
394                 return NETDEV_TX_BUSY;
395
396         if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
397                 struct sk_buff *skb_orig = skb;
398
399                 skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
400                 if (!skb) {
401                         this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
402                         dev_kfree_skb_any(skb_orig);
403                         return NETDEV_TX_OK;
404                 }
405         }
406
407         if (eth_skb_pad(skb)) {
408                 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
409                 return NETDEV_TX_OK;
410         }
411
412         mlxsw_sp_txhdr_construct(skb, &tx_info);
413         len = skb->len;
414         /* Due to a race we might fail here because of a full queue. In that
415          * unlikely case we simply drop the packet.
416          */
417         err = mlxsw_core_skb_transmit(mlxsw_sp, skb, &tx_info);
418
419         if (!err) {
420                 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
421                 u64_stats_update_begin(&pcpu_stats->syncp);
422                 pcpu_stats->tx_packets++;
423                 pcpu_stats->tx_bytes += len;
424                 u64_stats_update_end(&pcpu_stats->syncp);
425         } else {
426                 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
427                 dev_kfree_skb_any(skb);
428         }
429         return NETDEV_TX_OK;
430 }
431
/* ndo_set_rx_mode: intentionally empty; no driver action is taken when
 * the Rx mode changes.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
435
/* ndo_set_mac_address: validate the new address, program it into the
 * device first, and only then update the netdev copy, so dev->dev_addr
 * is left untouched on hardware failure.
 */
static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}
451
/* ndo_change_mtu: program the new MTU into the device and, on success,
 * update the netdev copy.
 */
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		return err;
	dev->mtu = mtu;
	return 0;
}
463
/* ndo_get_stats64: aggregate the per-CPU packet/byte counters into
 * @stats. Each CPU's 64-bit counters are read under its u64_stats
 * sequence lock; @stats is assumed to be zeroed by the caller since the
 * totals are accumulated with '+='.
 */
static struct rtnl_link_stats64 *
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped	+= p->tx_dropped;
	}
	stats->tx_dropped	= tx_dropped;
	return stats;
}
495
/* Add the port to (or remove it from) VLANs [@vid_begin, @vid_end] via
 * the SPVM register.
 * @is_member: true to join the VLANs, false to leave them.
 * @untagged: whether egress traffic on these VLANs is sent untagged.
 */
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}
513
/* Transition the port from VLAN mode to Virtual mode: install an explicit
 * {Port, VID}->FID mapping for every active VLAN, then enable virtual-port
 * mode. On failure, the mappings installed so far are rolled back;
 * last_visited_vid bounds the rollback loop (VLAN_N_VID means all active
 * VLANs were mapped and only the mode change failed).
 */
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	/* Best-effort rollback; errors from the removals are ignored. */
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}
543
/* Transition the port back from Virtual mode to VLAN mode: disable
 * virtual-port mode, then remove the explicit {Port, VID}->FID mapping of
 * every active VLAN. A mid-loop failure returns immediately without
 * restoring the mappings already removed.
 */
static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	if (err)
		return err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
						   vid, vid);
		if (err)
			return err;
	}

	return 0;
}
563
564 static struct mlxsw_sp_vfid *
565 mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid)
566 {
567         struct mlxsw_sp_vfid *vfid;
568
569         list_for_each_entry(vfid, &mlxsw_sp->port_vfids.list, list) {
570                 if (vfid->vid == vid)
571                         return vfid;
572         }
573
574         return NULL;
575 }
576
/* Return the index of the first unused port vFID, or
 * MLXSW_SP_VFID_PORT_MAX when all vFIDs are in use.
 */
static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->port_vfids.mapped,
				   MLXSW_SP_VFID_PORT_MAX);
}
582
/* Create the FID backing @vfid in the device via the SFMR register. */
static int __mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid)
{
	u16 fid = mlxsw_sp_vfid_to_fid(vfid);
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
591
/* Destroy the FID backing @vfid in the device via the SFMR register.
 * Best-effort: the write result is deliberately ignored.
 */
static void __mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid)
{
	u16 fid = mlxsw_sp_vfid_to_fid(vfid);
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, fid, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
600
/* Allocate a free vFID, create its FID in the device and register it in
 * the switch's port-vFID list for @vid.
 * Returns the new vFID, or an ERR_PTR: -ERANGE when no vFID is free,
 * -ENOMEM on allocation failure, or the device error from FID creation.
 */
static struct mlxsw_sp_vfid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
						  u16 vid)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_vfid *vfid;
	u16 n_vfid;
	int err;

	n_vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
	if (n_vfid == MLXSW_SP_VFID_PORT_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
	if (err) {
		dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
		return ERR_PTR(err);
	}

	vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
	if (!vfid)
		goto err_allocate_vfid;

	vfid->vfid = n_vfid;
	vfid->vid = vid;

	list_add(&vfid->list, &mlxsw_sp->port_vfids.list);
	set_bit(n_vfid, mlxsw_sp->port_vfids.mapped);

	return vfid;

err_allocate_vfid:
	/* Undo the device-side FID creation before bailing out. */
	__mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
	return ERR_PTR(-ENOMEM);
}
637
/* Unregister @vfid from the switch's bookkeeping, destroy its FID in the
 * device and free it. Caller must ensure no vPorts still use it.
 */
static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_vfid *vfid)
{
	clear_bit(vfid->vfid, mlxsw_sp->port_vfids.mapped);
	list_del(&vfid->list);

	__mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);

	kfree(vfid);
}
648
/* Allocate a vPort (a per-VLAN shadow of @mlxsw_sp_port bound to @vfid),
 * copy the relevant port state into it and link it on the port's vports
 * list. Returns the vPort, or NULL on allocation failure.
 */
static struct mlxsw_sp_port *
mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_vfid *vfid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
	if (!mlxsw_sp_vport)
		return NULL;

	/* dev will be set correctly after the VLAN device is linked
	 * with the real device. In case of bridge SELF invocation, dev
	 * will remain as is.
	 */
	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
	mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
	mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
	mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
	mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
	mlxsw_sp_vport->vport.vfid = vfid;
	mlxsw_sp_vport->vport.vid = vfid->vid;

	list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);

	return mlxsw_sp_vport;
}
676
/* Unlink a vPort from its parent port's vports list and free it. */
static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	list_del(&mlxsw_sp_vport->vport.list);
	kfree(mlxsw_sp_vport);
}
682
/* ndo_vlan_rx_add_vid handler: create a vPort for @vid on the port.
 *
 * Looks up (or creates) the vFID for @vid, creates a vPort bound to it,
 * sets up flooding for a brand-new vFID, transitions the port to Virtual
 * mode when this is its first vPort, installs the {Port, VID}->vFID
 * mapping, disables learning, joins the VLAN and sets the STP state to
 * forwarding. Each failure unwinds all prior steps in reverse order.
 */
int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
			  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_vfid *vfid;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
		netdev_warn(dev, "VID=%d already configured\n", vid);
		return 0;
	}

	/* vFIDs are shared between ports; create one only if no port has
	 * created it for this VID yet.
	 */
	vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
	if (!vfid) {
		vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
		if (IS_ERR(vfid)) {
			netdev_err(dev, "Failed to create vFID for VID=%d\n",
				   vid);
			return PTR_ERR(vfid);
		}
	}

	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vfid);
	if (!mlxsw_sp_vport) {
		netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
		err = -ENOMEM;
		goto err_port_vport_create;
	}

	/* A freshly created vFID (nr_vports == 0) needs its flooding
	 * configuration set up once.
	 */
	if (!vfid->nr_vports) {
		err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid,
					       true, false);
		if (err) {
			netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
				   vfid->vfid);
			goto err_vport_flood_set;
		}
	}

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to Virtual mode\n");
			goto err_port_vp_mode_trans;
		}
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   true,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n",
			   vid, vfid->vfid);
		goto err_port_vid_to_fid_set;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
	if (err) {
		netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
		goto err_port_vid_learning_set;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		goto err_port_add_vid;
	}

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		goto err_port_stp_state_set;
	}

	vfid->nr_vports++;

	return 0;

	/* Error unwinding: reverse every step performed above. */
err_port_stp_state_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
err_port_add_vid:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
err_port_vid_learning_set:
	mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
				     MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
				     mlxsw_sp_vfid_to_fid(vfid->vfid), vid);
err_port_vid_to_fid_set:
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
	if (!vfid->nr_vports)
		mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
					 false);
err_vport_flood_set:
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
err_port_vport_create:
	if (!vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
	return err;
}
799
/* ndo_vlan_rx_kill_vid handler: tear down the vPort of @vid on the port.
 *
 * Reverses mlxsw_sp_port_add_vid(): sets STP to discarding, leaves the
 * VLAN, re-enables learning, removes the {Port, VID}->vFID mapping,
 * transitions the port back to VLAN mode when this was its last vPort,
 * and destroys the vFID once no vPort references it. Mid-sequence
 * failures return immediately without rolling back the steps already
 * performed.
 */
int mlxsw_sp_port_kill_vid(struct net_device *dev,
			   __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_vfid *vfid;
	int err;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		netdev_warn(dev, "VID=%d does not exist\n", vid);
		return 0;
	}

	vfid = mlxsw_sp_vport->vport.vfid;

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_DISCARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		return err;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
		return err;
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   false,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
			   vid, vfid->vfid);
		return err;
	}

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set port's mode to VLAN mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to VLAN mode\n");
			return err;
		}
	}

	vfid->nr_vports--;
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);

	/* Destroy the vFID if no vPorts are assigned to it anymore. */
	if (!vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp_port->mlxsw_sp, vfid);

	return 0;
}
874
875 static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
876                                             size_t len)
877 {
878         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
879         u8 module, width, lane;
880         int err;
881
882         err = __mlxsw_sp_port_module_info_get(mlxsw_sp_port->mlxsw_sp,
883                                               mlxsw_sp_port->local_port,
884                                               &module, &width, &lane);
885         if (err) {
886                 netdev_err(dev, "Failed to retrieve module information\n");
887                 return err;
888         }
889
890         if (!mlxsw_sp_port->split)
891                 err = snprintf(name, len, "p%d", module + 1);
892         else
893                 err = snprintf(name, len, "p%ds%d", module + 1,
894                                lane / width);
895
896         if (err >= len)
897                 return -EINVAL;
898
899         return 0;
900 }
901
/* Netdev callbacks for a Spectrum port. FDB and bridge operations are
 * delegated to the generic switchdev helpers; the remaining handlers are
 * implemented in this file.
 */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
};
920
921 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
922                                       struct ethtool_drvinfo *drvinfo)
923 {
924         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
925         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
926
927         strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
928         strlcpy(drvinfo->version, mlxsw_sp_driver_version,
929                 sizeof(drvinfo->version));
930         snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
931                  "%d.%d.%d",
932                  mlxsw_sp->bus_info->fw_rev.major,
933                  mlxsw_sp->bus_info->fw_rev.minor,
934                  mlxsw_sp->bus_info->fw_rev.subminor);
935         strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
936                 sizeof(drvinfo->bus_info));
937 }
938
/* One ethtool statistic: its user-visible name and the accessor that
 * extracts the counter from a raw PPCNT register payload.
 */
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(char *payload);
};
943
/* Table of the HW counters exposed through ethtool -S. Names follow the
 * IEEE 802.3 counter group naming used by the PPCNT register accessors;
 * order here defines both the strings and the values reported.
 */
static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

/* Number of entries in the table above; also the sset count for stats. */
#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
1024
1025 static void mlxsw_sp_port_get_strings(struct net_device *dev,
1026                                       u32 stringset, u8 *data)
1027 {
1028         u8 *p = data;
1029         int i;
1030
1031         switch (stringset) {
1032         case ETH_SS_STATS:
1033                 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
1034                         memcpy(p, mlxsw_sp_port_hw_stats[i].str,
1035                                ETH_GSTRING_LEN);
1036                         p += ETH_GSTRING_LEN;
1037                 }
1038                 break;
1039         }
1040 }
1041
1042 static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
1043                                      enum ethtool_phys_id_state state)
1044 {
1045         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1046         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1047         char mlcr_pl[MLXSW_REG_MLCR_LEN];
1048         bool active;
1049
1050         switch (state) {
1051         case ETHTOOL_ID_ACTIVE:
1052                 active = true;
1053                 break;
1054         case ETHTOOL_ID_INACTIVE:
1055                 active = false;
1056                 break;
1057         default:
1058                 return -EOPNOTSUPP;
1059         }
1060
1061         mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
1062         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
1063 }
1064
1065 static void mlxsw_sp_port_get_stats(struct net_device *dev,
1066                                     struct ethtool_stats *stats, u64 *data)
1067 {
1068         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1069         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1070         char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
1071         int i;
1072         int err;
1073
1074         mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port);
1075         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
1076         for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
1077                 data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
1078 }
1079
1080 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
1081 {
1082         switch (sset) {
1083         case ETH_SS_STATS:
1084                 return MLXSW_SP_PORT_HW_STATS_LEN;
1085         default:
1086                 return -EOPNOTSUPP;
1087         }
1088 }
1089
/* Mapping between a PTYS proto bitmask and its ethtool representation:
 * the SUPPORTED_*/ADVERTISED_* bits and the speed in Mb/s. Entries with
 * no supported/advertised bits have no ethtool link-mode equivalent.
 */
struct mlxsw_sp_port_link_mode {
	u32 mask;
	u32 supported;
	u32 advertised;
	u32 speed;
};
1096
/* PTYS-to-ethtool link mode translation table. Several PTYS bits share
 * one entry when they map to the same speed; entries without ethtool
 * SUPPORTED_/ADVERTISED_ equivalents carry only the speed.
 */
static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported	= SUPPORTED_100baseT_Full,
		.advertised	= ADVERTISED_100baseT_Full,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported	= SUPPORTED_1000baseKX_Full,
		.advertised	= ADVERTISED_1000baseKX_Full,
		.speed		= 1000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported	= SUPPORTED_10000baseT_Full,
		.advertised	= ADVERTISED_10000baseT_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported	= SUPPORTED_10000baseKX4_Full,
		.advertised	= ADVERTISED_10000baseKX4_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported	= SUPPORTED_10000baseKR_Full,
		.advertised	= ADVERTISED_10000baseKR_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported	= SUPPORTED_20000baseKR2_Full,
		.advertised	= ADVERTISED_20000baseKR2_Full,
		.speed		= 20000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported	= SUPPORTED_40000baseCR4_Full,
		.advertised	= ADVERTISED_40000baseCR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported	= SUPPORTED_40000baseKR4_Full,
		.advertised	= ADVERTISED_40000baseKR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported	= SUPPORTED_40000baseSR4_Full,
		.advertised	= ADVERTISED_40000baseSR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported	= SUPPORTED_40000baseLR4_Full,
		.advertised	= ADVERTISED_40000baseLR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed		= 25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed		= 50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.supported	= SUPPORTED_56000baseKR4_Full,
		.advertised	= ADVERTISED_56000baseKR4_Full,
		.speed		= 56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed		= 100000,
	},
};

/* Number of entries in the link mode translation table above. */
#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
1195
1196 static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
1197 {
1198         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1199                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1200                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1201                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1202                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1203                               MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1204                 return SUPPORTED_FIBRE;
1205
1206         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1207                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1208                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1209                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
1210                               MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
1211                 return SUPPORTED_Backplane;
1212         return 0;
1213 }
1214
1215 static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
1216 {
1217         u32 modes = 0;
1218         int i;
1219
1220         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1221                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1222                         modes |= mlxsw_sp_port_link_mode[i].supported;
1223         }
1224         return modes;
1225 }
1226
1227 static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
1228 {
1229         u32 modes = 0;
1230         int i;
1231
1232         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1233                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1234                         modes |= mlxsw_sp_port_link_mode[i].advertised;
1235         }
1236         return modes;
1237 }
1238
1239 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
1240                                             struct ethtool_cmd *cmd)
1241 {
1242         u32 speed = SPEED_UNKNOWN;
1243         u8 duplex = DUPLEX_UNKNOWN;
1244         int i;
1245
1246         if (!carrier_ok)
1247                 goto out;
1248
1249         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1250                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
1251                         speed = mlxsw_sp_port_link_mode[i].speed;
1252                         duplex = DUPLEX_FULL;
1253                         break;
1254                 }
1255         }
1256 out:
1257         ethtool_cmd_speed_set(cmd, speed);
1258         cmd->duplex = duplex;
1259 }
1260
1261 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
1262 {
1263         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1264                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1265                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1266                               MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1267                 return PORT_FIBRE;
1268
1269         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1270                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1271                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
1272                 return PORT_DA;
1273
1274         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1275                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1276                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1277                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
1278                 return PORT_NONE;
1279
1280         return PORT_OTHER;
1281 }
1282
/* ethtool .get_settings: query the PTYS register and translate its
 * capability/admin/operational proto masks into the ethtool link fields.
 * Returns 0 on success or the PTYS query error.
 */
static int mlxsw_sp_port_get_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 eth_proto_oper;
	int err;

	/* Pack with proto_admin 0: this is a read-only query. */
	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
			      &eth_proto_admin, &eth_proto_oper);

	cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
			 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
			 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
					eth_proto_oper, cmd);

	/* With no operational modes (link down), fall back to the
	 * capability mask for connector type and link-partner fields.
	 */
	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
	cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
	cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);

	cmd->transceiver = XCVR_INTERNAL;
	return 0;
}
1317
1318 static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
1319 {
1320         u32 ptys_proto = 0;
1321         int i;
1322
1323         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1324                 if (advertising & mlxsw_sp_port_link_mode[i].advertised)
1325                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1326         }
1327         return ptys_proto;
1328 }
1329
1330 static u32 mlxsw_sp_to_ptys_speed(u32 speed)
1331 {
1332         u32 ptys_proto = 0;
1333         int i;
1334
1335         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1336                 if (speed == mlxsw_sp_port_link_mode[i].speed)
1337                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1338         }
1339         return ptys_proto;
1340 }
1341
1342 static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
1343 {
1344         u32 ptys_proto = 0;
1345         int i;
1346
1347         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1348                 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
1349                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1350         }
1351         return ptys_proto;
1352 }
1353
/* ethtool .set_settings: program a new admin proto mask into PTYS.
 * With autoneg enabled the requested advertised modes are used; without
 * it, the mask is derived from the requested fixed speed. If the port is
 * operationally up, it is toggled down/up so the new mask takes effect.
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_port_set_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 speed;
	u32 eth_proto_new;
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	bool is_up;
	int err;

	speed = ethtool_cmd_speed(cmd);

	eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
		mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
		mlxsw_sp_to_ptys_speed(speed);

	/* Query current capability/admin masks before writing. */
	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);

	/* Clamp the request to what the port can actually do; an empty
	 * intersection means nothing requested is supported.
	 */
	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "Not supported proto admin requested");
		return -EINVAL;
	}
	if (eth_proto_new == eth_proto_admin)
		return 0;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to set proto admin");
		return err;
	}

	/* Only bounce the port if it is currently up; a down port will
	 * pick up the new mask when it is next enabled.
	 */
	err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
	if (err) {
		netdev_err(dev, "Failed to get oper status");
		return err;
	}
	if (!is_up)
		return 0;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	return 0;
}
1418
/* ethtool callbacks for a Spectrum port; link state uses the generic
 * carrier-based helper, everything else is implemented above.
 */
static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_settings		= mlxsw_sp_port_get_settings,
	.set_settings		= mlxsw_sp_port_set_settings,
};
1429
1430 static int
1431 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
1432 {
1433         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1434         u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
1435         char ptys_pl[MLXSW_REG_PTYS_LEN];
1436         u32 eth_proto_admin;
1437
1438         eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
1439         mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port,
1440                             eth_proto_admin);
1441         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1442 }
1443
/* __mlxsw_sp_port_create - allocate and fully initialize one port netdev.
 * Allocates the net_device and per-port state, registers a devlink port,
 * configures the HW (system port mapping, SWID, speeds, MTU, admin-down,
 * buffers), registers the netdev, and records the port in mlxsw_sp->ports.
 *
 * The error-label chain at the bottom undoes the steps in exact reverse
 * order of initialization; labels with no statement between them exist
 * only so each init step can jump to the correct unwind point.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				  bool split, u8 module, u8 width)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct devlink_port *devlink_port;
	struct net_device *dev;
	size_t bytes;
	int err;

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev)
		return -ENOMEM;
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->split = split;
	/* One bit per possible VID for the two VLAN bitmaps below. */
	bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
	mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->active_vlans) {
		err = -ENOMEM;
		goto err_port_active_vlans_alloc;
	}
	mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->untagged_vlans) {
		err = -ENOMEM;
		goto err_port_untagged_vlans_alloc;
	}
	INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	/* No carrier until the HW reports the link up. */
	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->hard_header_len += MLXSW_TXHDR_LEN;

	devlink_port = &mlxsw_sp_port->devlink_port;
	if (mlxsw_sp_port->split)
		devlink_port_split_set(devlink_port, module);
	err = devlink_port_register(devlink, devlink_port, local_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register devlink port\n",
			mlxsw_sp_port->local_port);
		goto err_devlink_port_register;
	}

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	/* Start administratively down; ndo_open brings the port up. */
	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	devlink_port_type_eth_set(devlink_port, dev);

	err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
	if (err)
		goto err_port_vlan_init;

	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	return 0;

err_port_vlan_init:
	unregister_netdev(dev);
err_register_netdev:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
err_port_swid_set:
err_port_system_port_mapping_set:
	devlink_port_unregister(&mlxsw_sp_port->devlink_port);
err_devlink_port_register:
err_dev_addr_init:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	kfree(mlxsw_sp_port->untagged_vlans);
err_port_untagged_vlans_alloc:
	kfree(mlxsw_sp_port->active_vlans);
err_port_active_vlans_alloc:
	free_netdev(dev);
	return err;
}
1589
1590 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1591                                 bool split, u8 module, u8 width, u8 lane)
1592 {
1593         int err;
1594
1595         err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
1596                                        lane);
1597         if (err)
1598                 return err;
1599
1600         err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split, module,
1601                                      width);
1602         if (err)
1603                 goto err_port_create;
1604
1605         return 0;
1606
1607 err_port_create:
1608         mlxsw_sp_port_module_unmap(mlxsw_sp, local_port);
1609         return err;
1610 }
1611
1612 static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
1613 {
1614         struct net_device *dev = mlxsw_sp_port->dev;
1615         struct mlxsw_sp_port *mlxsw_sp_vport, *tmp;
1616
1617         list_for_each_entry_safe(mlxsw_sp_vport, tmp,
1618                                  &mlxsw_sp_port->vports_list, vport.list) {
1619                 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
1620
1621                 /* vPorts created for VLAN devices should already be gone
1622                  * by now, since we unregistered the port netdev.
1623                  */
1624                 WARN_ON(is_vlan_dev(mlxsw_sp_vport->dev));
1625                 mlxsw_sp_port_kill_vid(dev, 0, vid);
1626         }
1627 }
1628
/* Tear down the port at @local_port. Safe to call for a slot that holds no
 * port (returns early). The teardown order mirrors creation in reverse:
 * unregister the netdev before destroying the state its callbacks use.
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct devlink_port *devlink_port;

	if (!mlxsw_sp_port)
		return;
	/* Clear the slot first so lookups by local port no longer find us. */
	mlxsw_sp->ports[local_port] = NULL;
	devlink_port = &mlxsw_sp_port->devlink_port;
	devlink_port_type_clear(devlink_port);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	devlink_port_unregister(devlink_port);
	mlxsw_sp_port_vports_fini(mlxsw_sp_port);
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	/* Detach the port from its switch partition and front-panel module. */
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	kfree(mlxsw_sp_port->untagged_vlans);
	kfree(mlxsw_sp_port->active_vlans);
	free_netdev(mlxsw_sp_port->dev);
}
1650
/* Remove every instantiated port and release the ports array. */
static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	/* Local port 0 is never instantiated here; start at 1. */
	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
		mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->ports);
}
1659
1660 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
1661 {
1662         size_t alloc_size;
1663         u8 module, width;
1664         int i;
1665         int err;
1666
1667         alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
1668         mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
1669         if (!mlxsw_sp->ports)
1670                 return -ENOMEM;
1671
1672         for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
1673                 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
1674                                                     &width);
1675                 if (err)
1676                         goto err_port_module_info_get;
1677                 if (!width)
1678                         continue;
1679                 mlxsw_sp->port_to_module[i] = module;
1680                 err = __mlxsw_sp_port_create(mlxsw_sp, i, false, module, width);
1681                 if (err)
1682                         goto err_port_create;
1683         }
1684         return 0;
1685
1686 err_port_create:
1687 err_port_module_info_get:
1688         for (i--; i >= 1; i--)
1689                 mlxsw_sp_port_remove(mlxsw_sp, i);
1690         kfree(mlxsw_sp->ports);
1691         return err;
1692 }
1693
1694 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
1695 {
1696         u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
1697
1698         return local_port - offset;
1699 }
1700
/* devlink port-split handler: split one full-width port into @count (2 or 4)
 * split ports, each using width / count lanes of the same module.
 */
static int mlxsw_sp_port_split(void *priv, u8 local_port, unsigned int count)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
	u8 module, cur_width, base_port;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	if (count != 2 && count != 4) {
		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
					    &cur_width);
	if (err) {
		netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
		return err;
	}

	/* Only a port currently using all its lanes can be split. */
	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		return -EINVAL;
	}

	/* Make sure we have enough slave (even) ports for the split. */
	if (count == 2) {
		base_port = local_port;
		if (mlxsw_sp->ports[base_port + 1]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	} else {
		base_port = mlxsw_sp_cluster_base_port_get(local_port);
		if (mlxsw_sp->ports[base_port + 1] ||
		    mlxsw_sp->ports[base_port + 3]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	}

	/* Remove the ports occupying the cluster slots ... */
	for (i = 0; i < count; i++)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	/* ... and recreate them as split ports, one lane group each. */
	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
					   module, width, i * width);
		if (err) {
			dev_err(mlxsw_sp->bus_info->dev, "Failed to create split port\n");
			goto err_port_create;
		}
	}

	return 0;

err_port_create:
	/* Undo the split ports created so far, then restore the original
	 * unsplit full-width ports from the saved module mapping.
	 */
	for (i--; i >= 0; i--)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
	for (i = 0; i < count / 2; i++) {
		module = mlxsw_sp->port_to_module[base_port + i * 2];
		mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
				     module, MLXSW_PORT_MODULE_MAX_WIDTH, 0);
	}
	return err;
}
1774
/* devlink port-unsplit handler: remove the split ports sharing the module
 * of @local_port and recreate the original full-width port(s).
 */
static int mlxsw_sp_port_unsplit(void *priv, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 module, cur_width, base_port;
	unsigned int count;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
					    &cur_width);
	if (err) {
		netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
		return err;
	}
	/* A single-lane split port implies a 4-way split; otherwise 2-way. */
	count = cur_width == 1 ? 4 : 2;

	base_port = mlxsw_sp_cluster_base_port_get(local_port);

	/* Determine which ports to remove. */
	if (count == 2 && local_port >= base_port + 2)
		base_port = base_port + 2;

	for (i = 0; i < count; i++)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	/* Recreate the unsplit full-width port(s); failures here are only
	 * logged, as the split ports have already been removed.
	 */
	for (i = 0; i < count / 2; i++) {
		module = mlxsw_sp->port_to_module[base_port + i * 2];
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
					   module, MLXSW_PORT_MODULE_MAX_WIDTH,
					   0);
		if (err)
			dev_err(mlxsw_sp->bus_info->dev, "Failed to reinstantiate port\n");
	}

	return 0;
}
1824
1825 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
1826                                      char *pude_pl, void *priv)
1827 {
1828         struct mlxsw_sp *mlxsw_sp = priv;
1829         struct mlxsw_sp_port *mlxsw_sp_port;
1830         enum mlxsw_reg_pude_oper_status status;
1831         u8 local_port;
1832
1833         local_port = mlxsw_reg_pude_local_port_get(pude_pl);
1834         mlxsw_sp_port = mlxsw_sp->ports[local_port];
1835         if (!mlxsw_sp_port) {
1836                 dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n",
1837                          local_port);
1838                 return;
1839         }
1840
1841         status = mlxsw_reg_pude_oper_status_get(pude_pl);
1842         if (status == MLXSW_PORT_OPER_STATUS_UP) {
1843                 netdev_info(mlxsw_sp_port->dev, "link up\n");
1844                 netif_carrier_on(mlxsw_sp_port->dev);
1845         } else {
1846                 netdev_info(mlxsw_sp_port->dev, "link down\n");
1847                 netif_carrier_off(mlxsw_sp_port->dev);
1848         }
1849 }
1850
/* Event listener for Port Up/Down Event (PUDE) traps. */
static struct mlxsw_event_listener mlxsw_sp_pude_event = {
	.func = mlxsw_sp_pude_event_func,
	.trap_id = MLXSW_TRAP_ID_PUDE,
};
1855
1856 static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
1857                                    enum mlxsw_event_trap_id trap_id)
1858 {
1859         struct mlxsw_event_listener *el;
1860         char hpkt_pl[MLXSW_REG_HPKT_LEN];
1861         int err;
1862
1863         switch (trap_id) {
1864         case MLXSW_TRAP_ID_PUDE:
1865                 el = &mlxsw_sp_pude_event;
1866                 break;
1867         }
1868         err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
1869         if (err)
1870                 return err;
1871
1872         mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
1873         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
1874         if (err)
1875                 goto err_event_trap_set;
1876
1877         return 0;
1878
1879 err_event_trap_set:
1880         mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
1881         return err;
1882 }
1883
1884 static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
1885                                       enum mlxsw_event_trap_id trap_id)
1886 {
1887         struct mlxsw_event_listener *el;
1888
1889         switch (trap_id) {
1890         case MLXSW_TRAP_ID_PUDE:
1891                 el = &mlxsw_sp_pude_event;
1892                 break;
1893         }
1894         mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
1895 }
1896
1897 static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
1898                                       void *priv)
1899 {
1900         struct mlxsw_sp *mlxsw_sp = priv;
1901         struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
1902         struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
1903
1904         if (unlikely(!mlxsw_sp_port)) {
1905                 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
1906                                      local_port);
1907                 return;
1908         }
1909
1910         skb->dev = mlxsw_sp_port->dev;
1911
1912         pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
1913         u64_stats_update_begin(&pcpu_stats->syncp);
1914         pcpu_stats->rx_packets++;
1915         pcpu_stats->rx_bytes += skb->len;
1916         u64_stats_update_end(&pcpu_stats->syncp);
1917
1918         skb->protocol = eth_type_trans(skb, skb->dev);
1919         netif_receive_skb(skb);
1920 }
1921
1922 static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
1923         {
1924                 .func = mlxsw_sp_rx_listener_func,
1925                 .local_port = MLXSW_PORT_DONT_CARE,
1926                 .trap_id = MLXSW_TRAP_ID_FDB_MC,
1927         },
1928         /* Traps for specific L2 packet types, not trapped as FDB MC */
1929         {
1930                 .func = mlxsw_sp_rx_listener_func,
1931                 .local_port = MLXSW_PORT_DONT_CARE,
1932                 .trap_id = MLXSW_TRAP_ID_STP,
1933         },
1934         {
1935                 .func = mlxsw_sp_rx_listener_func,
1936                 .local_port = MLXSW_PORT_DONT_CARE,
1937                 .trap_id = MLXSW_TRAP_ID_LACP,
1938         },
1939         {
1940                 .func = mlxsw_sp_rx_listener_func,
1941                 .local_port = MLXSW_PORT_DONT_CARE,
1942                 .trap_id = MLXSW_TRAP_ID_EAPOL,
1943         },
1944         {
1945                 .func = mlxsw_sp_rx_listener_func,
1946                 .local_port = MLXSW_PORT_DONT_CARE,
1947                 .trap_id = MLXSW_TRAP_ID_LLDP,
1948         },
1949         {
1950                 .func = mlxsw_sp_rx_listener_func,
1951                 .local_port = MLXSW_PORT_DONT_CARE,
1952                 .trap_id = MLXSW_TRAP_ID_MMRP,
1953         },
1954         {
1955                 .func = mlxsw_sp_rx_listener_func,
1956                 .local_port = MLXSW_PORT_DONT_CARE,
1957                 .trap_id = MLXSW_TRAP_ID_MVRP,
1958         },
1959         {
1960                 .func = mlxsw_sp_rx_listener_func,
1961                 .local_port = MLXSW_PORT_DONT_CARE,
1962                 .trap_id = MLXSW_TRAP_ID_RPVST,
1963         },
1964         {
1965                 .func = mlxsw_sp_rx_listener_func,
1966                 .local_port = MLXSW_PORT_DONT_CARE,
1967                 .trap_id = MLXSW_TRAP_ID_DHCP,
1968         },
1969         {
1970                 .func = mlxsw_sp_rx_listener_func,
1971                 .local_port = MLXSW_PORT_DONT_CARE,
1972                 .trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
1973         },
1974         {
1975                 .func = mlxsw_sp_rx_listener_func,
1976                 .local_port = MLXSW_PORT_DONT_CARE,
1977                 .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
1978         },
1979         {
1980                 .func = mlxsw_sp_rx_listener_func,
1981                 .local_port = MLXSW_PORT_DONT_CARE,
1982                 .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
1983         },
1984         {
1985                 .func = mlxsw_sp_rx_listener_func,
1986                 .local_port = MLXSW_PORT_DONT_CARE,
1987                 .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
1988         },
1989         {
1990                 .func = mlxsw_sp_rx_listener_func,
1991                 .local_port = MLXSW_PORT_DONT_CARE,
1992                 .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
1993         },
1994 };
1995
/* Configure the RX and CTRL trap groups, register all RX listeners and
 * program each trap to be delivered to the CPU.
 *
 * Error handling is two-phase: if the HPKT write for listener i fails, that
 * listener is unregistered at err_rx_trap_set, then listeners 0..i-1 (which
 * were fully set up) have their trap action restored to FORWARD and are
 * unregistered in the err_rx_listener_register loop.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
						      &mlxsw_sp_rx_listener[i],
						      mlxsw_sp);
		if (err)
			goto err_rx_listener_register;

		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
				    mlxsw_sp_rx_listener[i].trap_id);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
		if (err)
			goto err_rx_trap_set;
	}
	return 0;

err_rx_trap_set:
	mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
					  &mlxsw_sp_rx_listener[i],
					  mlxsw_sp);
err_rx_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
	return err;
}
2044
2045 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
2046 {
2047         char hpkt_pl[MLXSW_REG_HPKT_LEN];
2048         int i;
2049
2050         for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
2051                 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
2052                                     mlxsw_sp_rx_listener[i].trap_id);
2053                 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2054
2055                 mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
2056                                                   &mlxsw_sp_rx_listener[i],
2057                                                   mlxsw_sp);
2058         }
2059 }
2060
2061 static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
2062                                  enum mlxsw_reg_sfgc_type type,
2063                                  enum mlxsw_reg_sfgc_bridge_type bridge_type)
2064 {
2065         enum mlxsw_flood_table_type table_type;
2066         enum mlxsw_sp_flood_table flood_table;
2067         char sfgc_pl[MLXSW_REG_SFGC_LEN];
2068
2069         if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
2070                 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
2071         else
2072                 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
2073
2074         if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
2075                 flood_table = MLXSW_SP_FLOOD_TABLE_UC;
2076         else
2077                 flood_table = MLXSW_SP_FLOOD_TABLE_BM;
2078
2079         mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
2080                             flood_table);
2081         return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
2082 }
2083
2084 static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
2085 {
2086         int type, err;
2087
2088         for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
2089                 if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
2090                         continue;
2091
2092                 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
2093                                             MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
2094                 if (err)
2095                         return err;
2096
2097                 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
2098                                             MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
2099                 if (err)
2100                         return err;
2101         }
2102
2103         return 0;
2104 }
2105
2106 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
2107 {
2108         char slcr_pl[MLXSW_REG_SLCR_LEN];
2109
2110         mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
2111                                      MLXSW_REG_SLCR_LAG_HASH_DMAC |
2112                                      MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
2113                                      MLXSW_REG_SLCR_LAG_HASH_VLANID |
2114                                      MLXSW_REG_SLCR_LAG_HASH_SIP |
2115                                      MLXSW_REG_SLCR_LAG_HASH_DIP |
2116                                      MLXSW_REG_SLCR_LAG_HASH_SPORT |
2117                                      MLXSW_REG_SLCR_LAG_HASH_DPORT |
2118                                      MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
2119         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
2120 }
2121
/* Driver init callback invoked by the mlxsw core once the bus is up.
 * Brings up the switch step by step; on any failure the error labels
 * unwind the already-completed steps in reverse order.
 */
static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;
	INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);

	/* The base MAC is needed before ports are created (port MACs are
	 * presumably derived from it - see __mlxsw_sp_port_create).
	 */
	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		return err;
	}

	err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
		goto err_event_register;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
		goto err_rx_listener_register;
	}

	err = mlxsw_sp_flood_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	return 0;

	/* NOTE(review): flood/buffers/LAG init have no dedicated teardown
	 * here; the labels below fall through to the traps cleanup.
	 */
err_switchdev_init:
err_lag_init:
err_buffers_init:
err_flood_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_rx_listener_register:
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
err_event_register:
	mlxsw_sp_ports_remove(mlxsw_sp);
	return err;
}
2195
/* Driver fini callback: tear down in the reverse order of mlxsw_sp_init(). */
static void mlxsw_sp_fini(void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	mlxsw_sp_ports_remove(mlxsw_sp);
}
2205
/* Spectrum resource/configuration profile passed to the mlxsw core at
 * device initialization (referenced from mlxsw_sp_driver below).
 */
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels         = 1,
	.max_vepa_channels              = 0,
	.used_max_lag                   = 1,
	.max_lag                        = MLXSW_SP_LAG_MAX,
	.used_max_port_per_lag          = 1,
	.max_port_per_lag               = MLXSW_SP_PORT_PER_LAG_MAX,
	.used_max_mid                   = 1,
	.max_mid                        = MLXSW_SP_MID_MAX,
	.used_max_pgt                   = 1,
	.max_pgt                        = 0,
	.used_max_system_port           = 1,
	.max_system_port                = 64,
	.used_max_vlan_groups           = 1,
	.max_vlan_groups                = 127,
	.used_max_regions               = 1,
	.max_regions                    = 400,
	.used_flood_tables              = 1,
	.used_flood_mode                = 1,
	.flood_mode                     = 3,
	/* One FID-offset flood table entry per VLAN (VID 0 excluded). */
	.max_fid_offset_flood_tables    = 2,
	.fid_offset_flood_table_size    = VLAN_N_VID - 1,
	/* One FID flood table entry per vFID. */
	.max_fid_flood_tables           = 2,
	.fid_flood_table_size           = MLXSW_SP_VFID_MAX,
	.used_max_ib_mc                 = 1,
	.max_ib_mc                      = 0,
	.used_max_pkey                  = 1,
	.max_pkey                       = 0,
	.swid_config                    = {
		{
			.used_type      = 1,
			.type           = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
2241
/* Registration descriptor binding the Spectrum callbacks, TX header
 * construction and configuration profile to the mlxsw core.
 */
static struct mlxsw_driver mlxsw_sp_driver = {
	.kind                   = MLXSW_DEVICE_KIND_SPECTRUM,
	.owner                  = THIS_MODULE,
	.priv_size              = sizeof(struct mlxsw_sp),
	.init                   = mlxsw_sp_init,
	.fini                   = mlxsw_sp_fini,
	.port_split             = mlxsw_sp_port_split,
	.port_unsplit           = mlxsw_sp_port_unsplit,
	.txhdr_construct        = mlxsw_sp_txhdr_construct,
	.txhdr_len              = MLXSW_TXHDR_LEN,
	.profile                = &mlxsw_sp_config_profile,
};
2254
2255 static int
2256 mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port)
2257 {
2258         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2259         char sfdf_pl[MLXSW_REG_SFDF_LEN];
2260
2261         mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT);
2262         mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port);
2263
2264         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2265 }
2266
2267 static int
2268 mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
2269                                     u16 fid)
2270 {
2271         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2272         char sfdf_pl[MLXSW_REG_SFDF_LEN];
2273
2274         mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
2275         mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
2276         mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
2277                                                 mlxsw_sp_port->local_port);
2278
2279         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2280 }
2281
2282 static int
2283 mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port)
2284 {
2285         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2286         char sfdf_pl[MLXSW_REG_SFDF_LEN];
2287
2288         mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG);
2289         mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
2290
2291         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2292 }
2293
2294 static int
2295 mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
2296                                       u16 fid)
2297 {
2298         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2299         char sfdf_pl[MLXSW_REG_SFDF_LEN];
2300
2301         mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
2302         mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
2303         mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
2304
2305         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2306 }
2307
2308 static int
2309 __mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port)
2310 {
2311         int err, last_err = 0;
2312         u16 vid;
2313
2314         for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
2315                 err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid);
2316                 if (err)
2317                         last_err = err;
2318         }
2319
2320         return last_err;
2321 }
2322
2323 static int
2324 __mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port)
2325 {
2326         int err, last_err = 0;
2327         u16 vid;
2328
2329         for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
2330                 err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid);
2331                 if (err)
2332                         last_err = err;
2333         }
2334
2335         return last_err;
2336 }
2337
2338 static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port)
2339 {
2340         if (!list_empty(&mlxsw_sp_port->vports_list))
2341                 if (mlxsw_sp_port->lagged)
2342                         return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port);
2343                 else
2344                         return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port);
2345         else
2346                 if (mlxsw_sp_port->lagged)
2347                         return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port);
2348                 else
2349                         return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port);
2350 }
2351
2352 static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport)
2353 {
2354         u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport);
2355         u16 fid = mlxsw_sp_vfid_to_fid(vfid);
2356
2357         if (mlxsw_sp_vport->lagged)
2358                 return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport,
2359                                                              fid);
2360         else
2361                 return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid);
2362 }
2363
/* Return true if @dev is an mlxsw_sp port netdev (identified by its ops). */
static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}
2368
/* Prepare a port for joining a bridge: drop the implicit VID 1 interface
 * and mark the port bridged with learning and unicast flooding flags set.
 */
static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* When port is not bridged untagged packets are tagged with
	 * PVID=VID=1, thereby creating an implicit VLAN interface in
	 * the device. Remove it and let bridge code take care of its
	 * own VLANs.
	 */
	err = mlxsw_sp_port_kill_vid(dev, 0, 1);
	if (err)
		return err;

	mlxsw_sp_port->learning = 1;
	mlxsw_sp_port->learning_sync = 1;
	mlxsw_sp_port->uc_flood = 1;
	mlxsw_sp_port->bridged = 1;

	return 0;
}
2390
/* Undo mlxsw_sp_port_bridge_join(): optionally flush the port's FDB
 * entries (a flush failure is only logged), restore PVID 1, clear the
 * bridge-related flags and re-create the implicit VLAN 1 interface.
 */
static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool flush_fdb)
{
	struct net_device *dev = mlxsw_sp_port->dev;

	if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port))
		netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");

	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);

	mlxsw_sp_port->learning = 0;
	mlxsw_sp_port->learning_sync = 0;
	mlxsw_sp_port->uc_flood = 0;
	mlxsw_sp_port->bridged = 0;

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	return mlxsw_sp_port_add_vid(dev, 0, 1);
}
2411
/* Only a single bridge is tracked as master; return true if @br_dev can
 * become (or already is) that bridge.
 */
static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
					 struct net_device *br_dev)
{
	return !mlxsw_sp->master_bridge.dev ||
	       mlxsw_sp->master_bridge.dev == br_dev;
}
2418
/* Record @br_dev as the master bridge and take a reference on it. */
static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	mlxsw_sp->master_bridge.dev = br_dev;
	mlxsw_sp->master_bridge.ref_count++;
}
2425
/* Drop a reference on the master bridge; forget it on the last put.
 * @br_dev is unused here and kept for symmetry with the inc() helper.
 */
static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	if (--mlxsw_sp->master_bridge.ref_count == 0)
		mlxsw_sp->master_bridge.dev = NULL;
}
2432
/* Create LAG @lag_id in the device via the SLDR register. */
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
2440
/* Destroy LAG @lag_id in the device via the SLDR register. */
static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
2448
/* Add the port to LAG @lag_id's collector at @port_index (SLCOR register). */
static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
2459
/* Remove the port from LAG @lag_id's collector (SLCOR register). */
static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
2470
/* Enable collection for the port within LAG @lag_id (SLCOR register). */
static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
2481
/* Disable collection for the port within LAG @lag_id (SLCOR register). */
static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
2492
2493 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
2494                                   struct net_device *lag_dev,
2495                                   u16 *p_lag_id)
2496 {
2497         struct mlxsw_sp_upper *lag;
2498         int free_lag_id = -1;
2499         int i;
2500
2501         for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
2502                 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
2503                 if (lag->ref_count) {
2504                         if (lag->dev == lag_dev) {
2505                                 *p_lag_id = i;
2506                                 return 0;
2507                         }
2508                 } else if (free_lag_id < 0) {
2509                         free_lag_id = i;
2510                 }
2511         }
2512         if (free_lag_id < 0)
2513                 return -EBUSY;
2514         *p_lag_id = free_lag_id;
2515         return 0;
2516 }
2517
2518 static bool
2519 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
2520                           struct net_device *lag_dev,
2521                           struct netdev_lag_upper_info *lag_upper_info)
2522 {
2523         u16 lag_id;
2524
2525         if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
2526                 return false;
2527         if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
2528                 return false;
2529         return true;
2530 }
2531
2532 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
2533                                        u16 lag_id, u8 *p_port_index)
2534 {
2535         int i;
2536
2537         for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
2538                 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
2539                         *p_port_index = i;
2540                         return 0;
2541                 }
2542         }
2543         return -EBUSY;
2544 }
2545
2546 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
2547                                   struct net_device *lag_dev)
2548 {
2549         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2550         struct mlxsw_sp_upper *lag;
2551         u16 lag_id;
2552         u8 port_index;
2553         int err;
2554
2555         err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
2556         if (err)
2557                 return err;
2558         lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
2559         if (!lag->ref_count) {
2560                 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
2561                 if (err)
2562                         return err;
2563                 lag->dev = lag_dev;
2564         }
2565
2566         err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
2567         if (err)
2568                 return err;
2569         err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
2570         if (err)
2571                 goto err_col_port_add;
2572         err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
2573         if (err)
2574                 goto err_col_port_enable;
2575
2576         mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
2577                                    mlxsw_sp_port->local_port);
2578         mlxsw_sp_port->lag_id = lag_id;
2579         mlxsw_sp_port->lagged = 1;
2580         lag->ref_count++;
2581         return 0;
2582
2583 err_col_port_add:
2584         if (!lag->ref_count)
2585                 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
2586 err_col_port_enable:
2587         mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
2588         return err;
2589 }
2590
2591 static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
2592                                        struct net_device *br_dev,
2593                                        bool flush_fdb);
2594
/* Tear down the port's LAG membership: disable and remove the port from
 * the LAG's collector, clean up any bridge memberships that were built
 * on top of the LAG (their own teardown notifications never reach this
 * port), destroy the LAG in the device when this was its last member,
 * and finally clear the core's lag mapping and drop the reference.
 */
static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_upper *lag;
	u16 lag_id = mlxsw_sp_port->lag_id;
	int err;

	if (!mlxsw_sp_port->lagged)
		return 0;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
	if (err)
		return err;

	/* In case we leave a LAG device that has bridges built on top,
	 * then their teardown sequence is never issued and we need to
	 * invoke the necessary cleanup routines ourselves.
	 */
	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct net_device *br_dev;

		if (!mlxsw_sp_vport->bridged)
			continue;

		/* FDB flush is skipped (false): entries point at the LAG,
		 * which this port is leaving anyway.
		 */
		br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
		mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false);
	}

	if (mlxsw_sp_port->bridged) {
		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false);
		mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL);
	}

	if (lag->ref_count == 1) {
		/* Last member: flush the LAG's FDB entries (failure only
		 * logged) before destroying the LAG in the device.
		 */
		if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port))
			netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
		err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
		if (err)
			return err;
	}

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;
	return 0;
}
2651
/* Add the port to LAG @lag_id's distribution (SLDR register). */
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
2662
/* Remove the port from LAG @lag_id's distribution (SLDR register). */
static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
2673
2674 static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
2675                                        bool lag_tx_enabled)
2676 {
2677         if (lag_tx_enabled)
2678                 return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
2679                                                   mlxsw_sp_port->lag_id);
2680         else
2681                 return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
2682                                                      mlxsw_sp_port->lag_id);
2683 }
2684
/* React to a LAG lower-state change by syncing tx_enabled to the device. */
static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}
2690
2691 static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
2692                                    struct net_device *vlan_dev)
2693 {
2694         struct mlxsw_sp_port *mlxsw_sp_vport;
2695         u16 vid = vlan_dev_vlan_id(vlan_dev);
2696
2697         mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
2698         if (!mlxsw_sp_vport) {
2699                 WARN_ON(!mlxsw_sp_vport);
2700                 return -EINVAL;
2701         }
2702
2703         mlxsw_sp_vport->dev = vlan_dev;
2704
2705         return 0;
2706 }
2707
2708 static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
2709                                      struct net_device *vlan_dev)
2710 {
2711         struct mlxsw_sp_port *mlxsw_sp_vport;
2712         u16 vid = vlan_dev_vlan_id(vlan_dev);
2713
2714         mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
2715         if (!mlxsw_sp_vport) {
2716                 WARN_ON(!mlxsw_sp_vport);
2717                 return -EINVAL;
2718         }
2719
2720         /* When removing a VLAN device while still bridged we should first
2721          * remove it from the bridge, as we receive the bridge's notification
2722          * when the vPort is already gone.
2723          */
2724         if (mlxsw_sp_vport->bridged) {
2725                 struct net_device *br_dev;
2726
2727                 br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
2728                 mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true);
2729         }
2730
2731         mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
2732
2733         return 0;
2734 }
2735
/* Handle PRECHANGEUPPER/CHANGEUPPER notifications for an mlxsw port.
 *
 * PRECHANGEUPPER vetoes (NOTIFY_BAD) unsupported topologies before they
 * happen: joining a second bridge, or joining a LAG that cannot be
 * offloaded. CHANGEUPPER then reflects the already-committed topology
 * change into the device: VLAN upper link/unlink, bridge join/leave and
 * LAG join/leave.
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only master uppers being linked are vetted. */
		if (!info->master || !info->linking)
			break;
		/* HW limitation forbids to put ports to multiple bridges. */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return NOTIFY_BAD;
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return NOTIFY_BAD;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
							      upper_dev);
				if (err) {
					netdev_err(dev, "Failed to link VLAN device\n");
					return NOTIFY_BAD;
				}
			} else {
				err = mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
								upper_dev);
				if (err) {
					netdev_err(dev, "Failed to unlink VLAN device\n");
					return NOTIFY_BAD;
				}
			}
		} else if (netif_is_bridge_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port);
				if (err) {
					netdev_err(dev, "Failed to join bridge\n");
					return NOTIFY_BAD;
				}
				mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
			} else {
				/* The bridge reference is dropped even when
				 * leaving fails.
				 */
				err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
								 true);
				mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
				if (err) {
					netdev_err(dev, "Failed to leave bridge\n");
					return NOTIFY_BAD;
				}
			}
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
				if (err) {
					netdev_err(dev, "Failed to join link aggregation\n");
					return NOTIFY_BAD;
				}
			} else {
				err = mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							      upper_dev);
				if (err) {
					netdev_err(dev, "Failed to leave link aggregation\n");
					return NOTIFY_BAD;
				}
			}
		}
		break;
	}

	return NOTIFY_DONE;
}
2820
/* Handle NETDEV_CHANGELOWERSTATE for an mlxsw port: when the port is a
 * LAG member, propagate the new lower state (tx_enabled) to the device.
 * Errors are only logged; the notification is never vetoed.
 */
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return NOTIFY_DONE;
}
2844
/* Dispatch a netdev notifier event on an mlxsw port to the upper
 * (bridge/LAG/VLAN topology) or lower (LAG member state) handler.
 */
static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
	}

	return NOTIFY_DONE;
}
2858
2859 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
2860                                         unsigned long event, void *ptr)
2861 {
2862         struct net_device *dev;
2863         struct list_head *iter;
2864         int ret;
2865
2866         netdev_for_each_lower_dev(lag_dev, dev, iter) {
2867                 if (mlxsw_sp_port_dev_check(dev)) {
2868                         ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
2869                         if (ret == NOTIFY_BAD)
2870                                 return ret;
2871                 }
2872         }
2873
2874         return NOTIFY_DONE;
2875 }
2876
/* Find the bridge vFID bound to @br_dev, or NULL if the bridge has no
 * vFID yet.
 */
static struct mlxsw_sp_vfid *
mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp,
		      const struct net_device *br_dev)
{
	struct mlxsw_sp_vfid *vfid;

	list_for_each_entry(vfid, &mlxsw_sp->br_vfids.list, list) {
		if (vfid->br_dev == br_dev)
			return vfid;
	}

	return NULL;
}
2890
/* Map a global vFID index to a bridge vFID index; bridge vFIDs are
 * allocated after the per-port vFID range.
 */
static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid)
{
	return vfid - MLXSW_SP_VFID_PORT_MAX;
}
2895
/* Inverse of mlxsw_sp_vfid_to_br_vfid(): bridge vFID index to global. */
static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid)
{
	return MLXSW_SP_VFID_PORT_MAX + br_vfid;
}
2900
/* First unused bridge vFID index; find_first_zero_bit() returns
 * MLXSW_SP_VFID_BR_MAX when the bitmap is full.
 */
static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->br_vfids.mapped,
				   MLXSW_SP_VFID_BR_MAX);
}
2906
/* Allocate a vFID for bridge @br_dev: pick a free bridge vFID index,
 * create the FID in the device, and track it on the br_vfids list.
 * Returns the new vFID or an ERR_PTR() on failure.
 */
static struct mlxsw_sp_vfid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp,
						     struct net_device *br_dev)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_vfid *vfid;
	u16 n_vfid;
	int err;

	/* NOTE(review): this check assumes MLXSW_SP_VFID_MAX ==
	 * MLXSW_SP_VFID_PORT_MAX + MLXSW_SP_VFID_BR_MAX, so a full bitmap
	 * (avail returns MLXSW_SP_VFID_BR_MAX) lands exactly on it —
	 * confirm against the header definitions.
	 */
	n_vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp));
	if (n_vfid == MLXSW_SP_VFID_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
	if (err) {
		dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
		return ERR_PTR(err);
	}

	vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
	if (!vfid)
		goto err_allocate_vfid;

	vfid->vfid = n_vfid;
	vfid->br_dev = br_dev;

	list_add(&vfid->list, &mlxsw_sp->br_vfids.list);
	set_bit(mlxsw_sp_vfid_to_br_vfid(n_vfid), mlxsw_sp->br_vfids.mapped);

	return vfid;

err_allocate_vfid:
	/* Roll back the device FID created above. */
	__mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
	return ERR_PTR(-ENOMEM);
}
2943
/* Release a bridge vFID: clear its bitmap slot, unlink it from the
 * br_vfids list, destroy the device FID and free the tracking struct.
 */
static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_vfid *vfid)
{
	u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid->vfid);

	clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped);
	list_del(&vfid->list);

	__mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);

	kfree(vfid);
}
2956
2957 static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
2958                                        struct net_device *br_dev,
2959                                        bool flush_fdb)
2960 {
2961         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
2962         u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
2963         struct net_device *dev = mlxsw_sp_vport->dev;
2964         struct mlxsw_sp_vfid *vfid, *new_vfid;
2965         int err;
2966
2967         vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
2968         if (!vfid) {
2969                 WARN_ON(!vfid);
2970                 return -EINVAL;
2971         }
2972
2973         /* We need a vFID to go back to after leaving the bridge's vFID. */
2974         new_vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
2975         if (!new_vfid) {
2976                 new_vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
2977                 if (IS_ERR(new_vfid)) {
2978                         netdev_err(dev, "Failed to create vFID for VID=%d\n",
2979                                    vid);
2980                         return PTR_ERR(new_vfid);
2981                 }
2982         }
2983
2984         /* Invalidate existing {Port, VID} to vFID mapping and create a new
2985          * one for the new vFID.
2986          */
2987         err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
2988                                            MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
2989                                            false,
2990                                            mlxsw_sp_vfid_to_fid(vfid->vfid),
2991                                            vid);
2992         if (err) {
2993                 netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
2994                            vfid->vfid);
2995                 goto err_port_vid_to_fid_invalidate;
2996         }
2997
2998         err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
2999                                            MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
3000                                            true,
3001                                            mlxsw_sp_vfid_to_fid(new_vfid->vfid),
3002                                            vid);
3003         if (err) {
3004                 netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
3005                            new_vfid->vfid);
3006                 goto err_port_vid_to_fid_validate;
3007         }
3008
3009         err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
3010         if (err) {
3011                 netdev_err(dev, "Failed to disable learning\n");
3012                 goto err_port_vid_learning_set;
3013         }
3014
3015         err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
3016                                        false);
3017         if (err) {
3018                 netdev_err(dev, "Failed clear to clear flooding\n");
3019                 goto err_vport_flood_set;
3020         }
3021
3022         err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
3023                                           MLXSW_REG_SPMS_STATE_FORWARDING);
3024         if (err) {
3025                 netdev_err(dev, "Failed to set STP state\n");
3026                 goto err_port_stp_state_set;
3027         }
3028
3029         if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport))
3030                 netdev_err(dev, "Failed to flush FDB\n");
3031
3032         /* Switch between the vFIDs and destroy the old one if needed. */
3033         new_vfid->nr_vports++;
3034         mlxsw_sp_vport->vport.vfid = new_vfid;
3035         vfid->nr_vports--;
3036         if (!vfid->nr_vports)
3037                 mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
3038
3039         mlxsw_sp_vport->learning = 0;
3040         mlxsw_sp_vport->learning_sync = 0;
3041         mlxsw_sp_vport->uc_flood = 0;
3042         mlxsw_sp_vport->bridged = 0;
3043
3044         return 0;
3045
3046 err_port_stp_state_set:
3047 err_vport_flood_set:
3048 err_port_vid_learning_set:
3049 err_port_vid_to_fid_validate:
3050 err_port_vid_to_fid_invalidate:
3051         /* Rollback vFID only if new. */
3052         if (!new_vfid->nr_vports)
3053                 mlxsw_sp_vfid_destroy(mlxsw_sp, new_vfid);
3054         return err;
3055 }
3056
/* Attach a vPort to @br_dev's vFID: get or create the bridge vFID,
 * enable flooding and learning, switch the {Port, VID} mapping from the
 * vPort's current vFID to the bridge vFID, and set the bridge flags.
 * On failure, each step is rolled back in reverse order.
 */
static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *br_dev)
{
	struct mlxsw_sp_vfid *old_vfid = mlxsw_sp_vport->vport.vfid;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	struct mlxsw_sp_vfid *vfid;
	int err;

	vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
	if (!vfid) {
		vfid = mlxsw_sp_br_vfid_create(mlxsw_sp, br_dev);
		if (IS_ERR(vfid)) {
			netdev_err(dev, "Failed to create bridge vFID\n");
			return PTR_ERR(vfid);
		}
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, true, false);
	if (err) {
		netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
			   vfid->vfid);
		goto err_port_flood_set;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning\n");
		goto err_port_vid_learning_set;
	}

	/* We need to invalidate existing {Port, VID} to vFID mapping and
	 * create a new one for the bridge's vFID.
	 */
	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   false,
					   mlxsw_sp_vfid_to_fid(old_vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
			   old_vfid->vfid);
		goto err_port_vid_to_fid_invalidate;
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   true,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
			   vfid->vfid);
		goto err_port_vid_to_fid_validate;
	}

	/* Switch between the vFIDs and destroy the old one if needed. */
	vfid->nr_vports++;
	mlxsw_sp_vport->vport.vfid = vfid;
	old_vfid->nr_vports--;
	if (!old_vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp, old_vfid);

	mlxsw_sp_vport->learning = 1;
	mlxsw_sp_vport->learning_sync = 1;
	mlxsw_sp_vport->uc_flood = 1;
	mlxsw_sp_vport->bridged = 1;

	return 0;

err_port_vid_to_fid_validate:
	/* Restore the old mapping that was invalidated above. */
	mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
				     MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
				     mlxsw_sp_vfid_to_fid(old_vfid->vfid), vid);
err_port_vid_to_fid_invalidate:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
err_port_vid_learning_set:
	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, false);
err_port_flood_set:
	/* Destroy the bridge vFID only if it was created by this call. */
	if (!vfid->nr_vports)
		mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
	return err;
}
3141
/* Return false when another vPort of @mlxsw_sp_port is already a member
 * of @br_dev; a port's VLAN uppers must not share a bridge.
 */
static bool
mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
				  const struct net_device *br_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		if (mlxsw_sp_vport_br_get(mlxsw_sp_vport) == br_dev)
			return false;
	}

	return true;
}
3156
3157 static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
3158                                           unsigned long event, void *ptr,
3159                                           u16 vid)
3160 {
3161         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
3162         struct netdev_notifier_changeupper_info *info = ptr;
3163         struct mlxsw_sp_port *mlxsw_sp_vport;
3164         struct net_device *upper_dev;
3165         int err;
3166
3167         mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
3168
3169         switch (event) {
3170         case NETDEV_PRECHANGEUPPER:
3171                 upper_dev = info->upper_dev;
3172                 if (!info->master || !info->linking)
3173                         break;
3174                 if (!netif_is_bridge_master(upper_dev))
3175                         return NOTIFY_BAD;
3176                 /* We can't have multiple VLAN interfaces configured on
3177                  * the same port and being members in the same bridge.
3178                  */
3179                 if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
3180                                                        upper_dev))
3181                         return NOTIFY_BAD;
3182                 break;
3183         case NETDEV_CHANGEUPPER:
3184                 upper_dev = info->upper_dev;
3185                 if (!info->master)
3186                         break;
3187                 if (info->linking) {
3188                         if (!mlxsw_sp_vport) {
3189                                 WARN_ON(!mlxsw_sp_vport);
3190                                 return NOTIFY_BAD;
3191                         }
3192                         err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
3193                                                          upper_dev);
3194                         if (err) {
3195                                 netdev_err(dev, "Failed to join bridge\n");
3196                                 return NOTIFY_BAD;
3197                         }
3198                 } else {
3199                         /* We ignore bridge's unlinking notifications if vPort
3200                          * is gone, since we already left the bridge when the
3201                          * VLAN device was unlinked from the real device.
3202                          */
3203                         if (!mlxsw_sp_vport)
3204                                 return NOTIFY_DONE;
3205                         err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport,
3206                                                           upper_dev, true);
3207                         if (err) {
3208                                 netdev_err(dev, "Failed to leave bridge\n");
3209                                 return NOTIFY_BAD;
3210                         }
3211                 }
3212         }
3213
3214         return NOTIFY_DONE;
3215 }
3216
3217 static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
3218                                               unsigned long event, void *ptr,
3219                                               u16 vid)
3220 {
3221         struct net_device *dev;
3222         struct list_head *iter;
3223         int ret;
3224
3225         netdev_for_each_lower_dev(lag_dev, dev, iter) {
3226                 if (mlxsw_sp_port_dev_check(dev)) {
3227                         ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
3228                                                              vid);
3229                         if (ret == NOTIFY_BAD)
3230                                 return ret;
3231                 }
3232         }
3233
3234         return NOTIFY_DONE;
3235 }
3236
3237 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
3238                                          unsigned long event, void *ptr)
3239 {
3240         struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
3241         u16 vid = vlan_dev_vlan_id(vlan_dev);
3242
3243         if (mlxsw_sp_port_dev_check(real_dev))
3244                 return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
3245                                                       vid);
3246         else if (netif_is_lag_master(real_dev))
3247                 return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
3248                                                           vid);
3249
3250         return NOTIFY_DONE;
3251 }
3252
3253 static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
3254                                     unsigned long event, void *ptr)
3255 {
3256         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3257
3258         if (mlxsw_sp_port_dev_check(dev))
3259                 return mlxsw_sp_netdevice_port_event(dev, event, ptr);
3260
3261         if (netif_is_lag_master(dev))
3262                 return mlxsw_sp_netdevice_lag_event(dev, event, ptr);
3263
3264         if (is_vlan_dev(dev))
3265                 return mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
3266
3267         return NOTIFY_DONE;
3268 }
3269
/* Notifier block through which mlxsw_sp_netdevice_event() receives
 * netdevice topology change notifications (bridge/LAG/VLAN uppers).
 */
static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};
3273
3274 static int __init mlxsw_sp_module_init(void)
3275 {
3276         int err;
3277
3278         register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3279         err = mlxsw_core_driver_register(&mlxsw_sp_driver);
3280         if (err)
3281                 goto err_core_driver_register;
3282         return 0;
3283
3284 err_core_driver_register:
3285         unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3286         return err;
3287 }
3288
/* Module unload: tear down in reverse order of mlxsw_sp_module_init() —
 * unregister the driver from mlxsw core first, then the netdevice
 * notifier.
 */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}
3294
/* Module entry/exit points and metadata. The device alias lets the
 * mlxsw core bus match this driver to Spectrum devices.
 */
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);