4576d59a98a2b792a9e6a50cd03344aa55f0b01f
[cascardo/linux.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum.c
1 /*
2  * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
3  * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4  * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5  * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6  * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/netdevice.h>
41 #include <linux/etherdevice.h>
42 #include <linux/ethtool.h>
43 #include <linux/slab.h>
44 #include <linux/device.h>
45 #include <linux/skbuff.h>
46 #include <linux/if_vlan.h>
47 #include <linux/if_bridge.h>
48 #include <linux/workqueue.h>
49 #include <linux/jiffies.h>
50 #include <linux/bitops.h>
51 #include <linux/list.h>
52 #include <net/devlink.h>
53 #include <net/switchdev.h>
54 #include <generated/utsrelease.h>
55
56 #include "spectrum.h"
57 #include "core.h"
58 #include "reg.h"
59 #include "port.h"
60 #include "trap.h"
61 #include "txheader.h"
62
63 static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
64 static const char mlxsw_sp_driver_version[] = "1.0";
65
66 /* tx_hdr_version
67  * Tx header version.
68  * Must be set to 1.
69  */
70 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
71
72 /* tx_hdr_ctl
73  * Packet control type.
74  * 0 - Ethernet control (e.g. EMADs, LACP)
75  * 1 - Ethernet data
76  */
77 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
78
79 /* tx_hdr_proto
80  * Packet protocol type. Must be set to 1 (Ethernet).
81  */
82 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
83
84 /* tx_hdr_rx_is_router
85  * Packet is sent from the router. Valid for data packets only.
86  */
87 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
88
89 /* tx_hdr_fid_valid
90  * Indicates if the 'fid' field is valid and should be used for
91  * forwarding lookup. Valid for data packets only.
92  */
93 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
94
95 /* tx_hdr_swid
96  * Switch partition ID. Must be set to 0.
97  */
98 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
99
100 /* tx_hdr_control_tclass
101  * Indicates if the packet should use the control TClass and not one
102  * of the data TClasses.
103  */
104 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
105
106 /* tx_hdr_etclass
107  * Egress TClass to be used on the egress device on the egress port.
108  */
109 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
110
111 /* tx_hdr_port_mid
112  * Destination local port for unicast packets.
113  * Destination multicast ID for multicast packets.
114  *
115  * Control packets are directed to a specific egress port, while data
116  * packets are transmitted through the CPU port (0) into the switch partition,
117  * where forwarding rules are applied.
118  */
119 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
120
121 /* tx_hdr_fid
122  * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
123  * set, otherwise calculated based on the packet's VID using VID to FID mapping.
124  * Valid for data packets only.
125  */
126 MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
127
128 /* tx_hdr_type
129  * 0 - Data packets
130  * 6 - Control packets
131  */
132 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
133
134 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
135                                      const struct mlxsw_tx_info *tx_info)
136 {
137         char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
138
139         memset(txhdr, 0, MLXSW_TXHDR_LEN);
140
141         mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
142         mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
143         mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
144         mlxsw_tx_hdr_swid_set(txhdr, 0);
145         mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
146         mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
147         mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
148 }
149
/* Read the switch's base MAC address (SPAD register) into
 * mlxsw_sp->base_mac. Per-port addresses are later derived from it.
 * Returns 0 on success or a negative errno from the register query.
 */
static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN];
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}
161
162 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
163                                           bool is_up)
164 {
165         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
166         char paos_pl[MLXSW_REG_PAOS_LEN];
167
168         mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
169                             is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
170                             MLXSW_PORT_ADMIN_STATUS_DOWN);
171         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
172 }
173
174 static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
175                                          bool *p_is_up)
176 {
177         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
178         char paos_pl[MLXSW_REG_PAOS_LEN];
179         u8 oper_status;
180         int err;
181
182         mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
183         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
184         if (err)
185                 return err;
186         oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
187         *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
188         return 0;
189 }
190
/* Program the port's unicast MAC address into the device (PPAD register). */
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}
201
/* Derive the port's MAC address from the switch base MAC by adding the
 * local port number to the last byte, then program it into the device.
 * NOTE(review): the addition can wrap the last byte for high port
 * numbers without carrying into the previous byte — presumably the base
 * MAC is allocated so this cannot happen; confirm.
 */
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}
211
212 static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
213                                        u16 vid, enum mlxsw_reg_spms_state state)
214 {
215         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
216         char *spms_pl;
217         int err;
218
219         spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
220         if (!spms_pl)
221                 return -ENOMEM;
222         mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
223         mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
224         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
225         kfree(spms_pl);
226         return err;
227 }
228
229 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
230 {
231         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
232         char pmtu_pl[MLXSW_REG_PMTU_LEN];
233         int max_mtu;
234         int err;
235
236         mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
237         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
238         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
239         if (err)
240                 return err;
241         max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
242
243         if (mtu > max_mtu)
244                 return -EINVAL;
245
246         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
247         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
248 }
249
/* Assign the port to a switch partition (PSPA register). */
static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}
258
/* Enable or disable Virtual Port mode on the port (SVPE register). */
static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}
268
/* Set or clear a {Port, VID} to FID mapping of the given mapping table
 * type (SVFA register). Exported for use by the switchdev code.
 */
int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
			    fid, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}
280
281 static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
282                                           u16 vid, bool learn_enable)
283 {
284         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
285         char *spvmlr_pl;
286         int err;
287
288         spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
289         if (!spvmlr_pl)
290                 return -ENOMEM;
291         mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
292                               learn_enable);
293         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
294         kfree(spvmlr_pl);
295         return err;
296 }
297
/* Create the system port to local port mapping (SSPR register). */
static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}
307
/* Query the module, width and first Tx lane of a local port from the
 * PMLP register. Only lane 0 is read; the remaining lanes are assumed
 * to follow consecutively (see mlxsw_sp_port_module_map()).
 */
static int __mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					   u8 local_port, u8 *p_module,
					   u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}
324
325 static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
326                                          u8 local_port, u8 *p_module,
327                                          u8 *p_width)
328 {
329         u8 lane;
330
331         return __mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, p_module,
332                                                p_width, &lane);
333 }
334
335 static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
336                                     u8 module, u8 width, u8 lane)
337 {
338         char pmlp_pl[MLXSW_REG_PMLP_LEN];
339         int i;
340
341         mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
342         mlxsw_reg_pmlp_width_set(pmlp_pl, width);
343         for (i = 0; i < width; i++) {
344                 mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
345                 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
346         }
347
348         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
349 }
350
/* Unmap a local port from its module by setting its width to zero. */
static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}
359
360 static int mlxsw_sp_port_open(struct net_device *dev)
361 {
362         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
363         int err;
364
365         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
366         if (err)
367                 return err;
368         netif_start_queue(dev);
369         return 0;
370 }
371
/* ndo_stop: stop the queue first, then administratively bring the port
 * down.
 */
static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}
379
/* ndo_start_xmit: transmit an skb through the core, prefixed with the
 * Tx header. Updates per-CPU packet/byte counters on success and the
 * drop counter on any failure. Always returns NETDEV_TX_OK once the skb
 * has been consumed (only NETDEV_TX_BUSY leaves it with the stack).
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp, &tx_info))
		return NETDEV_TX_BUSY;

	/* Make room for the Tx header if the skb lacks headroom. */
	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
	}

	/* eth_skb_pad() presumably consumes the skb on failure, so only
	 * the drop counter is bumped here — confirm against its kdoc.
	 */
	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* Sample the length before transmit; the skb may be freed after. */
	len = skb->len;
	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
431
/* ndo_set_rx_mode: intentionally empty — the device has no Rx filtering
 * to program here, but the callback must exist.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
435
/* ndo_set_mac_address: validate the new address, program it into the
 * device first, and only then update dev->dev_addr so the software copy
 * never disagrees with the hardware on failure.
 */
static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}
451
/* Size the port's headroom buffer (PBMC register) for the given MTU.
 * NOTE(review): pg_size reserves two MTU-sized chunks in buffer cells
 * for lossy buffer 0 — presumably to hold a packet in flight plus one
 * queued; confirm against the PBMC documentation.
 */
static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int err;

	/* Read-modify-write: query current config, update buffer 0 only. */
	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;
	mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, 0, pg_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}
467
/* ndo_change_mtu: resize the port headroom buffer first, then the port
 * MTU. If the MTU update fails, roll the headroom back to match the
 * still-current dev->mtu.
 */
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu);
	if (err)
		return err;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	/* Best-effort rollback; the return value is intentionally ignored. */
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu);
	return err;
}
486
/* ndo_get_stats64: sum the per-CPU software counters into *stats.
 * The 64-bit counters are read under the u64_stats seqcount so torn
 * reads on 32-bit hosts are retried.
 */
static struct rtnl_link_stats64 *
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped	+= p->tx_dropped;
	}
	stats->tx_dropped	= tx_dropped;
	return stats;
}
518
519 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
520                            u16 vid_end, bool is_member, bool untagged)
521 {
522         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
523         char *spvm_pl;
524         int err;
525
526         spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
527         if (!spvm_pl)
528                 return -ENOMEM;
529
530         mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
531                             vid_end, is_member, untagged);
532         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
533         kfree(spvm_pl);
534         return err;
535 }
536
/* Transition the port to Virtual mode: install an explicit {Port, VID}
 * to FID mapping for every active VLAN, then enable Virtual Port mode.
 * On failure, tear down exactly the mappings installed so far —
 * last_visited_vid bounds the rollback loop (VLAN_N_VID means "all",
 * used when the final mode switch itself fails).
 */
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	/* Undo only the mappings created before the failure point. */
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}
566
/* Transition the port back to VLAN mode: disable Virtual Port mode,
 * then remove the explicit {Port, VID} to FID mapping of every active
 * VLAN. A mapping-removal failure returns early without rolling back.
 */
static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	if (err)
		return err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
						   vid, vid);
		if (err)
			return err;
	}

	return 0;
}
586
587 static struct mlxsw_sp_vfid *
588 mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid)
589 {
590         struct mlxsw_sp_vfid *vfid;
591
592         list_for_each_entry(vfid, &mlxsw_sp->port_vfids.list, list) {
593                 if (vfid->vid == vid)
594                         return vfid;
595         }
596
597         return NULL;
598 }
599
/* Find the lowest unused port vFID number. Returns
 * MLXSW_SP_VFID_PORT_MAX when the bitmap is full; callers must check.
 */
static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->port_vfids.mapped,
				   MLXSW_SP_VFID_PORT_MAX);
}
605
/* Create the FID backing the given vFID in the device (SFMR register). */
static int __mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid)
{
	u16 fid = mlxsw_sp_vfid_to_fid(vfid);
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
614
/* Destroy the FID backing the given vFID in the device. Best-effort:
 * the register write's status is deliberately ignored since the
 * callers are themselves tear-down/error paths with no way to recover.
 */
static void __mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid)
{
	u16 fid = mlxsw_sp_vfid_to_fid(vfid);
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, fid, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
623
/* Allocate a free vFID number, create its FID in the device, and track
 * it in the per-switch list and bitmap. Returns the new vFID or an
 * ERR_PTR: -ERANGE when no vFID numbers remain, -ENOMEM on allocation
 * failure, or the device error from FID creation.
 */
static struct mlxsw_sp_vfid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
						  u16 vid)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_vfid *vfid;
	u16 n_vfid;
	int err;

	n_vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
	if (n_vfid == MLXSW_SP_VFID_PORT_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	/* Create the FID in hardware before committing any software state. */
	err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
	if (err) {
		dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
		return ERR_PTR(err);
	}

	vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
	if (!vfid)
		goto err_allocate_vfid;

	vfid->vfid = n_vfid;
	vfid->vid = vid;

	list_add(&vfid->list, &mlxsw_sp->port_vfids.list);
	set_bit(n_vfid, mlxsw_sp->port_vfids.mapped);

	return vfid;

err_allocate_vfid:
	/* Undo the hardware FID creation; only -ENOMEM reaches here. */
	__mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
	return ERR_PTR(-ENOMEM);
}
660
/* Release a vFID: drop it from the bitmap and list, destroy its FID in
 * the device, and free the tracking structure.
 */
static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_vfid *vfid)
{
	clear_bit(vfid->vfid, mlxsw_sp->port_vfids.mapped);
	list_del(&vfid->list);

	__mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);

	kfree(vfid);
}
671
/* Create a vPort: a per-{port, VLAN} shadow of the real port, linked
 * into the port's vports_list. Inherits the port's identity (local
 * port, LAG state) and binds to the given vFID. Returns NULL on
 * allocation failure.
 */
static struct mlxsw_sp_port *
mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_vfid *vfid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
	if (!mlxsw_sp_vport)
		return NULL;

	/* dev will be set correctly after the VLAN device is linked
	 * with the real device. In case of bridge SELF invocation, dev
	 * will remain as is.
	 */
	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
	mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
	mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
	mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
	mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
	mlxsw_sp_vport->vport.vfid = vfid;
	mlxsw_sp_vport->vport.vid = vfid->vid;

	list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);

	return mlxsw_sp_vport;
}
699
/* Unlink a vPort from its parent port's list and free it. */
static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	list_del(&mlxsw_sp_vport->vport.list);
	kfree(mlxsw_sp_vport);
}
705
/* ndo_vlan_rx_add_vid: create a vPort for the new VLAN on the port.
 *
 * Sets up, in order: the vFID (created on first use), the vPort, vFID
 * flooding (first vPort on the vFID only), Virtual mode transition
 * (first vPort on the port only), the {Port, VID}->vFID mapping,
 * learning (disabled), VLAN membership and STP state. The error labels
 * unwind these steps in exact reverse order, so their sequence must
 * mirror the setup sequence. Returns 0 on success (including the
 * benign "already configured" and VID 0 cases) or a negative errno.
 */
int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
			  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_vfid *vfid;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
		netdev_warn(dev, "VID=%d already configured\n", vid);
		return 0;
	}

	/* vFIDs are shared between ports; create one only if this VID
	 * has no vFID yet.
	 */
	vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
	if (!vfid) {
		vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
		if (IS_ERR(vfid)) {
			netdev_err(dev, "Failed to create vFID for VID=%d\n",
				   vid);
			return PTR_ERR(vfid);
		}
	}

	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vfid);
	if (!mlxsw_sp_vport) {
		netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
		err = -ENOMEM;
		goto err_port_vport_create;
	}

	/* Only the first vPort on the vFID configures its flooding. */
	if (!vfid->nr_vports) {
		err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid,
					       true, false);
		if (err) {
			netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
				   vfid->vfid);
			goto err_vport_flood_set;
		}
	}

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to Virtual mode\n");
			goto err_port_vp_mode_trans;
		}
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   true,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n",
			   vid, vfid->vfid);
		goto err_port_vid_to_fid_set;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
	if (err) {
		netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
		goto err_port_vid_learning_set;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		goto err_port_add_vid;
	}

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		goto err_port_stp_state_set;
	}

	/* Commit: the vFID now has one more vPort referencing it. */
	vfid->nr_vports++;

	return 0;

err_port_stp_state_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
err_port_add_vid:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
err_port_vid_learning_set:
	mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
				     MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
				     mlxsw_sp_vfid_to_fid(vfid->vfid), vid);
err_port_vid_to_fid_set:
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
	if (!vfid->nr_vports)
		mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
					 false);
err_vport_flood_set:
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
err_port_vport_create:
	/* Destroy the vFID only if it was created for this call and no
	 * other vPort references it.
	 */
	if (!vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
	return err;
}
822
/* ndo_vlan_rx_kill_vid handler: tear down the vPort that
 * mlxsw_sp_port_add_vid() created for 'vid', undoing its setup steps in
 * reverse order, and release the vFID once its last vPort is gone.
 */
int mlxsw_sp_port_kill_vid(struct net_device *dev,
			   __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_vfid *vfid;
	int err;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		netdev_warn(dev, "VID=%d does not exist\n", vid);
		return 0;
	}

	vfid = mlxsw_sp_vport->vport.vfid;

	/* Stop forwarding on the vPort before removing it. */
	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_DISCARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		return err;
	}

	/* Drop VLAN membership for this VID. */
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		return err;
	}

	/* Re-enable learning, which add_vid disabled for the vPort. */
	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
		return err;
	}

	/* Remove the {Port, VID} -> vFID mapping. */
	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   false,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
			   vid, vfid->vfid);
		return err;
	}

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set port's mode to VLAN mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to VLAN mode\n");
			return err;
		}
	}

	vfid->nr_vports--;
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);

	/* Destroy the vFID if no vPorts are assigned to it anymore. */
	if (!vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp_port->mlxsw_sp, vfid);

	return 0;
}
897
898 static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
899                                             size_t len)
900 {
901         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
902         u8 module, width, lane;
903         int err;
904
905         err = __mlxsw_sp_port_module_info_get(mlxsw_sp_port->mlxsw_sp,
906                                               mlxsw_sp_port->local_port,
907                                               &module, &width, &lane);
908         if (err) {
909                 netdev_err(dev, "Failed to retrieve module information\n");
910                 return err;
911         }
912
913         if (!mlxsw_sp_port->split)
914                 err = snprintf(name, len, "p%d", module + 1);
915         else
916                 err = snprintf(name, len, "p%ds%d", module + 1,
917                                lane / width);
918
919         if (err >= len)
920                 return -EINVAL;
921
922         return 0;
923 }
924
/* Netdev operations for a Spectrum port. FDB and bridge operations are
 * delegated to the generic switchdev helpers.
 */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open               = mlxsw_sp_port_open,
	.ndo_stop               = mlxsw_sp_port_stop,
	.ndo_start_xmit         = mlxsw_sp_port_xmit,
	.ndo_set_rx_mode        = mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address    = mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu         = mlxsw_sp_port_change_mtu,
	.ndo_get_stats64        = mlxsw_sp_port_get_stats64,
	.ndo_vlan_rx_add_vid    = mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid   = mlxsw_sp_port_kill_vid,
	.ndo_fdb_add            = switchdev_port_fdb_add,
	.ndo_fdb_del            = switchdev_port_fdb_del,
	.ndo_fdb_dump           = switchdev_port_fdb_dump,
	.ndo_bridge_setlink     = switchdev_port_bridge_setlink,
	.ndo_bridge_getlink     = switchdev_port_bridge_getlink,
	.ndo_bridge_dellink     = switchdev_port_bridge_dellink,
	.ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name,
};
943
944 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
945                                       struct ethtool_drvinfo *drvinfo)
946 {
947         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
948         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
949
950         strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
951         strlcpy(drvinfo->version, mlxsw_sp_driver_version,
952                 sizeof(drvinfo->version));
953         snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
954                  "%d.%d.%d",
955                  mlxsw_sp->bus_info->fw_rev.major,
956                  mlxsw_sp->bus_info->fw_rev.minor,
957                  mlxsw_sp->bus_info->fw_rev.subminor);
958         strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
959                 sizeof(drvinfo->bus_info));
960 }
961
/* Descriptor for one HW counter exposed via ethtool statistics. */
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];	/* ethtool string set name */
	u64 (*getter)(char *payload);	/* extracts the counter from a PPCNT payload */
};
966
/* Table of IEEE 802.3 counters read from the PPCNT register and reported
 * through ethtool -S. Order here defines the order of strings and values.
 */
static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

/* Number of entries in the HW statistics table above. */
#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
1047
1048 static void mlxsw_sp_port_get_strings(struct net_device *dev,
1049                                       u32 stringset, u8 *data)
1050 {
1051         u8 *p = data;
1052         int i;
1053
1054         switch (stringset) {
1055         case ETH_SS_STATS:
1056                 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
1057                         memcpy(p, mlxsw_sp_port_hw_stats[i].str,
1058                                ETH_GSTRING_LEN);
1059                         p += ETH_GSTRING_LEN;
1060                 }
1061                 break;
1062         }
1063 }
1064
1065 static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
1066                                      enum ethtool_phys_id_state state)
1067 {
1068         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1069         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1070         char mlcr_pl[MLXSW_REG_MLCR_LEN];
1071         bool active;
1072
1073         switch (state) {
1074         case ETHTOOL_ID_ACTIVE:
1075                 active = true;
1076                 break;
1077         case ETHTOOL_ID_INACTIVE:
1078                 active = false;
1079                 break;
1080         default:
1081                 return -EOPNOTSUPP;
1082         }
1083
1084         mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
1085         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
1086 }
1087
1088 static void mlxsw_sp_port_get_stats(struct net_device *dev,
1089                                     struct ethtool_stats *stats, u64 *data)
1090 {
1091         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1092         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1093         char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
1094         int i;
1095         int err;
1096
1097         mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port);
1098         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
1099         for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
1100                 data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
1101 }
1102
1103 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
1104 {
1105         switch (sset) {
1106         case ETH_SS_STATS:
1107                 return MLXSW_SP_PORT_HW_STATS_LEN;
1108         default:
1109                 return -EOPNOTSUPP;
1110         }
1111 }
1112
/* Mapping between a PTYS protocol mask and the corresponding ethtool
 * supported/advertised link mode bits and speed.
 */
struct mlxsw_sp_port_link_mode {
	u32 mask;		/* PTYS eth_proto bit(s) for this mode */
	u32 supported;		/* ethtool SUPPORTED_* bit (0 if none maps) */
	u32 advertised;		/* ethtool ADVERTISED_* bit (0 if none maps) */
	u32 speed;		/* link speed in Mb/s */
};
1119
/* PTYS protocol <-> ethtool link mode translation table. Entries without
 * .supported/.advertised have no matching legacy ethtool bit and only
 * contribute their speed.
 */
static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported	= SUPPORTED_100baseT_Full,
		.advertised	= ADVERTISED_100baseT_Full,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported	= SUPPORTED_1000baseKX_Full,
		.advertised	= ADVERTISED_1000baseKX_Full,
		.speed		= 1000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported	= SUPPORTED_10000baseT_Full,
		.advertised	= ADVERTISED_10000baseT_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported	= SUPPORTED_10000baseKX4_Full,
		.advertised	= ADVERTISED_10000baseKX4_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported	= SUPPORTED_10000baseKR_Full,
		.advertised	= ADVERTISED_10000baseKR_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported	= SUPPORTED_20000baseKR2_Full,
		.advertised	= ADVERTISED_20000baseKR2_Full,
		.speed		= 20000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported	= SUPPORTED_40000baseCR4_Full,
		.advertised	= ADVERTISED_40000baseCR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported	= SUPPORTED_40000baseKR4_Full,
		.advertised	= ADVERTISED_40000baseKR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported	= SUPPORTED_40000baseSR4_Full,
		.advertised	= ADVERTISED_40000baseSR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported	= SUPPORTED_40000baseLR4_Full,
		.advertised	= ADVERTISED_40000baseLR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed		= 25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed		= 50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.supported	= SUPPORTED_56000baseKR4_Full,
		.advertised	= ADVERTISED_56000baseKR4_Full,
		.speed		= 56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed		= 100000,
	},
};

/* Number of entries in the link mode translation table above. */
#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
1218
1219 static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
1220 {
1221         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1222                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1223                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1224                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1225                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1226                               MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1227                 return SUPPORTED_FIBRE;
1228
1229         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1230                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1231                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1232                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
1233                               MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
1234                 return SUPPORTED_Backplane;
1235         return 0;
1236 }
1237
1238 static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
1239 {
1240         u32 modes = 0;
1241         int i;
1242
1243         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1244                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1245                         modes |= mlxsw_sp_port_link_mode[i].supported;
1246         }
1247         return modes;
1248 }
1249
1250 static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
1251 {
1252         u32 modes = 0;
1253         int i;
1254
1255         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1256                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1257                         modes |= mlxsw_sp_port_link_mode[i].advertised;
1258         }
1259         return modes;
1260 }
1261
1262 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
1263                                             struct ethtool_cmd *cmd)
1264 {
1265         u32 speed = SPEED_UNKNOWN;
1266         u8 duplex = DUPLEX_UNKNOWN;
1267         int i;
1268
1269         if (!carrier_ok)
1270                 goto out;
1271
1272         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1273                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
1274                         speed = mlxsw_sp_port_link_mode[i].speed;
1275                         duplex = DUPLEX_FULL;
1276                         break;
1277                 }
1278         }
1279 out:
1280         ethtool_cmd_speed_set(cmd, speed);
1281         cmd->duplex = duplex;
1282 }
1283
1284 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
1285 {
1286         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1287                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1288                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1289                               MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1290                 return PORT_FIBRE;
1291
1292         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1293                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1294                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
1295                 return PORT_DA;
1296
1297         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1298                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1299                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1300                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
1301                 return PORT_NONE;
1302
1303         return PORT_OTHER;
1304 }
1305
/* ethtool get_settings handler: query the PTYS register and translate the
 * capability, admin and operational protocol masks into legacy ethtool
 * supported/advertising/lp_advertising bits, speed, duplex, connector and
 * transceiver fields.
 */
static int mlxsw_sp_port_get_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 eth_proto_oper;
	int err;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
			      &eth_proto_admin, &eth_proto_oper);

	cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
			 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
			 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
					eth_proto_oper, cmd);

	/* Without an operational mask (no link), fall back to the
	 * capability mask for connector type and link partner bits.
	 */
	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
	cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
	cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);

	cmd->transceiver = XCVR_INTERNAL;
	return 0;
}
1340
1341 static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
1342 {
1343         u32 ptys_proto = 0;
1344         int i;
1345
1346         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1347                 if (advertising & mlxsw_sp_port_link_mode[i].advertised)
1348                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1349         }
1350         return ptys_proto;
1351 }
1352
1353 static u32 mlxsw_sp_to_ptys_speed(u32 speed)
1354 {
1355         u32 ptys_proto = 0;
1356         int i;
1357
1358         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1359                 if (speed == mlxsw_sp_port_link_mode[i].speed)
1360                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1361         }
1362         return ptys_proto;
1363 }
1364
1365 static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
1366 {
1367         u32 ptys_proto = 0;
1368         int i;
1369
1370         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1371                 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
1372                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1373         }
1374         return ptys_proto;
1375 }
1376
/* ethtool set_settings handler: compute the requested PTYS admin protocol
 * mask (from advertised modes with autoneg, or from the fixed speed
 * otherwise), write it, and bounce the port's admin status so the new
 * setting takes effect on an operationally-up port.
 */
static int mlxsw_sp_port_set_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 speed;
	u32 eth_proto_new;
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	bool is_up;
	int err;

	speed = ethtool_cmd_speed(cmd);

	eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
		mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
		mlxsw_sp_to_ptys_speed(speed);

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);

	/* Only protocols the port is capable of may be requested. */
	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "Not supported proto admin requested");
		return -EINVAL;
	}
	/* Nothing to do if the admin mask is already as requested. */
	if (eth_proto_new == eth_proto_admin)
		return 0;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to set proto admin");
		return err;
	}

	err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
	if (err) {
		netdev_err(dev, "Failed to get oper status");
		return err;
	}
	/* Port is operationally down: new setting applies on next up. */
	if (!is_up)
		return 0;

	/* Toggle admin status down then up to apply the new protocols. */
	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	return 0;
}
1441
/* ethtool operations for a Spectrum port. */
static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_settings		= mlxsw_sp_port_get_settings,
	.set_settings		= mlxsw_sp_port_set_settings,
};
1452
1453 static int
1454 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
1455 {
1456         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1457         u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
1458         char ptys_pl[MLXSW_REG_PTYS_LEN];
1459         u32 eth_proto_admin;
1460
1461         eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
1462         mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port,
1463                             eth_proto_admin);
1464         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1465 }
1466
/* Allocate and fully initialize one Spectrum port netdev: per-port state,
 * devlink registration, HW configuration (system port mapping, SWID,
 * speeds, MTU, buffers), switchdev init and netdev registration. On
 * failure, teardown happens in reverse order via the goto cleanup chain.
 */
static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				  bool split, u8 module, u8 width)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct devlink_port *devlink_port;
	struct net_device *dev;
	size_t bytes;
	int err;

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev)
		return -ENOMEM;
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->split = split;
	/* One bit per possible VLAN ID for the membership bitmaps. */
	bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
	mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->active_vlans) {
		err = -ENOMEM;
		goto err_port_active_vlans_alloc;
	}
	mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->untagged_vlans) {
		err = -ENOMEM;
		goto err_port_untagged_vlans_alloc;
	}
	INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->hard_header_len += MLXSW_TXHDR_LEN;

	devlink_port = &mlxsw_sp_port->devlink_port;
	if (mlxsw_sp_port->split)
		devlink_port_split_set(devlink_port, module);
	err = devlink_port_register(devlink, devlink_port, local_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register devlink port\n",
			mlxsw_sp_port->local_port);
		goto err_devlink_port_register;
	}

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	devlink_port_type_eth_set(devlink_port, dev);

	err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
	if (err)
		goto err_port_vlan_init;

	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	return 0;

err_port_vlan_init:
	unregister_netdev(dev);
err_register_netdev:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
err_port_swid_set:
err_port_system_port_mapping_set:
	devlink_port_unregister(&mlxsw_sp_port->devlink_port);
err_devlink_port_register:
err_dev_addr_init:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	kfree(mlxsw_sp_port->untagged_vlans);
err_port_untagged_vlans_alloc:
	kfree(mlxsw_sp_port->active_vlans);
err_port_active_vlans_alloc:
	free_netdev(dev);
	return err;
}
1612
1613 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1614                                 bool split, u8 module, u8 width, u8 lane)
1615 {
1616         int err;
1617
1618         err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
1619                                        lane);
1620         if (err)
1621                 return err;
1622
1623         err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split, module,
1624                                      width);
1625         if (err)
1626                 goto err_port_create;
1627
1628         return 0;
1629
1630 err_port_create:
1631         mlxsw_sp_port_module_unmap(mlxsw_sp, local_port);
1632         return err;
1633 }
1634
/* Destroy any vPorts still attached to @mlxsw_sp_port.
 * Called during port removal, after the port netdev was unregistered.
 */
static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
        struct net_device *dev = mlxsw_sp_port->dev;
        struct mlxsw_sp_port *mlxsw_sp_vport, *tmp;

        /* _safe iteration: mlxsw_sp_port_kill_vid() unlinks the vPort
         * from this list while we walk it.
         */
        list_for_each_entry_safe(mlxsw_sp_vport, tmp,
                                 &mlxsw_sp_port->vports_list, vport.list) {
                u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

                /* vPorts created for VLAN devices should already be gone
                 * by now, since we unregistered the port netdev.
                 */
                WARN_ON(is_vlan_dev(mlxsw_sp_vport->dev));
                mlxsw_sp_port_kill_vid(dev, 0, vid);
        }
}
1651
/* Tear down the port at @local_port and free all of its resources.
 * Safe to call for slots that hold no port (NULL entry). Teardown is
 * ordered: netdev and devlink first, then vPorts/switchdev, then the
 * hardware SWID/module mapping, and finally the memory.
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
        struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
        struct devlink_port *devlink_port;

        if (!mlxsw_sp_port)
                return;
        /* Clear the slot before tearing the port down. */
        mlxsw_sp->ports[local_port] = NULL;
        devlink_port = &mlxsw_sp_port->devlink_port;
        devlink_port_type_clear(devlink_port);
        unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
        devlink_port_unregister(devlink_port);
        mlxsw_sp_port_vports_fini(mlxsw_sp_port);
        mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
        /* Detach the port from its switch partition and its module. */
        mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
        mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
        free_percpu(mlxsw_sp_port->pcpu_stats);
        kfree(mlxsw_sp_port->untagged_vlans);
        kfree(mlxsw_sp_port->active_vlans);
        free_netdev(mlxsw_sp_port->dev);
}
1673
1674 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
1675 {
1676         int i;
1677
1678         for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
1679                 mlxsw_sp_port_remove(mlxsw_sp, i);
1680         kfree(mlxsw_sp->ports);
1681 }
1682
1683 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
1684 {
1685         size_t alloc_size;
1686         u8 module, width;
1687         int i;
1688         int err;
1689
1690         alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
1691         mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
1692         if (!mlxsw_sp->ports)
1693                 return -ENOMEM;
1694
1695         for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
1696                 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
1697                                                     &width);
1698                 if (err)
1699                         goto err_port_module_info_get;
1700                 if (!width)
1701                         continue;
1702                 mlxsw_sp->port_to_module[i] = module;
1703                 err = __mlxsw_sp_port_create(mlxsw_sp, i, false, module, width);
1704                 if (err)
1705                         goto err_port_create;
1706         }
1707         return 0;
1708
1709 err_port_create:
1710 err_port_module_info_get:
1711         for (i--; i >= 1; i--)
1712                 mlxsw_sp_port_remove(mlxsw_sp, i);
1713         kfree(mlxsw_sp->ports);
1714         return err;
1715 }
1716
1717 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
1718 {
1719         u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
1720
1721         return local_port - offset;
1722 }
1723
/* devlink port split handler: split a full-width port into @count
 * (2 or 4) narrower ports. The port must currently use its module's
 * maximum width and the neighbouring local ports consumed by the
 * split must be free.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_port_split(void *priv, u8 local_port, unsigned int count)
{
        struct mlxsw_sp *mlxsw_sp = priv;
        struct mlxsw_sp_port *mlxsw_sp_port;
        u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
        u8 module, cur_width, base_port;
        int i;
        int err;

        mlxsw_sp_port = mlxsw_sp->ports[local_port];
        if (!mlxsw_sp_port) {
                dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
                        local_port);
                return -EINVAL;
        }

        if (count != 2 && count != 4) {
                netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
                return -EINVAL;
        }

        err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
                                            &cur_width);
        if (err) {
                netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
                return err;
        }

        /* Only a port using all of its module's lanes can be split. */
        if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
                netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
                return -EINVAL;
        }

        /* Make sure we have enough slave (even) ports for the split. */
        if (count == 2) {
                base_port = local_port;
                if (mlxsw_sp->ports[base_port + 1]) {
                        netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
                        return -EINVAL;
                }
        } else {
                base_port = mlxsw_sp_cluster_base_port_get(local_port);
                if (mlxsw_sp->ports[base_port + 1] ||
                    mlxsw_sp->ports[base_port + 3]) {
                        netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
                        return -EINVAL;
                }
        }

        /* Remove the ports occupying the split range... */
        for (i = 0; i < count; i++)
                mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

        /* ...and create the split ports in their place. */
        for (i = 0; i < count; i++) {
                err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
                                           module, width, i * width);
                if (err) {
                        dev_err(mlxsw_sp->bus_info->dev, "Failed to create split port\n");
                        goto err_port_create;
                }
        }

        return 0;

err_port_create:
        /* Roll back: remove the split ports created so far and
         * re-instantiate the original full-width ports.
         */
        for (i--; i >= 0; i--)
                mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
        for (i = 0; i < count / 2; i++) {
                module = mlxsw_sp->port_to_module[base_port + i * 2];
                mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
                                     module, MLXSW_PORT_MODULE_MAX_WIDTH, 0);
        }
        return err;
}
1797
/* devlink port unsplit handler: remove the split ports and
 * re-instantiate the original full-width ports in their place.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_port_unsplit(void *priv, u8 local_port)
{
        struct mlxsw_sp *mlxsw_sp = priv;
        struct mlxsw_sp_port *mlxsw_sp_port;
        u8 module, cur_width, base_port;
        unsigned int count;
        int i;
        int err;

        mlxsw_sp_port = mlxsw_sp->ports[local_port];
        if (!mlxsw_sp_port) {
                dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
                        local_port);
                return -EINVAL;
        }

        if (!mlxsw_sp_port->split) {
                netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
                return -EINVAL;
        }

        err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
                                            &cur_width);
        if (err) {
                netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
                return err;
        }
        /* A single-lane port implies a 4-way split; otherwise 2-way. */
        count = cur_width == 1 ? 4 : 2;

        base_port = mlxsw_sp_cluster_base_port_get(local_port);

        /* Determine which ports to remove. */
        if (count == 2 && local_port >= base_port + 2)
                base_port = base_port + 2;

        for (i = 0; i < count; i++)
                mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

        /* Recreate the unsplit ports at their original widths; a
         * failure here is logged but does not abort the unsplit.
         */
        for (i = 0; i < count / 2; i++) {
                module = mlxsw_sp->port_to_module[base_port + i * 2];
                err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
                                           module, MLXSW_PORT_MODULE_MAX_WIDTH,
                                           0);
                if (err)
                        dev_err(mlxsw_sp->bus_info->dev, "Failed to reinstantiate port\n");
        }

        return 0;
}
1847
1848 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
1849                                      char *pude_pl, void *priv)
1850 {
1851         struct mlxsw_sp *mlxsw_sp = priv;
1852         struct mlxsw_sp_port *mlxsw_sp_port;
1853         enum mlxsw_reg_pude_oper_status status;
1854         u8 local_port;
1855
1856         local_port = mlxsw_reg_pude_local_port_get(pude_pl);
1857         mlxsw_sp_port = mlxsw_sp->ports[local_port];
1858         if (!mlxsw_sp_port) {
1859                 dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n",
1860                          local_port);
1861                 return;
1862         }
1863
1864         status = mlxsw_reg_pude_oper_status_get(pude_pl);
1865         if (status == MLXSW_PORT_OPER_STATUS_UP) {
1866                 netdev_info(mlxsw_sp_port->dev, "link up\n");
1867                 netif_carrier_on(mlxsw_sp_port->dev);
1868         } else {
1869                 netdev_info(mlxsw_sp_port->dev, "link down\n");
1870                 netif_carrier_off(mlxsw_sp_port->dev);
1871         }
1872 }
1873
/* Event listener for Port Up/Down Events (PUDE). */
static struct mlxsw_event_listener mlxsw_sp_pude_event = {
        .func = mlxsw_sp_pude_event_func,
        .trap_id = MLXSW_TRAP_ID_PUDE,
};
1878
1879 static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
1880                                    enum mlxsw_event_trap_id trap_id)
1881 {
1882         struct mlxsw_event_listener *el;
1883         char hpkt_pl[MLXSW_REG_HPKT_LEN];
1884         int err;
1885
1886         switch (trap_id) {
1887         case MLXSW_TRAP_ID_PUDE:
1888                 el = &mlxsw_sp_pude_event;
1889                 break;
1890         }
1891         err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
1892         if (err)
1893                 return err;
1894
1895         mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
1896         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
1897         if (err)
1898                 goto err_event_trap_set;
1899
1900         return 0;
1901
1902 err_event_trap_set:
1903         mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
1904         return err;
1905 }
1906
1907 static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
1908                                       enum mlxsw_event_trap_id trap_id)
1909 {
1910         struct mlxsw_event_listener *el;
1911
1912         switch (trap_id) {
1913         case MLXSW_TRAP_ID_PUDE:
1914                 el = &mlxsw_sp_pude_event;
1915                 break;
1916         }
1917         mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
1918 }
1919
/* RX listener: deliver a trapped packet to the ingress port's netdev.
 * Updates the port's per-CPU RX counters and hands the skb up to the
 * network stack.
 */
static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
                                      void *priv)
{
        struct mlxsw_sp *mlxsw_sp = priv;
        struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
        struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

        if (unlikely(!mlxsw_sp_port)) {
                dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
                                     local_port);
                return;
        }

        skb->dev = mlxsw_sp_port->dev;

        /* Stats are counted before eth_type_trans(), so rx_bytes uses
         * the skb length as received here.
         */
        pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
        u64_stats_update_begin(&pcpu_stats->syncp);
        pcpu_stats->rx_packets++;
        pcpu_stats->rx_bytes += skb->len;
        u64_stats_update_end(&pcpu_stats->syncp);

        skb->protocol = eth_type_trans(skb, skb->dev);
        netif_receive_skb(skb);
}
1944
1945 static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
1946         {
1947                 .func = mlxsw_sp_rx_listener_func,
1948                 .local_port = MLXSW_PORT_DONT_CARE,
1949                 .trap_id = MLXSW_TRAP_ID_FDB_MC,
1950         },
1951         /* Traps for specific L2 packet types, not trapped as FDB MC */
1952         {
1953                 .func = mlxsw_sp_rx_listener_func,
1954                 .local_port = MLXSW_PORT_DONT_CARE,
1955                 .trap_id = MLXSW_TRAP_ID_STP,
1956         },
1957         {
1958                 .func = mlxsw_sp_rx_listener_func,
1959                 .local_port = MLXSW_PORT_DONT_CARE,
1960                 .trap_id = MLXSW_TRAP_ID_LACP,
1961         },
1962         {
1963                 .func = mlxsw_sp_rx_listener_func,
1964                 .local_port = MLXSW_PORT_DONT_CARE,
1965                 .trap_id = MLXSW_TRAP_ID_EAPOL,
1966         },
1967         {
1968                 .func = mlxsw_sp_rx_listener_func,
1969                 .local_port = MLXSW_PORT_DONT_CARE,
1970                 .trap_id = MLXSW_TRAP_ID_LLDP,
1971         },
1972         {
1973                 .func = mlxsw_sp_rx_listener_func,
1974                 .local_port = MLXSW_PORT_DONT_CARE,
1975                 .trap_id = MLXSW_TRAP_ID_MMRP,
1976         },
1977         {
1978                 .func = mlxsw_sp_rx_listener_func,
1979                 .local_port = MLXSW_PORT_DONT_CARE,
1980                 .trap_id = MLXSW_TRAP_ID_MVRP,
1981         },
1982         {
1983                 .func = mlxsw_sp_rx_listener_func,
1984                 .local_port = MLXSW_PORT_DONT_CARE,
1985                 .trap_id = MLXSW_TRAP_ID_RPVST,
1986         },
1987         {
1988                 .func = mlxsw_sp_rx_listener_func,
1989                 .local_port = MLXSW_PORT_DONT_CARE,
1990                 .trap_id = MLXSW_TRAP_ID_DHCP,
1991         },
1992         {
1993                 .func = mlxsw_sp_rx_listener_func,
1994                 .local_port = MLXSW_PORT_DONT_CARE,
1995                 .trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
1996         },
1997         {
1998                 .func = mlxsw_sp_rx_listener_func,
1999                 .local_port = MLXSW_PORT_DONT_CARE,
2000                 .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
2001         },
2002         {
2003                 .func = mlxsw_sp_rx_listener_func,
2004                 .local_port = MLXSW_PORT_DONT_CARE,
2005                 .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
2006         },
2007         {
2008                 .func = mlxsw_sp_rx_listener_func,
2009                 .local_port = MLXSW_PORT_DONT_CARE,
2010                 .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
2011         },
2012         {
2013                 .func = mlxsw_sp_rx_listener_func,
2014                 .local_port = MLXSW_PORT_DONT_CARE,
2015                 .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
2016         },
2017 };
2018
/* Create the RX and control trap groups, then register every RX
 * listener and switch its trap action to TRAP_TO_CPU. On failure,
 * traps configured so far are restored to FORWARD and their
 * listeners unregistered.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
        char htgt_pl[MLXSW_REG_HTGT_LEN];
        char hpkt_pl[MLXSW_REG_HPKT_LEN];
        int i;
        int err;

        mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
        if (err)
                return err;

        mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
        if (err)
                return err;

        for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
                err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
                                                      &mlxsw_sp_rx_listener[i],
                                                      mlxsw_sp);
                if (err)
                        goto err_rx_listener_register;

                mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
                                    mlxsw_sp_rx_listener[i].trap_id);
                err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
                if (err)
                        goto err_rx_trap_set;
        }
        return 0;

err_rx_trap_set:
        /* Listener i was registered, but its trap action failed. */
        mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
                                          &mlxsw_sp_rx_listener[i],
                                          mlxsw_sp);
err_rx_listener_register:
        /* Undo every fully configured listener before this one. */
        for (i--; i >= 0; i--) {
                mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
                                    mlxsw_sp_rx_listener[i].trap_id);
                mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

                mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
                                                  &mlxsw_sp_rx_listener[i],
                                                  mlxsw_sp);
        }
        return err;
}
2067
2068 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
2069 {
2070         char hpkt_pl[MLXSW_REG_HPKT_LEN];
2071         int i;
2072
2073         for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
2074                 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
2075                                     mlxsw_sp_rx_listener[i].trap_id);
2076                 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2077
2078                 mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
2079                                                   &mlxsw_sp_rx_listener[i],
2080                                                   mlxsw_sp);
2081         }
2082 }
2083
2084 static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
2085                                  enum mlxsw_reg_sfgc_type type,
2086                                  enum mlxsw_reg_sfgc_bridge_type bridge_type)
2087 {
2088         enum mlxsw_flood_table_type table_type;
2089         enum mlxsw_sp_flood_table flood_table;
2090         char sfgc_pl[MLXSW_REG_SFGC_LEN];
2091
2092         if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
2093                 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
2094         else
2095                 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
2096
2097         if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
2098                 flood_table = MLXSW_SP_FLOOD_TABLE_UC;
2099         else
2100                 flood_table = MLXSW_SP_FLOOD_TABLE_BM;
2101
2102         mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
2103                             flood_table);
2104         return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
2105 }
2106
2107 static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
2108 {
2109         int type, err;
2110
2111         for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
2112                 if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
2113                         continue;
2114
2115                 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
2116                                             MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
2117                 if (err)
2118                         return err;
2119
2120                 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
2121                                             MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
2122                 if (err)
2123                         return err;
2124         }
2125
2126         return 0;
2127 }
2128
/* Configure the device-wide LAG hash to distribute over L2 (MACs,
 * ethertype, VLAN ID) and L3/L4 (IPs, ports, IP protocol) fields.
 */
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
        char slcr_pl[MLXSW_REG_SLCR_LEN];

        mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
                                     MLXSW_REG_SLCR_LAG_HASH_DMAC |
                                     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
                                     MLXSW_REG_SLCR_LAG_HASH_VLANID |
                                     MLXSW_REG_SLCR_LAG_HASH_SIP |
                                     MLXSW_REG_SLCR_LAG_HASH_DIP |
                                     MLXSW_REG_SLCR_LAG_HASH_SPORT |
                                     MLXSW_REG_SLCR_LAG_HASH_DPORT |
                                     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
}
2144
/* Main initialization callback invoked by the mlxsw core. Creates all
 * ports and then sets up PUDE events, RX traps, flood tables, shared
 * buffers, LAG hashing and switchdev support, unwinding completed
 * steps on failure.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
                         const struct mlxsw_bus_info *mlxsw_bus_info)
{
        struct mlxsw_sp *mlxsw_sp = priv;
        int err;

        mlxsw_sp->core = mlxsw_core;
        mlxsw_sp->bus_info = mlxsw_bus_info;
        INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list);
        INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list);
        INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);

        err = mlxsw_sp_base_mac_get(mlxsw_sp);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
                return err;
        }

        err = mlxsw_sp_ports_create(mlxsw_sp);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
                return err;
        }

        err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
                goto err_event_register;
        }

        err = mlxsw_sp_traps_init(mlxsw_sp);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
                goto err_rx_listener_register;
        }

        err = mlxsw_sp_flood_init(mlxsw_sp);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
                goto err_flood_init;
        }

        err = mlxsw_sp_buffers_init(mlxsw_sp);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
                goto err_buffers_init;
        }

        err = mlxsw_sp_lag_init(mlxsw_sp);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
                goto err_lag_init;
        }

        err = mlxsw_sp_switchdev_init(mlxsw_sp);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
                goto err_switchdev_init;
        }

        return 0;

        /* Error unwind: the flood/buffers/LAG/switchdev steps have no
         * dedicated teardown here; they share the traps_fini path.
         */
err_switchdev_init:
err_lag_init:
err_buffers_init:
err_flood_init:
        mlxsw_sp_traps_fini(mlxsw_sp);
err_rx_listener_register:
        mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
err_event_register:
        mlxsw_sp_ports_remove(mlxsw_sp);
        return err;
}
2218
/* Teardown counterpart of mlxsw_sp_init(): releases switchdev, traps,
 * the PUDE event listener and finally all ports.
 */
static void mlxsw_sp_fini(void *priv)
{
        struct mlxsw_sp *mlxsw_sp = priv;

        mlxsw_sp_switchdev_fini(mlxsw_sp);
        mlxsw_sp_traps_fini(mlxsw_sp);
        mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
        mlxsw_sp_ports_remove(mlxsw_sp);
}
2228
/* Resource limits handed to the mlxsw core at device initialization.
 * Each value is paired with a used_* flag marking it as set.
 */
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
        .used_max_vepa_channels         = 1,
        .max_vepa_channels              = 0,
        .used_max_lag                   = 1,
        .max_lag                        = MLXSW_SP_LAG_MAX,
        .used_max_port_per_lag          = 1,
        .max_port_per_lag               = MLXSW_SP_PORT_PER_LAG_MAX,
        .used_max_mid                   = 1,
        .max_mid                        = MLXSW_SP_MID_MAX,
        .used_max_pgt                   = 1,
        .max_pgt                        = 0,
        .used_max_system_port           = 1,
        .max_system_port                = 64,
        .used_max_vlan_groups           = 1,
        .max_vlan_groups                = 127,
        .used_max_regions               = 1,
        .max_regions                    = 400,
        .used_flood_tables              = 1,
        .used_flood_mode                = 1,
        .flood_mode                     = 3,
        .max_fid_offset_flood_tables    = 2,
        .fid_offset_flood_table_size    = VLAN_N_VID - 1,
        .max_fid_flood_tables           = 2,
        .fid_flood_table_size           = MLXSW_SP_VFID_MAX,
        .used_max_ib_mc                 = 1,
        .max_ib_mc                      = 0,
        .used_max_pkey                  = 1,
        .max_pkey                       = 0,
        .swid_config                    = {
                {
                        .used_type      = 1,
                        .type           = MLXSW_PORT_SWID_TYPE_ETH,
                }
        },
};
2264
/* Driver registration structure binding the Spectrum callbacks and
 * configuration profile to the mlxsw core.
 */
static struct mlxsw_driver mlxsw_sp_driver = {
        .kind                   = MLXSW_DEVICE_KIND_SPECTRUM,
        .owner                  = THIS_MODULE,
        .priv_size              = sizeof(struct mlxsw_sp),
        .init                   = mlxsw_sp_init,
        .fini                   = mlxsw_sp_fini,
        .port_split             = mlxsw_sp_port_split,
        .port_unsplit           = mlxsw_sp_port_unsplit,
        .txhdr_construct        = mlxsw_sp_txhdr_construct,
        .txhdr_len              = MLXSW_TXHDR_LEN,
        .profile                = &mlxsw_sp_config_profile,
};
2277
/* Request a device flush of FDB entries associated with this local
 * port (SFDF per-port flush).
 */
static int
mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char sfdf_pl[MLXSW_REG_SFDF_LEN];

        mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT);
        mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port);

        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
2289
/* Request a device flush of FDB entries matching both this local
 * port and @fid (SFDF per-port-and-FID flush).
 */
static int
mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
                                    u16 fid)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char sfdf_pl[MLXSW_REG_SFDF_LEN];

        mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
        mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
        mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
                                                mlxsw_sp_port->local_port);

        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
2304
/* Request a device flush of FDB entries associated with the port's
 * LAG (SFDF per-LAG flush).
 */
static int
mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char sfdf_pl[MLXSW_REG_SFDF_LEN];

        mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG);
        mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);

        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
2316
/* Request a device flush of FDB entries matching both the port's LAG
 * and @fid (SFDF per-LAG-and-FID flush).
 */
static int
mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
                                      u16 fid)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char sfdf_pl[MLXSW_REG_SFDF_LEN];

        mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
        mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
        mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);

        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
2330
2331 static int
2332 __mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port)
2333 {
2334         int err, last_err = 0;
2335         u16 vid;
2336
2337         for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
2338                 err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid);
2339                 if (err)
2340                         last_err = err;
2341         }
2342
2343         return last_err;
2344 }
2345
2346 static int
2347 __mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port)
2348 {
2349         int err, last_err = 0;
2350         u16 vid;
2351
2352         for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
2353                 err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid);
2354                 if (err)
2355                         last_err = err;
2356         }
2357
2358         return last_err;
2359 }
2360
2361 static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port)
2362 {
2363         if (!list_empty(&mlxsw_sp_port->vports_list))
2364                 if (mlxsw_sp_port->lagged)
2365                         return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port);
2366                 else
2367                         return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port);
2368         else
2369                 if (mlxsw_sp_port->lagged)
2370                         return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port);
2371                 else
2372                         return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port);
2373 }
2374
2375 static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport)
2376 {
2377         u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport);
2378         u16 fid = mlxsw_sp_vfid_to_fid(vfid);
2379
2380         if (mlxsw_sp_vport->lagged)
2381                 return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport,
2382                                                              fid);
2383         else
2384                 return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid);
2385 }
2386
2387 static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
2388 {
2389         return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
2390 }
2391
2392 static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
2393 {
2394         struct net_device *dev = mlxsw_sp_port->dev;
2395         int err;
2396
2397         /* When port is not bridged untagged packets are tagged with
2398          * PVID=VID=1, thereby creating an implicit VLAN interface in
2399          * the device. Remove it and let bridge code take care of its
2400          * own VLANs.
2401          */
2402         err = mlxsw_sp_port_kill_vid(dev, 0, 1);
2403         if (err)
2404                 return err;
2405
2406         mlxsw_sp_port->learning = 1;
2407         mlxsw_sp_port->learning_sync = 1;
2408         mlxsw_sp_port->uc_flood = 1;
2409         mlxsw_sp_port->bridged = 1;
2410
2411         return 0;
2412 }
2413
2414 static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2415                                       bool flush_fdb)
2416 {
2417         struct net_device *dev = mlxsw_sp_port->dev;
2418
2419         if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port))
2420                 netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
2421
2422         mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
2423
2424         mlxsw_sp_port->learning = 0;
2425         mlxsw_sp_port->learning_sync = 0;
2426         mlxsw_sp_port->uc_flood = 0;
2427         mlxsw_sp_port->bridged = 0;
2428
2429         /* Add implicit VLAN interface in the device, so that untagged
2430          * packets will be classified to the default vFID.
2431          */
2432         return mlxsw_sp_port_add_vid(dev, 0, 1);
2433 }
2434
2435 static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
2436                                          struct net_device *br_dev)
2437 {
2438         return !mlxsw_sp->master_bridge.dev ||
2439                mlxsw_sp->master_bridge.dev == br_dev;
2440 }
2441
2442 static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
2443                                        struct net_device *br_dev)
2444 {
2445         mlxsw_sp->master_bridge.dev = br_dev;
2446         mlxsw_sp->master_bridge.ref_count++;
2447 }
2448
2449 static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp,
2450                                        struct net_device *br_dev)
2451 {
2452         if (--mlxsw_sp->master_bridge.ref_count == 0)
2453                 mlxsw_sp->master_bridge.dev = NULL;
2454 }
2455
/* Create LAG @lag_id in hardware via the SLDR register. */
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
2463
/* Destroy LAG @lag_id in hardware via the SLDR register. */
static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
2471
/* Add the port as a collector member of LAG @lag_id at @port_index
 * via the SLCOR register.
 */
static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
2482
/* Remove the port from LAG @lag_id's collector members via SLCOR. */
static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
2493
/* Enable collection on the port's LAG @lag_id membership via SLCOR. */
static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
2504
/* Disable collection on the port's LAG @lag_id membership via SLCOR. */
static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
2515
2516 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
2517                                   struct net_device *lag_dev,
2518                                   u16 *p_lag_id)
2519 {
2520         struct mlxsw_sp_upper *lag;
2521         int free_lag_id = -1;
2522         int i;
2523
2524         for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
2525                 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
2526                 if (lag->ref_count) {
2527                         if (lag->dev == lag_dev) {
2528                                 *p_lag_id = i;
2529                                 return 0;
2530                         }
2531                 } else if (free_lag_id < 0) {
2532                         free_lag_id = i;
2533                 }
2534         }
2535         if (free_lag_id < 0)
2536                 return -EBUSY;
2537         *p_lag_id = free_lag_id;
2538         return 0;
2539 }
2540
2541 static bool
2542 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
2543                           struct net_device *lag_dev,
2544                           struct netdev_lag_upper_info *lag_upper_info)
2545 {
2546         u16 lag_id;
2547
2548         if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
2549                 return false;
2550         if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
2551                 return false;
2552         return true;
2553 }
2554
2555 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
2556                                        u16 lag_id, u8 *p_port_index)
2557 {
2558         int i;
2559
2560         for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
2561                 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
2562                         *p_port_index = i;
2563                         return 0;
2564                 }
2565         }
2566         return -EBUSY;
2567 }
2568
2569 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
2570                                   struct net_device *lag_dev)
2571 {
2572         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2573         struct mlxsw_sp_upper *lag;
2574         u16 lag_id;
2575         u8 port_index;
2576         int err;
2577
2578         err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
2579         if (err)
2580                 return err;
2581         lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
2582         if (!lag->ref_count) {
2583                 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
2584                 if (err)
2585                         return err;
2586                 lag->dev = lag_dev;
2587         }
2588
2589         err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
2590         if (err)
2591                 return err;
2592         err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
2593         if (err)
2594                 goto err_col_port_add;
2595         err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
2596         if (err)
2597                 goto err_col_port_enable;
2598
2599         mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
2600                                    mlxsw_sp_port->local_port);
2601         mlxsw_sp_port->lag_id = lag_id;
2602         mlxsw_sp_port->lagged = 1;
2603         lag->ref_count++;
2604         return 0;
2605
2606 err_col_port_add:
2607         if (!lag->ref_count)
2608                 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
2609 err_col_port_enable:
2610         mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
2611         return err;
2612 }
2613
2614 static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
2615                                        struct net_device *br_dev,
2616                                        bool flush_fdb);
2617
/* Remove the port from its LAG, cleaning up any bridge state built on
 * top of the LAG device, and destroy the hardware LAG when the last
 * member leaves. Returns 0 on success or a negative errno.
 *
 * NOTE(review): on an error return below, state already torn down is
 * not re-established — callers appear to treat this as best-effort;
 * confirm against the notifier call sites.
 */
static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_upper *lag;
	u16 lag_id = mlxsw_sp_port->lag_id;
	int err;

	if (!mlxsw_sp_port->lagged)
		return 0;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	/* Stop collection before removing the collector membership. */
	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
	if (err)
		return err;

	/* In case we leave a LAG device that has bridges built on top,
	 * then their teardown sequence is never issued and we need to
	 * invoke the necessary cleanup routines ourselves.
	 */
	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct net_device *br_dev;

		if (!mlxsw_sp_vport->bridged)
			continue;

		br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
		/* FDB was already flushed per-LAG below, hence false. */
		mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false);
	}

	if (mlxsw_sp_port->bridged) {
		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false);
		mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL);
	}

	/* Last member: flush the LAG's FDB entries and destroy it. */
	if (lag->ref_count == 1) {
		if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port))
			netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
		err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
		if (err)
			return err;
	}

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;
	return 0;
}
2674
/* Add the port to LAG @lag_id's distribution (egress) list via SLDR. */
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
2685
/* Remove the port from LAG @lag_id's distribution list via SLDR. */
static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
2696
2697 static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
2698                                        bool lag_tx_enabled)
2699 {
2700         if (lag_tx_enabled)
2701                 return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
2702                                                   mlxsw_sp_port->lag_id);
2703         else
2704                 return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
2705                                                      mlxsw_sp_port->lag_id);
2706 }
2707
/* Apply a LAG lower-state change notification: only the tx_enabled
 * flag is reflected to hardware.
 */
static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}
2713
2714 static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
2715                                    struct net_device *vlan_dev)
2716 {
2717         struct mlxsw_sp_port *mlxsw_sp_vport;
2718         u16 vid = vlan_dev_vlan_id(vlan_dev);
2719
2720         mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
2721         if (!mlxsw_sp_vport) {
2722                 WARN_ON(!mlxsw_sp_vport);
2723                 return -EINVAL;
2724         }
2725
2726         mlxsw_sp_vport->dev = vlan_dev;
2727
2728         return 0;
2729 }
2730
2731 static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
2732                                      struct net_device *vlan_dev)
2733 {
2734         struct mlxsw_sp_port *mlxsw_sp_vport;
2735         u16 vid = vlan_dev_vlan_id(vlan_dev);
2736
2737         mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
2738         if (!mlxsw_sp_vport) {
2739                 WARN_ON(!mlxsw_sp_vport);
2740                 return -EINVAL;
2741         }
2742
2743         /* When removing a VLAN device while still bridged we should first
2744          * remove it from the bridge, as we receive the bridge's notification
2745          * when the vPort is already gone.
2746          */
2747         if (mlxsw_sp_vport->bridged) {
2748                 struct net_device *br_dev;
2749
2750                 br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
2751                 mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true);
2752         }
2753
2754         mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
2755
2756         return 0;
2757 }
2758
/* Netdev notifier handler for upper-device changes on a switch port.
 *
 * PRECHANGEUPPER vetoes configurations the hardware cannot support;
 * CHANGEUPPER applies the join/leave for VLAN, bridge and LAG uppers.
 * Returns NOTIFY_DONE on success/ignore, NOTIFY_BAD to veto or on
 * failure to apply a change.
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only master/linking requests are subject to veto. */
		if (!info->master || !info->linking)
			break;
		/* HW limitation forbids to put ports to multiple bridges. */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return NOTIFY_BAD;
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return NOTIFY_BAD;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			/* 8021q upper: (un)link the matching vPort. */
			if (info->linking) {
				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
							      upper_dev);
				if (err) {
					netdev_err(dev, "Failed to link VLAN device\n");
					return NOTIFY_BAD;
				}
			} else {
				err = mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
								upper_dev);
				if (err) {
					netdev_err(dev, "Failed to unlink VLAN device\n");
					return NOTIFY_BAD;
				}
			}
		} else if (netif_is_bridge_master(upper_dev)) {
			/* Bridge upper: join/leave plus master bridge
			 * reference accounting.
			 */
			if (info->linking) {
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port);
				if (err) {
					netdev_err(dev, "Failed to join bridge\n");
					return NOTIFY_BAD;
				}
				mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
			} else {
				err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
								 true);
				/* Reference is dropped even on failure. */
				mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
				if (err) {
					netdev_err(dev, "Failed to leave bridge\n");
					return NOTIFY_BAD;
				}
			}
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
				if (err) {
					netdev_err(dev, "Failed to join link aggregation\n");
					return NOTIFY_BAD;
				}
			} else {
				err = mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							      upper_dev);
				if (err) {
					netdev_err(dev, "Failed to leave link aggregation\n");
					return NOTIFY_BAD;
				}
			}
		}
		break;
	}

	return NOTIFY_DONE;
}
2843
2844 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
2845                                                unsigned long event, void *ptr)
2846 {
2847         struct netdev_notifier_changelowerstate_info *info;
2848         struct mlxsw_sp_port *mlxsw_sp_port;
2849         int err;
2850
2851         mlxsw_sp_port = netdev_priv(dev);
2852         info = ptr;
2853
2854         switch (event) {
2855         case NETDEV_CHANGELOWERSTATE:
2856                 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
2857                         err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
2858                                                         info->lower_state_info);
2859                         if (err)
2860                                 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
2861                 }
2862                 break;
2863         }
2864
2865         return NOTIFY_DONE;
2866 }
2867
2868 static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
2869                                          unsigned long event, void *ptr)
2870 {
2871         switch (event) {
2872         case NETDEV_PRECHANGEUPPER:
2873         case NETDEV_CHANGEUPPER:
2874                 return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
2875         case NETDEV_CHANGELOWERSTATE:
2876                 return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
2877         }
2878
2879         return NOTIFY_DONE;
2880 }
2881
2882 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
2883                                         unsigned long event, void *ptr)
2884 {
2885         struct net_device *dev;
2886         struct list_head *iter;
2887         int ret;
2888
2889         netdev_for_each_lower_dev(lag_dev, dev, iter) {
2890                 if (mlxsw_sp_port_dev_check(dev)) {
2891                         ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
2892                         if (ret == NOTIFY_BAD)
2893                                 return ret;
2894                 }
2895         }
2896
2897         return NOTIFY_DONE;
2898 }
2899
2900 static struct mlxsw_sp_vfid *
2901 mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp,
2902                       const struct net_device *br_dev)
2903 {
2904         struct mlxsw_sp_vfid *vfid;
2905
2906         list_for_each_entry(vfid, &mlxsw_sp->br_vfids.list, list) {
2907                 if (vfid->br_dev == br_dev)
2908                         return vfid;
2909         }
2910
2911         return NULL;
2912 }
2913
/* Convert a global vFID number to its index in the bridge vFID range,
 * which starts after the per-port vFIDs.
 */
static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid)
{
	return vfid - MLXSW_SP_VFID_PORT_MAX;
}
2918
/* Convert a bridge vFID index back to a global vFID number. */
static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid)
{
	return MLXSW_SP_VFID_PORT_MAX + br_vfid;
}
2923
/* Return the first unused bridge vFID index; yields
 * MLXSW_SP_VFID_BR_MAX when all are mapped, so the caller must check
 * the converted value for range.
 */
static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->br_vfids.mapped,
				   MLXSW_SP_VFID_BR_MAX);
}
2929
/* Allocate a bridge vFID for @br_dev: pick a free index, create the
 * vFID in hardware, then track it in the br_vfids list and bitmap.
 * Returns the new tracking structure or an ERR_PTR on failure.
 */
static struct mlxsw_sp_vfid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp,
						     struct net_device *br_dev)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_vfid *vfid;
	u16 n_vfid;
	int err;

	/* A full bitmap maps to exactly MLXSW_SP_VFID_MAX here. */
	n_vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp));
	if (n_vfid == MLXSW_SP_VFID_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
	if (err) {
		dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
		return ERR_PTR(err);
	}

	vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
	if (!vfid)
		goto err_allocate_vfid;

	vfid->vfid = n_vfid;
	vfid->br_dev = br_dev;

	list_add(&vfid->list, &mlxsw_sp->br_vfids.list);
	set_bit(mlxsw_sp_vfid_to_br_vfid(n_vfid), mlxsw_sp->br_vfids.mapped);

	return vfid;

err_allocate_vfid:
	/* Roll back the hardware vFID created above. */
	__mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
	return ERR_PTR(-ENOMEM);
}
2966
2967 static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
2968                                      struct mlxsw_sp_vfid *vfid)
2969 {
2970         u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid->vfid);
2971
2972         clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped);
2973         list_del(&vfid->list);
2974
2975         __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);
2976
2977         kfree(vfid);
2978 }
2979
2980 static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
2981                                        struct net_device *br_dev,
2982                                        bool flush_fdb)
2983 {
2984         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
2985         u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
2986         struct net_device *dev = mlxsw_sp_vport->dev;
2987         struct mlxsw_sp_vfid *vfid, *new_vfid;
2988         int err;
2989
2990         vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
2991         if (!vfid) {
2992                 WARN_ON(!vfid);
2993                 return -EINVAL;
2994         }
2995
2996         /* We need a vFID to go back to after leaving the bridge's vFID. */
2997         new_vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
2998         if (!new_vfid) {
2999                 new_vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
3000                 if (IS_ERR(new_vfid)) {
3001                         netdev_err(dev, "Failed to create vFID for VID=%d\n",
3002                                    vid);
3003                         return PTR_ERR(new_vfid);
3004                 }
3005         }
3006
3007         /* Invalidate existing {Port, VID} to vFID mapping and create a new
3008          * one for the new vFID.
3009          */
3010         err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
3011                                            MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
3012                                            false,
3013                                            mlxsw_sp_vfid_to_fid(vfid->vfid),
3014                                            vid);
3015         if (err) {
3016                 netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
3017                            vfid->vfid);
3018                 goto err_port_vid_to_fid_invalidate;
3019         }
3020
3021         err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
3022                                            MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
3023                                            true,
3024                                            mlxsw_sp_vfid_to_fid(new_vfid->vfid),
3025                                            vid);
3026         if (err) {
3027                 netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
3028                            new_vfid->vfid);
3029                 goto err_port_vid_to_fid_validate;
3030         }
3031
3032         err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
3033         if (err) {
3034                 netdev_err(dev, "Failed to disable learning\n");
3035                 goto err_port_vid_learning_set;
3036         }
3037
3038         err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
3039                                        false);
3040         if (err) {
3041                 netdev_err(dev, "Failed clear to clear flooding\n");
3042                 goto err_vport_flood_set;
3043         }
3044
3045         err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
3046                                           MLXSW_REG_SPMS_STATE_FORWARDING);
3047         if (err) {
3048                 netdev_err(dev, "Failed to set STP state\n");
3049                 goto err_port_stp_state_set;
3050         }
3051
3052         if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport))
3053                 netdev_err(dev, "Failed to flush FDB\n");
3054
3055         /* Switch between the vFIDs and destroy the old one if needed. */
3056         new_vfid->nr_vports++;
3057         mlxsw_sp_vport->vport.vfid = new_vfid;
3058         vfid->nr_vports--;
3059         if (!vfid->nr_vports)
3060                 mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
3061
3062         mlxsw_sp_vport->learning = 0;
3063         mlxsw_sp_vport->learning_sync = 0;
3064         mlxsw_sp_vport->uc_flood = 0;
3065         mlxsw_sp_vport->bridged = 0;
3066
3067         return 0;
3068
3069 err_port_stp_state_set:
3070 err_vport_flood_set:
3071 err_port_vid_learning_set:
3072 err_port_vid_to_fid_validate:
3073 err_port_vid_to_fid_invalidate:
3074         /* Rollback vFID only if new. */
3075         if (!new_vfid->nr_vports)
3076                 mlxsw_sp_vfid_destroy(mlxsw_sp, new_vfid);
3077         return err;
3078 }
3079
/* Move a vPort from its current vFID to the bridge's vFID, enabling
 * flooding and learning, and remapping {Port, VID} to the new FID.
 * Creates the bridge vFID on first use. Returns 0 on success or a
 * negative errno; on failure all steps are rolled back in reverse.
 */
static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *br_dev)
{
	struct mlxsw_sp_vfid *old_vfid = mlxsw_sp_vport->vport.vfid;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	struct mlxsw_sp_vfid *vfid;
	int err;

	vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
	if (!vfid) {
		/* First vPort joining this bridge: allocate its vFID. */
		vfid = mlxsw_sp_br_vfid_create(mlxsw_sp, br_dev);
		if (IS_ERR(vfid)) {
			netdev_err(dev, "Failed to create bridge vFID\n");
			return PTR_ERR(vfid);
		}
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, true, false);
	if (err) {
		netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
			   vfid->vfid);
		goto err_port_flood_set;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning\n");
		goto err_port_vid_learning_set;
	}

	/* We need to invalidate existing {Port, VID} to vFID mapping and
	 * create a new one for the bridge's vFID.
	 */
	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   false,
					   mlxsw_sp_vfid_to_fid(old_vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
			   old_vfid->vfid);
		goto err_port_vid_to_fid_invalidate;
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   true,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
			   vfid->vfid);
		goto err_port_vid_to_fid_validate;
	}

	/* Switch between the vFIDs and destroy the old one if needed. */
	vfid->nr_vports++;
	mlxsw_sp_vport->vport.vfid = vfid;
	old_vfid->nr_vports--;
	if (!old_vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp, old_vfid);

	mlxsw_sp_vport->learning = 1;
	mlxsw_sp_vport->learning_sync = 1;
	mlxsw_sp_vport->uc_flood = 1;
	mlxsw_sp_vport->bridged = 1;

	return 0;

err_port_vid_to_fid_validate:
	/* Re-validate the old mapping invalidated above. */
	mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
				     MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
				     mlxsw_sp_vfid_to_fid(old_vfid->vfid), vid);
err_port_vid_to_fid_invalidate:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
err_port_vid_learning_set:
	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, false);
err_port_flood_set:
	/* Destroy the bridge vFID if this call created it. */
	if (!vfid->nr_vports)
		mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
	return err;
}
3164
3165 static bool
3166 mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
3167                                   const struct net_device *br_dev)
3168 {
3169         struct mlxsw_sp_port *mlxsw_sp_vport;
3170
3171         list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
3172                             vport.list) {
3173                 if (mlxsw_sp_vport_br_get(mlxsw_sp_vport) == br_dev)
3174                         return false;
3175         }
3176
3177         return true;
3178 }
3179
3180 static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
3181                                           unsigned long event, void *ptr,
3182                                           u16 vid)
3183 {
3184         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
3185         struct netdev_notifier_changeupper_info *info = ptr;
3186         struct mlxsw_sp_port *mlxsw_sp_vport;
3187         struct net_device *upper_dev;
3188         int err;
3189
3190         mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
3191
3192         switch (event) {
3193         case NETDEV_PRECHANGEUPPER:
3194                 upper_dev = info->upper_dev;
3195                 if (!info->master || !info->linking)
3196                         break;
3197                 if (!netif_is_bridge_master(upper_dev))
3198                         return NOTIFY_BAD;
3199                 /* We can't have multiple VLAN interfaces configured on
3200                  * the same port and being members in the same bridge.
3201                  */
3202                 if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
3203                                                        upper_dev))
3204                         return NOTIFY_BAD;
3205                 break;
3206         case NETDEV_CHANGEUPPER:
3207                 upper_dev = info->upper_dev;
3208                 if (!info->master)
3209                         break;
3210                 if (info->linking) {
3211                         if (!mlxsw_sp_vport) {
3212                                 WARN_ON(!mlxsw_sp_vport);
3213                                 return NOTIFY_BAD;
3214                         }
3215                         err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
3216                                                          upper_dev);
3217                         if (err) {
3218                                 netdev_err(dev, "Failed to join bridge\n");
3219                                 return NOTIFY_BAD;
3220                         }
3221                 } else {
3222                         /* We ignore bridge's unlinking notifications if vPort
3223                          * is gone, since we already left the bridge when the
3224                          * VLAN device was unlinked from the real device.
3225                          */
3226                         if (!mlxsw_sp_vport)
3227                                 return NOTIFY_DONE;
3228                         err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport,
3229                                                           upper_dev, true);
3230                         if (err) {
3231                                 netdev_err(dev, "Failed to leave bridge\n");
3232                                 return NOTIFY_BAD;
3233                         }
3234                 }
3235         }
3236
3237         return NOTIFY_DONE;
3238 }
3239
3240 static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
3241                                               unsigned long event, void *ptr,
3242                                               u16 vid)
3243 {
3244         struct net_device *dev;
3245         struct list_head *iter;
3246         int ret;
3247
3248         netdev_for_each_lower_dev(lag_dev, dev, iter) {
3249                 if (mlxsw_sp_port_dev_check(dev)) {
3250                         ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
3251                                                              vid);
3252                         if (ret == NOTIFY_BAD)
3253                                 return ret;
3254                 }
3255         }
3256
3257         return NOTIFY_DONE;
3258 }
3259
3260 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
3261                                          unsigned long event, void *ptr)
3262 {
3263         struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
3264         u16 vid = vlan_dev_vlan_id(vlan_dev);
3265
3266         if (mlxsw_sp_port_dev_check(real_dev))
3267                 return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
3268                                                       vid);
3269         else if (netif_is_lag_master(real_dev))
3270                 return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
3271                                                           vid);
3272
3273         return NOTIFY_DONE;
3274 }
3275
3276 static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
3277                                     unsigned long event, void *ptr)
3278 {
3279         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3280
3281         if (mlxsw_sp_port_dev_check(dev))
3282                 return mlxsw_sp_netdevice_port_event(dev, event, ptr);
3283
3284         if (netif_is_lag_master(dev))
3285                 return mlxsw_sp_netdevice_lag_event(dev, event, ptr);
3286
3287         if (is_vlan_dev(dev))
3288                 return mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
3289
3290         return NOTIFY_DONE;
3291 }
3292
/* Notifier block hooked into the netdevice notifier chain; registered
 * in mlxsw_sp_module_init() and unregistered in mlxsw_sp_module_exit().
 */
static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};
3296
3297 static int __init mlxsw_sp_module_init(void)
3298 {
3299         int err;
3300
3301         register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3302         err = mlxsw_core_driver_register(&mlxsw_sp_driver);
3303         if (err)
3304                 goto err_core_driver_register;
3305         return 0;
3306
3307 err_core_driver_register:
3308         unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3309         return err;
3310 }
3311
/* Module teardown: unregister in reverse order of registration —
 * core driver first, then the netdevice notifier.
 */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}
3317
/* Standard kernel module entry points and metadata. */
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);