926019e86c36a5168c349ef15d967ff6ddbaf5da
[cascardo/linux.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum.c
1 /*
2  * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
3  * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4  * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5  * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6  * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/netdevice.h>
41 #include <linux/etherdevice.h>
42 #include <linux/ethtool.h>
43 #include <linux/slab.h>
44 #include <linux/device.h>
45 #include <linux/skbuff.h>
46 #include <linux/if_vlan.h>
47 #include <linux/if_bridge.h>
48 #include <linux/workqueue.h>
49 #include <linux/jiffies.h>
50 #include <linux/bitops.h>
51 #include <linux/list.h>
52 #include <net/devlink.h>
53 #include <net/switchdev.h>
54 #include <generated/utsrelease.h>
55
56 #include "spectrum.h"
57 #include "core.h"
58 #include "reg.h"
59 #include "port.h"
60 #include "trap.h"
61 #include "txheader.h"
62
/* Driver identification strings reported via ethtool/modinfo. */
static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

/* Tx header field definitions. The Tx header is prepended to every packet
 * handed to the device by the driver (see mlxsw_sp_txhdr_construct()).
 */

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
133
134 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
135                                      const struct mlxsw_tx_info *tx_info)
136 {
137         char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
138
139         memset(txhdr, 0, MLXSW_TXHDR_LEN);
140
141         mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
142         mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
143         mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
144         mlxsw_tx_hdr_swid_set(txhdr, 0);
145         mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
146         mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
147         mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
148 }
149
150 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
151 {
152         char spad_pl[MLXSW_REG_SPAD_LEN];
153         int err;
154
155         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
156         if (err)
157                 return err;
158         mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
159         return 0;
160 }
161
162 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
163                                           bool is_up)
164 {
165         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
166         char paos_pl[MLXSW_REG_PAOS_LEN];
167
168         mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
169                             is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
170                             MLXSW_PORT_ADMIN_STATUS_DOWN);
171         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
172 }
173
174 static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
175                                          bool *p_is_up)
176 {
177         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
178         char paos_pl[MLXSW_REG_PAOS_LEN];
179         u8 oper_status;
180         int err;
181
182         mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
183         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
184         if (err)
185                 return err;
186         oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
187         *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
188         return 0;
189 }
190
191 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
192                                       unsigned char *addr)
193 {
194         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
195         char ppad_pl[MLXSW_REG_PPAD_LEN];
196
197         mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
198         mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
199         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
200 }
201
202 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
203 {
204         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
205         unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
206
207         ether_addr_copy(addr, mlxsw_sp->base_mac);
208         addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
209         return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
210 }
211
212 static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
213                                        u16 vid, enum mlxsw_reg_spms_state state)
214 {
215         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
216         char *spms_pl;
217         int err;
218
219         spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
220         if (!spms_pl)
221                 return -ENOMEM;
222         mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
223         mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
224         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
225         kfree(spms_pl);
226         return err;
227 }
228
229 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
230 {
231         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
232         char pmtu_pl[MLXSW_REG_PMTU_LEN];
233         int max_mtu;
234         int err;
235
236         mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
237         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
238         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
239         if (err)
240                 return err;
241         max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
242
243         if (mtu > max_mtu)
244                 return -EINVAL;
245
246         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
247         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
248 }
249
250 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
251 {
252         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
253         char pspa_pl[MLXSW_REG_PSPA_LEN];
254
255         mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
256         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
257 }
258
259 static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
260                                      bool enable)
261 {
262         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
263         char svpe_pl[MLXSW_REG_SVPE_LEN];
264
265         mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
266         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
267 }
268
269 int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
270                                  enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
271                                  u16 vid)
272 {
273         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
274         char svfa_pl[MLXSW_REG_SVFA_LEN];
275
276         mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
277                             fid, vid);
278         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
279 }
280
281 static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
282                                           u16 vid, bool learn_enable)
283 {
284         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
285         char *spvmlr_pl;
286         int err;
287
288         spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
289         if (!spvmlr_pl)
290                 return -ENOMEM;
291         mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
292                               learn_enable);
293         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
294         kfree(spvmlr_pl);
295         return err;
296 }
297
298 static int
299 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
300 {
301         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
302         char sspr_pl[MLXSW_REG_SSPR_LEN];
303
304         mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
305         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
306 }
307
308 static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
309                                          u8 local_port, u8 *p_module,
310                                          u8 *p_width)
311 {
312         char pmlp_pl[MLXSW_REG_PMLP_LEN];
313         int err;
314
315         mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
316         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
317         if (err)
318                 return err;
319         *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
320         *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
321         return 0;
322 }
323
324 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
325 {
326         char pmlp_pl[MLXSW_REG_PMLP_LEN];
327
328         mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
329         mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
330         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
331 }
332
333 static int mlxsw_sp_port_open(struct net_device *dev)
334 {
335         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
336         int err;
337
338         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
339         if (err)
340                 return err;
341         netif_start_queue(dev);
342         return 0;
343 }
344
345 static int mlxsw_sp_port_stop(struct net_device *dev)
346 {
347         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
348
349         netif_stop_queue(dev);
350         return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
351 }
352
/* ndo_start_xmit: prepend a Tx header and hand the skb to the core for
 * transmission, maintaining per-CPU Tx statistics.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp, &tx_info))
		return NETDEV_TX_BUSY;

	/* Make room for the Tx header; a reallocation replaces the skb,
	 * in which case the original must be freed by us.
	 */
	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
	}

	/* NOTE(review): eth_skb_pad() is expected to free the skb on
	 * failure, hence no explicit free here — confirm.
	 */
	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* Record the length before the core consumes the skb. */
	len = skb->len;
	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		/* The core did not consume the skb; drop it here. */
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
404
/* ndo_set_rx_mode: intentionally a no-op for this device. */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
408
409 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
410 {
411         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
412         struct sockaddr *addr = p;
413         int err;
414
415         if (!is_valid_ether_addr(addr->sa_data))
416                 return -EADDRNOTAVAIL;
417
418         err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
419         if (err)
420                 return err;
421         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
422         return 0;
423 }
424
425 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
426 {
427         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
428         int err;
429
430         err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
431         if (err)
432                 return err;
433         dev->mtu = mtu;
434         return 0;
435 }
436
/* ndo_get_stats64: aggregate the per-CPU Rx/Tx counters into @stats.
 * The u64 counters are read under the per-CPU seqcount to get a
 * consistent snapshot.
 */
static struct rtnl_link_stats64 *
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		/* Retry until the snapshot is not torn by a concurrent
		 * writer on that CPU.
		 */
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped	+= p->tx_dropped;
	}
	stats->tx_dropped	= tx_dropped;
	return stats;
}
468
469 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
470                            u16 vid_end, bool is_member, bool untagged)
471 {
472         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
473         char *spvm_pl;
474         int err;
475
476         spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
477         if (!spvm_pl)
478                 return -ENOMEM;
479
480         mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
481                             vid_end, is_member, untagged);
482         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
483         kfree(spvm_pl);
484         return err;
485 }
486
/* Transition the port to Virtual mode: install an explicit {Port, VID}
 * to FID mapping for every active VLAN, then enable Virtual Port mode.
 * On failure, tear down exactly the mappings installed so far.
 */
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			/* Roll back only VIDs below the failing one. */
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		/* All mappings were installed; roll back everything. */
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}
516
517 static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
518 {
519         enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
520         u16 vid;
521         int err;
522
523         err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
524         if (err)
525                 return err;
526
527         for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
528                 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
529                                                    vid, vid);
530                 if (err)
531                         return err;
532         }
533
534         return 0;
535 }
536
537 static struct mlxsw_sp_vfid *
538 mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid)
539 {
540         struct mlxsw_sp_vfid *vfid;
541
542         list_for_each_entry(vfid, &mlxsw_sp->port_vfids.list, list) {
543                 if (vfid->vid == vid)
544                         return vfid;
545         }
546
547         return NULL;
548 }
549
/* Return the first unused vFID number, or MLXSW_SP_VFID_PORT_MAX when
 * all vFIDs are in use.
 */
static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->port_vfids.mapped,
				   MLXSW_SP_VFID_PORT_MAX);
}
555
556 static int __mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid)
557 {
558         u16 fid = mlxsw_sp_vfid_to_fid(vfid);
559         char sfmr_pl[MLXSW_REG_SFMR_LEN];
560
561         mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, 0);
562         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
563 }
564
565 static void __mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid)
566 {
567         u16 fid = mlxsw_sp_vfid_to_fid(vfid);
568         char sfmr_pl[MLXSW_REG_SFMR_LEN];
569
570         mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, fid, 0);
571         mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
572 }
573
574 static struct mlxsw_sp_vfid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
575                                                   u16 vid)
576 {
577         struct device *dev = mlxsw_sp->bus_info->dev;
578         struct mlxsw_sp_vfid *vfid;
579         u16 n_vfid;
580         int err;
581
582         n_vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
583         if (n_vfid == MLXSW_SP_VFID_PORT_MAX) {
584                 dev_err(dev, "No available vFIDs\n");
585                 return ERR_PTR(-ERANGE);
586         }
587
588         err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
589         if (err) {
590                 dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
591                 return ERR_PTR(err);
592         }
593
594         vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
595         if (!vfid)
596                 goto err_allocate_vfid;
597
598         vfid->vfid = n_vfid;
599         vfid->vid = vid;
600
601         list_add(&vfid->list, &mlxsw_sp->port_vfids.list);
602         set_bit(n_vfid, mlxsw_sp->port_vfids.mapped);
603
604         return vfid;
605
606 err_allocate_vfid:
607         __mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
608         return ERR_PTR(-ENOMEM);
609 }
610
611 static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
612                                   struct mlxsw_sp_vfid *vfid)
613 {
614         clear_bit(vfid->vfid, mlxsw_sp->port_vfids.mapped);
615         list_del(&vfid->list);
616
617         __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);
618
619         kfree(vfid);
620 }
621
622 static struct mlxsw_sp_port *
623 mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port,
624                            struct mlxsw_sp_vfid *vfid)
625 {
626         struct mlxsw_sp_port *mlxsw_sp_vport;
627
628         mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
629         if (!mlxsw_sp_vport)
630                 return NULL;
631
632         /* dev will be set correctly after the VLAN device is linked
633          * with the real device. In case of bridge SELF invocation, dev
634          * will remain as is.
635          */
636         mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
637         mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
638         mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
639         mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
640         mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
641         mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
642         mlxsw_sp_vport->vport.vfid = vfid;
643         mlxsw_sp_vport->vport.vid = vfid->vid;
644
645         list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);
646
647         return mlxsw_sp_vport;
648 }
649
650 static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
651 {
652         list_del(&mlxsw_sp_vport->vport.list);
653         kfree(mlxsw_sp_vport);
654 }
655
/* ndo_vlan_rx_add_vid: create a vPort for @vid on the port.
 *
 * Sequence: find/create the vFID for @vid, create the vPort, enable
 * flooding for a brand-new vFID, switch the port to Virtual mode on the
 * first vPort, map {Port, VID} to the vFID, disable learning, add VLAN
 * membership and finally set the STP state. Each error path unwinds the
 * steps completed so far, in reverse order.
 */
int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
			  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_vfid *vfid;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
		netdev_warn(dev, "VID=%d already configured\n", vid);
		return 0;
	}

	/* vFIDs are shared between ports; create one only if no port
	 * uses @vid yet.
	 */
	vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
	if (!vfid) {
		vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
		if (IS_ERR(vfid)) {
			netdev_err(dev, "Failed to create vFID for VID=%d\n",
				   vid);
			return PTR_ERR(vfid);
		}
	}

	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vfid);
	if (!mlxsw_sp_vport) {
		netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
		err = -ENOMEM;
		goto err_port_vport_create;
	}

	/* Only a freshly created vFID (no vPorts yet) needs its flooding
	 * configuration set up.
	 */
	if (!vfid->nr_vports) {
		err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid,
					       true, false);
		if (err) {
			netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
				   vfid->vfid);
			goto err_vport_flood_set;
		}
	}

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to Virtual mode\n");
			goto err_port_vp_mode_trans;
		}
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   true,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n",
			   vid, vfid->vfid);
		goto err_port_vid_to_fid_set;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
	if (err) {
		netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
		goto err_port_vid_learning_set;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		goto err_port_add_vid;
	}

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		goto err_port_stp_state_set;
	}

	vfid->nr_vports++;

	return 0;

/* Error unwinding: reverse order of the setup steps above. */
err_port_stp_state_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
err_port_add_vid:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
err_port_vid_learning_set:
	mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
				     MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
				     mlxsw_sp_vfid_to_fid(vfid->vfid), vid);
err_port_vid_to_fid_set:
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
	if (!vfid->nr_vports)
		mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
					 false);
err_vport_flood_set:
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
err_port_vport_create:
	if (!vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
	return err;
}
772
/* ndo_vlan_rx_kill_vid: tear down the vPort for @vid on the port,
 * undoing the steps of mlxsw_sp_port_add_vid() in reverse order.
 * Intermediate failures are reported and abort the teardown.
 */
int mlxsw_sp_port_kill_vid(struct net_device *dev,
			   __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_vfid *vfid;
	int err;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		netdev_warn(dev, "VID=%d does not exist\n", vid);
		return 0;
	}

	vfid = mlxsw_sp_vport->vport.vfid;

	/* Stop forwarding before removing VLAN membership. */
	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_DISCARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		return err;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		return err;
	}

	/* Restore the default learning behavior. */
	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
		return err;
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   false,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
			   vid, vfid->vfid);
		return err;
	}

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set port's mode to VLAN mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to VLAN mode\n");
			return err;
		}
	}

	vfid->nr_vports--;
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);

	/* Destroy the vFID if no vPorts are assigned to it anymore. */
	if (!vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp_port->mlxsw_sp, vfid);

	return 0;
}
847
/* Netdevice operations for a Spectrum front-panel port. VLAN
 * add/kill manage per-VLAN vPorts; FDB and bridge operations are
 * delegated to the generic switchdev helpers.
 */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
};
865
866 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
867                                       struct ethtool_drvinfo *drvinfo)
868 {
869         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
870         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
871
872         strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
873         strlcpy(drvinfo->version, mlxsw_sp_driver_version,
874                 sizeof(drvinfo->version));
875         snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
876                  "%d.%d.%d",
877                  mlxsw_sp->bus_info->fw_rev.major,
878                  mlxsw_sp->bus_info->fw_rev.minor,
879                  mlxsw_sp->bus_info->fw_rev.subminor);
880         strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
881                 sizeof(drvinfo->bus_info));
882 }
883
/* Binds an ethtool statistic name to the accessor that extracts the
 * corresponding counter from a PPCNT register payload.
 */
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];	/* name reported via get_strings */
	u64 (*getter)(char *payload);	/* reads the counter from PPCNT */
};
888
/* Per-port hardware counters exposed through ethtool -S. Entry order
 * defines the statistic index, so get_strings and get_ethtool_stats
 * (which both iterate this table) stay in sync; do not reorder.
 */
static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
969
970 static void mlxsw_sp_port_get_strings(struct net_device *dev,
971                                       u32 stringset, u8 *data)
972 {
973         u8 *p = data;
974         int i;
975
976         switch (stringset) {
977         case ETH_SS_STATS:
978                 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
979                         memcpy(p, mlxsw_sp_port_hw_stats[i].str,
980                                ETH_GSTRING_LEN);
981                         p += ETH_GSTRING_LEN;
982                 }
983                 break;
984         }
985 }
986
987 static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
988                                      enum ethtool_phys_id_state state)
989 {
990         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
991         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
992         char mlcr_pl[MLXSW_REG_MLCR_LEN];
993         bool active;
994
995         switch (state) {
996         case ETHTOOL_ID_ACTIVE:
997                 active = true;
998                 break;
999         case ETHTOOL_ID_INACTIVE:
1000                 active = false;
1001                 break;
1002         default:
1003                 return -EOPNOTSUPP;
1004         }
1005
1006         mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
1007         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
1008 }
1009
1010 static void mlxsw_sp_port_get_stats(struct net_device *dev,
1011                                     struct ethtool_stats *stats, u64 *data)
1012 {
1013         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1014         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1015         char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
1016         int i;
1017         int err;
1018
1019         mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port);
1020         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
1021         for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
1022                 data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
1023 }
1024
1025 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
1026 {
1027         switch (sset) {
1028         case ETH_SS_STATS:
1029                 return MLXSW_SP_PORT_HW_STATS_LEN;
1030         default:
1031                 return -EOPNOTSUPP;
1032         }
1033 }
1034
/* One table row per link mode: the PTYS protocol bit(s) and the
 * corresponding ethtool SUPPORTED_ / ADVERTISED_ bits and speed.
 */
struct mlxsw_sp_port_link_mode {
	u32 mask;	/* PTYS eth proto bit(s) for this mode */
	u32 supported;	/* ethtool SUPPORTED_* bits (0 if none defined) */
	u32 advertised;	/* ethtool ADVERTISED_* bits (0 if none defined) */
	u32 speed;	/* speed in Mb/s */
};
1041
/* PTYS <-> ethtool link-mode translation table. Some modes carry no
 * SUPPORTED_/ADVERTISED_ bits (fields left zero-initialized) because
 * ethtool defines none for them. Speed/duplex lookup takes the first
 * matching entry, so keep related masks grouped.
 */
static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported	= SUPPORTED_100baseT_Full,
		.advertised	= ADVERTISED_100baseT_Full,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported	= SUPPORTED_1000baseKX_Full,
		.advertised	= ADVERTISED_1000baseKX_Full,
		.speed		= 1000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported	= SUPPORTED_10000baseT_Full,
		.advertised	= ADVERTISED_10000baseT_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported	= SUPPORTED_10000baseKX4_Full,
		.advertised	= ADVERTISED_10000baseKX4_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported	= SUPPORTED_10000baseKR_Full,
		.advertised	= ADVERTISED_10000baseKR_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported	= SUPPORTED_20000baseKR2_Full,
		.advertised	= ADVERTISED_20000baseKR2_Full,
		.speed		= 20000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported	= SUPPORTED_40000baseCR4_Full,
		.advertised	= ADVERTISED_40000baseCR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported	= SUPPORTED_40000baseKR4_Full,
		.advertised	= ADVERTISED_40000baseKR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported	= SUPPORTED_40000baseSR4_Full,
		.advertised	= ADVERTISED_40000baseSR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported	= SUPPORTED_40000baseLR4_Full,
		.advertised	= ADVERTISED_40000baseLR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed		= 25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed		= 50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.supported	= SUPPORTED_56000baseKR4_Full,
		.advertised	= ADVERTISED_56000baseKR4_Full,
		.speed		= 56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed		= 100000,
	},
};

#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
1140
1141 static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
1142 {
1143         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1144                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1145                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1146                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1147                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1148                               MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1149                 return SUPPORTED_FIBRE;
1150
1151         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1152                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1153                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1154                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
1155                               MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
1156                 return SUPPORTED_Backplane;
1157         return 0;
1158 }
1159
1160 static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
1161 {
1162         u32 modes = 0;
1163         int i;
1164
1165         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1166                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1167                         modes |= mlxsw_sp_port_link_mode[i].supported;
1168         }
1169         return modes;
1170 }
1171
1172 static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
1173 {
1174         u32 modes = 0;
1175         int i;
1176
1177         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1178                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1179                         modes |= mlxsw_sp_port_link_mode[i].advertised;
1180         }
1181         return modes;
1182 }
1183
1184 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
1185                                             struct ethtool_cmd *cmd)
1186 {
1187         u32 speed = SPEED_UNKNOWN;
1188         u8 duplex = DUPLEX_UNKNOWN;
1189         int i;
1190
1191         if (!carrier_ok)
1192                 goto out;
1193
1194         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1195                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
1196                         speed = mlxsw_sp_port_link_mode[i].speed;
1197                         duplex = DUPLEX_FULL;
1198                         break;
1199                 }
1200         }
1201 out:
1202         ethtool_cmd_speed_set(cmd, speed);
1203         cmd->duplex = duplex;
1204 }
1205
1206 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
1207 {
1208         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1209                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1210                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1211                               MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1212                 return PORT_FIBRE;
1213
1214         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1215                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1216                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
1217                 return PORT_DA;
1218
1219         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1220                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1221                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1222                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
1223                 return PORT_NONE;
1224
1225         return PORT_OTHER;
1226 }
1227
1228 static int mlxsw_sp_port_get_settings(struct net_device *dev,
1229                                       struct ethtool_cmd *cmd)
1230 {
1231         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1232         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1233         char ptys_pl[MLXSW_REG_PTYS_LEN];
1234         u32 eth_proto_cap;
1235         u32 eth_proto_admin;
1236         u32 eth_proto_oper;
1237         int err;
1238
1239         mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
1240         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1241         if (err) {
1242                 netdev_err(dev, "Failed to get proto");
1243                 return err;
1244         }
1245         mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
1246                               &eth_proto_admin, &eth_proto_oper);
1247
1248         cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
1249                          mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
1250                          SUPPORTED_Pause | SUPPORTED_Asym_Pause;
1251         cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
1252         mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
1253                                         eth_proto_oper, cmd);
1254
1255         eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
1256         cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
1257         cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);
1258
1259         cmd->transceiver = XCVR_INTERNAL;
1260         return 0;
1261 }
1262
1263 static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
1264 {
1265         u32 ptys_proto = 0;
1266         int i;
1267
1268         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1269                 if (advertising & mlxsw_sp_port_link_mode[i].advertised)
1270                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1271         }
1272         return ptys_proto;
1273 }
1274
1275 static u32 mlxsw_sp_to_ptys_speed(u32 speed)
1276 {
1277         u32 ptys_proto = 0;
1278         int i;
1279
1280         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1281                 if (speed == mlxsw_sp_port_link_mode[i].speed)
1282                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1283         }
1284         return ptys_proto;
1285 }
1286
1287 static int mlxsw_sp_port_set_settings(struct net_device *dev,
1288                                       struct ethtool_cmd *cmd)
1289 {
1290         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1291         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1292         char ptys_pl[MLXSW_REG_PTYS_LEN];
1293         u32 speed;
1294         u32 eth_proto_new;
1295         u32 eth_proto_cap;
1296         u32 eth_proto_admin;
1297         bool is_up;
1298         int err;
1299
1300         speed = ethtool_cmd_speed(cmd);
1301
1302         eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
1303                 mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
1304                 mlxsw_sp_to_ptys_speed(speed);
1305
1306         mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
1307         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1308         if (err) {
1309                 netdev_err(dev, "Failed to get proto");
1310                 return err;
1311         }
1312         mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);
1313
1314         eth_proto_new = eth_proto_new & eth_proto_cap;
1315         if (!eth_proto_new) {
1316                 netdev_err(dev, "Not supported proto admin requested");
1317                 return -EINVAL;
1318         }
1319         if (eth_proto_new == eth_proto_admin)
1320                 return 0;
1321
1322         mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
1323         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1324         if (err) {
1325                 netdev_err(dev, "Failed to set proto admin");
1326                 return err;
1327         }
1328
1329         err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
1330         if (err) {
1331                 netdev_err(dev, "Failed to get oper status");
1332                 return err;
1333         }
1334         if (!is_up)
1335                 return 0;
1336
1337         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1338         if (err) {
1339                 netdev_err(dev, "Failed to set admin status");
1340                 return err;
1341         }
1342
1343         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
1344         if (err) {
1345                 netdev_err(dev, "Failed to set admin status");
1346                 return err;
1347         }
1348
1349         return 0;
1350 }
1351
/* ethtool operations for a Spectrum front-panel port. */
static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_settings		= mlxsw_sp_port_get_settings,
	.set_settings		= mlxsw_sp_port_set_settings,
};
1362
/* Create and register the netdev, devlink port and hardware state for
 * local port @local_port. On success the port is stored in
 * mlxsw_sp->ports[local_port]. On failure everything allocated so far
 * is unwound via the goto chain, in reverse order of acquisition.
 */
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct devlink_port *devlink_port;
	struct net_device *dev;
	size_t bytes;
	int err;

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev)
		return -ENOMEM;
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	/* One bit per possible VLAN ID for the membership bitmaps. */
	bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
	mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->active_vlans) {
		err = -ENOMEM;
		goto err_port_active_vlans_alloc;
	}
	mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->untagged_vlans) {
		err = -ENOMEM;
		goto err_port_untagged_vlans_alloc;
	}
	INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	/* Carrier is driven by PUDE link events once the port is up. */
	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->hard_header_len += MLXSW_TXHDR_LEN;

	devlink_port = &mlxsw_sp_port->devlink_port;
	err = devlink_port_register(devlink, devlink_port, local_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register devlink port\n",
			mlxsw_sp_port->local_port);
		goto err_devlink_port_register;
	}

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	/* NOTE(review): the error path below does not undo this switchdev
	 * init (no mlxsw_sp_port_switchdev_fini before free_netdev) -
	 * confirm whether that leaks switchdev state on failure.
	 */
	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	devlink_port_type_eth_set(devlink_port, dev);

	err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
	if (err)
		goto err_port_vlan_init;

	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	return 0;

err_port_vlan_init:
	unregister_netdev(dev);
err_register_netdev:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_swid_set:
err_port_system_port_mapping_set:
	devlink_port_unregister(&mlxsw_sp_port->devlink_port);
err_devlink_port_register:
err_dev_addr_init:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	kfree(mlxsw_sp_port->untagged_vlans);
err_port_untagged_vlans_alloc:
	kfree(mlxsw_sp_port->active_vlans);
err_port_active_vlans_alloc:
	free_netdev(dev);
	return err;
}
1496
1497 static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
1498 {
1499         struct net_device *dev = mlxsw_sp_port->dev;
1500         struct mlxsw_sp_port *mlxsw_sp_vport, *tmp;
1501
1502         list_for_each_entry_safe(mlxsw_sp_vport, tmp,
1503                                  &mlxsw_sp_port->vports_list, vport.list) {
1504                 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
1505
1506                 /* vPorts created for VLAN devices should already be gone
1507                  * by now, since we unregistered the port netdev.
1508                  */
1509                 WARN_ON(is_vlan_dev(mlxsw_sp_vport->dev));
1510                 mlxsw_sp_port_kill_vid(dev, 0, vid);
1511         }
1512 }
1513
/* Tear down the port created by mlxsw_sp_port_create(), in reverse
 * order of creation. Safe to call for ports that were never created
 * (the ports[] slot is NULL).
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct devlink_port *devlink_port;

	if (!mlxsw_sp_port)
		return;
	mlxsw_sp->ports[local_port] = NULL;
	devlink_port = &mlxsw_sp_port->devlink_port;
	devlink_port_type_clear(devlink_port);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	devlink_port_unregister(devlink_port);
	/* vPorts must be destroyed after the netdev is unregistered, so
	 * no VLAN uppers can still be referencing them.
	 */
	mlxsw_sp_port_vports_fini(mlxsw_sp_port);
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	kfree(mlxsw_sp_port->untagged_vlans);
	kfree(mlxsw_sp_port->active_vlans);
	free_netdev(mlxsw_sp_port->dev);
}
1535
1536 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
1537 {
1538         int i;
1539
1540         for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
1541                 mlxsw_sp_port_remove(mlxsw_sp, i);
1542         kfree(mlxsw_sp->ports);
1543 }
1544
1545 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
1546 {
1547         size_t alloc_size;
1548         u8 module, width;
1549         int i;
1550         int err;
1551
1552         alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
1553         mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
1554         if (!mlxsw_sp->ports)
1555                 return -ENOMEM;
1556
1557         for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
1558                 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
1559                                                     &width);
1560                 if (err)
1561                         goto err_port_module_info_get;
1562                 if (!width)
1563                         continue;
1564                 mlxsw_sp->port_to_module[i] = module;
1565                 err = mlxsw_sp_port_create(mlxsw_sp, i);
1566                 if (err)
1567                         goto err_port_create;
1568         }
1569         return 0;
1570
1571 err_port_create:
1572 err_port_module_info_get:
1573         for (i--; i >= 1; i--)
1574                 mlxsw_sp_port_remove(mlxsw_sp, i);
1575         kfree(mlxsw_sp->ports);
1576         return err;
1577 }
1578
1579 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
1580                                      char *pude_pl, void *priv)
1581 {
1582         struct mlxsw_sp *mlxsw_sp = priv;
1583         struct mlxsw_sp_port *mlxsw_sp_port;
1584         enum mlxsw_reg_pude_oper_status status;
1585         u8 local_port;
1586
1587         local_port = mlxsw_reg_pude_local_port_get(pude_pl);
1588         mlxsw_sp_port = mlxsw_sp->ports[local_port];
1589         if (!mlxsw_sp_port) {
1590                 dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n",
1591                          local_port);
1592                 return;
1593         }
1594
1595         status = mlxsw_reg_pude_oper_status_get(pude_pl);
1596         if (status == MLXSW_PORT_OPER_STATUS_UP) {
1597                 netdev_info(mlxsw_sp_port->dev, "link up\n");
1598                 netif_carrier_on(mlxsw_sp_port->dev);
1599         } else {
1600                 netdev_info(mlxsw_sp_port->dev, "link down\n");
1601                 netif_carrier_off(mlxsw_sp_port->dev);
1602         }
1603 }
1604
/* Event listener instance used to receive PUDE (port link) traps. */
static struct mlxsw_event_listener mlxsw_sp_pude_event = {
	.func = mlxsw_sp_pude_event_func,
	.trap_id = MLXSW_TRAP_ID_PUDE,
};
1609
1610 static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
1611                                    enum mlxsw_event_trap_id trap_id)
1612 {
1613         struct mlxsw_event_listener *el;
1614         char hpkt_pl[MLXSW_REG_HPKT_LEN];
1615         int err;
1616
1617         switch (trap_id) {
1618         case MLXSW_TRAP_ID_PUDE:
1619                 el = &mlxsw_sp_pude_event;
1620                 break;
1621         }
1622         err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
1623         if (err)
1624                 return err;
1625
1626         mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
1627         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
1628         if (err)
1629                 goto err_event_trap_set;
1630
1631         return 0;
1632
1633 err_event_trap_set:
1634         mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
1635         return err;
1636 }
1637
1638 static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
1639                                       enum mlxsw_event_trap_id trap_id)
1640 {
1641         struct mlxsw_event_listener *el;
1642
1643         switch (trap_id) {
1644         case MLXSW_TRAP_ID_PUDE:
1645                 el = &mlxsw_sp_pude_event;
1646                 break;
1647         }
1648         mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
1649 }
1650
1651 static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
1652                                       void *priv)
1653 {
1654         struct mlxsw_sp *mlxsw_sp = priv;
1655         struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
1656         struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
1657
1658         if (unlikely(!mlxsw_sp_port)) {
1659                 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
1660                                      local_port);
1661                 return;
1662         }
1663
1664         skb->dev = mlxsw_sp_port->dev;
1665
1666         pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
1667         u64_stats_update_begin(&pcpu_stats->syncp);
1668         pcpu_stats->rx_packets++;
1669         pcpu_stats->rx_bytes += skb->len;
1670         u64_stats_update_end(&pcpu_stats->syncp);
1671
1672         skb->protocol = eth_type_trans(skb, skb->dev);
1673         netif_receive_skb(skb);
1674 }
1675
1676 static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
1677         {
1678                 .func = mlxsw_sp_rx_listener_func,
1679                 .local_port = MLXSW_PORT_DONT_CARE,
1680                 .trap_id = MLXSW_TRAP_ID_FDB_MC,
1681         },
1682         /* Traps for specific L2 packet types, not trapped as FDB MC */
1683         {
1684                 .func = mlxsw_sp_rx_listener_func,
1685                 .local_port = MLXSW_PORT_DONT_CARE,
1686                 .trap_id = MLXSW_TRAP_ID_STP,
1687         },
1688         {
1689                 .func = mlxsw_sp_rx_listener_func,
1690                 .local_port = MLXSW_PORT_DONT_CARE,
1691                 .trap_id = MLXSW_TRAP_ID_LACP,
1692         },
1693         {
1694                 .func = mlxsw_sp_rx_listener_func,
1695                 .local_port = MLXSW_PORT_DONT_CARE,
1696                 .trap_id = MLXSW_TRAP_ID_EAPOL,
1697         },
1698         {
1699                 .func = mlxsw_sp_rx_listener_func,
1700                 .local_port = MLXSW_PORT_DONT_CARE,
1701                 .trap_id = MLXSW_TRAP_ID_LLDP,
1702         },
1703         {
1704                 .func = mlxsw_sp_rx_listener_func,
1705                 .local_port = MLXSW_PORT_DONT_CARE,
1706                 .trap_id = MLXSW_TRAP_ID_MMRP,
1707         },
1708         {
1709                 .func = mlxsw_sp_rx_listener_func,
1710                 .local_port = MLXSW_PORT_DONT_CARE,
1711                 .trap_id = MLXSW_TRAP_ID_MVRP,
1712         },
1713         {
1714                 .func = mlxsw_sp_rx_listener_func,
1715                 .local_port = MLXSW_PORT_DONT_CARE,
1716                 .trap_id = MLXSW_TRAP_ID_RPVST,
1717         },
1718         {
1719                 .func = mlxsw_sp_rx_listener_func,
1720                 .local_port = MLXSW_PORT_DONT_CARE,
1721                 .trap_id = MLXSW_TRAP_ID_DHCP,
1722         },
1723         {
1724                 .func = mlxsw_sp_rx_listener_func,
1725                 .local_port = MLXSW_PORT_DONT_CARE,
1726                 .trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
1727         },
1728         {
1729                 .func = mlxsw_sp_rx_listener_func,
1730                 .local_port = MLXSW_PORT_DONT_CARE,
1731                 .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
1732         },
1733         {
1734                 .func = mlxsw_sp_rx_listener_func,
1735                 .local_port = MLXSW_PORT_DONT_CARE,
1736                 .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
1737         },
1738         {
1739                 .func = mlxsw_sp_rx_listener_func,
1740                 .local_port = MLXSW_PORT_DONT_CARE,
1741                 .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
1742         },
1743         {
1744                 .func = mlxsw_sp_rx_listener_func,
1745                 .local_port = MLXSW_PORT_DONT_CARE,
1746                 .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
1747         },
1748 };
1749
/* Configure the RX and control trap groups, then register an RX
 * listener for each entry of mlxsw_sp_rx_listener[] and set its trap
 * action to TRAP_TO_CPU so matching packets reach the driver.
 *
 * On failure, traps configured so far are restored to the default
 * FORWARD action and their listeners unregistered, newest first.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
						      &mlxsw_sp_rx_listener[i],
						      mlxsw_sp);
		if (err)
			goto err_rx_listener_register;

		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
				    mlxsw_sp_rx_listener[i].trap_id);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
		if (err)
			goto err_rx_trap_set;
	}
	return 0;

err_rx_trap_set:
	/* Entry i was registered but its trap action failed to apply;
	 * unregister it before unwinding the fully configured entries.
	 */
	mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
					  &mlxsw_sp_rx_listener[i],
					  mlxsw_sp);
err_rx_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
	return err;
}
1798
1799 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
1800 {
1801         char hpkt_pl[MLXSW_REG_HPKT_LEN];
1802         int i;
1803
1804         for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
1805                 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
1806                                     mlxsw_sp_rx_listener[i].trap_id);
1807                 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
1808
1809                 mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
1810                                                   &mlxsw_sp_rx_listener[i],
1811                                                   mlxsw_sp);
1812         }
1813 }
1814
1815 static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
1816                                  enum mlxsw_reg_sfgc_type type,
1817                                  enum mlxsw_reg_sfgc_bridge_type bridge_type)
1818 {
1819         enum mlxsw_flood_table_type table_type;
1820         enum mlxsw_sp_flood_table flood_table;
1821         char sfgc_pl[MLXSW_REG_SFGC_LEN];
1822
1823         if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
1824                 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
1825         else
1826                 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
1827
1828         if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
1829                 flood_table = MLXSW_SP_FLOOD_TABLE_UC;
1830         else
1831                 flood_table = MLXSW_SP_FLOOD_TABLE_BM;
1832
1833         mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
1834                             flood_table);
1835         return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
1836 }
1837
1838 static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
1839 {
1840         int type, err;
1841
1842         for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
1843                 if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
1844                         continue;
1845
1846                 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
1847                                             MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
1848                 if (err)
1849                         return err;
1850
1851                 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
1852                                             MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
1853                 if (err)
1854                         return err;
1855         }
1856
1857         return 0;
1858 }
1859
/* Configure LAG hashing (SLCR register): hash on source/destination
 * MAC, ethertype, VLAN ID, source/destination IP, L4 ports and IP
 * protocol.
 */
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];

	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
}
1875
/* Driver init callback invoked by the mlxsw core. Sets up, in order:
 * base MAC, ports, PUDE link events, packet traps, flood tables,
 * shared buffers, LAG hashing and switchdev support. On failure the
 * already-initialized stages are unwound in reverse order.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;
	INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		return err;
	}

	err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
		goto err_event_register;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
		goto err_rx_listener_register;
	}

	err = mlxsw_sp_flood_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	return 0;

err_switchdev_init:
err_lag_init:
err_buffers_init:
err_flood_init:
	/* Flood, buffer and LAG setup have no dedicated teardown here;
	 * unwinding resumes from the trap configuration.
	 */
	mlxsw_sp_traps_fini(mlxsw_sp);
err_rx_listener_register:
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
err_event_register:
	mlxsw_sp_ports_remove(mlxsw_sp);
	return err;
}
1949
/* Teardown counterpart of mlxsw_sp_init(): releases resources in
 * reverse order of their initialization.
 */
static void mlxsw_sp_fini(void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	mlxsw_sp_ports_remove(mlxsw_sp);
}
1959
/* Device resource profile handed to the mlxsw core at init time.
 * Each 'used_*' flag marks the matching value below as explicitly
 * configured rather than left at the firmware default.
 */
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels		= 1,
	.max_vepa_channels		= 0,
	.used_max_lag			= 1,
	.max_lag			= MLXSW_SP_LAG_MAX,
	.used_max_port_per_lag		= 1,
	.max_port_per_lag		= MLXSW_SP_PORT_PER_LAG_MAX,
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_max_pgt			= 1,
	.max_pgt			= 0,
	.used_max_system_port		= 1,
	.max_system_port		= 64,
	.used_max_vlan_groups		= 1,
	.max_vlan_groups		= 127,
	.used_max_regions		= 1,
	.max_regions			= 400,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	/* NOTE(review): flood mode 3 — presumably a controlled/mixed
	 * flood mode; confirm against the device programming manual.
	 */
	.flood_mode			= 3,
	.max_fid_offset_flood_tables	= 2,
	/* Sized to cover the VLAN ID space. */
	.fid_offset_flood_table_size	= VLAN_N_VID - 1,
	.max_fid_flood_tables		= 2,
	.fid_flood_table_size		= MLXSW_SP_VFID_MAX,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
1995
/* mlxsw core registration glue for the Spectrum ASIC: init/fini
 * callbacks, TX header construction and the configuration profile.
 */
static struct mlxsw_driver mlxsw_sp_driver = {
	.kind			= MLXSW_DEVICE_KIND_SPECTRUM,
	.owner			= THIS_MODULE,
	.priv_size		= sizeof(struct mlxsw_sp),
	.init			= mlxsw_sp_init,
	.fini			= mlxsw_sp_fini,
	.txhdr_construct	= mlxsw_sp_txhdr_construct,
	.txhdr_len		= MLXSW_TXHDR_LEN,
	.profile		= &mlxsw_sp_config_profile,
};
2006
/* Flush all FDB entries pointing to this port (SFDF per-port flush). */
static int
mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT);
	mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
2018
/* Flush FDB entries of this port restricted to @fid (SFDF per-port,
 * per-FID flush).
 */
static int
mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
						mlxsw_sp_port->local_port);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
2033
/* Flush all FDB entries pointing to the LAG this port is a member of
 * (SFDF per-LAG flush).
 */
static int
mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG);
	mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
2045
/* Flush FDB entries of this port's LAG restricted to @fid (SFDF
 * per-LAG, per-FID flush).
 */
static int
mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
2059
2060 static int
2061 __mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port)
2062 {
2063         int err, last_err = 0;
2064         u16 vid;
2065
2066         for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
2067                 err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid);
2068                 if (err)
2069                         last_err = err;
2070         }
2071
2072         return last_err;
2073 }
2074
2075 static int
2076 __mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port)
2077 {
2078         int err, last_err = 0;
2079         u16 vid;
2080
2081         for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
2082                 err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid);
2083                 if (err)
2084                         last_err = err;
2085         }
2086
2087         return last_err;
2088 }
2089
2090 static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port)
2091 {
2092         if (!list_empty(&mlxsw_sp_port->vports_list))
2093                 if (mlxsw_sp_port->lagged)
2094                         return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port);
2095                 else
2096                         return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port);
2097         else
2098                 if (mlxsw_sp_port->lagged)
2099                         return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port);
2100                 else
2101                         return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port);
2102 }
2103
2104 static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport)
2105 {
2106         u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport);
2107         u16 fid = mlxsw_sp_vfid_to_fid(vfid);
2108
2109         if (mlxsw_sp_vport->lagged)
2110                 return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport,
2111                                                              fid);
2112         else
2113                 return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid);
2114 }
2115
/* True if @dev is an mlxsw_sp port netdev (identified by its ops). */
static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}
2120
2121 static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
2122 {
2123         struct net_device *dev = mlxsw_sp_port->dev;
2124         int err;
2125
2126         /* When port is not bridged untagged packets are tagged with
2127          * PVID=VID=1, thereby creating an implicit VLAN interface in
2128          * the device. Remove it and let bridge code take care of its
2129          * own VLANs.
2130          */
2131         err = mlxsw_sp_port_kill_vid(dev, 0, 1);
2132         if (err)
2133                 return err;
2134
2135         mlxsw_sp_port->learning = 1;
2136         mlxsw_sp_port->learning_sync = 1;
2137         mlxsw_sp_port->uc_flood = 1;
2138         mlxsw_sp_port->bridged = 1;
2139
2140         return 0;
2141 }
2142
2143 static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2144                                       bool flush_fdb)
2145 {
2146         struct net_device *dev = mlxsw_sp_port->dev;
2147
2148         if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port))
2149                 netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
2150
2151         mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
2152
2153         mlxsw_sp_port->learning = 0;
2154         mlxsw_sp_port->learning_sync = 0;
2155         mlxsw_sp_port->uc_flood = 0;
2156         mlxsw_sp_port->bridged = 0;
2157
2158         /* Add implicit VLAN interface in the device, so that untagged
2159          * packets will be classified to the default vFID.
2160          */
2161         return mlxsw_sp_port_add_vid(dev, 0, 1);
2162 }
2163
2164 static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
2165                                          struct net_device *br_dev)
2166 {
2167         return !mlxsw_sp->master_bridge.dev ||
2168                mlxsw_sp->master_bridge.dev == br_dev;
2169 }
2170
/* Record @br_dev as the device's single master bridge and take a
 * reference on it.
 */
static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	mlxsw_sp->master_bridge.dev = br_dev;
	mlxsw_sp->master_bridge.ref_count++;
}
2177
/* Drop a reference on the master bridge; clear it on the last put.
 * @br_dev is unused here (callers may pass NULL).
 */
static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	if (--mlxsw_sp->master_bridge.ref_count == 0)
		mlxsw_sp->master_bridge.dev = NULL;
}
2184
/* Create LAG @lag_id in hardware (SLDR register). */
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
2192
/* Destroy LAG @lag_id in hardware (SLDR register). */
static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
2200
/* Add this port to @lag_id's collector at @port_index (SLCOR register). */
static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
2211
/* Remove this port from @lag_id's collector (SLCOR register). */
static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
2222
/* Enable collection for this port on @lag_id (SLCOR register). */
static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
2233
/* Disable collection for this port on @lag_id (SLCOR register). */
static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
2244
2245 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
2246                                   struct net_device *lag_dev,
2247                                   u16 *p_lag_id)
2248 {
2249         struct mlxsw_sp_upper *lag;
2250         int free_lag_id = -1;
2251         int i;
2252
2253         for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
2254                 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
2255                 if (lag->ref_count) {
2256                         if (lag->dev == lag_dev) {
2257                                 *p_lag_id = i;
2258                                 return 0;
2259                         }
2260                 } else if (free_lag_id < 0) {
2261                         free_lag_id = i;
2262                 }
2263         }
2264         if (free_lag_id < 0)
2265                 return -EBUSY;
2266         *p_lag_id = free_lag_id;
2267         return 0;
2268 }
2269
2270 static bool
2271 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
2272                           struct net_device *lag_dev,
2273                           struct netdev_lag_upper_info *lag_upper_info)
2274 {
2275         u16 lag_id;
2276
2277         if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
2278                 return false;
2279         if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
2280                 return false;
2281         return true;
2282 }
2283
2284 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
2285                                        u16 lag_id, u8 *p_port_index)
2286 {
2287         int i;
2288
2289         for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
2290                 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
2291                         *p_port_index = i;
2292                         return 0;
2293                 }
2294         }
2295         return -EBUSY;
2296 }
2297
2298 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
2299                                   struct net_device *lag_dev)
2300 {
2301         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2302         struct mlxsw_sp_upper *lag;
2303         u16 lag_id;
2304         u8 port_index;
2305         int err;
2306
2307         err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
2308         if (err)
2309                 return err;
2310         lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
2311         if (!lag->ref_count) {
2312                 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
2313                 if (err)
2314                         return err;
2315                 lag->dev = lag_dev;
2316         }
2317
2318         err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
2319         if (err)
2320                 return err;
2321         err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
2322         if (err)
2323                 goto err_col_port_add;
2324         err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
2325         if (err)
2326                 goto err_col_port_enable;
2327
2328         mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
2329                                    mlxsw_sp_port->local_port);
2330         mlxsw_sp_port->lag_id = lag_id;
2331         mlxsw_sp_port->lagged = 1;
2332         lag->ref_count++;
2333         return 0;
2334
2335 err_col_port_add:
2336         if (!lag->ref_count)
2337                 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
2338 err_col_port_enable:
2339         mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
2340         return err;
2341 }
2342
/* Forward declaration: defined later in this file, used by
 * mlxsw_sp_port_lag_leave() to tear down bridges built on top of a
 * leaving port's VLAN upper devices.
 */
static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
				       struct net_device *br_dev,
				       bool flush_fdb);
2346
/* Detach @mlxsw_sp_port from the LAG it is a member of: disable and
 * remove its collector membership, tear down bridges stacked on top of
 * the port's vPorts and the port itself, and — when the last member
 * leaves — flush the LAG's FDB and destroy the LAG in hardware.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_upper *lag;
	u16 lag_id = mlxsw_sp_port->lag_id;
	int err;

	if (!mlxsw_sp_port->lagged)
		return 0;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
	if (err)
		return err;

	/* In case we leave a LAG device that has bridges built on top,
	 * then their teardown sequence is never issued and we need to
	 * invoke the necessary cleanup routines ourselves.
	 */
	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct net_device *br_dev;

		if (!mlxsw_sp_vport->bridged)
			continue;

		br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
		/* flush_fdb=false: a LAG-wide flush runs below when this
		 * is the last member. NOTE(review): for a non-last member
		 * no flush happens at all — confirm this is intended.
		 */
		mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false);
	}

	if (mlxsw_sp_port->bridged) {
		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false);

		if (lag->ref_count == 1)
			mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL);
	}

	if (lag->ref_count == 1) {
		if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port))
			netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
		err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
		if (err)
			return err;
	}

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;
	return 0;
}
2405
2406 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
2407                                       u16 lag_id)
2408 {
2409         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2410         char sldr_pl[MLXSW_REG_SLDR_LEN];
2411
2412         mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
2413                                          mlxsw_sp_port->local_port);
2414         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2415 }
2416
2417 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
2418                                          u16 lag_id)
2419 {
2420         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2421         char sldr_pl[MLXSW_REG_SLDR_LEN];
2422
2423         mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
2424                                             mlxsw_sp_port->local_port);
2425         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2426 }
2427
2428 static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
2429                                        bool lag_tx_enabled)
2430 {
2431         if (lag_tx_enabled)
2432                 return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
2433                                                   mlxsw_sp_port->lag_id);
2434         else
2435                 return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
2436                                                      mlxsw_sp_port->lag_id);
2437 }
2438
2439 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
2440                                      struct netdev_lag_lower_state_info *info)
2441 {
2442         return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
2443 }
2444
2445 static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
2446                                    struct net_device *vlan_dev)
2447 {
2448         struct mlxsw_sp_port *mlxsw_sp_vport;
2449         u16 vid = vlan_dev_vlan_id(vlan_dev);
2450
2451         mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
2452         if (!mlxsw_sp_vport) {
2453                 WARN_ON(!mlxsw_sp_vport);
2454                 return -EINVAL;
2455         }
2456
2457         mlxsw_sp_vport->dev = vlan_dev;
2458
2459         return 0;
2460 }
2461
2462 static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
2463                                      struct net_device *vlan_dev)
2464 {
2465         struct mlxsw_sp_port *mlxsw_sp_vport;
2466         u16 vid = vlan_dev_vlan_id(vlan_dev);
2467
2468         mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
2469         if (!mlxsw_sp_vport) {
2470                 WARN_ON(!mlxsw_sp_vport);
2471                 return -EINVAL;
2472         }
2473
2474         /* When removing a VLAN device while still bridged we should first
2475          * remove it from the bridge, as we receive the bridge's notification
2476          * when the vPort is already gone.
2477          */
2478         if (mlxsw_sp_vport->bridged) {
2479                 struct net_device *br_dev;
2480
2481                 br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
2482                 mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true);
2483         }
2484
2485         mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
2486
2487         return 0;
2488 }
2489
/* Handle PRECHANGEUPPER/CHANGEUPPER notifications for a physical port.
 *
 * PRECHANGEUPPER vetoes topologies the HW cannot support (a second bridge
 * master, or an unsupported LAG configuration). CHANGEUPPER reflects VLAN,
 * bridge and LAG link/unlink operations into the device. Returns NOTIFY_BAD
 * on failure, NOTIFY_DONE otherwise.
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only master-link operations are vetoed here. */
		if (!info->master || !info->linking)
			break;
		/* HW limitation forbids to put ports to multiple bridges. */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return NOTIFY_BAD;
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return NOTIFY_BAD;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
							      upper_dev);
				if (err) {
					netdev_err(dev, "Failed to link VLAN device\n");
					return NOTIFY_BAD;
				}
			} else {
				err = mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
								upper_dev);
				if (err) {
					netdev_err(dev, "Failed to unlink VLAN device\n");
					return NOTIFY_BAD;
				}
			}
		} else if (netif_is_bridge_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port);
				if (err) {
					netdev_err(dev, "Failed to join bridge\n");
					return NOTIFY_BAD;
				}
				mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
			} else {
				err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
								 true);
				/* Drop the reference even on error so the
				 * master bridge accounting stays balanced.
				 */
				mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
				if (err) {
					netdev_err(dev, "Failed to leave bridge\n");
					return NOTIFY_BAD;
				}
			}
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
				if (err) {
					netdev_err(dev, "Failed to join link aggregation\n");
					return NOTIFY_BAD;
				}
			} else {
				err = mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							      upper_dev);
				if (err) {
					netdev_err(dev, "Failed to leave link aggregation\n");
					return NOTIFY_BAD;
				}
			}
		}
		break;
	}

	return NOTIFY_DONE;
}
2574
2575 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
2576                                                unsigned long event, void *ptr)
2577 {
2578         struct netdev_notifier_changelowerstate_info *info;
2579         struct mlxsw_sp_port *mlxsw_sp_port;
2580         int err;
2581
2582         mlxsw_sp_port = netdev_priv(dev);
2583         info = ptr;
2584
2585         switch (event) {
2586         case NETDEV_CHANGELOWERSTATE:
2587                 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
2588                         err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
2589                                                         info->lower_state_info);
2590                         if (err)
2591                                 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
2592                 }
2593                 break;
2594         }
2595
2596         return NOTIFY_DONE;
2597 }
2598
2599 static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
2600                                          unsigned long event, void *ptr)
2601 {
2602         switch (event) {
2603         case NETDEV_PRECHANGEUPPER:
2604         case NETDEV_CHANGEUPPER:
2605                 return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
2606         case NETDEV_CHANGELOWERSTATE:
2607                 return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
2608         }
2609
2610         return NOTIFY_DONE;
2611 }
2612
2613 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
2614                                         unsigned long event, void *ptr)
2615 {
2616         struct net_device *dev;
2617         struct list_head *iter;
2618         int ret;
2619
2620         netdev_for_each_lower_dev(lag_dev, dev, iter) {
2621                 if (mlxsw_sp_port_dev_check(dev)) {
2622                         ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
2623                         if (ret == NOTIFY_BAD)
2624                                 return ret;
2625                 }
2626         }
2627
2628         return NOTIFY_DONE;
2629 }
2630
2631 static struct mlxsw_sp_vfid *
2632 mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp,
2633                       const struct net_device *br_dev)
2634 {
2635         struct mlxsw_sp_vfid *vfid;
2636
2637         list_for_each_entry(vfid, &mlxsw_sp->br_vfids.list, list) {
2638                 if (vfid->br_dev == br_dev)
2639                         return vfid;
2640         }
2641
2642         return NULL;
2643 }
2644
/* Convert a global vFID index into a bridge-vFID index (bridge vFIDs are
 * allocated after the per-port range).
 */
static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid)
{
	return vfid - MLXSW_SP_VFID_PORT_MAX;
}
2649
/* Convert a bridge-vFID index back into a global vFID index; inverse of
 * mlxsw_sp_vfid_to_br_vfid().
 */
static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid)
{
	return MLXSW_SP_VFID_PORT_MAX + br_vfid;
}
2654
/* Find the first free bridge-vFID index. Returns MLXSW_SP_VFID_BR_MAX
 * when the bitmap is full (find_first_zero_bit() semantics).
 */
static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->br_vfids.mapped,
				   MLXSW_SP_VFID_BR_MAX);
}
2660
/* Allocate and create a bridge vFID for @br_dev: create the FID in HW,
 * allocate the tracking structure and mark the index as used.
 *
 * Returns the new vFID or an ERR_PTR on failure (-ERANGE when no index is
 * free, -ENOMEM on allocation failure, or the HW create error).
 */
static struct mlxsw_sp_vfid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp,
						     struct net_device *br_dev)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_vfid *vfid;
	u16 n_vfid;
	int err;

	/* A full bitmap yields BR_MAX, which maps to VFID_MAX; presumably
	 * MLXSW_SP_VFID_MAX == PORT_MAX + BR_MAX -- TODO confirm macros.
	 */
	n_vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp));
	if (n_vfid == MLXSW_SP_VFID_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
	if (err) {
		dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
		return ERR_PTR(err);
	}

	vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
	if (!vfid)
		goto err_allocate_vfid;

	vfid->vfid = n_vfid;
	vfid->br_dev = br_dev;

	list_add(&vfid->list, &mlxsw_sp->br_vfids.list);
	set_bit(mlxsw_sp_vfid_to_br_vfid(n_vfid), mlxsw_sp->br_vfids.mapped);

	return vfid;

err_allocate_vfid:
	/* Roll back the HW FID created above. */
	__mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
	return ERR_PTR(-ENOMEM);
}
2697
/* Destroy a bridge vFID: release its index, unlink it, destroy the HW FID
 * and free the tracking structure. Inverse of mlxsw_sp_br_vfid_create().
 */
static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_vfid *vfid)
{
	u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid->vfid);

	clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped);
	list_del(&vfid->list);

	__mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);

	kfree(vfid);
}
2710
2711 static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
2712                                        struct net_device *br_dev,
2713                                        bool flush_fdb)
2714 {
2715         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
2716         u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
2717         struct net_device *dev = mlxsw_sp_vport->dev;
2718         struct mlxsw_sp_vfid *vfid, *new_vfid;
2719         int err;
2720
2721         vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
2722         if (!vfid) {
2723                 WARN_ON(!vfid);
2724                 return -EINVAL;
2725         }
2726
2727         /* We need a vFID to go back to after leaving the bridge's vFID. */
2728         new_vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
2729         if (!new_vfid) {
2730                 new_vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
2731                 if (IS_ERR(new_vfid)) {
2732                         netdev_err(dev, "Failed to create vFID for VID=%d\n",
2733                                    vid);
2734                         return PTR_ERR(new_vfid);
2735                 }
2736         }
2737
2738         /* Invalidate existing {Port, VID} to vFID mapping and create a new
2739          * one for the new vFID.
2740          */
2741         err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
2742                                            MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
2743                                            false,
2744                                            mlxsw_sp_vfid_to_fid(vfid->vfid),
2745                                            vid);
2746         if (err) {
2747                 netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
2748                            vfid->vfid);
2749                 goto err_port_vid_to_fid_invalidate;
2750         }
2751
2752         err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
2753                                            MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
2754                                            true,
2755                                            mlxsw_sp_vfid_to_fid(new_vfid->vfid),
2756                                            vid);
2757         if (err) {
2758                 netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
2759                            new_vfid->vfid);
2760                 goto err_port_vid_to_fid_validate;
2761         }
2762
2763         err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
2764         if (err) {
2765                 netdev_err(dev, "Failed to disable learning\n");
2766                 goto err_port_vid_learning_set;
2767         }
2768
2769         err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
2770                                        false);
2771         if (err) {
2772                 netdev_err(dev, "Failed clear to clear flooding\n");
2773                 goto err_vport_flood_set;
2774         }
2775
2776         err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
2777                                           MLXSW_REG_SPMS_STATE_FORWARDING);
2778         if (err) {
2779                 netdev_err(dev, "Failed to set STP state\n");
2780                 goto err_port_stp_state_set;
2781         }
2782
2783         if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport))
2784                 netdev_err(dev, "Failed to flush FDB\n");
2785
2786         /* Switch between the vFIDs and destroy the old one if needed. */
2787         new_vfid->nr_vports++;
2788         mlxsw_sp_vport->vport.vfid = new_vfid;
2789         vfid->nr_vports--;
2790         if (!vfid->nr_vports)
2791                 mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
2792
2793         mlxsw_sp_vport->learning = 0;
2794         mlxsw_sp_vport->learning_sync = 0;
2795         mlxsw_sp_vport->uc_flood = 0;
2796         mlxsw_sp_vport->bridged = 0;
2797
2798         return 0;
2799
2800 err_port_stp_state_set:
2801 err_vport_flood_set:
2802 err_port_vid_learning_set:
2803 err_port_vid_to_fid_validate:
2804 err_port_vid_to_fid_invalidate:
2805         /* Rollback vFID only if new. */
2806         if (!new_vfid->nr_vports)
2807                 mlxsw_sp_vfid_destroy(mlxsw_sp, new_vfid);
2808         return err;
2809 }
2810
/* Move @mlxsw_sp_vport from its per-{Port, VID} vFID into the bridge's
 * vFID (created on first join), enabling learning and flooding.
 *
 * Returns 0 on success or a negative errno; HW steps are unwound in
 * reverse order on failure, and a bridge vFID created here is destroyed
 * again if it ended up with no vPorts.
 */
static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *br_dev)
{
	struct mlxsw_sp_vfid *old_vfid = mlxsw_sp_vport->vport.vfid;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	struct mlxsw_sp_vfid *vfid;
	int err;

	/* First vPort of this bridge creates the shared bridge vFID. */
	vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
	if (!vfid) {
		vfid = mlxsw_sp_br_vfid_create(mlxsw_sp, br_dev);
		if (IS_ERR(vfid)) {
			netdev_err(dev, "Failed to create bridge vFID\n");
			return PTR_ERR(vfid);
		}
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, true, false);
	if (err) {
		netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
			   vfid->vfid);
		goto err_port_flood_set;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning\n");
		goto err_port_vid_learning_set;
	}

	/* We need to invalidate existing {Port, VID} to vFID mapping and
	 * create a new one for the bridge's vFID.
	 */
	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   false,
					   mlxsw_sp_vfid_to_fid(old_vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
			   old_vfid->vfid);
		goto err_port_vid_to_fid_invalidate;
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   true,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
			   vfid->vfid);
		goto err_port_vid_to_fid_validate;
	}

	/* Switch between the vFIDs and destroy the old one if needed. */
	vfid->nr_vports++;
	mlxsw_sp_vport->vport.vfid = vfid;
	old_vfid->nr_vports--;
	if (!old_vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp, old_vfid);

	mlxsw_sp_vport->learning = 1;
	mlxsw_sp_vport->learning_sync = 1;
	mlxsw_sp_vport->uc_flood = 1;
	mlxsw_sp_vport->bridged = 1;

	return 0;

err_port_vid_to_fid_validate:
	/* Unwind in reverse order of the setup above. */
	mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
				     MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
				     mlxsw_sp_vfid_to_fid(old_vfid->vfid), vid);
err_port_vid_to_fid_invalidate:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
err_port_vid_learning_set:
	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, false);
err_port_flood_set:
	/* Destroy the bridge vFID if it was created here and is unused. */
	if (!vfid->nr_vports)
		mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
	return err;
}
2895
2896 static bool
2897 mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
2898                                   const struct net_device *br_dev)
2899 {
2900         struct mlxsw_sp_port *mlxsw_sp_vport;
2901
2902         list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
2903                             vport.list) {
2904                 if (mlxsw_sp_vport_br_get(mlxsw_sp_vport) == br_dev)
2905                         return false;
2906         }
2907
2908         return true;
2909 }
2910
2911 static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
2912                                           unsigned long event, void *ptr,
2913                                           u16 vid)
2914 {
2915         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2916         struct netdev_notifier_changeupper_info *info = ptr;
2917         struct mlxsw_sp_port *mlxsw_sp_vport;
2918         struct net_device *upper_dev;
2919         int err;
2920
2921         mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
2922
2923         switch (event) {
2924         case NETDEV_PRECHANGEUPPER:
2925                 upper_dev = info->upper_dev;
2926                 if (!info->master || !info->linking)
2927                         break;
2928                 if (!netif_is_bridge_master(upper_dev))
2929                         return NOTIFY_BAD;
2930                 /* We can't have multiple VLAN interfaces configured on
2931                  * the same port and being members in the same bridge.
2932                  */
2933                 if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
2934                                                        upper_dev))
2935                         return NOTIFY_BAD;
2936                 break;
2937         case NETDEV_CHANGEUPPER:
2938                 upper_dev = info->upper_dev;
2939                 if (!info->master)
2940                         break;
2941                 if (info->linking) {
2942                         if (!mlxsw_sp_vport) {
2943                                 WARN_ON(!mlxsw_sp_vport);
2944                                 return NOTIFY_BAD;
2945                         }
2946                         err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
2947                                                          upper_dev);
2948                         if (err) {
2949                                 netdev_err(dev, "Failed to join bridge\n");
2950                                 return NOTIFY_BAD;
2951                         }
2952                 } else {
2953                         /* We ignore bridge's unlinking notifications if vPort
2954                          * is gone, since we already left the bridge when the
2955                          * VLAN device was unlinked from the real device.
2956                          */
2957                         if (!mlxsw_sp_vport)
2958                                 return NOTIFY_DONE;
2959                         err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport,
2960                                                           upper_dev, true);
2961                         if (err) {
2962                                 netdev_err(dev, "Failed to leave bridge\n");
2963                                 return NOTIFY_BAD;
2964                         }
2965                 }
2966         }
2967
2968         return NOTIFY_DONE;
2969 }
2970
2971 static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
2972                                               unsigned long event, void *ptr,
2973                                               u16 vid)
2974 {
2975         struct net_device *dev;
2976         struct list_head *iter;
2977         int ret;
2978
2979         netdev_for_each_lower_dev(lag_dev, dev, iter) {
2980                 if (mlxsw_sp_port_dev_check(dev)) {
2981                         ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
2982                                                              vid);
2983                         if (ret == NOTIFY_BAD)
2984                                 return ret;
2985                 }
2986         }
2987
2988         return NOTIFY_DONE;
2989 }
2990
2991 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
2992                                          unsigned long event, void *ptr)
2993 {
2994         struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
2995         u16 vid = vlan_dev_vlan_id(vlan_dev);
2996
2997         if (mlxsw_sp_port_dev_check(real_dev))
2998                 return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
2999                                                       vid);
3000         else if (netif_is_lag_master(real_dev))
3001                 return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
3002                                                           vid);
3003
3004         return NOTIFY_DONE;
3005 }
3006
3007 static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
3008                                     unsigned long event, void *ptr)
3009 {
3010         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3011
3012         if (mlxsw_sp_port_dev_check(dev))
3013                 return mlxsw_sp_netdevice_port_event(dev, event, ptr);
3014
3015         if (netif_is_lag_master(dev))
3016                 return mlxsw_sp_netdevice_lag_event(dev, event, ptr);
3017
3018         if (is_vlan_dev(dev))
3019                 return mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
3020
3021         return NOTIFY_DONE;
3022 }
3023
/* Netdev notifier handling upper/lower changes on ports, LAGs and VLAN
 * devices relevant to this driver.
 */
static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};
3027
3028 static int __init mlxsw_sp_module_init(void)
3029 {
3030         int err;
3031
3032         register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3033         err = mlxsw_core_driver_register(&mlxsw_sp_driver);
3034         if (err)
3035                 goto err_core_driver_register;
3036         return 0;
3037
3038 err_core_driver_register:
3039         unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3040         return err;
3041 }
3042
/* Module exit: unwind in reverse order of mlxsw_sp_module_init(). */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}
3048
/* Module entry/exit hooks and metadata; the alias lets the mlxsw core bus
 * auto-load this driver for Spectrum devices.
 */
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);