mlxsw: spectrum: Don't return upon error in removal path
/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"

static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

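/* Together, the items above describe the 16-byte Tx header prepended to
 * every packet handed to the device: the first word carries the version,
 * control/protocol type, swid and TClass fields, the second the
 * destination port/MID, the third the FID and the fourth the packet
 * type. mlxsw_sp_txhdr_construct() below shows how the header of a
 * control packet is filled in.
 */
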
static bool mlxsw_sp_port_dev_check(const struct net_device *dev);

static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
                                     const struct mlxsw_tx_info *tx_info)
{
        char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

        memset(txhdr, 0, MLXSW_TXHDR_LEN);

        mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
        mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
        mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
        mlxsw_tx_hdr_swid_set(txhdr, 0);
        mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
        mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
        mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
        char spad_pl[MLXSW_REG_SPAD_LEN];
        int err;

        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
        if (err)
                return err;
        mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
        return 0;
}

static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
        struct mlxsw_resources *resources;
        int i;

        resources = mlxsw_core_resources_get(mlxsw_sp->core);
        if (!resources->max_span_valid)
                return -EIO;

        mlxsw_sp->span.entries_count = resources->max_span;
        mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
                                         sizeof(struct mlxsw_sp_span_entry),
                                         GFP_KERNEL);
        if (!mlxsw_sp->span.entries)
                return -ENOMEM;

        for (i = 0; i < mlxsw_sp->span.entries_count; i++)
                INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);

        return 0;
}

static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
        int i;

        for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
                struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

                WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
        }
        kfree(mlxsw_sp->span.entries);
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
{
        struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
        struct mlxsw_sp_span_entry *span_entry;
        char mpat_pl[MLXSW_REG_MPAT_LEN];
        u8 local_port = port->local_port;
        int index;
        int i;
        int err;

        /* find a free entry to use */
        index = -1;
        for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
                if (!mlxsw_sp->span.entries[i].used) {
                        index = i;
                        span_entry = &mlxsw_sp->span.entries[i];
                        break;
                }
        }
        if (index < 0)
                return NULL;

        /* create a new port analyzer entry for local_port */
        mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
        if (err)
                return NULL;

        span_entry->used = true;
        span_entry->id = index;
        span_entry->ref_count = 0;
        span_entry->local_port = local_port;
        return span_entry;
}

static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
                                        struct mlxsw_sp_span_entry *span_entry)
{
        u8 local_port = span_entry->local_port;
        char mpat_pl[MLXSW_REG_MPAT_LEN];
        int pa_id = span_entry->id;

        mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
        span_entry->used = false;
}

struct mlxsw_sp_span_entry *mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
{
        struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
        int i;

        for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
                struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

                if (curr->used && curr->local_port == port->local_port)
                        return curr;
        }
        return NULL;
}

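/* SPAN entries are reference counted: mlxsw_sp_span_entry_get() bumps
 * the count when an entry for the analyzed port already exists and
 * otherwise grabs a free one via mlxsw_sp_span_entry_create();
 * mlxsw_sp_span_entry_put() drops a reference and tears the entry
 * down once the count reaches zero.
 */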
struct mlxsw_sp_span_entry *mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
{
        struct mlxsw_sp_span_entry *span_entry;

        span_entry = mlxsw_sp_span_entry_find(port);
        if (span_entry) {
                span_entry->ref_count++;
                return span_entry;
        }

        return mlxsw_sp_span_entry_create(port);
}

static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
                                   struct mlxsw_sp_span_entry *span_entry)
{
        if (--span_entry->ref_count == 0)
                mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
        return 0;
}

static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
{
        struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
        struct mlxsw_sp_span_inspected_port *p;
        int i;

        for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
                struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

                list_for_each_entry(p, &curr->bound_ports_list, list)
                        if (p->local_port == port->local_port &&
                            p->type == MLXSW_SP_SPAN_EGRESS)
                                return true;
        }

        return false;
}

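/* The buffer bound to an egress mirror port is sized as a function of
 * the MTU; the 2.5x factor below is presumably chosen to leave room
 * for mirrored frames in flight while the analyzer port drains them.
 */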
static int mlxsw_sp_span_mtu_to_buffsize(int mtu)
{
        return MLXSW_SP_BYTES_TO_CELLS(mtu * 5 / 2) + 1;
}

static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
        struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
        char sbib_pl[MLXSW_REG_SBIB_LEN];
        int err;

        /* If the port is egress mirrored, the shared buffer size should
         * be updated according to the new MTU.
         */
        if (mlxsw_sp_span_is_egress_mirror(port)) {
                mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
                                    mlxsw_sp_span_mtu_to_buffsize(mtu));
                err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
                if (err) {
                        netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
                        return err;
                }
        }

        return 0;
}

static struct mlxsw_sp_span_inspected_port *
mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
                                    struct mlxsw_sp_span_entry *span_entry)
{
        struct mlxsw_sp_span_inspected_port *p;

        list_for_each_entry(p, &span_entry->bound_ports_list, list)
                if (port->local_port == p->local_port)
                        return p;
        return NULL;
}

static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
                                  struct mlxsw_sp_span_entry *span_entry,
                                  enum mlxsw_sp_span_type type)
{
        struct mlxsw_sp_span_inspected_port *inspected_port;
        struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
        char mpar_pl[MLXSW_REG_MPAR_LEN];
        char sbib_pl[MLXSW_REG_SBIB_LEN];
        int pa_id = span_entry->id;
        int err;

        /* if it is an egress SPAN, bind a shared buffer to it */
        if (type == MLXSW_SP_SPAN_EGRESS) {
                mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
                                    mlxsw_sp_span_mtu_to_buffsize(port->dev->mtu));
                err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
                if (err) {
                        netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
                        return err;
                }
        }

        /* bind the port to the SPAN entry */
        mlxsw_reg_mpar_pack(mpar_pl, port->local_port, type, true, pa_id);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
        if (err)
                goto err_mpar_reg_write;

        inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
        if (!inspected_port) {
                err = -ENOMEM;
                goto err_inspected_port_alloc;
        }
        inspected_port->local_port = port->local_port;
        inspected_port->type = type;
        list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);

        return 0;

err_mpar_reg_write:
err_inspected_port_alloc:
        if (type == MLXSW_SP_SPAN_EGRESS) {
                mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
                mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
        }
        return err;
}

static void
mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
                                    struct mlxsw_sp_span_entry *span_entry,
                                    enum mlxsw_sp_span_type type)
{
        struct mlxsw_sp_span_inspected_port *inspected_port;
        struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
        char mpar_pl[MLXSW_REG_MPAR_LEN];
        char sbib_pl[MLXSW_REG_SBIB_LEN];
        int pa_id = span_entry->id;

        inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
        if (!inspected_port)
                return;

        /* remove the inspected port */
        mlxsw_reg_mpar_pack(mpar_pl, port->local_port, type, false, pa_id);
        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);

        /* remove the SBIB buffer if this was an egress SPAN */
        if (type == MLXSW_SP_SPAN_EGRESS) {
                mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
                mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
        }

        mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);

        list_del(&inspected_port->list);
        kfree(inspected_port);
}

static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
                                    struct mlxsw_sp_port *to,
                                    enum mlxsw_sp_span_type type)
{
        struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
        struct mlxsw_sp_span_entry *span_entry;
        int err;

        span_entry = mlxsw_sp_span_entry_get(to);
        if (!span_entry)
                return -ENOENT;

        netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
                   span_entry->id);

        err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
        if (err)
                goto err_port_bind;

        return 0;

err_port_bind:
        mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
        return err;
}

static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
                                        struct mlxsw_sp_port *to,
                                        enum mlxsw_sp_span_type type)
{
        struct mlxsw_sp_span_entry *span_entry;

        span_entry = mlxsw_sp_span_entry_find(to);
        if (!span_entry) {
                netdev_err(from->dev, "no span entry found\n");
                return;
        }

        netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
                   span_entry->id);
        mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
}

static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                          bool is_up)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char paos_pl[MLXSW_REG_PAOS_LEN];

        mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
                            is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
                            MLXSW_PORT_ADMIN_STATUS_DOWN);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                      unsigned char *addr)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char ppad_pl[MLXSW_REG_PPAD_LEN];

        mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
        mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

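/* The port MAC is derived from the switch base MAC by adding the local
 * port number to the last byte; note this assumes the base MAC leaves
 * enough room so that the addition does not overflow into other bytes.
 */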
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

        ether_addr_copy(addr, mlxsw_sp->base_mac);
        addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
        return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char pmtu_pl[MLXSW_REG_PMTU_LEN];
        int max_mtu;
        int err;

        mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
        mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
        if (err)
                return err;
        max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

        if (mtu > max_mtu)
                return -EINVAL;

        mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
                                    u8 swid)
{
        char pspa_pl[MLXSW_REG_PSPA_LEN];

        mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

        return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
                                        swid);
}

static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                     bool enable)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char svpe_pl[MLXSW_REG_SVPE_LEN];

        mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
                                 u16 vid)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char svfa_pl[MLXSW_REG_SVFA_LEN];

        mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
                            fid, vid);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}

static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                          u16 vid, bool learn_enable)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char *spvmlr_pl;
        int err;

        spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
        if (!spvmlr_pl)
                return -ENOMEM;
        mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
                              learn_enable);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
        kfree(spvmlr_pl);
        return err;
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char sspr_pl[MLXSW_REG_SSPR_LEN];

        mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
                                         u8 local_port, u8 *p_module,
                                         u8 *p_width, u8 *p_lane)
{
        char pmlp_pl[MLXSW_REG_PMLP_LEN];
        int err;

        mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
        if (err)
                return err;
        *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
        *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
        *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
        return 0;
}

static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
                                    u8 module, u8 width, u8 lane)
{
        char pmlp_pl[MLXSW_REG_PMLP_LEN];
        int i;

        mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
        mlxsw_reg_pmlp_width_set(pmlp_pl, width);
        for (i = 0; i < width; i++) {
                mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
                mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
        }

        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
        char pmlp_pl[MLXSW_REG_PMLP_LEN];

        mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
        mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        int err;

        err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
        if (err)
                return err;
        netif_start_queue(dev);
        return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

        netif_stop_queue(dev);
        return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
                                      struct net_device *dev)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
        const struct mlxsw_tx_info tx_info = {
                .local_port = mlxsw_sp_port->local_port,
                .is_emad = false,
        };
        u64 len;
        int err;

        if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
                return NETDEV_TX_BUSY;

        if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
                struct sk_buff *skb_orig = skb;

                skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
                if (!skb) {
                        this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
                        dev_kfree_skb_any(skb_orig);
                        return NETDEV_TX_OK;
                }
        }

        if (eth_skb_pad(skb)) {
                this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
                return NETDEV_TX_OK;
        }

        mlxsw_sp_txhdr_construct(skb, &tx_info);
        /* TX header is consumed by HW on the way so we shouldn't count its
         * bytes as being sent.
         */
        len = skb->len - MLXSW_TXHDR_LEN;

        /* Due to a race we might fail here because of a full queue. In that
         * unlikely case we simply drop the packet.
         */
        err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

        if (!err) {
                pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
                u64_stats_update_begin(&pcpu_stats->syncp);
                pcpu_stats->tx_packets++;
                pcpu_stats->tx_bytes += len;
                u64_stats_update_end(&pcpu_stats->syncp);
        } else {
                this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct sockaddr *addr = p;
        int err;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
        if (err)
                return err;
        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
        return 0;
}

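/* A priority group with PAUSE or PFC enabled gets a lossless buffer:
 * room for the data itself (pg_size) plus the configured delay
 * allowance above the threshold. Otherwise a plain lossy buffer of
 * pg_size suffices.
 */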
static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
                                 bool pause_en, bool pfc_en, u16 delay)
{
        u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);

        delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
                         MLXSW_SP_PAUSE_DELAY;

        if (pause_en || pfc_en)
                mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
                                                    pg_size + delay, pg_size);
        else
                mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
}

int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
                                 u8 *prio_tc, bool pause_en,
                                 struct ieee_pfc *my_pfc)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
        u16 delay = !!my_pfc ? my_pfc->delay : 0;
        char pbmc_pl[MLXSW_REG_PBMC_LEN];
        int i, j, err;

        mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
        if (err)
                return err;

        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                bool configure = false;
                bool pfc = false;

                for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
                        if (prio_tc[j] == i) {
                                pfc = pfc_en & BIT(j);
                                configure = true;
                                break;
                        }
                }

                if (!configure)
                        continue;
                mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
        }

        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                      int mtu, bool pause_en)
{
        u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
        bool dcb_en = !!mlxsw_sp_port->dcb.ets;
        struct ieee_pfc *my_pfc;
        u8 *prio_tc;

        prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
        my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

        return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
                                            pause_en, my_pfc);
}

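/* Changing the MTU touches the headroom, the mirroring buffer and the
 * port itself; each step that fails below is unwound with the old
 * dev->mtu so the three stay consistent.
 */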
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
        int err;

        err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
        if (err)
                return err;
        err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
        if (err)
                goto err_span_port_mtu_update;
        err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
        if (err)
                goto err_port_mtu_set;
        dev->mtu = mtu;
        return 0;

err_port_mtu_set:
        mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
        mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
        return err;
}

static struct rtnl_link_stats64 *
mlxsw_sp_port_get_stats64(struct net_device *dev,
                          struct rtnl_link_stats64 *stats)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp_port_pcpu_stats *p;
        u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
        u32 tx_dropped = 0;
        unsigned int start;
        int i;

        for_each_possible_cpu(i) {
                p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
                do {
                        start = u64_stats_fetch_begin_irq(&p->syncp);
                        rx_packets      = p->rx_packets;
                        rx_bytes        = p->rx_bytes;
                        tx_packets      = p->tx_packets;
                        tx_bytes        = p->tx_bytes;
                } while (u64_stats_fetch_retry_irq(&p->syncp, start));

                stats->rx_packets       += rx_packets;
                stats->rx_bytes         += rx_bytes;
                stats->tx_packets       += tx_packets;
                stats->tx_bytes         += tx_bytes;
                /* tx_dropped is u32, updated without syncp protection. */
                tx_dropped      += p->tx_dropped;
        }
        stats->tx_dropped       = tx_dropped;
        return stats;
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
                           u16 vid_end, bool is_member, bool untagged)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char *spvm_pl;
        int err;

        spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
        if (!spvm_pl)
                return -ENOMEM;

        mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
                            vid_end, is_member, untagged);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
        kfree(spvm_pl);
        return err;
}

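/* Move the port from VLAN mode to Virtual mode: install an explicit
 * {Port, VID} to FID mapping for every active VLAN first, then flip
 * the port mode. On error the mappings installed so far are removed.
 */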
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
        enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
        u16 vid, last_visited_vid;
        int err;

        for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
                err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
                                                   vid);
                if (err) {
                        last_visited_vid = vid;
                        goto err_port_vid_to_fid_set;
                }
        }

        err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
        if (err) {
                last_visited_vid = VLAN_N_VID;
                goto err_port_vid_to_fid_set;
        }

        return 0;

err_port_vid_to_fid_set:
        for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
                mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
                                             vid);
        return err;
}

static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
        enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
        u16 vid;
        int err;

        err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
        if (err)
                return err;

        for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
                err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
                                                   vid, vid);
                if (err)
                        return err;
        }

        return 0;
}

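/* A vPort represents a {Port, VID} pair as a lightweight clone of the
 * underlying mlxsw_sp_port; it is what a VLAN upper device maps to
 * while the port operates in Virtual mode.
 */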
static struct mlxsw_sp_port *
mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
        struct mlxsw_sp_port *mlxsw_sp_vport;

        mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
        if (!mlxsw_sp_vport)
                return NULL;

        /* dev will be set correctly after the VLAN device is linked
         * with the real device. In case of bridge SELF invocation, dev
         * will remain as is.
         */
        mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
        mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
        mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
        mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
        mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
        mlxsw_sp_vport->vport.vid = vid;

        list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);

        return mlxsw_sp_vport;
}

static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
{
        list_del(&mlxsw_sp_vport->vport.list);
        kfree(mlxsw_sp_vport);
}

int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
                          u16 vid)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp_port *mlxsw_sp_vport;
        bool untagged = vid == 1;
        int err;

        /* VLAN 0 is added to HW filter when device goes up, but it is
         * reserved in our case, so simply return.
         */
        if (!vid)
                return 0;

        if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
                netdev_warn(dev, "VID=%d already configured\n", vid);
                return 0;
        }

        mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
        if (!mlxsw_sp_vport) {
                netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
                return -ENOMEM;
        }

        /* When adding the first VLAN interface on a bridged port we need to
         * transition all the active 802.1Q bridge VLANs to use explicit
         * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
         */
        if (list_is_singular(&mlxsw_sp_port->vports_list)) {
                err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
                if (err) {
                        netdev_err(dev, "Failed to set to Virtual mode\n");
                        goto err_port_vp_mode_trans;
                }
        }

        err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
        if (err) {
                netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
                goto err_port_vid_learning_set;
        }

        err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
        if (err) {
                netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
                           vid);
                goto err_port_add_vid;
        }

        return 0;

err_port_add_vid:
        mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
err_port_vid_learning_set:
        if (list_is_singular(&mlxsw_sp_port->vports_list))
                mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
        mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
        return err;
}

static int mlxsw_sp_port_kill_vid(struct net_device *dev,
                                  __be16 __always_unused proto, u16 vid)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp_port *mlxsw_sp_vport;
        struct mlxsw_sp_fid *f;

        /* VLAN 0 is removed from HW filter when device goes down, but
         * it is reserved in our case, so simply return.
         */
        if (!vid)
                return 0;

        mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
        if (WARN_ON(!mlxsw_sp_vport))
                return 0;

        mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);

        mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);

        /* Drop FID reference. If this was the last reference the
         * resources will be freed.
         */
        f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
        if (f && !WARN_ON(!f->leave))
                f->leave(mlxsw_sp_vport);

        /* When removing the last VLAN interface on a bridged port we need to
         * transition all active 802.1Q bridge VLANs to use VID to FID
         * mappings and set port's mode to VLAN mode.
         */
        if (list_is_singular(&mlxsw_sp_port->vports_list))
                mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);

        mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);

        return 0;
}

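/* Front panel names are "p<module+1>" for regular ports and
 * "p<module+1>s<lane/width>" for split ports; e.g. a split port on
 * module 0 with width 2 and lane 2 is named "p1s1".
 */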
static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
                                            size_t len)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        u8 module = mlxsw_sp_port->mapping.module;
        u8 width = mlxsw_sp_port->mapping.width;
        u8 lane = mlxsw_sp_port->mapping.lane;
        int err;

        if (!mlxsw_sp_port->split)
                err = snprintf(name, len, "p%d", module + 1);
        else
                err = snprintf(name, len, "p%ds%d", module + 1,
                               lane / width);

        if (err >= len)
                return -EINVAL;

        return 0;
}

static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mirror_entry_find(struct mlxsw_sp_port *port,
                                unsigned long cookie)
{
        struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

        list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
                if (mall_tc_entry->cookie == cookie)
                        return mall_tc_entry;

        return NULL;
}

static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
                                      struct tc_cls_matchall_offload *cls,
                                      const struct tc_action *a,
                                      bool ingress)
{
        struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
        struct net *net = dev_net(mlxsw_sp_port->dev);
        enum mlxsw_sp_span_type span_type;
        struct mlxsw_sp_port *to_port;
        struct net_device *to_dev;
        int ifindex;
        int err;

        ifindex = tcf_mirred_ifindex(a);
        to_dev = __dev_get_by_index(net, ifindex);
        if (!to_dev) {
                netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
                return -EINVAL;
        }

        if (!mlxsw_sp_port_dev_check(to_dev)) {
                netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port\n");
                return -ENOTSUPP;
        }
        to_port = netdev_priv(to_dev);

        mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
        if (!mall_tc_entry)
                return -ENOMEM;

        mall_tc_entry->cookie = cls->cookie;
        mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
        mall_tc_entry->mirror.to_local_port = to_port->local_port;
        mall_tc_entry->mirror.ingress = ingress;
        list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);

        span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
        err = mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
        if (err)
                goto err_mirror_add;
        return 0;

err_mirror_add:
        list_del(&mall_tc_entry->list);
        kfree(mall_tc_entry);
        return err;
}

static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
                                          __be16 protocol,
                                          struct tc_cls_matchall_offload *cls,
                                          bool ingress)
{
        const struct tc_action *a;
        int err;

        if (!tc_single_action(cls->exts)) {
                netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
                return -ENOTSUPP;
        }

        tc_for_each_action(a, cls->exts) {
                if (!is_tcf_mirred_mirror(a) || protocol != htons(ETH_P_ALL))
                        return -ENOTSUPP;

                err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port, cls,
                                                            a, ingress);
                if (err)
                        return err;
        }

        return 0;
}

static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
                                           struct tc_cls_matchall_offload *cls)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
        enum mlxsw_sp_span_type span_type;
        struct mlxsw_sp_port *to_port;

        mall_tc_entry = mlxsw_sp_port_mirror_entry_find(mlxsw_sp_port,
                                                        cls->cookie);
        if (!mall_tc_entry) {
                netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
                return;
        }

        switch (mall_tc_entry->type) {
        case MLXSW_SP_PORT_MALL_MIRROR:
                to_port = mlxsw_sp->ports[mall_tc_entry->mirror.to_local_port];
                span_type = mall_tc_entry->mirror.ingress ?
                                MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;

                mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type);
                break;
        default:
                WARN_ON(1);
        }

        list_del(&mall_tc_entry->list);
        kfree(mall_tc_entry);
}

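/* Entry point for tc offload; only matchall classifiers are handled.
 * An illustrative command that would exercise the mirror offload (the
 * interface names here are made up):
 *
 *   tc filter add dev sw1p1 parent ffff: matchall skip_sw \
 *           action mirred egress mirror dev sw1p2
 */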
1186 static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
1187                              __be16 proto, struct tc_to_netdev *tc)
1188 {
1189         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1190         bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS);
1191
1192         if (tc->type == TC_SETUP_MATCHALL) {
1193                 switch (tc->cls_mall->command) {
1194                 case TC_CLSMATCHALL_REPLACE:
1195                         return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port,
1196                                                               proto,
1197                                                               tc->cls_mall,
1198                                                               ingress);
1199                 case TC_CLSMATCHALL_DESTROY:
1200                         mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port,
1201                                                        tc->cls_mall);
1202                         return 0;
1203                 default:
1204                         return -EINVAL;
1205                 }
1206         }
1207
1208         return -ENOTSUPP;
1209 }
1210
1211 static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
1212         .ndo_open               = mlxsw_sp_port_open,
1213         .ndo_stop               = mlxsw_sp_port_stop,
1214         .ndo_start_xmit         = mlxsw_sp_port_xmit,
1215         .ndo_setup_tc           = mlxsw_sp_setup_tc,
1216         .ndo_set_rx_mode        = mlxsw_sp_set_rx_mode,
1217         .ndo_set_mac_address    = mlxsw_sp_port_set_mac_address,
1218         .ndo_change_mtu         = mlxsw_sp_port_change_mtu,
1219         .ndo_get_stats64        = mlxsw_sp_port_get_stats64,
1220         .ndo_vlan_rx_add_vid    = mlxsw_sp_port_add_vid,
1221         .ndo_vlan_rx_kill_vid   = mlxsw_sp_port_kill_vid,
1222         .ndo_neigh_construct    = mlxsw_sp_router_neigh_construct,
1223         .ndo_neigh_destroy      = mlxsw_sp_router_neigh_destroy,
1224         .ndo_fdb_add            = switchdev_port_fdb_add,
1225         .ndo_fdb_del            = switchdev_port_fdb_del,
1226         .ndo_fdb_dump           = switchdev_port_fdb_dump,
1227         .ndo_bridge_setlink     = switchdev_port_bridge_setlink,
1228         .ndo_bridge_getlink     = switchdev_port_bridge_getlink,
1229         .ndo_bridge_dellink     = switchdev_port_bridge_dellink,
1230         .ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name,
1231 };
1232
1233 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
1234                                       struct ethtool_drvinfo *drvinfo)
1235 {
1236         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1237         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1238
1239         strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
1240         strlcpy(drvinfo->version, mlxsw_sp_driver_version,
1241                 sizeof(drvinfo->version));
1242         snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
1243                  "%d.%d.%d",
1244                  mlxsw_sp->bus_info->fw_rev.major,
1245                  mlxsw_sp->bus_info->fw_rev.minor,
1246                  mlxsw_sp->bus_info->fw_rev.subminor);
1247         strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
1248                 sizeof(drvinfo->bus_info));
1249 }
1250
1251 static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
1252                                          struct ethtool_pauseparam *pause)
1253 {
1254         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1255
1256         pause->rx_pause = mlxsw_sp_port->link.rx_pause;
1257         pause->tx_pause = mlxsw_sp_port->link.tx_pause;
1258 }
1259
1260 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
1261                                    struct ethtool_pauseparam *pause)
1262 {
1263         char pfcc_pl[MLXSW_REG_PFCC_LEN];
1264
1265         mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
1266         mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
1267         mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
1268
1269         return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
1270                                pfcc_pl);
1271 }
1272
1273 static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
1274                                         struct ethtool_pauseparam *pause)
1275 {
1276         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1277         bool pause_en = pause->tx_pause || pause->rx_pause;
1278         int err;
1279
1280         if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
1281                 netdev_err(dev, "PFC already enabled on port\n");
1282                 return -EINVAL;
1283         }
1284
1285         if (pause->autoneg) {
1286                 netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
1287                 return -EINVAL;
1288         }
1289
1290         err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1291         if (err) {
1292                 netdev_err(dev, "Failed to configure port's headroom\n");
1293                 return err;
1294         }
1295
1296         err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
1297         if (err) {
1298                 netdev_err(dev, "Failed to set PAUSE parameters\n");
1299                 goto err_port_pause_configure;
1300         }
1301
1302         mlxsw_sp_port->link.rx_pause = pause->rx_pause;
1303         mlxsw_sp_port->link.tx_pause = pause->tx_pause;
1304
1305         return 0;
1306
1307 err_port_pause_configure:
1308         pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
1309         mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1310         return err;
1311 }
1312
1313 struct mlxsw_sp_port_hw_stats {
1314         char str[ETH_GSTRING_LEN];
1315         u64 (*getter)(char *payload);
1316 };
1317
1318 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
1319         {
1320                 .str = "a_frames_transmitted_ok",
1321                 .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
1322         },
1323         {
1324                 .str = "a_frames_received_ok",
1325                 .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
1326         },
1327         {
1328                 .str = "a_frame_check_sequence_errors",
1329                 .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
1330         },
1331         {
1332                 .str = "a_alignment_errors",
1333                 .getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
1334         },
1335         {
1336                 .str = "a_octets_transmitted_ok",
1337                 .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
1338         },
1339         {
1340                 .str = "a_octets_received_ok",
1341                 .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
1342         },
1343         {
1344                 .str = "a_multicast_frames_xmitted_ok",
1345                 .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
1346         },
1347         {
1348                 .str = "a_broadcast_frames_xmitted_ok",
1349                 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
1350         },
1351         {
1352                 .str = "a_multicast_frames_received_ok",
1353                 .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
1354         },
1355         {
1356                 .str = "a_broadcast_frames_received_ok",
1357                 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
1358         },
1359         {
1360                 .str = "a_in_range_length_errors",
1361                 .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
1362         },
1363         {
1364                 .str = "a_out_of_range_length_field",
1365                 .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
1366         },
1367         {
1368                 .str = "a_frame_too_long_errors",
1369                 .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
1370         },
1371         {
1372                 .str = "a_symbol_error_during_carrier",
1373                 .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
1374         },
1375         {
1376                 .str = "a_mac_control_frames_transmitted",
1377                 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
1378         },
1379         {
1380                 .str = "a_mac_control_frames_received",
1381                 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
1382         },
1383         {
1384                 .str = "a_unsupported_opcodes_received",
1385                 .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
1386         },
1387         {
1388                 .str = "a_pause_mac_ctrl_frames_received",
1389                 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
1390         },
1391         {
1392                 .str = "a_pause_mac_ctrl_frames_xmitted",
1393                 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
1394         },
1395 };
1396
1397 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
1398
1399 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
1400         {
1401                 .str = "rx_octets_prio",
1402                 .getter = mlxsw_reg_ppcnt_rx_octets_get,
1403         },
1404         {
1405                 .str = "rx_frames_prio",
1406                 .getter = mlxsw_reg_ppcnt_rx_frames_get,
1407         },
1408         {
1409                 .str = "tx_octets_prio",
1410                 .getter = mlxsw_reg_ppcnt_tx_octets_get,
1411         },
1412         {
1413                 .str = "tx_frames_prio",
1414                 .getter = mlxsw_reg_ppcnt_tx_frames_get,
1415         },
1416         {
1417                 .str = "rx_pause_prio",
1418                 .getter = mlxsw_reg_ppcnt_rx_pause_get,
1419         },
1420         {
1421                 .str = "rx_pause_duration_prio",
1422                 .getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
1423         },
1424         {
1425                 .str = "tx_pause_prio",
1426                 .getter = mlxsw_reg_ppcnt_tx_pause_get,
1427         },
1428         {
1429                 .str = "tx_pause_duration_prio",
1430                 .getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
1431         },
1432 };
1433
1434 #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
1435
1436 static u64 mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get(char *ppcnt_pl)
1437 {
1438         u64 transmit_queue = mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
1439
1440         return MLXSW_SP_CELLS_TO_BYTES(transmit_queue);
1441 }
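
/* Worked example (illustrative): the hardware reports tc_transmit_queue
 * in buffer cells rather than bytes.  Assuming the 96-byte Spectrum cell
 * size that MLXSW_SP_CELLS_TO_BYTES() is understood to encode (see
 * spectrum.h), a raw reading of 10 cells is exposed to ethtool as
 * 10 * 96 = 960 bytes.
 */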
1442
1443 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
1444         {
1445                 .str = "tc_transmit_queue_tc",
1446                 .getter = mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get,
1447         },
1448         {
1449                 .str = "tc_no_buffer_discard_uc_tc",
1450                 .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
1451         },
1452 };
1453
1454 #define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
1455
1456 #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
1457                                          (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
1458                                           MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
1459                                          IEEE_8021QAZ_MAX_TCS)
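
/* Sketch of the resulting ethtool stats layout, assuming
 * IEEE_8021QAZ_MAX_TCS == 8 and the table lengths defined above
 * (8 per-priority counters, 2 per-TC counters):
 *
 *   data[0 .. MLXSW_SP_PORT_HW_STATS_LEN - 1]  IEEE 802.3 counters
 *   next 8 * 8 entries                         per-priority counters, prio 0..7
 *   next 8 * 2 entries                         per-TC counters, tc 0..7
 *
 * mlxsw_sp_port_get_strings() and mlxsw_sp_port_get_stats() below must
 * walk the groups in this exact order for names and values to line up.
 */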
1460
1461 static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
1462 {
1463         int i;
1464
1465         for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
1466                 snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
1467                          mlxsw_sp_port_hw_prio_stats[i].str, prio);
1468                 *p += ETH_GSTRING_LEN;
1469         }
1470 }
1471
1472 static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
1473 {
1474         int i;
1475
1476         for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
1477                 snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
1478                          mlxsw_sp_port_hw_tc_stats[i].str, tc);
1479                 *p += ETH_GSTRING_LEN;
1480         }
1481 }
1482
1483 static void mlxsw_sp_port_get_strings(struct net_device *dev,
1484                                       u32 stringset, u8 *data)
1485 {
1486         u8 *p = data;
1487         int i;
1488
1489         switch (stringset) {
1490         case ETH_SS_STATS:
1491                 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
1492                         memcpy(p, mlxsw_sp_port_hw_stats[i].str,
1493                                ETH_GSTRING_LEN);
1494                         p += ETH_GSTRING_LEN;
1495                 }
1496
1497                 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
1498                         mlxsw_sp_port_get_prio_strings(&p, i);
1499
1500                 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
1501                         mlxsw_sp_port_get_tc_strings(&p, i);
1502
1503                 break;
1504         }
1505 }
1506
1507 static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
1508                                      enum ethtool_phys_id_state state)
1509 {
1510         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1511         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1512         char mlcr_pl[MLXSW_REG_MLCR_LEN];
1513         bool active;
1514
1515         switch (state) {
1516         case ETHTOOL_ID_ACTIVE:
1517                 active = true;
1518                 break;
1519         case ETHTOOL_ID_INACTIVE:
1520                 active = false;
1521                 break;
1522         default:
1523                 return -EOPNOTSUPP;
1524         }
1525
1526         mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
1527         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
1528 }
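
/* Usage note (illustrative): this handler backs `ethtool -p <dev>`.
 * ETHTOOL_ID_ACTIVE lights the port LED via the MLCR register and
 * ETHTOOL_ID_INACTIVE restores it; the remaining blink states are
 * rejected with -EOPNOTSUPP above.
 */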
1529
1530 static int
1531 mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
1532                                int *p_len, enum mlxsw_reg_ppcnt_grp grp)
1533 {
1534         switch (grp) {
1535         case MLXSW_REG_PPCNT_IEEE_8023_CNT:
1536                 *p_hw_stats = mlxsw_sp_port_hw_stats;
1537                 *p_len = MLXSW_SP_PORT_HW_STATS_LEN;
1538                 break;
1539         case MLXSW_REG_PPCNT_PRIO_CNT:
1540                 *p_hw_stats = mlxsw_sp_port_hw_prio_stats;
1541                 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
1542                 break;
1543         case MLXSW_REG_PPCNT_TC_CNT:
1544                 *p_hw_stats = mlxsw_sp_port_hw_tc_stats;
1545                 *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
1546                 break;
1547         default:
1548                 WARN_ON(1);
1549                 return -EOPNOTSUPP;
1550         }
1551         return 0;
1552 }
1553
1554 static void __mlxsw_sp_port_get_stats(struct net_device *dev,
1555                                       enum mlxsw_reg_ppcnt_grp grp, int prio,
1556                                       u64 *data, int data_index)
1557 {
1558         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1559         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1560         struct mlxsw_sp_port_hw_stats *hw_stats;
1561         char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
1562         int i, len;
1563         int err;
1564
1565         err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
1566         if (err)
1567                 return;
1568         mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
1569         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
1570         for (i = 0; i < len; i++)
1571                 data[data_index + i] = !err ? hw_stats[i].getter(ppcnt_pl) : 0;
1572 }
1573
1574 static void mlxsw_sp_port_get_stats(struct net_device *dev,
1575                                     struct ethtool_stats *stats, u64 *data)
1576 {
1577         int i, data_index = 0;
1578
1579         /* IEEE 802.3 Counters */
1580         __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
1581                                   data, data_index);
1582         data_index = MLXSW_SP_PORT_HW_STATS_LEN;
1583
1584         /* Per-Priority Counters */
1585         for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1586                 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
1587                                           data, data_index);
1588                 data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
1589         }
1590
1591         /* Per-TC Counters */
1592         for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1593                 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
1594                                           data, data_index);
1595                 data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
1596         }
1597 }
1598
1599 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
1600 {
1601         switch (sset) {
1602         case ETH_SS_STATS:
1603                 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
1604         default:
1605                 return -EOPNOTSUPP;
1606         }
1607 }
1608
1609 struct mlxsw_sp_port_link_mode {
1610         u32 mask;
1611         u32 supported;
1612         u32 advertised;
1613         u32 speed;
1614 };
1615
1616 static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
1617         {
1618                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
1619                 .supported      = SUPPORTED_100baseT_Full,
1620                 .advertised     = ADVERTISED_100baseT_Full,
1621                 .speed          = 100,
1622         },
1623         {
1624                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
1625                 .speed          = 100,
1626         },
1627         {
1628                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
1629                                   MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
1630                 .supported      = SUPPORTED_1000baseKX_Full,
1631                 .advertised     = ADVERTISED_1000baseKX_Full,
1632                 .speed          = 1000,
1633         },
1634         {
1635                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
1636                 .supported      = SUPPORTED_10000baseT_Full,
1637                 .advertised     = ADVERTISED_10000baseT_Full,
1638                 .speed          = 10000,
1639         },
1640         {
1641                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
1642                                   MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
1643                 .supported      = SUPPORTED_10000baseKX4_Full,
1644                 .advertised     = ADVERTISED_10000baseKX4_Full,
1645                 .speed          = 10000,
1646         },
1647         {
1648                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1649                                   MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1650                                   MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1651                                   MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
1652                 .supported      = SUPPORTED_10000baseKR_Full,
1653                 .advertised     = ADVERTISED_10000baseKR_Full,
1654                 .speed          = 10000,
1655         },
1656         {
1657                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
1658                 .supported      = SUPPORTED_20000baseKR2_Full,
1659                 .advertised     = ADVERTISED_20000baseKR2_Full,
1660                 .speed          = 20000,
1661         },
1662         {
1663                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
1664                 .supported      = SUPPORTED_40000baseCR4_Full,
1665                 .advertised     = ADVERTISED_40000baseCR4_Full,
1666                 .speed          = 40000,
1667         },
1668         {
1669                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
1670                 .supported      = SUPPORTED_40000baseKR4_Full,
1671                 .advertised     = ADVERTISED_40000baseKR4_Full,
1672                 .speed          = 40000,
1673         },
1674         {
1675                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
1676                 .supported      = SUPPORTED_40000baseSR4_Full,
1677                 .advertised     = ADVERTISED_40000baseSR4_Full,
1678                 .speed          = 40000,
1679         },
1680         {
1681                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
1682                 .supported      = SUPPORTED_40000baseLR4_Full,
1683                 .advertised     = ADVERTISED_40000baseLR4_Full,
1684                 .speed          = 40000,
1685         },
1686         {
1687                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
1688                                   MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
1689                                   MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
1690                 .speed          = 25000,
1691         },
1692         {
1693                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
1694                                   MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
1695                                   MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
1696                 .speed          = 50000,
1697         },
1698         {
1699                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
1700                 .supported      = SUPPORTED_56000baseKR4_Full,
1701                 .advertised     = ADVERTISED_56000baseKR4_Full,
1702                 .speed          = 56000,
1703         },
1704         {
1705                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
1706                                   MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1707                                   MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
1708                                   MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
1709                 .speed          = 100000,
1710         },
1711 };
1712
1713 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
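
/* Illustrative lookup against the table above: a PTYS mask containing
 * only MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 maps to
 * SUPPORTED_40000baseCR4_Full, ADVERTISED_40000baseCR4_Full and a speed
 * of 40000 Mb/s.  Entries such as the 25G and 50G masks carry a speed
 * but no legacy SUPPORTED_ or ADVERTISED_ bits, as the old ethtool API
 * defined none for them.
 */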
1714
1715 static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
1716 {
1717         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1718                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1719                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1720                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1721                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1722                               MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1723                 return SUPPORTED_FIBRE;
1724
1725         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1726                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1727                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1728                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
1729                               MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
1730                 return SUPPORTED_Backplane;
1731         return 0;
1732 }
1733
1734 static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
1735 {
1736         u32 modes = 0;
1737         int i;
1738
1739         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1740                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1741                         modes |= mlxsw_sp_port_link_mode[i].supported;
1742         }
1743         return modes;
1744 }
1745
1746 static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
1747 {
1748         u32 modes = 0;
1749         int i;
1750
1751         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1752                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1753                         modes |= mlxsw_sp_port_link_mode[i].advertised;
1754         }
1755         return modes;
1756 }
1757
1758 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
1759                                             struct ethtool_cmd *cmd)
1760 {
1761         u32 speed = SPEED_UNKNOWN;
1762         u8 duplex = DUPLEX_UNKNOWN;
1763         int i;
1764
1765         if (!carrier_ok)
1766                 goto out;
1767
1768         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1769                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
1770                         speed = mlxsw_sp_port_link_mode[i].speed;
1771                         duplex = DUPLEX_FULL;
1772                         break;
1773                 }
1774         }
1775 out:
1776         ethtool_cmd_speed_set(cmd, speed);
1777         cmd->duplex = duplex;
1778 }
1779
1780 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
1781 {
1782         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1783                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1784                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1785                               MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1786                 return PORT_FIBRE;
1787
1788         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1789                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1790                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
1791                 return PORT_DA;
1792
1793         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1794                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1795                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1796                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
1797                 return PORT_NONE;
1798
1799         return PORT_OTHER;
1800 }
1801
1802 static int mlxsw_sp_port_get_settings(struct net_device *dev,
1803                                       struct ethtool_cmd *cmd)
1804 {
1805         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1806         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1807         char ptys_pl[MLXSW_REG_PTYS_LEN];
1808         u32 eth_proto_cap;
1809         u32 eth_proto_admin;
1810         u32 eth_proto_oper;
1811         int err;
1812
1813         mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
1814         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1815         if (err) {
1816                 netdev_err(dev, "Failed to get proto\n");
1817                 return err;
1818         }
1819         mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
1820                               &eth_proto_admin, &eth_proto_oper);
1821
1822         cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
1823                          mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
1824                          SUPPORTED_Pause | SUPPORTED_Asym_Pause |
1825                          SUPPORTED_Autoneg;
1826         cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
1827         mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
1828                                         eth_proto_oper, cmd);
1829
1830         eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
1831         cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
1832         cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);
1833
1834         cmd->transceiver = XCVR_INTERNAL;
1835         return 0;
1836 }
1837
1838 static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
1839 {
1840         u32 ptys_proto = 0;
1841         int i;
1842
1843         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1844                 if (advertising & mlxsw_sp_port_link_mode[i].advertised)
1845                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1846         }
1847         return ptys_proto;
1848 }
1849
1850 static u32 mlxsw_sp_to_ptys_speed(u32 speed)
1851 {
1852         u32 ptys_proto = 0;
1853         int i;
1854
1855         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1856                 if (speed == mlxsw_sp_port_link_mode[i].speed)
1857                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1858         }
1859         return ptys_proto;
1860 }
1861
1862 static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
1863 {
1864         u32 ptys_proto = 0;
1865         int i;
1866
1867         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1868                 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
1869                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1870         }
1871         return ptys_proto;
1872 }
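
/* Worked example (assuming MLXSW_SP_PORT_BASE_SPEED is 25000 Mb/s per
 * lane, per spectrum.h): a 4-lane port yields upper_speed = 100000, so
 * every entry in mlxsw_sp_port_link_mode with .speed <= 100000 is OR-ed
 * into the admin mask, i.e. all modes up to and including 100G are
 * enabled by default in mlxsw_sp_port_speed_by_width_set() below.
 */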
1873
1874 static int mlxsw_sp_port_set_settings(struct net_device *dev,
1875                                       struct ethtool_cmd *cmd)
1876 {
1877         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1878         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1879         char ptys_pl[MLXSW_REG_PTYS_LEN];
1880         u32 speed;
1881         u32 eth_proto_new;
1882         u32 eth_proto_cap;
1883         u32 eth_proto_admin;
1884         int err;
1885
1886         speed = ethtool_cmd_speed(cmd);
1887
1888         eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
1889                 mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
1890                 mlxsw_sp_to_ptys_speed(speed);
1891
1892         mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
1893         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1894         if (err) {
1895                 netdev_err(dev, "Failed to get proto\n");
1896                 return err;
1897         }
1898         mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);
1899
1900         eth_proto_new = eth_proto_new & eth_proto_cap;
1901         if (!eth_proto_new) {
1902                 netdev_err(dev, "Unsupported proto admin requested\n");
1903                 return -EINVAL;
1904         }
1905         if (eth_proto_new == eth_proto_admin)
1906                 return 0;
1907
1908         mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
1909         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1910         if (err) {
1911                 netdev_err(dev, "Failed to set proto admin\n");
1912                 return err;
1913         }
1914
1915         if (!netif_running(dev))
1916                 return 0;
1917
1918         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1919         if (err) {
1920                 netdev_err(dev, "Failed to set admin status\n");
1921                 return err;
1922         }
1923
1924         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
1925         if (err) {
1926                 netdev_err(dev, "Failed to set admin status\n");
1927                 return err;
1928         }
1929
1930         return 0;
1931 }
1932
1933 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
1934         .get_drvinfo            = mlxsw_sp_port_get_drvinfo,
1935         .get_link               = ethtool_op_get_link,
1936         .get_pauseparam         = mlxsw_sp_port_get_pauseparam,
1937         .set_pauseparam         = mlxsw_sp_port_set_pauseparam,
1938         .get_strings            = mlxsw_sp_port_get_strings,
1939         .set_phys_id            = mlxsw_sp_port_set_phys_id,
1940         .get_ethtool_stats      = mlxsw_sp_port_get_stats,
1941         .get_sset_count         = mlxsw_sp_port_get_sset_count,
1942         .get_settings           = mlxsw_sp_port_get_settings,
1943         .set_settings           = mlxsw_sp_port_set_settings,
1944 };
1945
1946 static int
1947 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
1948 {
1949         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1950         u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
1951         char ptys_pl[MLXSW_REG_PTYS_LEN];
1952         u32 eth_proto_admin;
1953
1954         eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
1955         mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port,
1956                             eth_proto_admin);
1957         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1958 }
1959
1960 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
1961                           enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
1962                           bool dwrr, u8 dwrr_weight)
1963 {
1964         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1965         char qeec_pl[MLXSW_REG_QEEC_LEN];
1966
1967         mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1968                             next_index);
1969         mlxsw_reg_qeec_de_set(qeec_pl, true);
1970         mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
1971         mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
1972         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1973 }
1974
1975 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
1976                                   enum mlxsw_reg_qeec_hr hr, u8 index,
1977                                   u8 next_index, u32 maxrate)
1978 {
1979         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1980         char qeec_pl[MLXSW_REG_QEEC_LEN];
1981
1982         mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1983                             next_index);
1984         mlxsw_reg_qeec_mase_set(qeec_pl, true);
1985         mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
1986         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1987 }
1988
1989 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
1990                               u8 switch_prio, u8 tclass)
1991 {
1992         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1993         char qtct_pl[MLXSW_REG_QTCT_LEN];
1994
1995         mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
1996                             tclass);
1997         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
1998 }
1999
2000 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
2001 {
2002         int err, i;
2003
2004         /* Set up the element hierarchy, so that each TC is linked to
2005          * one subgroup, and all subgroups are members of the same group.
2006          */
2007         err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2008                                     MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
2009                                     0);
2010         if (err)
2011                 return err;
2012         for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2013                 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2014                                             MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
2015                                             0, false, 0);
2016                 if (err)
2017                         return err;
2018         }
2019         for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2020                 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2021                                             MLXSW_REG_QEEC_HIERARCY_TC, i, i,
2022                                             false, 0);
2023                 if (err)
2024                         return err;
2025         }
2026
2027         /* Make sure the max shaper is disabled in all hierarchies that
2028          * support it.
2029          */
2030         err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2031                                             MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
2032                                             MLXSW_REG_QEEC_MAS_DIS);
2033         if (err)
2034                 return err;
2035         for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2036                 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2037                                                     MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
2038                                                     i, 0,
2039                                                     MLXSW_REG_QEEC_MAS_DIS);
2040                 if (err)
2041                         return err;
2042         }
2043         for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2044                 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2045                                                     MLXSW_REG_QEEC_HIERARCY_TC,
2046                                                     i, i,
2047                                                     MLXSW_REG_QEEC_MAS_DIS);
2048                 if (err)
2049                         return err;
2050         }
2051
2052         /* Map all priorities to traffic class 0. */
2053         for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2054                 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
2055                 if (err)
2056                         return err;
2057         }
2058
2059         return 0;
2060 }
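
/* Resulting QEEC hierarchy after mlxsw_sp_port_ets_init(), sketched for
 * one port:
 *
 *   port --- group 0 --- subgroups 0..7 --- TCs 0..7 (TC i -> subgroup i)
 *
 * Everything starts with DWRR disabled, weight 0 and max shapers
 * disabled, and all switch priorities map to TC 0; DCB configuration
 * later adjusts the mapping and weights.
 */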
2061
2062 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2063                                 bool split, u8 module, u8 width, u8 lane)
2064 {
2065         struct mlxsw_sp_port *mlxsw_sp_port;
2066         struct net_device *dev;
2067         size_t bytes;
2068         int err;
2069
2070         dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
2071         if (!dev)
2072                 return -ENOMEM;
2073         mlxsw_sp_port = netdev_priv(dev);
2074         mlxsw_sp_port->dev = dev;
2075         mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
2076         mlxsw_sp_port->local_port = local_port;
2077         mlxsw_sp_port->split = split;
2078         mlxsw_sp_port->mapping.module = module;
2079         mlxsw_sp_port->mapping.width = width;
2080         mlxsw_sp_port->mapping.lane = lane;
2081         bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
2082         mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
2083         if (!mlxsw_sp_port->active_vlans) {
2084                 err = -ENOMEM;
2085                 goto err_port_active_vlans_alloc;
2086         }
2087         mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
2088         if (!mlxsw_sp_port->untagged_vlans) {
2089                 err = -ENOMEM;
2090                 goto err_port_untagged_vlans_alloc;
2091         }
2092         INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
2093         INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
2094
2095         mlxsw_sp_port->pcpu_stats =
2096                 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
2097         if (!mlxsw_sp_port->pcpu_stats) {
2098                 err = -ENOMEM;
2099                 goto err_alloc_stats;
2100         }
2101
2102         dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
2103         dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
2104
2105         err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
2106         if (err) {
2107                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
2108                         mlxsw_sp_port->local_port);
2109                 goto err_dev_addr_init;
2110         }
2111
2112         netif_carrier_off(dev);
2113
2114         dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
2115                          NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
2116         dev->hw_features |= NETIF_F_HW_TC;
2117
2118         /* Each packet needs to have a Tx header (metadata) on top of all other
2119          * headers.
2120          */
2121         dev->hard_header_len += MLXSW_TXHDR_LEN;
2122
2123         err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
2124         if (err) {
2125                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
2126                         mlxsw_sp_port->local_port);
2127                 goto err_port_system_port_mapping_set;
2128         }
2129
2130         err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
2131         if (err) {
2132                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
2133                         mlxsw_sp_port->local_port);
2134                 goto err_port_swid_set;
2135         }
2136
2137         err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
2138         if (err) {
2139                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
2140                         mlxsw_sp_port->local_port);
2141                 goto err_port_speed_by_width_set;
2142         }
2143
2144         err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
2145         if (err) {
2146                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
2147                         mlxsw_sp_port->local_port);
2148                 goto err_port_mtu_set;
2149         }
2150
2151         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2152         if (err)
2153                 goto err_port_admin_status_set;
2154
2155         err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
2156         if (err) {
2157                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
2158                         mlxsw_sp_port->local_port);
2159                 goto err_port_buffers_init;
2160         }
2161
2162         err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
2163         if (err) {
2164                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
2165                         mlxsw_sp_port->local_port);
2166                 goto err_port_ets_init;
2167         }
2168
2169         /* ETS and buffers must be initialized before DCB. */
2170         err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
2171         if (err) {
2172                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
2173                         mlxsw_sp_port->local_port);
2174                 goto err_port_dcb_init;
2175         }
2176
2177         mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
2178         err = register_netdev(dev);
2179         if (err) {
2180                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
2181                         mlxsw_sp_port->local_port);
2182                 goto err_register_netdev;
2183         }
2184
2185         err = mlxsw_core_port_init(mlxsw_sp->core, &mlxsw_sp_port->core_port,
2186                                    mlxsw_sp_port->local_port, dev,
2187                                    mlxsw_sp_port->split, module);
2188         if (err) {
2189                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
2190                         mlxsw_sp_port->local_port);
2191                 goto err_core_port_init;
2192         }
2193
2194         err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
2195         if (err)
2196                 goto err_port_vlan_init;
2197
2198         mlxsw_sp->ports[local_port] = mlxsw_sp_port;
2199         return 0;
2200
2201 err_port_vlan_init:
2202         mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
2203 err_core_port_init:
2204         unregister_netdev(dev);
2205 err_register_netdev:
2206         mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
2207 err_port_dcb_init:
2208 err_port_ets_init:
2209 err_port_buffers_init:
2210 err_port_admin_status_set:
2211 err_port_mtu_set:
2212 err_port_speed_by_width_set:
2213 err_port_swid_set:
2214 err_port_system_port_mapping_set:
2215 err_dev_addr_init:
2216         free_percpu(mlxsw_sp_port->pcpu_stats);
2217 err_alloc_stats:
2218         kfree(mlxsw_sp_port->untagged_vlans);
2219 err_port_untagged_vlans_alloc:
2220         kfree(mlxsw_sp_port->active_vlans);
2221 err_port_active_vlans_alloc:
2222         free_netdev(dev);
2223         return err;
2224 }
2225
2226 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
2227 {
2228         struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2229
2230         if (!mlxsw_sp_port)
2231                 return;
2232         mlxsw_sp->ports[local_port] = NULL;
2233         mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
2234         unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
2235         mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
2236         mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
2237         mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
2238         mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
2239         mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
2240         free_percpu(mlxsw_sp_port->pcpu_stats);
2241         kfree(mlxsw_sp_port->untagged_vlans);
2242         kfree(mlxsw_sp_port->active_vlans);
2243         WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
2244         free_netdev(mlxsw_sp_port->dev);
2245 }
2246
2247 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
2248 {
2249         int i;
2250
2251         for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
2252                 mlxsw_sp_port_remove(mlxsw_sp, i);
2253         kfree(mlxsw_sp->ports);
2254 }
2255
2256 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
2257 {
2258         u8 module, width, lane;
2259         size_t alloc_size;
2260         int i;
2261         int err;
2262
2263         alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
2264         mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
2265         if (!mlxsw_sp->ports)
2266                 return -ENOMEM;
2267
2268         for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
2269                 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
2270                                                     &width, &lane);
2271                 if (err)
2272                         goto err_port_module_info_get;
2273                 if (!width)
2274                         continue;
2275                 mlxsw_sp->port_to_module[i] = module;
2276                 err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width,
2277                                            lane);
2278                 if (err)
2279                         goto err_port_create;
2280         }
2281         return 0;
2282
2283 err_port_create:
2284 err_port_module_info_get:
2285         for (i--; i >= 1; i--)
2286                 mlxsw_sp_port_remove(mlxsw_sp, i);
2287         kfree(mlxsw_sp->ports);
2288         return err;
2289 }
2290
2291 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
2292 {
2293         u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
2294
2295         return local_port - offset;
2296 }
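
/* Worked example (assuming MLXSW_SP_PORTS_PER_CLUSTER_MAX is 4): local
 * ports 1..4 form one cluster, 5..8 the next, and so on, e.g. local
 * port 7 gives offset (7 - 1) % 4 = 2 and therefore base port 5.
 */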
2297
2298 static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
2299                                       u8 module, unsigned int count)
2300 {
2301         u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
2302         int err, i;
2303
2304         for (i = 0; i < count; i++) {
2305                 err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
2306                                                width, i * width);
2307                 if (err)
2308                         goto err_port_module_map;
2309         }
2310
2311         for (i = 0; i < count; i++) {
2312                 err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
2313                 if (err)
2314                         goto err_port_swid_set;
2315         }
2316
2317         for (i = 0; i < count; i++) {
2318                 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
2319                                            module, width, i * width);
2320                 if (err)
2321                         goto err_port_create;
2322         }
2323
2324         return 0;
2325
2326 err_port_create:
2327         for (i--; i >= 0; i--)
2328                 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
2329         i = count;
2330 err_port_swid_set:
2331         for (i--; i >= 0; i--)
2332                 __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
2333                                          MLXSW_PORT_SWID_DISABLED_PORT);
2334         i = count;
2335 err_port_module_map:
2336         for (i--; i >= 0; i--)
2337                 mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
2338         return err;
2339 }
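
/* Illustrative split layouts for a 4-lane module at base_port B
 * (MLXSW_PORT_MODULE_MAX_WIDTH == 4): count == 4 creates ports B..B+3
 * with width 1 on lanes 0, 1, 2 and 3; count == 2 creates ports B and
 * B+1 with width 2 on lanes 0 and 2.
 */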
2340
2341 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
2342                                          u8 base_port, unsigned int count)
2343 {
2344         u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
2345         int i;
2346
2347         /* Splitting by four means we need to re-create two ports, otherwise
2348          * only one.
2349          */
2350         count = count / 2;
2351
2352         for (i = 0; i < count; i++) {
2353                 local_port = base_port + i * 2;
2354                 module = mlxsw_sp->port_to_module[local_port];
2355
2356                 mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
2357                                          0);
2358         }
2359
2360         for (i = 0; i < count; i++)
2361                 __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);
2362
2363         for (i = 0; i < count; i++) {
2364                 local_port = base_port + i * 2;
2365                 module = mlxsw_sp->port_to_module[local_port];
2366
2367                 mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
2368                                      width, 0);
2369         }
2370 }
2371
2372 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
2373                                unsigned int count)
2374 {
2375         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2376         struct mlxsw_sp_port *mlxsw_sp_port;
2377         u8 module, cur_width, base_port;
2378         int i;
2379         int err;
2380
2381         mlxsw_sp_port = mlxsw_sp->ports[local_port];
2382         if (!mlxsw_sp_port) {
2383                 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
2384                         local_port);
2385                 return -EINVAL;
2386         }
2387
2388         module = mlxsw_sp_port->mapping.module;
2389         cur_width = mlxsw_sp_port->mapping.width;
2390
2391         if (count != 2 && count != 4) {
2392                 netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
2393                 return -EINVAL;
2394         }
2395
2396         if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
2397                 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
2398                 return -EINVAL;
2399         }
2400
2401         /* Make sure we have enough slave (even) ports for the split. */
2402         if (count == 2) {
2403                 base_port = local_port;
2404                 if (mlxsw_sp->ports[base_port + 1]) {
2405                         netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
2406                         return -EINVAL;
2407                 }
2408         } else {
2409                 base_port = mlxsw_sp_cluster_base_port_get(local_port);
2410                 if (mlxsw_sp->ports[base_port + 1] ||
2411                     mlxsw_sp->ports[base_port + 3]) {
2412                         netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
2413                         return -EINVAL;
2414                 }
2415         }
2416
2417         for (i = 0; i < count; i++)
2418                 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
2419
2420         err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
2421         if (err) {
2422                 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
2423                 goto err_port_split_create;
2424         }
2425
2426         return 0;
2427
2428 err_port_split_create:
2429         mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
2430         return err;
2431 }
2432
2433 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
2434 {
2435         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2436         struct mlxsw_sp_port *mlxsw_sp_port;
2437         u8 cur_width, base_port;
2438         unsigned int count;
2439         int i;
2440
2441         mlxsw_sp_port = mlxsw_sp->ports[local_port];
2442         if (!mlxsw_sp_port) {
2443                 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
2444                         local_port);
2445                 return -EINVAL;
2446         }
2447
2448         if (!mlxsw_sp_port->split) {
2449                 netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
2450                 return -EINVAL;
2451         }
2452
2453         cur_width = mlxsw_sp_port->mapping.width;
2454         count = cur_width == 1 ? 4 : 2;
2455
2456         base_port = mlxsw_sp_cluster_base_port_get(local_port);
2457
2458         /* Determine which ports to remove. */
2459         if (count == 2 && local_port >= base_port + 2)
2460                 base_port = base_port + 2;
2461
2462         for (i = 0; i < count; i++)
2463                 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
2464
2465         mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
2466
2467         return 0;
2468 }
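
/* Worked example: unsplitting local port 3 that was split by two
 * (cur_width == 2): count = 2 and the cluster base is 1, but since
 * 3 >= 1 + 2 the base moves to 3, so ports 3 and 4 are removed and a
 * single full-width port is re-created at local port 3.
 */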
2469
2470 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2471                                      char *pude_pl, void *priv)
2472 {
2473         struct mlxsw_sp *mlxsw_sp = priv;
2474         struct mlxsw_sp_port *mlxsw_sp_port;
2475         enum mlxsw_reg_pude_oper_status status;
2476         u8 local_port;
2477
2478         local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2479         mlxsw_sp_port = mlxsw_sp->ports[local_port];
2480         if (!mlxsw_sp_port)
2481                 return;
2482
2483         status = mlxsw_reg_pude_oper_status_get(pude_pl);
2484         if (status == MLXSW_PORT_OPER_STATUS_UP) {
2485                 netdev_info(mlxsw_sp_port->dev, "link up\n");
2486                 netif_carrier_on(mlxsw_sp_port->dev);
2487         } else {
2488                 netdev_info(mlxsw_sp_port->dev, "link down\n");
2489                 netif_carrier_off(mlxsw_sp_port->dev);
2490         }
2491 }
2492
2493 static struct mlxsw_event_listener mlxsw_sp_pude_event = {
2494         .func = mlxsw_sp_pude_event_func,
2495         .trap_id = MLXSW_TRAP_ID_PUDE,
2496 };
2497
2498 static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
2499                                    enum mlxsw_event_trap_id trap_id)
2500 {
2501         struct mlxsw_event_listener *el;
2502         char hpkt_pl[MLXSW_REG_HPKT_LEN];
2503         int err;
2504
2505         switch (trap_id) {
2506         case MLXSW_TRAP_ID_PUDE:
2507                 el = &mlxsw_sp_pude_event;
2508                 break;
2509         }
2510         err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
2511         if (err)
2512                 return err;
2513
2514         mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
2515         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2516         if (err)
2517                 goto err_event_trap_set;
2518
2519         return 0;
2520
2521 err_event_trap_set:
2522         mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
2523         return err;
2524 }
2525
2526 static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
2527                                       enum mlxsw_event_trap_id trap_id)
2528 {
2529         struct mlxsw_event_listener *el;
2530
2531         switch (trap_id) {
2532         case MLXSW_TRAP_ID_PUDE:
2533                 el = &mlxsw_sp_pude_event;
2534                 break;
2535         }
2536         mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
2537 }
2538
2539 static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
2540                                       void *priv)
2541 {
2542         struct mlxsw_sp *mlxsw_sp = priv;
2543         struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2544         struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
2545
2546         if (unlikely(!mlxsw_sp_port)) {
2547                 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
2548                                      local_port);
2549                 return;
2550         }
2551
2552         skb->dev = mlxsw_sp_port->dev;
2553
2554         pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
2555         u64_stats_update_begin(&pcpu_stats->syncp);
2556         pcpu_stats->rx_packets++;
2557         pcpu_stats->rx_bytes += skb->len;
2558         u64_stats_update_end(&pcpu_stats->syncp);
2559
2560         skb->protocol = eth_type_trans(skb, skb->dev);
2561         netif_receive_skb(skb);
2562 }
2563
2564 static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
2565         {
2566                 .func = mlxsw_sp_rx_listener_func,
2567                 .local_port = MLXSW_PORT_DONT_CARE,
2568                 .trap_id = MLXSW_TRAP_ID_FDB_MC,
2569         },
2570         /* Traps for specific L2 packet types, not trapped as FDB MC */
2571         {
2572                 .func = mlxsw_sp_rx_listener_func,
2573                 .local_port = MLXSW_PORT_DONT_CARE,
2574                 .trap_id = MLXSW_TRAP_ID_STP,
2575         },
2576         {
2577                 .func = mlxsw_sp_rx_listener_func,
2578                 .local_port = MLXSW_PORT_DONT_CARE,
2579                 .trap_id = MLXSW_TRAP_ID_LACP,
2580         },
2581         {
2582                 .func = mlxsw_sp_rx_listener_func,
2583                 .local_port = MLXSW_PORT_DONT_CARE,
2584                 .trap_id = MLXSW_TRAP_ID_EAPOL,
2585         },
2586         {
2587                 .func = mlxsw_sp_rx_listener_func,
2588                 .local_port = MLXSW_PORT_DONT_CARE,
2589                 .trap_id = MLXSW_TRAP_ID_LLDP,
2590         },
2591         {
2592                 .func = mlxsw_sp_rx_listener_func,
2593                 .local_port = MLXSW_PORT_DONT_CARE,
2594                 .trap_id = MLXSW_TRAP_ID_MMRP,
2595         },
2596         {
2597                 .func = mlxsw_sp_rx_listener_func,
2598                 .local_port = MLXSW_PORT_DONT_CARE,
2599                 .trap_id = MLXSW_TRAP_ID_MVRP,
2600         },
2601         {
2602                 .func = mlxsw_sp_rx_listener_func,
2603                 .local_port = MLXSW_PORT_DONT_CARE,
2604                 .trap_id = MLXSW_TRAP_ID_RPVST,
2605         },
2606         {
2607                 .func = mlxsw_sp_rx_listener_func,
2608                 .local_port = MLXSW_PORT_DONT_CARE,
2609                 .trap_id = MLXSW_TRAP_ID_DHCP,
2610         },
2611         {
2612                 .func = mlxsw_sp_rx_listener_func,
2613                 .local_port = MLXSW_PORT_DONT_CARE,
2614                 .trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
2615         },
2616         {
2617                 .func = mlxsw_sp_rx_listener_func,
2618                 .local_port = MLXSW_PORT_DONT_CARE,
2619                 .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
2620         },
2621         {
2622                 .func = mlxsw_sp_rx_listener_func,
2623                 .local_port = MLXSW_PORT_DONT_CARE,
2624                 .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
2625         },
2626         {
2627                 .func = mlxsw_sp_rx_listener_func,
2628                 .local_port = MLXSW_PORT_DONT_CARE,
2629                 .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
2630         },
2631         {
2632                 .func = mlxsw_sp_rx_listener_func,
2633                 .local_port = MLXSW_PORT_DONT_CARE,
2634                 .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
2635         },
2636         {
2637                 .func = mlxsw_sp_rx_listener_func,
2638                 .local_port = MLXSW_PORT_DONT_CARE,
2639                 .trap_id = MLXSW_TRAP_ID_ARPBC,
2640         },
2641         {
2642                 .func = mlxsw_sp_rx_listener_func,
2643                 .local_port = MLXSW_PORT_DONT_CARE,
2644                 .trap_id = MLXSW_TRAP_ID_ARPUC,
2645         },
2646         {
2647                 .func = mlxsw_sp_rx_listener_func,
2648                 .local_port = MLXSW_PORT_DONT_CARE,
2649                 .trap_id = MLXSW_TRAP_ID_IP2ME,
2650         },
2651         {
2652                 .func = mlxsw_sp_rx_listener_func,
2653                 .local_port = MLXSW_PORT_DONT_CARE,
2654                 .trap_id = MLXSW_TRAP_ID_RTR_INGRESS0,
2655         },
2656         {
2657                 .func = mlxsw_sp_rx_listener_func,
2658                 .local_port = MLXSW_PORT_DONT_CARE,
2659                 .trap_id = MLXSW_TRAP_ID_HOST_MISS_IPV4,
2660         },
2661 };
2662
2663 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
2664 {
2665         char htgt_pl[MLXSW_REG_HTGT_LEN];
2666         char hpkt_pl[MLXSW_REG_HPKT_LEN];
2667         int i;
2668         int err;
2669
2670         mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
2671         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
2672         if (err)
2673                 return err;
2674
2675         mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
2676         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
2677         if (err)
2678                 return err;
2679
2680         for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
2681                 err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
2682                                                       &mlxsw_sp_rx_listener[i],
2683                                                       mlxsw_sp);
2684                 if (err)
2685                         goto err_rx_listener_register;
2686
2687                 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
2688                                     mlxsw_sp_rx_listener[i].trap_id);
2689                 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2690                 if (err)
2691                         goto err_rx_trap_set;
2692         }
2693         return 0;
2694
2695 err_rx_trap_set:
2696         mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
2697                                           &mlxsw_sp_rx_listener[i],
2698                                           mlxsw_sp);
2699 err_rx_listener_register:
2700         for (i--; i >= 0; i--) {
2701                 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
2702                                     mlxsw_sp_rx_listener[i].trap_id);
2703                 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2704
2705                 mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
2706                                                   &mlxsw_sp_rx_listener[i],
2707                                                   mlxsw_sp);
2708         }
2709         return err;
2710 }
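
/* Unwind sketch for the registration loop above: if the hpkt write for
 * entry i fails, err_rx_trap_set first unregisters entry i itself; both
 * labels then fall into the loop restoring entries i-1..0 to
 * MLXSW_REG_HPKT_ACTION_DISCARD and unregistering them, mirroring
 * mlxsw_sp_traps_fini() below.
 */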
2711
2712 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
2713 {
2714         char hpkt_pl[MLXSW_REG_HPKT_LEN];
2715         int i;
2716
2717         for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
2718                 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
2719                                     mlxsw_sp_rx_listener[i].trap_id);
2720                 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2721
2722                 mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
2723                                                   &mlxsw_sp_rx_listener[i],
2724                                                   mlxsw_sp);
2725         }
2726 }
2727
2728 static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
2729                                  enum mlxsw_reg_sfgc_type type,
2730                                  enum mlxsw_reg_sfgc_bridge_type bridge_type)
2731 {
2732         enum mlxsw_flood_table_type table_type;
2733         enum mlxsw_sp_flood_table flood_table;
2734         char sfgc_pl[MLXSW_REG_SFGC_LEN];
2735
2736         if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
2737                 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
2738         else
2739                 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
2740
2741         if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
2742                 flood_table = MLXSW_SP_FLOOD_TABLE_UC;
2743         else
2744                 flood_table = MLXSW_SP_FLOOD_TABLE_BM;
2745
2746         mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
2747                             flood_table);
2748         return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
2749 }
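
/* Illustrative SFGC programming for one packet type: unknown unicast in
 * a 802.1Q bridge (MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID) uses the
 * FID-offset table type with MLXSW_SP_FLOOD_TABLE_UC, while broadcast
 * and multicast types share MLXSW_SP_FLOOD_TABLE_BM; VLAN-unaware vFID
 * bridges use the plain FID table type instead.
 */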
2750
2751 static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
2752 {
2753         int type, err;
2754
2755         for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
2756                 if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
2757                         continue;
2758
2759                 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
2760                                             MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
2761                 if (err)
2762                         return err;
2763
2764                 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
2765                                             MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
2766                 if (err)
2767                         return err;
2768         }
2769
2770         return 0;
2771 }
2772
2773 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
2774 {
2775         char slcr_pl[MLXSW_REG_SLCR_LEN];
2776
2777         mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
2778                                      MLXSW_REG_SLCR_LAG_HASH_DMAC |
2779                                      MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
2780                                      MLXSW_REG_SLCR_LAG_HASH_VLANID |
2781                                      MLXSW_REG_SLCR_LAG_HASH_SIP |
2782                                      MLXSW_REG_SLCR_LAG_HASH_DIP |
2783                                      MLXSW_REG_SLCR_LAG_HASH_SPORT |
2784                                      MLXSW_REG_SLCR_LAG_HASH_DPORT |
2785                                      MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
2786         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
2787 }
2788
2789 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
2790                          const struct mlxsw_bus_info *mlxsw_bus_info)
2791 {
2792         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2793         int err;
2794
2795         mlxsw_sp->core = mlxsw_core;
2796         mlxsw_sp->bus_info = mlxsw_bus_info;
2797         INIT_LIST_HEAD(&mlxsw_sp->fids);
2798         INIT_LIST_HEAD(&mlxsw_sp->vfids.list);
2799         INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);
2800
2801         err = mlxsw_sp_base_mac_get(mlxsw_sp);
2802         if (err) {
2803                 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
2804                 return err;
2805         }
2806
2807         err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
2808         if (err) {
2809                 dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
2810                 return err;
2811         }
2812
2813         err = mlxsw_sp_traps_init(mlxsw_sp);
2814         if (err) {
2815                 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
2816                 goto err_rx_listener_register;
2817         }
2818
2819         err = mlxsw_sp_flood_init(mlxsw_sp);
2820         if (err) {
2821                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
2822                 goto err_flood_init;
2823         }
2824
2825         err = mlxsw_sp_buffers_init(mlxsw_sp);
2826         if (err) {
2827                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
2828                 goto err_buffers_init;
2829         }
2830
2831         err = mlxsw_sp_lag_init(mlxsw_sp);
2832         if (err) {
2833                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
2834                 goto err_lag_init;
2835         }
2836
2837         err = mlxsw_sp_switchdev_init(mlxsw_sp);
2838         if (err) {
2839                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
2840                 goto err_switchdev_init;
2841         }
2842
2843         err = mlxsw_sp_router_init(mlxsw_sp);
2844         if (err) {
2845                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
2846                 goto err_router_init;
2847         }
2848
2849         err = mlxsw_sp_span_init(mlxsw_sp);
2850         if (err) {
2851                 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
2852                 goto err_span_init;
2853         }
2854
2855         err = mlxsw_sp_ports_create(mlxsw_sp);
2856         if (err) {
2857                 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
2858                 goto err_ports_create;
2859         }
2860
2861         return 0;
2862
2863 err_ports_create:
2864         mlxsw_sp_span_fini(mlxsw_sp);
2865 err_span_init:
2866         mlxsw_sp_router_fini(mlxsw_sp);
2867 err_router_init:
2868         mlxsw_sp_switchdev_fini(mlxsw_sp);
2869 err_switchdev_init:
2870 err_lag_init:
2871         mlxsw_sp_buffers_fini(mlxsw_sp);
2872 err_buffers_init:
2873 err_flood_init:
2874         mlxsw_sp_traps_fini(mlxsw_sp);
2875 err_rx_listener_register:
2876         mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
2877         return err;
2878 }
2879
2880 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
2881 {
2882         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2883         int i;
2884
2885         mlxsw_sp_ports_remove(mlxsw_sp);
2886         mlxsw_sp_span_fini(mlxsw_sp);
2887         mlxsw_sp_router_fini(mlxsw_sp);
2888         mlxsw_sp_switchdev_fini(mlxsw_sp);
2889         mlxsw_sp_buffers_fini(mlxsw_sp);
2890         mlxsw_sp_traps_fini(mlxsw_sp);
2891         mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
2892         WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
2893         WARN_ON(!list_empty(&mlxsw_sp->fids));
2894         for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
2895                 WARN_ON_ONCE(mlxsw_sp->rifs[i]);
2896 }
2897
2898 static struct mlxsw_config_profile mlxsw_sp_config_profile = {
2899         .used_max_vepa_channels         = 1,
2900         .max_vepa_channels              = 0,
2901         .used_max_lag                   = 1,
2902         .max_lag                        = MLXSW_SP_LAG_MAX,
2903         .used_max_port_per_lag          = 1,
2904         .max_port_per_lag               = MLXSW_SP_PORT_PER_LAG_MAX,
2905         .used_max_mid                   = 1,
2906         .max_mid                        = MLXSW_SP_MID_MAX,
2907         .used_max_pgt                   = 1,
2908         .max_pgt                        = 0,
2909         .used_max_system_port           = 1,
2910         .max_system_port                = 64,
2911         .used_max_vlan_groups           = 1,
2912         .max_vlan_groups                = 127,
2913         .used_max_regions               = 1,
2914         .max_regions                    = 400,
2915         .used_flood_tables              = 1,
2916         .used_flood_mode                = 1,
2917         .flood_mode                     = 3,
2918         .max_fid_offset_flood_tables    = 2,
2919         .fid_offset_flood_table_size    = VLAN_N_VID - 1,
2920         .max_fid_flood_tables           = 2,
2921         .fid_flood_table_size           = MLXSW_SP_VFID_MAX,
2922         .used_max_ib_mc                 = 1,
2923         .max_ib_mc                      = 0,
2924         .used_max_pkey                  = 1,
2925         .max_pkey                       = 0,
2926         .used_kvd_sizes                 = 1,
2927         .kvd_linear_size                = MLXSW_SP_KVD_LINEAR_SIZE,
2928         .kvd_hash_single_size           = MLXSW_SP_KVD_HASH_SINGLE_SIZE,
2929         .kvd_hash_double_size           = MLXSW_SP_KVD_HASH_DOUBLE_SIZE,
2930         .swid_config                    = {
2931                 {
2932                         .used_type      = 1,
2933                         .type           = MLXSW_PORT_SWID_TYPE_ETH,
2934                 }
2935         },
2936         .resource_query_enable          = 1,
2937 };
2938
2939 static struct mlxsw_driver mlxsw_sp_driver = {
2940         .kind                           = MLXSW_DEVICE_KIND_SPECTRUM,
2941         .owner                          = THIS_MODULE,
2942         .priv_size                      = sizeof(struct mlxsw_sp),
2943         .init                           = mlxsw_sp_init,
2944         .fini                           = mlxsw_sp_fini,
2945         .port_split                     = mlxsw_sp_port_split,
2946         .port_unsplit                   = mlxsw_sp_port_unsplit,
2947         .sb_pool_get                    = mlxsw_sp_sb_pool_get,
2948         .sb_pool_set                    = mlxsw_sp_sb_pool_set,
2949         .sb_port_pool_get               = mlxsw_sp_sb_port_pool_get,
2950         .sb_port_pool_set               = mlxsw_sp_sb_port_pool_set,
2951         .sb_tc_pool_bind_get            = mlxsw_sp_sb_tc_pool_bind_get,
2952         .sb_tc_pool_bind_set            = mlxsw_sp_sb_tc_pool_bind_set,
2953         .sb_occ_snapshot                = mlxsw_sp_sb_occ_snapshot,
2954         .sb_occ_max_clear               = mlxsw_sp_sb_occ_max_clear,
2955         .sb_occ_port_pool_get           = mlxsw_sp_sb_occ_port_pool_get,
2956         .sb_occ_tc_port_bind_get        = mlxsw_sp_sb_occ_tc_port_bind_get,
2957         .txhdr_construct                = mlxsw_sp_txhdr_construct,
2958         .txhdr_len                      = MLXSW_TXHDR_LEN,
2959         .profile                        = &mlxsw_sp_config_profile,
2960 };
2961
2962 static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
2963 {
2964         return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
2965 }
2966
2967 static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
2968 {
2969         struct net_device *lower_dev;
2970         struct list_head *iter;
2971
2972         if (mlxsw_sp_port_dev_check(dev))
2973                 return netdev_priv(dev);
2974
2975         netdev_for_each_all_lower_dev(dev, lower_dev, iter) {
2976                 if (mlxsw_sp_port_dev_check(lower_dev))
2977                         return netdev_priv(lower_dev);
2978         }
2979         return NULL;
2980 }
2981
2982 static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
2983 {
2984         struct mlxsw_sp_port *mlxsw_sp_port;
2985
2986         mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
2987         return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
2988 }
2989
2990 static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
2991 {
2992         struct net_device *lower_dev;
2993         struct list_head *iter;
2994
2995         if (mlxsw_sp_port_dev_check(dev))
2996                 return netdev_priv(dev);
2997
2998         netdev_for_each_all_lower_dev_rcu(dev, lower_dev, iter) {
2999                 if (mlxsw_sp_port_dev_check(lower_dev))
3000                         return netdev_priv(lower_dev);
3001         }
3002         return NULL;
3003 }
3004
3005 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
3006 {
3007         struct mlxsw_sp_port *mlxsw_sp_port;
3008
3009         rcu_read_lock();
3010         mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
3011         if (mlxsw_sp_port)
3012                 dev_hold(mlxsw_sp_port->dev);
3013         rcu_read_unlock();
3014         return mlxsw_sp_port;
3015 }
3016
3017 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
3018 {
3019         dev_put(mlxsw_sp_port->dev);
3020 }
3021
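/* Decide whether an inetaddr event should actually configure or
 * unconfigure a RIF. The RIF is reference counted per address:
 * NETDEV_UP creates it only on first use, NETDEV_DOWN destroys it
 * only when the last address is gone.
 */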
3022 static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r,
3023                                        unsigned long event)
3024 {
3025         switch (event) {
3026         case NETDEV_UP:
3027                 if (!r)
3028                         return true;
3029                 r->ref_count++;
3030                 return false;
3031         case NETDEV_DOWN:
3032                 if (r && --r->ref_count == 0)
3033                         return true;
3034                 /* It is possible we already removed the RIF ourselves
3035                  * if it was assigned to a netdev that is now a bridge
3036                  * or LAG slave.
3037                  */
3038                 return false;
3039         }
3040
3041         return false;
3042 }
3043
3044 static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
3045 {
3046         int i;
3047
3048         for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
3049                 if (!mlxsw_sp->rifs[i])
3050                         return i;
3051
3052         return MLXSW_SP_RIF_MAX;
3053 }
3054
3055 static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
3056                                            bool *p_lagged, u16 *p_system_port)
3057 {
3058         u8 local_port = mlxsw_sp_vport->local_port;
3059
3060         *p_lagged = mlxsw_sp_vport->lagged;
3061         *p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
3062 }
3063
3064 static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
3065                                     struct net_device *l3_dev, u16 rif,
3066                                     bool create)
3067 {
3068         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
3069         bool lagged;
3070         char ritr_pl[MLXSW_REG_RITR_LEN];
3071         u16 system_port;
3072
3073         mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif,
3074                             l3_dev->mtu, l3_dev->dev_addr);
3075
3076         mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
3077         mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
3078                                   mlxsw_sp_vport_vid_get(mlxsw_sp_vport));
3079
3080         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3081 }
3082
3083 static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
3084
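/* Allocate the FID structure backing a Sub-port RIF (an rFID). The
 * leave() callback is how FID users evict a vPort from the FID when
 * the RIF stops being valid, e.g. when the underlying port is
 * enslaved to a bridge or a LAG.
 */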
3085 static struct mlxsw_sp_fid *
3086 mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
3087 {
3088         struct mlxsw_sp_fid *f;
3089
3090         f = kzalloc(sizeof(*f), GFP_KERNEL);
3091         if (!f)
3092                 return NULL;
3093
3094         f->leave = mlxsw_sp_vport_rif_sp_leave;
3095         f->ref_count = 0;
3096         f->dev = l3_dev;
3097         f->fid = fid;
3098
3099         return f;
3100 }
3101
3102 static struct mlxsw_sp_rif *
3103 mlxsw_sp_rif_alloc(u16 rif, struct net_device *l3_dev, struct mlxsw_sp_fid *f)
3104 {
3105         struct mlxsw_sp_rif *r;
3106
3107         r = kzalloc(sizeof(*r), GFP_KERNEL);
3108         if (!r)
3109                 return NULL;
3110
3111         ether_addr_copy(r->addr, l3_dev->dev_addr);
3112         r->mtu = l3_dev->mtu;
3113         r->ref_count = 1;
3114         r->dev = l3_dev;
3115         r->rif = rif;
3116         r->f = f;
3117
3118         return r;
3119 }
3120
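/* Create a Sub-port RIF: reserve a free RIF index, program it via
 * RITR, install an FDB entry so traffic to the interface's MAC is
 * handed to the router, and allocate the rFID / RIF bookkeeping.
 * Every step is unwound in reverse order on failure.
 */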
3121 static struct mlxsw_sp_rif *
3122 mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
3123                              struct net_device *l3_dev)
3124 {
3125         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
3126         struct mlxsw_sp_fid *f;
3127         struct mlxsw_sp_rif *r;
3128         u16 fid, rif;
3129         int err;
3130
3131         rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
3132         if (rif == MLXSW_SP_RIF_MAX)
3133                 return ERR_PTR(-ERANGE);
3134
3135         err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true);
3136         if (err)
3137                 return ERR_PTR(err);
3138
3139         fid = mlxsw_sp_rif_sp_to_fid(rif);
3140         err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
3141         if (err)
3142                 goto err_rif_fdb_op;
3143
3144         f = mlxsw_sp_rfid_alloc(fid, l3_dev);
3145         if (!f) {
3146                 err = -ENOMEM;
3147                 goto err_rfid_alloc;
3148         }
3149
3150         r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
3151         if (!r) {
3152                 err = -ENOMEM;
3153                 goto err_rif_alloc;
3154         }
3155
3156         f->r = r;
3157         mlxsw_sp->rifs[rif] = r;
3158
3159         return r;
3160
3161 err_rif_alloc:
3162         kfree(f);
3163 err_rfid_alloc:
3164         mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
3165 err_rif_fdb_op:
3166         mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
3167         return ERR_PTR(err);
3168 }
3169
3170 static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
3171                                           struct mlxsw_sp_rif *r)
3172 {
3173         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
3174         struct net_device *l3_dev = r->dev;
3175         struct mlxsw_sp_fid *f = r->f;
3176         u16 fid = f->fid;
3177         u16 rif = r->rif;
3178
3179         mlxsw_sp->rifs[rif] = NULL;
3180         f->r = NULL;
3181
3182         kfree(r);
3183
3184         kfree(f);
3185
3186         mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
3187
3188         mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
3189 }
3190
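/* Join a vPort to the RIF of an L3 device, creating the RIF on first
 * use. Subsequent joins against the same netdev only take another
 * reference on the existing FID.
 */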
3191 static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
3192                                       struct net_device *l3_dev)
3193 {
3194         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
3195         struct mlxsw_sp_rif *r;
3196
3197         r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
3198         if (!r) {
3199                 r = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
3200                 if (IS_ERR(r))
3201                         return PTR_ERR(r);
3202         }
3203
3204         mlxsw_sp_vport_fid_set(mlxsw_sp_vport, r->f);
3205         r->f->ref_count++;
3206
3207         netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", r->f->fid);
3208
3209         return 0;
3210 }
3211
3212 static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
3213 {
3214         struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
3215
3216         netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
3217
3218         mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
3219         if (--f->ref_count == 0)
3220                 mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->r);
3221 }
3222
3223 static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
3224                                          struct net_device *port_dev,
3225                                          unsigned long event, u16 vid)
3226 {
3227         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
3228         struct mlxsw_sp_port *mlxsw_sp_vport;
3229
3230         mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
3231         if (WARN_ON(!mlxsw_sp_vport))
3232                 return -EINVAL;
3233
3234         switch (event) {
3235         case NETDEV_UP:
3236                 return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
3237         case NETDEV_DOWN:
3238                 mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
3239                 break;
3240         }
3241
3242         return 0;
3243 }
3244
3245 static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
3246                                         unsigned long event)
3247 {
3248         if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev))
3249                 return 0;
3250
3251         return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
3252 }
3253
3254 static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
3255                                          struct net_device *lag_dev,
3256                                          unsigned long event, u16 vid)
3257 {
3258         struct net_device *port_dev;
3259         struct list_head *iter;
3260         int err;
3261
3262         netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
3263                 if (mlxsw_sp_port_dev_check(port_dev)) {
3264                         err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
3265                                                             event, vid);
3266                         if (err)
3267                                 return err;
3268                 }
3269         }
3270
3271         return 0;
3272 }
3273
3274 static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
3275                                        unsigned long event)
3276 {
3277         if (netif_is_bridge_port(lag_dev))
3278                 return 0;
3279
3280         return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
3281 }
3282
3283 static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
3284                                                     struct net_device *l3_dev)
3285 {
3286         u16 fid;
3287
3288         if (is_vlan_dev(l3_dev))
3289                 fid = vlan_dev_vlan_id(l3_dev);
3290         else if (mlxsw_sp->master_bridge.dev == l3_dev)
3291                 fid = 1;
3292         else
3293                 return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);
3294
3295         return mlxsw_sp_fid_find(mlxsw_sp, fid);
3296 }
3297
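/* vFIDs are realized as FID-based router interfaces, while the FIDs
 * of the VLAN-aware bridge use VLAN-based interfaces.
 */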
3298 static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
3299 {
3300         if (mlxsw_sp_fid_is_vfid(fid))
3301                 return MLXSW_REG_RITR_FID_IF;
3302         else
3303                 return MLXSW_REG_RITR_VLAN_IF;
3304 }
3305
3306 static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp,
3307                                   struct net_device *l3_dev,
3308                                   u16 fid, u16 rif,
3309                                   bool create)
3310 {
3311         enum mlxsw_reg_ritr_if_type rif_type;
3312         char ritr_pl[MLXSW_REG_RITR_LEN];
3313
3314         rif_type = mlxsw_sp_rif_type_get(fid);
3315         mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, l3_dev->mtu,
3316                             l3_dev->dev_addr);
3317         mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);
3318
3319         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3320 }
3321
3322 static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
3323                                       struct net_device *l3_dev,
3324                                       struct mlxsw_sp_fid *f)
3325 {
3326         struct mlxsw_sp_rif *r;
3327         u16 rif;
3328         int err;
3329
3330         rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
3331         if (rif == MLXSW_SP_RIF_MAX)
3332                 return -ERANGE;
3333
3334         err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true);
3335         if (err)
3336                 return err;
3337
3338         err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
3339         if (err)
3340                 goto err_rif_fdb_op;
3341
3342         r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
3343         if (!r) {
3344                 err = -ENOMEM;
3345                 goto err_rif_alloc;
3346         }
3347
3348         f->r = r;
3349         mlxsw_sp->rifs[rif] = r;
3350
3351         netdev_dbg(l3_dev, "RIF=%d created\n", rif);
3352
3353         return 0;
3354
3355 err_rif_alloc:
3356         mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
3357 err_rif_fdb_op:
3358         mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
3359         return err;
3360 }
3361
3362 void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
3363                                  struct mlxsw_sp_rif *r)
3364 {
3365         struct net_device *l3_dev = r->dev;
3366         struct mlxsw_sp_fid *f = r->f;
3367         u16 rif = r->rif;
3368
3369         mlxsw_sp->rifs[rif] = NULL;
3370         f->r = NULL;
3371
3372         kfree(r);
3373
3374         mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
3375
3376         mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
3377
3378         netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif);
3379 }
3380
3381 static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
3382                                           struct net_device *br_dev,
3383                                           unsigned long event)
3384 {
3385         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
3386         struct mlxsw_sp_fid *f;
3387
3388         /* The FID is an actual FID if the L3 device is the VLAN-aware
3389          * bridge or a VLAN device on top of it. Otherwise, the L3
3390          * device is a VLAN-unaware bridge and we get a vFID.
3391          */
3392         f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
3393         if (WARN_ON(!f))
3394                 return -EINVAL;
3395
3396         switch (event) {
3397         case NETDEV_UP:
3398                 return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
3399         case NETDEV_DOWN:
3400                 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
3401                 break;
3402         }
3403
3404         return 0;
3405 }
3406
3407 static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
3408                                         unsigned long event)
3409 {
3410         struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
3411         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
3412         u16 vid = vlan_dev_vlan_id(vlan_dev);
3413
3414         if (mlxsw_sp_port_dev_check(real_dev))
3415                 return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
3416                                                      vid);
3417         else if (netif_is_lag_master(real_dev))
3418                 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
3419                                                      vid);
3420         else if (netif_is_bridge_master(real_dev) &&
3421                  mlxsw_sp->master_bridge.dev == real_dev)
3422                 return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
3423                                                       event);
3424
3425         return 0;
3426 }
3427
3428 static int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
3429                                    unsigned long event, void *ptr)
3430 {
3431         struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
3432         struct net_device *dev = ifa->ifa_dev->dev;
3433         struct mlxsw_sp *mlxsw_sp;
3434         struct mlxsw_sp_rif *r;
3435         int err = 0;
3436
3437         mlxsw_sp = mlxsw_sp_lower_get(dev);
3438         if (!mlxsw_sp)
3439                 goto out;
3440
3441         r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3442         if (!mlxsw_sp_rif_should_config(r, event))
3443                 goto out;
3444
3445         if (mlxsw_sp_port_dev_check(dev))
3446                 err = mlxsw_sp_inetaddr_port_event(dev, event);
3447         else if (netif_is_lag_master(dev))
3448                 err = mlxsw_sp_inetaddr_lag_event(dev, event);
3449         else if (netif_is_bridge_master(dev))
3450                 err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
3451         else if (is_vlan_dev(dev))
3452                 err = mlxsw_sp_inetaddr_vlan_event(dev, event);
3453
3454 out:
3455         return notifier_from_errno(err);
3456 }
3457
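/* Update the MAC address and MTU of an existing RIF with a
 * read-modify-write of RITR: the register is queried first, so all
 * other fields are written back unchanged.
 */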
3458 static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif,
3459                              const char *mac, int mtu)
3460 {
3461         char ritr_pl[MLXSW_REG_RITR_LEN];
3462         int err;
3463
3464         mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
3465         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3466         if (err)
3467                 return err;
3468
3469         mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
3470         mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
3471         mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
3472         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3473 }
3474
3475 static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
3476 {
3477         struct mlxsw_sp *mlxsw_sp;
3478         struct mlxsw_sp_rif *r;
3479         int err;
3480
3481         mlxsw_sp = mlxsw_sp_lower_get(dev);
3482         if (!mlxsw_sp)
3483                 return 0;
3484
3485         r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3486         if (!r)
3487                 return 0;
3488
3489         err = mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, false);
3490         if (err)
3491                 return err;
3492
3493         err = mlxsw_sp_rif_edit(mlxsw_sp, r->rif, dev->dev_addr, dev->mtu);
3494         if (err)
3495                 goto err_rif_edit;
3496
3497         err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, r->f->fid, true);
3498         if (err)
3499                 goto err_rif_fdb_op;
3500
3501         ether_addr_copy(r->addr, dev->dev_addr);
3502         r->mtu = dev->mtu;
3503
3504         netdev_dbg(dev, "Updated RIF=%d\n", r->rif);
3505
3506         return 0;
3507
3508 err_rif_fdb_op:
3509         mlxsw_sp_rif_edit(mlxsw_sp, r->rif, r->addr, r->mtu);
3510 err_rif_edit:
3511         mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, true);
3512         return err;
3513 }
3514
3515 static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
3516                                          u16 fid)
3517 {
3518         if (mlxsw_sp_fid_is_vfid(fid))
3519                 return mlxsw_sp_port_vport_find_by_fid(lag_port, fid);
3520         else
3521                 return test_bit(fid, lag_port->active_vlans);
3522 }
3523
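/* FDB entries learned on a LAG point at the LAG ID rather than at a
 * port, so they may only be flushed when the leaving port is the last
 * LAG member still mapped to the FID.
 */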
3524 static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
3525                                            u16 fid)
3526 {
3527         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3528         u8 local_port = mlxsw_sp_port->local_port;
3529         u16 lag_id = mlxsw_sp_port->lag_id;
3530         int i, count = 0;
3531
3532         if (!mlxsw_sp_port->lagged)
3533                 return true;
3534
3535         for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
3536                 struct mlxsw_sp_port *lag_port;
3537
3538                 lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
3539                 if (!lag_port || lag_port->local_port == local_port)
3540                         continue;
3541                 if (mlxsw_sp_lag_port_fid_member(lag_port, fid))
3542                         count++;
3543         }
3544
3545         return !count;
3546 }
3547
3548 static int
3549 mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
3550                                     u16 fid)
3551 {
3552         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3553         char sfdf_pl[MLXSW_REG_SFDF_LEN];
3554
3555         mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
3556         mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
3557         mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
3558                                                 mlxsw_sp_port->local_port);
3559
3560         netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n",
3561                    mlxsw_sp_port->local_port, fid);
3562
3563         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
3564 }
3565
3566 static int
3567 mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
3568                                       u16 fid)
3569 {
3570         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3571         char sfdf_pl[MLXSW_REG_SFDF_LEN];
3572
3573         mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
3574         mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
3575         mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
3576
3577         netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n",
3578                    mlxsw_sp_port->lag_id, fid);
3579
3580         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
3581 }
3582
3583 int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
3584 {
3585         if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid))
3586                 return 0;
3587
3588         if (mlxsw_sp_port->lagged)
3589                 return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port,
3590                                                              fid);
3591         else
3592                 return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid);
3593 }
3594
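/* Invoked when the last port leaves the master bridge. Any FID still
 * listed at this point is expected to hold exactly the one reference
 * taken by its VLAN upper; drop it and destroy the FID, warning if
 * the accounting says otherwise.
 */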
3595 static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp *mlxsw_sp)
3596 {
3597         struct mlxsw_sp_fid *f, *tmp;
3598
3599         list_for_each_entry_safe(f, tmp, &mlxsw_sp->fids, list)
3600                 if (--f->ref_count == 0)
3601                         mlxsw_sp_fid_destroy(mlxsw_sp, f);
3602                 else
3603                         WARN_ON_ONCE(1);
3604 }
3605
3606 static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
3607                                          struct net_device *br_dev)
3608 {
3609         return !mlxsw_sp->master_bridge.dev ||
3610                mlxsw_sp->master_bridge.dev == br_dev;
3611 }
3612
3613 static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
3614                                        struct net_device *br_dev)
3615 {
3616         mlxsw_sp->master_bridge.dev = br_dev;
3617         mlxsw_sp->master_bridge.ref_count++;
3618 }
3619
3620 static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
3621 {
3622         if (--mlxsw_sp->master_bridge.ref_count == 0) {
3623                 mlxsw_sp->master_bridge.dev = NULL;
3624                 /* It's possible upper VLAN devices are still holding
3625                  * references to underlying FIDs. Drop the reference
3626                  * and release the resources if it was the last one.
3627                  * If it wasn't, then something bad happened.
3628                  */
3629                 mlxsw_sp_master_bridge_gone_sync(mlxsw_sp);
3630         }
3631 }
3632
3633 static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
3634                                      struct net_device *br_dev)
3635 {
3636         struct net_device *dev = mlxsw_sp_port->dev;
3637         int err;
3638
3639         /* When a port is not bridged, untagged packets are tagged with
3640          * PVID=VID=1, thereby creating an implicit VLAN interface in
3641          * the device. Remove it and let the bridge code take care of
3642          * its own VLANs.
3643          */
3644         err = mlxsw_sp_port_kill_vid(dev, 0, 1);
3645         if (err)
3646                 return err;
3647
3648         mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev);
3649
3650         mlxsw_sp_port->learning = 1;
3651         mlxsw_sp_port->learning_sync = 1;
3652         mlxsw_sp_port->uc_flood = 1;
3653         mlxsw_sp_port->bridged = 1;
3654
3655         return 0;
3656 }
3657
3658 static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
3659 {
3660         struct net_device *dev = mlxsw_sp_port->dev;
3661
3662         mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
3663
3664         mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp);
3665
3666         mlxsw_sp_port->learning = 0;
3667         mlxsw_sp_port->learning_sync = 0;
3668         mlxsw_sp_port->uc_flood = 0;
3669         mlxsw_sp_port->bridged = 0;
3670
3671         /* Add the implicit VLAN interface back to the device, so that
3672          * untagged packets will be classified to the default vFID.
3673          */
3674         mlxsw_sp_port_add_vid(dev, 0, 1);
3675 }
3676
3677 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3678 {
3679         char sldr_pl[MLXSW_REG_SLDR_LEN];
3680
3681         mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
3682         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3683 }
3684
3685 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3686 {
3687         char sldr_pl[MLXSW_REG_SLDR_LEN];
3688
3689         mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
3690         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3691 }
3692
3693 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3694                                      u16 lag_id, u8 port_index)
3695 {
3696         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3697         char slcor_pl[MLXSW_REG_SLCOR_LEN];
3698
3699         mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
3700                                       lag_id, port_index);
3701         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3702 }
3703
3704 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3705                                         u16 lag_id)
3706 {
3707         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3708         char slcor_pl[MLXSW_REG_SLCOR_LEN];
3709
3710         mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
3711                                          lag_id);
3712         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3713 }
3714
3715 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
3716                                         u16 lag_id)
3717 {
3718         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3719         char slcor_pl[MLXSW_REG_SLCOR_LEN];
3720
3721         mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
3722                                         lag_id);
3723         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3724 }
3725
3726 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
3727                                          u16 lag_id)
3728 {
3729         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3730         char slcor_pl[MLXSW_REG_SLCOR_LEN];
3731
3732         mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
3733                                          lag_id);
3734         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3735 }
3736
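/* Resolve the LAG ID for a LAG netdev: reuse the ID already bound to
 * the device if it is referenced, otherwise hand out the first free
 * one. -EBUSY means all MLXSW_SP_LAG_MAX IDs are in use.
 */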
3737 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3738                                   struct net_device *lag_dev,
3739                                   u16 *p_lag_id)
3740 {
3741         struct mlxsw_sp_upper *lag;
3742         int free_lag_id = -1;
3743         int i;
3744
3745         for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
3746                 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
3747                 if (lag->ref_count) {
3748                         if (lag->dev == lag_dev) {
3749                                 *p_lag_id = i;
3750                                 return 0;
3751                         }
3752                 } else if (free_lag_id < 0) {
3753                         free_lag_id = i;
3754                 }
3755         }
3756         if (free_lag_id < 0)
3757                 return -EBUSY;
3758         *p_lag_id = free_lag_id;
3759         return 0;
3760 }
3761
3762 static bool
3763 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
3764                           struct net_device *lag_dev,
3765                           struct netdev_lag_upper_info *lag_upper_info)
3766 {
3767         u16 lag_id;
3768
3769         if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
3770                 return false;
3771         if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
3772                 return false;
3773         return true;
3774 }
3775
3776 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3777                                        u16 lag_id, u8 *p_port_index)
3778 {
3779         int i;
3780
3781         for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
3782                 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
3783                         *p_port_index = i;
3784                         return 0;
3785                 }
3786         }
3787         return -EBUSY;
3788 }
3789
3790 static void
3791 mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
3792                                   u16 lag_id)
3793 {
3794         struct mlxsw_sp_port *mlxsw_sp_vport;
3795         struct mlxsw_sp_fid *f;
3796
3797         mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
3798         if (WARN_ON(!mlxsw_sp_vport))
3799                 return;
3800
3801         /* If the vPort is assigned a RIF, then leave it, since the RIF
3802          * is no longer valid once the port is enslaved to a LAG.
3803          */
3804         f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
3805         if (f)
3806                 f->leave(mlxsw_sp_vport);
3807
3808         mlxsw_sp_vport->lag_id = lag_id;
3809         mlxsw_sp_vport->lagged = 1;
3810 }
3811
3812 static void
3813 mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port)
3814 {
3815         struct mlxsw_sp_port *mlxsw_sp_vport;
3816         struct mlxsw_sp_fid *f;
3817
3818         mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
3819         if (WARN_ON(!mlxsw_sp_vport))
3820                 return;
3821
3822         f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
3823         if (f)
3824                 f->leave(mlxsw_sp_vport);
3825
3826         mlxsw_sp_vport->lagged = 0;
3827 }
3828
3829 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
3830                                   struct net_device *lag_dev)
3831 {
3832         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3833         struct mlxsw_sp_upper *lag;
3834         u16 lag_id;
3835         u8 port_index;
3836         int err;
3837
3838         err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
3839         if (err)
3840                 return err;
3841         lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
3842         if (!lag->ref_count) {
3843                 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
3844                 if (err)
3845                         return err;
3846                 lag->dev = lag_dev;
3847         }
3848
3849         err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
3850         if (err)
3851                 return err;
3852         err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
3853         if (err)
3854                 goto err_col_port_add;
3855         err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
3856         if (err)
3857                 goto err_col_port_enable;
3858
3859         mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
3860                                    mlxsw_sp_port->local_port);
3861         mlxsw_sp_port->lag_id = lag_id;
3862         mlxsw_sp_port->lagged = 1;
3863         lag->ref_count++;
3864
3865         mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_id);
3866
3867         return 0;
3868
3869 err_col_port_enable:
3870         mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
3871 err_col_port_add:
3872         if (!lag->ref_count)
3873                 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
3874         return err;
3875 }
3876
3877 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
3878                                     struct net_device *lag_dev)
3879 {
3880         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3881         u16 lag_id = mlxsw_sp_port->lag_id;
3882         struct mlxsw_sp_upper *lag;
3883
3884         if (!mlxsw_sp_port->lagged)
3885                 return;
3886         lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
3887         WARN_ON(lag->ref_count == 0);
3888
3889         mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
3890         mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
3891
3892         if (mlxsw_sp_port->bridged) {
3893                 mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
3894                 mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
3895         }
3896
3897         if (lag->ref_count == 1)
3898                 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
3899
3900         mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
3901                                      mlxsw_sp_port->local_port);
3902         mlxsw_sp_port->lagged = 0;
3903         lag->ref_count--;
3904
3905         mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port);
3906 }
3907
3908 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3909                                       u16 lag_id)
3910 {
3911         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3912         char sldr_pl[MLXSW_REG_SLDR_LEN];
3913
3914         mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
3915                                          mlxsw_sp_port->local_port);
3916         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3917 }
3918
3919 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3920                                          u16 lag_id)
3921 {
3922         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3923         char sldr_pl[MLXSW_REG_SLDR_LEN];
3924
3925         mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
3926                                             mlxsw_sp_port->local_port);
3927         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3928 }
3929
3930 static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
3931                                        bool lag_tx_enabled)
3932 {
3933         if (lag_tx_enabled)
3934                 return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
3935                                                   mlxsw_sp_port->lag_id);
3936         else
3937                 return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
3938                                                      mlxsw_sp_port->lag_id);
3939 }
3940
3941 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
3942                                      struct netdev_lag_lower_state_info *info)
3943 {
3944         return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
3945 }
3946
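/* A VLAN upper of a port is backed by the vPort created for that VID:
 * linking re-points the vPort's dev at the VLAN device, unlinking
 * points it back at the underlying port.
 */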
3947 static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
3948                                    struct net_device *vlan_dev)
3949 {
3950         struct mlxsw_sp_port *mlxsw_sp_vport;
3951         u16 vid = vlan_dev_vlan_id(vlan_dev);
3952
3953         mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
3954         if (WARN_ON(!mlxsw_sp_vport))
3955                 return -EINVAL;
3956
3957         mlxsw_sp_vport->dev = vlan_dev;
3958
3959         return 0;
3960 }
3961
3962 static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
3963                                       struct net_device *vlan_dev)
3964 {
3965         struct mlxsw_sp_port *mlxsw_sp_vport;
3966         u16 vid = vlan_dev_vlan_id(vlan_dev);
3967
3968         mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
3969         if (WARN_ON(!mlxsw_sp_vport))
3970                 return;
3971
3972         mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
3973 }
3974
3975 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
3976                                                unsigned long event, void *ptr)
3977 {
3978         struct netdev_notifier_changeupper_info *info;
3979         struct mlxsw_sp_port *mlxsw_sp_port;
3980         struct net_device *upper_dev;
3981         struct mlxsw_sp *mlxsw_sp;
3982         int err = 0;
3983
3984         mlxsw_sp_port = netdev_priv(dev);
3985         mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3986         info = ptr;
3987
3988         switch (event) {
3989         case NETDEV_PRECHANGEUPPER:
3990                 upper_dev = info->upper_dev;
3991                 if (!is_vlan_dev(upper_dev) &&
3992                     !netif_is_lag_master(upper_dev) &&
3993                     !netif_is_bridge_master(upper_dev))
3994                         return -EINVAL;
3995                 if (!info->linking)
3996                         break;
3997                 /* A HW limitation forbids enslaving a port to multiple bridges. */
3998                 if (netif_is_bridge_master(upper_dev) &&
3999                     !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
4000                         return -EINVAL;
4001                 if (netif_is_lag_master(upper_dev) &&
4002                     !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
4003                                                info->upper_info))
4004                         return -EINVAL;
4005                 if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
4006                         return -EINVAL;
4007                 if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
4008                     !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
4009                         return -EINVAL;
4010                 break;
4011         case NETDEV_CHANGEUPPER:
4012                 upper_dev = info->upper_dev;
4013                 if (is_vlan_dev(upper_dev)) {
4014                         if (info->linking)
4015                                 err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
4016                                                               upper_dev);
4017                         else
4018                                 mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
4019                                                           upper_dev);
4020                 } else if (netif_is_bridge_master(upper_dev)) {
4021                         if (info->linking)
4022                                 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
4023                                                                 upper_dev);
4024                         else
4025                                 mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
4026                 } else if (netif_is_lag_master(upper_dev)) {
4027                         if (info->linking)
4028                                 err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
4029                                                              upper_dev);
4030                         else
4031                                 mlxsw_sp_port_lag_leave(mlxsw_sp_port,
4032                                                         upper_dev);
4033                 } else {
4034                         err = -EINVAL;
4035                         WARN_ON(1);
4036                 }
4037                 break;
4038         }
4039
4040         return err;
4041 }
4042
4043 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
4044                                                unsigned long event, void *ptr)
4045 {
4046         struct netdev_notifier_changelowerstate_info *info;
4047         struct mlxsw_sp_port *mlxsw_sp_port;
4048         int err;
4049
4050         mlxsw_sp_port = netdev_priv(dev);
4051         info = ptr;
4052
4053         switch (event) {
4054         case NETDEV_CHANGELOWERSTATE:
4055                 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
4056                         err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
4057                                                         info->lower_state_info);
4058                         if (err)
4059                                 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
4060                 }
4061                 break;
4062         }
4063
4064         return 0;
4065 }
4066
4067 static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
4068                                          unsigned long event, void *ptr)
4069 {
4070         switch (event) {
4071         case NETDEV_PRECHANGEUPPER:
4072         case NETDEV_CHANGEUPPER:
4073                 return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
4074         case NETDEV_CHANGELOWERSTATE:
4075                 return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
4076         }
4077
4078         return 0;
4079 }
4080
4081 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
4082                                         unsigned long event, void *ptr)
4083 {
4084         struct net_device *dev;
4085         struct list_head *iter;
4086         int ret;
4087
4088         netdev_for_each_lower_dev(lag_dev, dev, iter) {
4089                 if (mlxsw_sp_port_dev_check(dev)) {
4090                         ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
4091                         if (ret)
4092                                 return ret;
4093                 }
4094         }
4095
4096         return 0;
4097 }
4098
4099 static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp *mlxsw_sp,
4100                                             struct net_device *vlan_dev)
4101 {
4102         u16 fid = vlan_dev_vlan_id(vlan_dev);
4103         struct mlxsw_sp_fid *f;
4104
4105         f = mlxsw_sp_fid_find(mlxsw_sp, fid);
4106         if (!f) {
4107                 f = mlxsw_sp_fid_create(mlxsw_sp, fid);
4108                 if (IS_ERR(f))
4109                         return PTR_ERR(f);
4110         }
4111
4112         f->ref_count++;
4113
4114         return 0;
4115 }
4116
4117 static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
4118                                                struct net_device *vlan_dev)
4119 {
4120         u16 fid = vlan_dev_vlan_id(vlan_dev);
4121         struct mlxsw_sp_fid *f;
4122
4123         f = mlxsw_sp_fid_find(mlxsw_sp, fid);
4124         if (f && f->r)
4125                 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
4126         if (f && --f->ref_count == 0)
4127                 mlxsw_sp_fid_destroy(mlxsw_sp, f);
4128 }
4129
4130 static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
4131                                            unsigned long event, void *ptr)
4132 {
4133         struct netdev_notifier_changeupper_info *info;
4134         struct net_device *upper_dev;
4135         struct mlxsw_sp *mlxsw_sp;
4136         int err;
4137
4138         mlxsw_sp = mlxsw_sp_lower_get(br_dev);
4139         if (!mlxsw_sp)
4140                 return 0;
4141         if (br_dev != mlxsw_sp->master_bridge.dev)
4142                 return 0;
4143
4144         info = ptr;
4145
4146         switch (event) {
4147         case NETDEV_CHANGEUPPER:
4148                 upper_dev = info->upper_dev;
4149                 if (!is_vlan_dev(upper_dev))
4150                         break;
4151                 if (info->linking) {
4152                         err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
4153                                                                upper_dev);
4154                         if (err)
4155                                 return err;
4156                 } else {
4157                         mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp, upper_dev);
4158                 }
4159                 break;
4160         }
4161
4162         return 0;
4163 }
4164
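/* vFIDs are tracked in a bitmap; a return value of MLXSW_SP_VFID_MAX
 * means the vFID space is exhausted.
 */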
4165 static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
4166 {
4167         return find_first_zero_bit(mlxsw_sp->vfids.mapped,
4168                                    MLXSW_SP_VFID_MAX);
4169 }
4170
4171 static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
4172 {
4173         char sfmr_pl[MLXSW_REG_SFMR_LEN];
4174
4175         mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
4176         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
4177 }
4178
4179 static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
4180
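/* Create a vFID for a VLAN-unaware bridge: claim a free slot in the
 * bitmap, create the FID in the device via SFMR and add it to the
 * vfids list.
 */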
4181 static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
4182                                                  struct net_device *br_dev)
4183 {
4184         struct device *dev = mlxsw_sp->bus_info->dev;
4185         struct mlxsw_sp_fid *f;
4186         u16 vfid, fid;
4187         int err;
4188
4189         vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
4190         if (vfid == MLXSW_SP_VFID_MAX) {
4191                 dev_err(dev, "No available vFIDs\n");
4192                 return ERR_PTR(-ERANGE);
4193         }
4194
4195         fid = mlxsw_sp_vfid_to_fid(vfid);
4196         err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
4197         if (err) {
4198                 dev_err(dev, "Failed to create FID=%d\n", fid);
4199                 return ERR_PTR(err);
4200         }
4201
4202         f = kzalloc(sizeof(*f), GFP_KERNEL);
4203         if (!f)
4204                 goto err_allocate_vfid;
4205
4206         f->leave = mlxsw_sp_vport_vfid_leave;
4207         f->fid = fid;
4208         f->dev = br_dev;
4209
4210         list_add(&f->list, &mlxsw_sp->vfids.list);
4211         set_bit(vfid, mlxsw_sp->vfids.mapped);
4212
4213         return f;
4214
4215 err_allocate_vfid:
4216         mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
4217         return ERR_PTR(-ENOMEM);
4218 }
4219
4220 static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
4221                                   struct mlxsw_sp_fid *f)
4222 {
4223         u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
4224         u16 fid = f->fid;
4225
4226         clear_bit(vfid, mlxsw_sp->vfids.mapped);
4227         list_del(&f->list);
4228
4229         if (f->r)
4230                 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
4231
4232         kfree(f);
4233
4234         mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
4235 }
4236
4237 static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
4238                                   bool valid)
4239 {
4240         enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
4241         u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
4242
4243         return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
4244                                             vid);
4245 }
4246
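/* Join a vPort to the vFID of a VLAN-unaware bridge: look up or
 * create the vFID, enable flooding for it and map the {Port, VID}
 * pair to the FID. On failure everything is unwound, including
 * destroying the vFID if this join was the one that created it.
 */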
4247 static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
4248                                     struct net_device *br_dev)
4249 {
4250         struct mlxsw_sp_fid *f;
4251         int err;
4252
4253         f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
4254         if (!f) {
4255                 f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
4256                 if (IS_ERR(f))
4257                         return PTR_ERR(f);
4258         }
4259
4260         err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
4261         if (err)
4262                 goto err_vport_flood_set;
4263
4264         err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
4265         if (err)
4266                 goto err_vport_fid_map;
4267
4268         mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
4269         f->ref_count++;
4270
4271         netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);
4272
4273         return 0;
4274
4275 err_vport_fid_map:
4276         mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
4277 err_vport_flood_set:
4278         if (!f->ref_count)
4279                 mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
4280         return err;
4281 }
4282
4283 static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
4284 {
4285         struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
4286
4287         netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
4288
4289         mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);
4290
4291         mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
4292
4293         mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);
4294
4295         mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
4296         if (--f->ref_count == 0)
4297                 mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
4298 }
4299
static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	int err;

	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev);
	if (err) {
		netdev_err(dev, "Failed to join vFID\n");
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning\n");
		goto err_port_vid_learning_set;
	}

	mlxsw_sp_vport->learning = 1;
	mlxsw_sp_vport->learning_sync = 1;
	mlxsw_sp_vport->uc_flood = 1;
	mlxsw_sp_vport->bridged = 1;

	return 0;

err_port_vid_learning_set:
	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
	return err;
}

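/* Undo mlxsw_sp_vport_bridge_join(). Errors from the learning and vFID
 * teardown are intentionally ignored, as this runs on the unlinking path.
 */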
static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);

	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);

	mlxsw_sp_vport->learning = 0;
	mlxsw_sp_vport->learning_sync = 0;
	mlxsw_sp_vport->uc_flood = 0;
	mlxsw_sp_vport->bridged = 0;
}

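/* Return true if no other VLAN interface configured on this port is already
 * a member of 'br_dev'. Used to veto a second membership during
 * PRECHANGEUPPER.
 */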
static bool
mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
				  const struct net_device *br_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport);

		if (dev && dev == br_dev)
			return false;
	}

	return true;
}

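/* Handle changeupper events for a VLAN device with VLAN 'vid' stacked on top
 * of the port netdev 'dev'. PRECHANGEUPPER may veto the operation;
 * CHANGEUPPER performs the bridge join or leave on the matching vPort.
 */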
static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
					  unsigned long event, void *ptr,
					  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct net_device *upper_dev;
	int err = 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* We can't have multiple VLAN interfaces configured on
		 * the same port that are members of the same bridge.
		 */
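		/* For example (hypothetical names), enslaving both
		 * sw1p1.10 and sw1p1.20 to the same br0 would be refused
		 * by the check below.
		 */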
		if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
						       upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking) {
			if (WARN_ON(!mlxsw_sp_vport))
				return -EINVAL;
			err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
							 upper_dev);
		} else {
			if (!mlxsw_sp_vport)
				return 0;
			mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
		}
	}

	return err;
}

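/* Replicate a VLAN upper event on a LAG device to every mlxsw_sp port member
 * of the LAG, stopping at the first error.
 */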
static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
							     vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

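/* A VLAN device is stacked either directly on a port or on a LAG; dispatch
 * accordingly. Events for VLAN devices on foreign netdevs are ignored.
 */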
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
						      vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
							  vid);

	return 0;
}

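/* Top-level netdev notifier handler: address and MTU changes are handed to
 * the router code, everything else is dispatched according to the netdev
 * type (port, LAG, bridge or VLAN device).
 */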
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err = 0;

	if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};

static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_event,
	.priority = 10,	/* Must be called before FIB notifier block */
};

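/* The notifier blocks are registered before the core driver so that no
 * event is missed once ports start probing; mlxsw_sp_module_exit() tears
 * everything down in the reverse order.
 */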
static int __init mlxsw_sp_module_init(void)
{
	int err;

	err = register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	if (err)
		return err;

	err = register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	if (err)
		goto err_register_inetaddr_notifier;

	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;

	return 0;

err_core_driver_register:
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
err_register_inetaddr_notifier:
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);