mlxsw: spectrum: Expose per-priority counters via ethtool
[cascardo/linux.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum.c
1 /*
2  * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
3  * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4  * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5  * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6  * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/netdevice.h>
41 #include <linux/etherdevice.h>
42 #include <linux/ethtool.h>
43 #include <linux/slab.h>
44 #include <linux/device.h>
45 #include <linux/skbuff.h>
46 #include <linux/if_vlan.h>
47 #include <linux/if_bridge.h>
48 #include <linux/workqueue.h>
49 #include <linux/jiffies.h>
50 #include <linux/bitops.h>
51 #include <linux/list.h>
52 #include <linux/notifier.h>
53 #include <linux/dcbnl.h>
54 #include <linux/inetdevice.h>
55 #include <net/switchdev.h>
56 #include <generated/utsrelease.h>
57
58 #include "spectrum.h"
59 #include "core.h"
60 #include "reg.h"
61 #include "port.h"
62 #include "trap.h"
63 #include "txheader.h"
64
static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

/* Tx header field accessors. A Tx header is prepended to every packet
 * handed to the device (see mlxsw_sp_txhdr_construct()) and is consumed
 * by the hardware on the way out.
 */

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
135
136 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
137                                      const struct mlxsw_tx_info *tx_info)
138 {
139         char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
140
141         memset(txhdr, 0, MLXSW_TXHDR_LEN);
142
143         mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
144         mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
145         mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
146         mlxsw_tx_hdr_swid_set(txhdr, 0);
147         mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
148         mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
149         mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
150 }
151
152 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
153 {
154         char spad_pl[MLXSW_REG_SPAD_LEN];
155         int err;
156
157         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
158         if (err)
159                 return err;
160         mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
161         return 0;
162 }
163
164 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
165                                           bool is_up)
166 {
167         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
168         char paos_pl[MLXSW_REG_PAOS_LEN];
169
170         mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
171                             is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
172                             MLXSW_PORT_ADMIN_STATUS_DOWN);
173         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
174 }
175
176 static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
177                                          bool *p_is_up)
178 {
179         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
180         char paos_pl[MLXSW_REG_PAOS_LEN];
181         u8 oper_status;
182         int err;
183
184         mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
185         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
186         if (err)
187                 return err;
188         oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
189         *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
190         return 0;
191 }
192
193 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
194                                       unsigned char *addr)
195 {
196         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
197         char ppad_pl[MLXSW_REG_PPAD_LEN];
198
199         mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
200         mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
201         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
202 }
203
204 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
205 {
206         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
207         unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
208
209         ether_addr_copy(addr, mlxsw_sp->base_mac);
210         addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
211         return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
212 }
213
214 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
215 {
216         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
217         char pmtu_pl[MLXSW_REG_PMTU_LEN];
218         int max_mtu;
219         int err;
220
221         mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
222         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
223         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
224         if (err)
225                 return err;
226         max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
227
228         if (mtu > max_mtu)
229                 return -EINVAL;
230
231         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
232         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
233 }
234
235 static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
236                                     u8 swid)
237 {
238         char pspa_pl[MLXSW_REG_PSPA_LEN];
239
240         mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
241         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
242 }
243
244 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
245 {
246         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
247
248         return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
249                                         swid);
250 }
251
252 static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
253                                      bool enable)
254 {
255         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
256         char svpe_pl[MLXSW_REG_SVPE_LEN];
257
258         mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
259         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
260 }
261
262 int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
263                                  enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
264                                  u16 vid)
265 {
266         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
267         char svfa_pl[MLXSW_REG_SVFA_LEN];
268
269         mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
270                             fid, vid);
271         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
272 }
273
274 static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
275                                           u16 vid, bool learn_enable)
276 {
277         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
278         char *spvmlr_pl;
279         int err;
280
281         spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
282         if (!spvmlr_pl)
283                 return -ENOMEM;
284         mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
285                               learn_enable);
286         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
287         kfree(spvmlr_pl);
288         return err;
289 }
290
291 static int
292 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
293 {
294         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
295         char sspr_pl[MLXSW_REG_SSPR_LEN];
296
297         mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
298         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
299 }
300
301 static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
302                                          u8 local_port, u8 *p_module,
303                                          u8 *p_width, u8 *p_lane)
304 {
305         char pmlp_pl[MLXSW_REG_PMLP_LEN];
306         int err;
307
308         mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
309         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
310         if (err)
311                 return err;
312         *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
313         *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
314         *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
315         return 0;
316 }
317
318 static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
319                                     u8 module, u8 width, u8 lane)
320 {
321         char pmlp_pl[MLXSW_REG_PMLP_LEN];
322         int i;
323
324         mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
325         mlxsw_reg_pmlp_width_set(pmlp_pl, width);
326         for (i = 0; i < width; i++) {
327                 mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
328                 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
329         }
330
331         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
332 }
333
334 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
335 {
336         char pmlp_pl[MLXSW_REG_PMLP_LEN];
337
338         mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
339         mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
340         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
341 }
342
343 static int mlxsw_sp_port_open(struct net_device *dev)
344 {
345         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
346         int err;
347
348         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
349         if (err)
350                 return err;
351         netif_start_queue(dev);
352         return 0;
353 }
354
355 static int mlxsw_sp_port_stop(struct net_device *dev)
356 {
357         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
358
359         netif_stop_queue(dev);
360         return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
361 }
362
/* ndo_start_xmit handler: prepend the Tx header and hand the skb to the
 * core for transmission, maintaining per-CPU Tx statistics. Always
 * returns NETDEV_TX_OK except when the core is momentarily busy.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	/* Ensure there is enough headroom for the Tx header, re-allocating
	 * it if needed. On allocation failure the original skb is dropped.
	 */
	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
	}

	/* NOTE(review): eth_skb_pad() is expected to consume the skb on
	 * failure, hence no free here — confirm against its definition.
	 */
	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
418
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
	/* Intentionally empty: there is no per-port Rx filter state to
	 * program here; the stub keeps .ndo_set_rx_mode safely callable.
	 */
}
422
423 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
424 {
425         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
426         struct sockaddr *addr = p;
427         int err;
428
429         if (!is_valid_ether_addr(addr->sa_data))
430                 return -EADDRNOTAVAIL;
431
432         err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
433         if (err)
434                 return err;
435         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
436         return 0;
437 }
438
439 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
440                                  bool pause_en, bool pfc_en, u16 delay)
441 {
442         u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);
443
444         delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
445                          MLXSW_SP_PAUSE_DELAY;
446
447         if (pause_en || pfc_en)
448                 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
449                                                     pg_size + delay, pg_size);
450         else
451                 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
452 }
453
/* Program the port's headroom buffers (PBMC) for the given MTU.
 * @prio_tc: priority to PG buffer index mapping.
 * @pause_en: global pause enabled on the port.
 * @my_pfc: PFC configuration, or NULL when PFC is not in use.
 *
 * Only PG buffers that some priority maps to are (re)configured; a
 * buffer becomes lossless when pause or PFC applies to it, lossy
 * otherwise. Returns 0 or a negative errno.
 */
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	/* Read the current PBMC state so untouched buffers are preserved. */
	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;

		/* Buffer i is in use if any priority j maps to it; PFC
		 * applies when that priority's bit is set in pfc_en.
		 */
		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}
488
489 static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
490                                       int mtu, bool pause_en)
491 {
492         u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
493         bool dcb_en = !!mlxsw_sp_port->dcb.ets;
494         struct ieee_pfc *my_pfc;
495         u8 *prio_tc;
496
497         prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
498         my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
499
500         return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
501                                             pause_en, my_pfc);
502 }
503
504 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
505 {
506         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
507         bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
508         int err;
509
510         err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
511         if (err)
512                 return err;
513         err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
514         if (err)
515                 goto err_port_mtu_set;
516         dev->mtu = mtu;
517         return 0;
518
519 err_port_mtu_set:
520         mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
521         return err;
522 }
523
/* ndo_get_stats64 handler: aggregate the per-CPU Rx/Tx counters into
 * @stats. The 64-bit counters are read under the u64_stats syncp so a
 * concurrent writer cannot be observed half-updated on 32-bit hosts.
 */
static struct rtnl_link_stats64 *
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		/* Retry the snapshot if a writer raced with us. */
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped	+= p->tx_dropped;
	}
	stats->tx_dropped	= tx_dropped;
	return stats;
}
555
556 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
557                            u16 vid_end, bool is_member, bool untagged)
558 {
559         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
560         char *spvm_pl;
561         int err;
562
563         spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
564         if (!spvm_pl)
565                 return -ENOMEM;
566
567         mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
568                             vid_end, is_member, untagged);
569         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
570         kfree(spvm_pl);
571         return err;
572 }
573
/* Transition the port to Virtual mode: install an explicit {Port, VID}
 * to FID mapping for every active VLAN, then enable Virtual Port mode.
 * On failure, mappings installed so far are removed; last_visited_vid
 * bounds the rollback loop (VLAN_N_VID means "all active VLANs").
 */
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			/* The failed VID itself was not installed, so the
			 * rollback loop stops just before it.
			 */
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}
603
604 static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
605 {
606         enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
607         u16 vid;
608         int err;
609
610         err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
611         if (err)
612                 return err;
613
614         for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
615                 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
616                                                    vid, vid);
617                 if (err)
618                         return err;
619         }
620
621         return 0;
622 }
623
624 static struct mlxsw_sp_port *
625 mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
626 {
627         struct mlxsw_sp_port *mlxsw_sp_vport;
628
629         mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
630         if (!mlxsw_sp_vport)
631                 return NULL;
632
633         /* dev will be set correctly after the VLAN device is linked
634          * with the real device. In case of bridge SELF invocation, dev
635          * will remain as is.
636          */
637         mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
638         mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
639         mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
640         mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
641         mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
642         mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
643         mlxsw_sp_vport->vport.vid = vid;
644
645         list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);
646
647         return mlxsw_sp_vport;
648 }
649
650 static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
651 {
652         list_del(&mlxsw_sp_vport->vport.list);
653         kfree(mlxsw_sp_vport);
654 }
655
/* ndo_vlan_rx_add_vid handler: create a vPort for @vid, switch the port
 * to Virtual mode if this is its first VLAN, disable learning for the
 * VID and add it to the device (VID 1 is the only untagged one).
 * Errors unwind the steps in reverse order.
 */
int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
			  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	bool untagged = vid == 1;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
		netdev_warn(dev, "VID=%d already configured\n", vid);
		return 0;
	}

	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
		return -ENOMEM;
	}

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to Virtual mode\n");
			goto err_port_vp_mode_trans;
		}
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
	if (err) {
		netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
		goto err_port_vid_learning_set;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		goto err_port_add_vid;
	}

	return 0;

err_port_add_vid:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
err_port_vid_learning_set:
	/* The new vPort is still on the list, so "singular" here means it
	 * was the only one — undo the Virtual mode transition.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
	return err;
}
717
/* ndo_vlan_rx_kill_vid handler: remove @vid from the device, re-enable
 * learning, drop the vPort's FID reference and destroy the vPort,
 * transitioning the port back to VLAN mode if this was its last VLAN.
 */
static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;
	int err;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		netdev_warn(dev, "VID=%d does not exist\n", vid);
		return 0;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
		return err;
	}

	/* Drop FID reference. If this was the last reference the
	 * resources will be freed.
	 */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set port's mode to VLAN mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to VLAN mode\n");
			return err;
		}
	}

	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);

	return 0;
}
774
775 static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
776                                             size_t len)
777 {
778         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
779         u8 module = mlxsw_sp_port->mapping.module;
780         u8 width = mlxsw_sp_port->mapping.width;
781         u8 lane = mlxsw_sp_port->mapping.lane;
782         int err;
783
784         if (!mlxsw_sp_port->split)
785                 err = snprintf(name, len, "p%d", module + 1);
786         else
787                 err = snprintf(name, len, "p%ds%d", module + 1,
788                                lane / width);
789
790         if (err >= len)
791                 return -EINVAL;
792
793         return 0;
794 }
795
/* Network device operations for a front-panel port. FDB and bridge
 * operations are delegated to the switchdev layer.
 */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_neigh_construct	= mlxsw_sp_router_neigh_construct,
	.ndo_neigh_destroy	= mlxsw_sp_router_neigh_destroy,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
};
816
817 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
818                                       struct ethtool_drvinfo *drvinfo)
819 {
820         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
821         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
822
823         strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
824         strlcpy(drvinfo->version, mlxsw_sp_driver_version,
825                 sizeof(drvinfo->version));
826         snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
827                  "%d.%d.%d",
828                  mlxsw_sp->bus_info->fw_rev.major,
829                  mlxsw_sp->bus_info->fw_rev.minor,
830                  mlxsw_sp->bus_info->fw_rev.subminor);
831         strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
832                 sizeof(drvinfo->bus_info));
833 }
834
835 static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
836                                          struct ethtool_pauseparam *pause)
837 {
838         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
839
840         pause->rx_pause = mlxsw_sp_port->link.rx_pause;
841         pause->tx_pause = mlxsw_sp_port->link.tx_pause;
842 }
843
844 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
845                                    struct ethtool_pauseparam *pause)
846 {
847         char pfcc_pl[MLXSW_REG_PFCC_LEN];
848
849         mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
850         mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
851         mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
852
853         return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
854                                pfcc_pl);
855 }
856
857 static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
858                                         struct ethtool_pauseparam *pause)
859 {
860         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
861         bool pause_en = pause->tx_pause || pause->rx_pause;
862         int err;
863
864         if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
865                 netdev_err(dev, "PFC already enabled on port\n");
866                 return -EINVAL;
867         }
868
869         if (pause->autoneg) {
870                 netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
871                 return -EINVAL;
872         }
873
874         err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
875         if (err) {
876                 netdev_err(dev, "Failed to configure port's headroom\n");
877                 return err;
878         }
879
880         err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
881         if (err) {
882                 netdev_err(dev, "Failed to set PAUSE parameters\n");
883                 goto err_port_pause_configure;
884         }
885
886         mlxsw_sp_port->link.rx_pause = pause->rx_pause;
887         mlxsw_sp_port->link.tx_pause = pause->tx_pause;
888
889         return 0;
890
891 err_port_pause_configure:
892         pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
893         mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
894         return err;
895 }
896
/* One ethtool statistic: its display string and the accessor that
 * extracts the counter from a queried PPCNT register payload.
 */
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(char *payload);
};
901
/* IEEE 802.3 counter group of the PPCNT register; names follow the
 * standard's aMAC attribute names.
 */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
982
/* Per-priority counter group of the PPCNT register. Each entry is
 * reported once per priority (0..IEEE_8021QAZ_MAX_TCS-1), with the
 * priority number appended to the string at dump time.
 */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
	{
		.str = "rx_octets_prio",
		.getter = mlxsw_reg_ppcnt_rx_octets_get,
	},
	{
		.str = "rx_frames_prio",
		.getter = mlxsw_reg_ppcnt_rx_frames_get,
	},
	{
		.str = "tx_octets_prio",
		.getter = mlxsw_reg_ppcnt_tx_octets_get,
	},
	{
		.str = "tx_frames_prio",
		.getter = mlxsw_reg_ppcnt_tx_frames_get,
	},
	{
		.str = "rx_pause_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_get,
	},
	{
		.str = "rx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
	},
	{
		.str = "tx_pause_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_get,
	},
	{
		.str = "tx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
	},
};

#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
1019
/* Total ethtool stats: IEEE group + per-priority group for every TC. */
#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
					 MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \
					 IEEE_8021QAZ_MAX_TCS)
1023
1024 static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
1025 {
1026         int i;
1027
1028         for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
1029                 snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
1030                          mlxsw_sp_port_hw_prio_stats[i].str, prio);
1031                 *p += ETH_GSTRING_LEN;
1032         }
1033 }
1034
1035 static void mlxsw_sp_port_get_strings(struct net_device *dev,
1036                                       u32 stringset, u8 *data)
1037 {
1038         u8 *p = data;
1039         int i;
1040
1041         switch (stringset) {
1042         case ETH_SS_STATS:
1043                 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
1044                         memcpy(p, mlxsw_sp_port_hw_stats[i].str,
1045                                ETH_GSTRING_LEN);
1046                         p += ETH_GSTRING_LEN;
1047                 }
1048
1049                 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
1050                         mlxsw_sp_port_get_prio_strings(&p, i);
1051
1052                 break;
1053         }
1054 }
1055
1056 static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
1057                                      enum ethtool_phys_id_state state)
1058 {
1059         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1060         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1061         char mlcr_pl[MLXSW_REG_MLCR_LEN];
1062         bool active;
1063
1064         switch (state) {
1065         case ETHTOOL_ID_ACTIVE:
1066                 active = true;
1067                 break;
1068         case ETHTOOL_ID_INACTIVE:
1069                 active = false;
1070                 break;
1071         default:
1072                 return -EOPNOTSUPP;
1073         }
1074
1075         mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
1076         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
1077 }
1078
1079 static int
1080 mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
1081                                int *p_len, enum mlxsw_reg_ppcnt_grp grp)
1082 {
1083         switch (grp) {
1084         case  MLXSW_REG_PPCNT_IEEE_8023_CNT:
1085                 *p_hw_stats = mlxsw_sp_port_hw_stats;
1086                 *p_len = MLXSW_SP_PORT_HW_STATS_LEN;
1087                 break;
1088         case MLXSW_REG_PPCNT_PRIO_CNT:
1089                 *p_hw_stats = mlxsw_sp_port_hw_prio_stats;
1090                 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
1091                 break;
1092         default:
1093                 WARN_ON(1);
1094                 return -ENOTSUPP;
1095         }
1096         return 0;
1097 }
1098
1099 static void __mlxsw_sp_port_get_stats(struct net_device *dev,
1100                                       enum mlxsw_reg_ppcnt_grp grp, int prio,
1101                                       u64 *data, int data_index)
1102 {
1103         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1104         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1105         struct mlxsw_sp_port_hw_stats *hw_stats;
1106         char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
1107         int i, len;
1108         int err;
1109
1110         err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
1111         if (err)
1112                 return;
1113         mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
1114         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
1115         for (i = 0; i < len; i++)
1116                 data[data_index + i] = !err ? hw_stats[i].getter(ppcnt_pl) : 0;
1117 }
1118
1119 static void mlxsw_sp_port_get_stats(struct net_device *dev,
1120                                     struct ethtool_stats *stats, u64 *data)
1121 {
1122         int i, data_index = 0;
1123
1124         /* IEEE 802.3 Counters */
1125         __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
1126                                   data, data_index);
1127         data_index = MLXSW_SP_PORT_HW_STATS_LEN;
1128
1129         /* Per-Priority Counters */
1130         for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1131                 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
1132                                           data, data_index);
1133                 data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
1134         }
1135 }
1136
1137 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
1138 {
1139         switch (sset) {
1140         case ETH_SS_STATS:
1141                 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
1142         default:
1143                 return -EOPNOTSUPP;
1144         }
1145 }
1146
/* Mapping between a PTYS protocol mask and the corresponding ethtool
 * supported/advertised bits and link speed (Mb/s). A zero supported/
 * advertised field means ethtool has no matching legacy bit.
 */
struct mlxsw_sp_port_link_mode {
	u32 mask;
	u32 supported;
	u32 advertised;
	u32 speed;
};
1153
/* PTYS <-> ethtool link-mode translation table, ordered by speed.
 * Several PTYS bits share one entry when ethtool cannot distinguish
 * them; entries without legacy ethtool bits still carry the speed.
 */
static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported	= SUPPORTED_100baseT_Full,
		.advertised	= ADVERTISED_100baseT_Full,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported	= SUPPORTED_1000baseKX_Full,
		.advertised	= ADVERTISED_1000baseKX_Full,
		.speed		= 1000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported	= SUPPORTED_10000baseT_Full,
		.advertised	= ADVERTISED_10000baseT_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported	= SUPPORTED_10000baseKX4_Full,
		.advertised	= ADVERTISED_10000baseKX4_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported	= SUPPORTED_10000baseKR_Full,
		.advertised	= ADVERTISED_10000baseKR_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported	= SUPPORTED_20000baseKR2_Full,
		.advertised	= ADVERTISED_20000baseKR2_Full,
		.speed		= 20000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported	= SUPPORTED_40000baseCR4_Full,
		.advertised	= ADVERTISED_40000baseCR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported	= SUPPORTED_40000baseKR4_Full,
		.advertised	= ADVERTISED_40000baseKR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported	= SUPPORTED_40000baseSR4_Full,
		.advertised	= ADVERTISED_40000baseSR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported	= SUPPORTED_40000baseLR4_Full,
		.advertised	= ADVERTISED_40000baseLR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed		= 25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed		= 50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.supported	= SUPPORTED_56000baseKR4_Full,
		.advertised	= ADVERTISED_56000baseKR4_Full,
		.speed		= 56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed		= 100000,
	},
};

#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
1252
1253 static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
1254 {
1255         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1256                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1257                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1258                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1259                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1260                               MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1261                 return SUPPORTED_FIBRE;
1262
1263         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1264                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1265                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1266                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
1267                               MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
1268                 return SUPPORTED_Backplane;
1269         return 0;
1270 }
1271
1272 static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
1273 {
1274         u32 modes = 0;
1275         int i;
1276
1277         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1278                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1279                         modes |= mlxsw_sp_port_link_mode[i].supported;
1280         }
1281         return modes;
1282 }
1283
1284 static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
1285 {
1286         u32 modes = 0;
1287         int i;
1288
1289         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1290                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1291                         modes |= mlxsw_sp_port_link_mode[i].advertised;
1292         }
1293         return modes;
1294 }
1295
1296 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
1297                                             struct ethtool_cmd *cmd)
1298 {
1299         u32 speed = SPEED_UNKNOWN;
1300         u8 duplex = DUPLEX_UNKNOWN;
1301         int i;
1302
1303         if (!carrier_ok)
1304                 goto out;
1305
1306         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1307                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
1308                         speed = mlxsw_sp_port_link_mode[i].speed;
1309                         duplex = DUPLEX_FULL;
1310                         break;
1311                 }
1312         }
1313 out:
1314         ethtool_cmd_speed_set(cmd, speed);
1315         cmd->duplex = duplex;
1316 }
1317
1318 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
1319 {
1320         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1321                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1322                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1323                               MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1324                 return PORT_FIBRE;
1325
1326         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1327                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1328                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
1329                 return PORT_DA;
1330
1331         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1332                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1333                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1334                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
1335                 return PORT_NONE;
1336
1337         return PORT_OTHER;
1338 }
1339
1340 static int mlxsw_sp_port_get_settings(struct net_device *dev,
1341                                       struct ethtool_cmd *cmd)
1342 {
1343         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1344         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1345         char ptys_pl[MLXSW_REG_PTYS_LEN];
1346         u32 eth_proto_cap;
1347         u32 eth_proto_admin;
1348         u32 eth_proto_oper;
1349         int err;
1350
1351         mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
1352         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1353         if (err) {
1354                 netdev_err(dev, "Failed to get proto");
1355                 return err;
1356         }
1357         mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
1358                               &eth_proto_admin, &eth_proto_oper);
1359
1360         cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
1361                          mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
1362                          SUPPORTED_Pause | SUPPORTED_Asym_Pause;
1363         cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
1364         mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
1365                                         eth_proto_oper, cmd);
1366
1367         eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
1368         cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
1369         cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);
1370
1371         cmd->transceiver = XCVR_INTERNAL;
1372         return 0;
1373 }
1374
1375 static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
1376 {
1377         u32 ptys_proto = 0;
1378         int i;
1379
1380         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1381                 if (advertising & mlxsw_sp_port_link_mode[i].advertised)
1382                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1383         }
1384         return ptys_proto;
1385 }
1386
1387 static u32 mlxsw_sp_to_ptys_speed(u32 speed)
1388 {
1389         u32 ptys_proto = 0;
1390         int i;
1391
1392         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1393                 if (speed == mlxsw_sp_port_link_mode[i].speed)
1394                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1395         }
1396         return ptys_proto;
1397 }
1398
1399 static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
1400 {
1401         u32 ptys_proto = 0;
1402         int i;
1403
1404         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1405                 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
1406                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1407         }
1408         return ptys_proto;
1409 }
1410
1411 static int mlxsw_sp_port_set_settings(struct net_device *dev,
1412                                       struct ethtool_cmd *cmd)
1413 {
1414         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1415         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1416         char ptys_pl[MLXSW_REG_PTYS_LEN];
1417         u32 speed;
1418         u32 eth_proto_new;
1419         u32 eth_proto_cap;
1420         u32 eth_proto_admin;
1421         bool is_up;
1422         int err;
1423
1424         speed = ethtool_cmd_speed(cmd);
1425
1426         eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
1427                 mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
1428                 mlxsw_sp_to_ptys_speed(speed);
1429
1430         mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
1431         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1432         if (err) {
1433                 netdev_err(dev, "Failed to get proto");
1434                 return err;
1435         }
1436         mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);
1437
1438         eth_proto_new = eth_proto_new & eth_proto_cap;
1439         if (!eth_proto_new) {
1440                 netdev_err(dev, "Not supported proto admin requested");
1441                 return -EINVAL;
1442         }
1443         if (eth_proto_new == eth_proto_admin)
1444                 return 0;
1445
1446         mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
1447         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1448         if (err) {
1449                 netdev_err(dev, "Failed to set proto admin");
1450                 return err;
1451         }
1452
1453         err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
1454         if (err) {
1455                 netdev_err(dev, "Failed to get oper status");
1456                 return err;
1457         }
1458         if (!is_up)
1459                 return 0;
1460
1461         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1462         if (err) {
1463                 netdev_err(dev, "Failed to set admin status");
1464                 return err;
1465         }
1466
1467         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
1468         if (err) {
1469                 netdev_err(dev, "Failed to set admin status");
1470                 return err;
1471         }
1472
1473         return 0;
1474 }
1475
/* ethtool callbacks for a Spectrum port. */
static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= mlxsw_sp_port_get_pauseparam,
	.set_pauseparam		= mlxsw_sp_port_set_pauseparam,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_settings		= mlxsw_sp_port_get_settings,
	.set_settings		= mlxsw_sp_port_set_settings,
};
1488
1489 static int
1490 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
1491 {
1492         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1493         u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
1494         char ptys_pl[MLXSW_REG_PTYS_LEN];
1495         u32 eth_proto_admin;
1496
1497         eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
1498         mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port,
1499                             eth_proto_admin);
1500         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1501 }
1502
/* Configure one ETS scheduling element via the QEEC register: link the
 * element at (@hr, @index) to @next_index in the next hierarchy level,
 * and set its DWRR mode and weight (DWRR enable bit is always written).
 */
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
1517
/* Set the max shaper rate of the ETS element at (@hr, @index) via QEEC.
 * @maxrate of MLXSW_REG_QEEC_MAS_DIS disables the shaper.
 */
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
1531
/* Map switch priority @switch_prio to traffic class @tclass on the
 * port via the QTCT register.
 */
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}
1542
/* Initialize the port's ETS scheduling tree to a neutral default:
 * group -> per-TC subgroups -> TCs, no DWRR, all max shapers disabled,
 * and every switch priority mapped to traffic class 0.
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all member in the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
				    0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
					    false, 0);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that
	 * support it.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
1604
1605 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1606                                 bool split, u8 module, u8 width, u8 lane)
1607 {
1608         struct mlxsw_sp_port *mlxsw_sp_port;
1609         struct net_device *dev;
1610         size_t bytes;
1611         int err;
1612
1613         dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
1614         if (!dev)
1615                 return -ENOMEM;
1616         mlxsw_sp_port = netdev_priv(dev);
1617         mlxsw_sp_port->dev = dev;
1618         mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1619         mlxsw_sp_port->local_port = local_port;
1620         mlxsw_sp_port->split = split;
1621         mlxsw_sp_port->mapping.module = module;
1622         mlxsw_sp_port->mapping.width = width;
1623         mlxsw_sp_port->mapping.lane = lane;
1624         bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
1625         mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
1626         if (!mlxsw_sp_port->active_vlans) {
1627                 err = -ENOMEM;
1628                 goto err_port_active_vlans_alloc;
1629         }
1630         mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
1631         if (!mlxsw_sp_port->untagged_vlans) {
1632                 err = -ENOMEM;
1633                 goto err_port_untagged_vlans_alloc;
1634         }
1635         INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
1636
1637         mlxsw_sp_port->pcpu_stats =
1638                 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
1639         if (!mlxsw_sp_port->pcpu_stats) {
1640                 err = -ENOMEM;
1641                 goto err_alloc_stats;
1642         }
1643
1644         dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
1645         dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
1646
1647         err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
1648         if (err) {
1649                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
1650                         mlxsw_sp_port->local_port);
1651                 goto err_dev_addr_init;
1652         }
1653
1654         netif_carrier_off(dev);
1655
1656         dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
1657                          NETIF_F_HW_VLAN_CTAG_FILTER;
1658
1659         /* Each packet needs to have a Tx header (metadata) on top all other
1660          * headers.
1661          */
1662         dev->hard_header_len += MLXSW_TXHDR_LEN;
1663
1664         err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
1665         if (err) {
1666                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
1667                         mlxsw_sp_port->local_port);
1668                 goto err_port_system_port_mapping_set;
1669         }
1670
1671         err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
1672         if (err) {
1673                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
1674                         mlxsw_sp_port->local_port);
1675                 goto err_port_swid_set;
1676         }
1677
1678         err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
1679         if (err) {
1680                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
1681                         mlxsw_sp_port->local_port);
1682                 goto err_port_speed_by_width_set;
1683         }
1684
1685         err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
1686         if (err) {
1687                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
1688                         mlxsw_sp_port->local_port);
1689                 goto err_port_mtu_set;
1690         }
1691
1692         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1693         if (err)
1694                 goto err_port_admin_status_set;
1695
1696         err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
1697         if (err) {
1698                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
1699                         mlxsw_sp_port->local_port);
1700                 goto err_port_buffers_init;
1701         }
1702
1703         err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
1704         if (err) {
1705                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
1706                         mlxsw_sp_port->local_port);
1707                 goto err_port_ets_init;
1708         }
1709
1710         /* ETS and buffers must be initialized before DCB. */
1711         err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
1712         if (err) {
1713                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
1714                         mlxsw_sp_port->local_port);
1715                 goto err_port_dcb_init;
1716         }
1717
1718         mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
1719         err = register_netdev(dev);
1720         if (err) {
1721                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
1722                         mlxsw_sp_port->local_port);
1723                 goto err_register_netdev;
1724         }
1725
1726         err = mlxsw_core_port_init(mlxsw_sp->core, &mlxsw_sp_port->core_port,
1727                                    mlxsw_sp_port->local_port, dev,
1728                                    mlxsw_sp_port->split, module);
1729         if (err) {
1730                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
1731                         mlxsw_sp_port->local_port);
1732                 goto err_core_port_init;
1733         }
1734
1735         err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
1736         if (err)
1737                 goto err_port_vlan_init;
1738
1739         mlxsw_sp->ports[local_port] = mlxsw_sp_port;
1740         return 0;
1741
1742 err_port_vlan_init:
1743         mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
1744 err_core_port_init:
1745         unregister_netdev(dev);
1746 err_register_netdev:
1747 err_port_dcb_init:
1748 err_port_ets_init:
1749 err_port_buffers_init:
1750 err_port_admin_status_set:
1751 err_port_mtu_set:
1752 err_port_speed_by_width_set:
1753 err_port_swid_set:
1754 err_port_system_port_mapping_set:
1755 err_dev_addr_init:
1756         free_percpu(mlxsw_sp_port->pcpu_stats);
1757 err_alloc_stats:
1758         kfree(mlxsw_sp_port->untagged_vlans);
1759 err_port_untagged_vlans_alloc:
1760         kfree(mlxsw_sp_port->active_vlans);
1761 err_port_active_vlans_alloc:
1762         free_netdev(dev);
1763         return err;
1764 }
1765
/* Tear down a port previously created by mlxsw_sp_port_create() and free
 * all of its resources. Safe to call for ports that were never created
 * (the ports[] slot is then NULL). Teardown mirrors creation in reverse
 * order.
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	if (!mlxsw_sp_port)
		return;
	/* Clear the slot first so lookups no longer find the dying port. */
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	/* Detach the port from its switch partition and front-panel module. */
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	kfree(mlxsw_sp_port->untagged_vlans);
	kfree(mlxsw_sp_port->active_vlans);
	/* All vPorts should have been destroyed by this point. */
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
	free_netdev(mlxsw_sp_port->dev);
}
1786
1787 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
1788 {
1789         int i;
1790
1791         for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
1792                 mlxsw_sp_port_remove(mlxsw_sp, i);
1793         kfree(mlxsw_sp->ports);
1794 }
1795
/* Allocate the ports array and create a netdev for every local port that
 * has a front-panel module mapped to it (width != 0). On failure, ports
 * created so far are removed in reverse order and the array is freed.
 */
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	u8 module, width, lane;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	/* Local port numbering starts at 1; slot 0 is never used here. */
	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
						    &width, &lane);
		if (err)
			goto err_port_module_info_get;
		/* Width of zero means no module is mapped to this port. */
		if (!width)
			continue;
		/* Remember the mapping for later re-creation on unsplit. */
		mlxsw_sp->port_to_module[i] = module;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width,
					   lane);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
err_port_module_info_get:
	/* Unwind ports created so far, newest first. */
	for (i--; i >= 1; i--)
		mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->ports);
	return err;
}
1830
1831 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
1832 {
1833         u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
1834
1835         return local_port - offset;
1836 }
1837
/* Create @count split ports starting at @base_port, all sharing the
 * front-panel module @module. Each port gets an equal share of the
 * module's lanes. Creation is done in three stages (module map, SWID,
 * port create); on failure each stage is unwound for all @count ports
 * via the fall-through labels below.
 */
static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
				      u8 module, unsigned int count)
{
	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
	int err, i;

	/* Stage 1: map each new port to its lane range in the module. */
	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
					       width, i * width);
		if (err)
			goto err_port_module_map;
	}

	/* Stage 2: place the new ports in SWID 0. */
	for (i = 0; i < count; i++) {
		err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
		if (err)
			goto err_port_swid_set;
	}

	/* Stage 3: create the actual split ports. */
	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
					   module, width, i * width);
		if (err)
			goto err_port_create;
	}

	return 0;

	/* Each label undoes its own stage for the ports done so far, then
	 * resets i to count so the next label unwinds its stage for ALL
	 * ports (which completed fully before the failing stage started).
	 */
err_port_create:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
	i = count;
err_port_swid_set:
	for (i--; i >= 0; i--)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
					 MLXSW_PORT_SWID_DISABLED_PORT);
	i = count;
err_port_module_map:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
	return err;
}
1880
/* Re-create the original full-width ports after the split ports covering
 * them have been removed. @count is the number of ports the group was
 * previously split into. Errors are not propagated; this is best-effort
 * restoration on an unsplit or a failed split.
 */
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port, unsigned int count)
{
	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
	int i;

	/* Split by four means we need to re-create two ports, otherwise
	 * only one.
	 */
	count = count / 2;

	/* Re-map each full-width port to its module, starting at lane 0. */
	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
					 0);
	}

	for (i = 0; i < count; i++)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		/* Return value intentionally ignored in this void context. */
		mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
				     width, 0);
	}
}
1911
/* Split local port @local_port into @count (2 or 4) ports. Validates the
 * request, removes the ports occupying the target slots and re-creates
 * them as split ports; on failure the original unsplit ports are
 * restored.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 module, cur_width, base_port;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	/* Capture the mapping before the port is removed below. */
	module = mlxsw_sp_port->mapping.module;
	cur_width = mlxsw_sp_port->mapping.width;

	if (count != 2 && count != 4) {
		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
		return -EINVAL;
	}

	/* Only a full-width port has lanes to hand out to split ports. */
	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		return -EINVAL;
	}

	/* Make sure we have enough slave (even) ports for the split. */
	if (count == 2) {
		base_port = local_port;
		if (mlxsw_sp->ports[base_port + 1]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	} else {
		base_port = mlxsw_sp_cluster_base_port_get(local_port);
		if (mlxsw_sp->ports[base_port + 1] ||
		    mlxsw_sp->ports[base_port + 3]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	}

	for (i = 0; i < count; i++)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	/* Restore the unsplit ports we removed above. */
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
	return err;
}
1972
/* Undo a previous split containing local port @local_port: remove the
 * split ports and re-create the original full-width port(s).
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 cur_width, base_port;
	unsigned int count;
	int i;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
		return -EINVAL;
	}

	/* Width 1 means the port was part of a split-by-4; width 2, a
	 * split-by-2.
	 */
	cur_width = mlxsw_sp_port->mapping.width;
	count = cur_width == 1 ? 4 : 2;

	base_port = mlxsw_sp_cluster_base_port_get(local_port);

	/* Determine which ports to remove. */
	if (count == 2 && local_port >= base_port + 2)
		base_port = base_port + 2;

	for (i = 0; i < count; i++)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);

	return 0;
}
2009
2010 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2011                                      char *pude_pl, void *priv)
2012 {
2013         struct mlxsw_sp *mlxsw_sp = priv;
2014         struct mlxsw_sp_port *mlxsw_sp_port;
2015         enum mlxsw_reg_pude_oper_status status;
2016         u8 local_port;
2017
2018         local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2019         mlxsw_sp_port = mlxsw_sp->ports[local_port];
2020         if (!mlxsw_sp_port)
2021                 return;
2022
2023         status = mlxsw_reg_pude_oper_status_get(pude_pl);
2024         if (status == MLXSW_PORT_OPER_STATUS_UP) {
2025                 netdev_info(mlxsw_sp_port->dev, "link up\n");
2026                 netif_carrier_on(mlxsw_sp_port->dev);
2027         } else {
2028                 netdev_info(mlxsw_sp_port->dev, "link down\n");
2029                 netif_carrier_off(mlxsw_sp_port->dev);
2030         }
2031 }
2032
/* Event listener for Port Up/Down Event (PUDE) traps. */
static struct mlxsw_event_listener mlxsw_sp_pude_event = {
	.func = mlxsw_sp_pude_event_func,
	.trap_id = MLXSW_TRAP_ID_PUDE,
};
2037
2038 static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
2039                                    enum mlxsw_event_trap_id trap_id)
2040 {
2041         struct mlxsw_event_listener *el;
2042         char hpkt_pl[MLXSW_REG_HPKT_LEN];
2043         int err;
2044
2045         switch (trap_id) {
2046         case MLXSW_TRAP_ID_PUDE:
2047                 el = &mlxsw_sp_pude_event;
2048                 break;
2049         }
2050         err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
2051         if (err)
2052                 return err;
2053
2054         mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
2055         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2056         if (err)
2057                 goto err_event_trap_set;
2058
2059         return 0;
2060
2061 err_event_trap_set:
2062         mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
2063         return err;
2064 }
2065
2066 static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
2067                                       enum mlxsw_event_trap_id trap_id)
2068 {
2069         struct mlxsw_event_listener *el;
2070
2071         switch (trap_id) {
2072         case MLXSW_TRAP_ID_PUDE:
2073                 el = &mlxsw_sp_pude_event;
2074                 break;
2075         }
2076         mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
2077 }
2078
/* Deliver a packet trapped to the CPU to the netdev of its ingress port:
 * update the per-CPU RX counters and hand the skb to the network stack.
 */
static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
				      void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	/* syncp lets readers obtain a consistent snapshot of the 64-bit
	 * counters on 32-bit hosts.
	 */
	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}
2103
2104 static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
2105         {
2106                 .func = mlxsw_sp_rx_listener_func,
2107                 .local_port = MLXSW_PORT_DONT_CARE,
2108                 .trap_id = MLXSW_TRAP_ID_FDB_MC,
2109         },
2110         /* Traps for specific L2 packet types, not trapped as FDB MC */
2111         {
2112                 .func = mlxsw_sp_rx_listener_func,
2113                 .local_port = MLXSW_PORT_DONT_CARE,
2114                 .trap_id = MLXSW_TRAP_ID_STP,
2115         },
2116         {
2117                 .func = mlxsw_sp_rx_listener_func,
2118                 .local_port = MLXSW_PORT_DONT_CARE,
2119                 .trap_id = MLXSW_TRAP_ID_LACP,
2120         },
2121         {
2122                 .func = mlxsw_sp_rx_listener_func,
2123                 .local_port = MLXSW_PORT_DONT_CARE,
2124                 .trap_id = MLXSW_TRAP_ID_EAPOL,
2125         },
2126         {
2127                 .func = mlxsw_sp_rx_listener_func,
2128                 .local_port = MLXSW_PORT_DONT_CARE,
2129                 .trap_id = MLXSW_TRAP_ID_LLDP,
2130         },
2131         {
2132                 .func = mlxsw_sp_rx_listener_func,
2133                 .local_port = MLXSW_PORT_DONT_CARE,
2134                 .trap_id = MLXSW_TRAP_ID_MMRP,
2135         },
2136         {
2137                 .func = mlxsw_sp_rx_listener_func,
2138                 .local_port = MLXSW_PORT_DONT_CARE,
2139                 .trap_id = MLXSW_TRAP_ID_MVRP,
2140         },
2141         {
2142                 .func = mlxsw_sp_rx_listener_func,
2143                 .local_port = MLXSW_PORT_DONT_CARE,
2144                 .trap_id = MLXSW_TRAP_ID_RPVST,
2145         },
2146         {
2147                 .func = mlxsw_sp_rx_listener_func,
2148                 .local_port = MLXSW_PORT_DONT_CARE,
2149                 .trap_id = MLXSW_TRAP_ID_DHCP,
2150         },
2151         {
2152                 .func = mlxsw_sp_rx_listener_func,
2153                 .local_port = MLXSW_PORT_DONT_CARE,
2154                 .trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
2155         },
2156         {
2157                 .func = mlxsw_sp_rx_listener_func,
2158                 .local_port = MLXSW_PORT_DONT_CARE,
2159                 .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
2160         },
2161         {
2162                 .func = mlxsw_sp_rx_listener_func,
2163                 .local_port = MLXSW_PORT_DONT_CARE,
2164                 .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
2165         },
2166         {
2167                 .func = mlxsw_sp_rx_listener_func,
2168                 .local_port = MLXSW_PORT_DONT_CARE,
2169                 .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
2170         },
2171         {
2172                 .func = mlxsw_sp_rx_listener_func,
2173                 .local_port = MLXSW_PORT_DONT_CARE,
2174                 .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
2175         },
2176         {
2177                 .func = mlxsw_sp_rx_listener_func,
2178                 .local_port = MLXSW_PORT_DONT_CARE,
2179                 .trap_id = MLXSW_TRAP_ID_ARPBC,
2180         },
2181         {
2182                 .func = mlxsw_sp_rx_listener_func,
2183                 .local_port = MLXSW_PORT_DONT_CARE,
2184                 .trap_id = MLXSW_TRAP_ID_ARPUC,
2185         },
2186         {
2187                 .func = mlxsw_sp_rx_listener_func,
2188                 .local_port = MLXSW_PORT_DONT_CARE,
2189                 .trap_id = MLXSW_TRAP_ID_IP2ME,
2190         },
2191         {
2192                 .func = mlxsw_sp_rx_listener_func,
2193                 .local_port = MLXSW_PORT_DONT_CARE,
2194                 .trap_id = MLXSW_TRAP_ID_RTR_INGRESS0,
2195         },
2196         {
2197                 .func = mlxsw_sp_rx_listener_func,
2198                 .local_port = MLXSW_PORT_DONT_CARE,
2199                 .trap_id = MLXSW_TRAP_ID_HOST_MISS_IPV4,
2200         },
2201 };
2202
/* Configure the RX and CTRL trap groups and register every RX listener,
 * setting each trap's action to trap-to-CPU. On failure, previously
 * registered listeners and trap actions are rolled back.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
						      &mlxsw_sp_rx_listener[i],
						      mlxsw_sp);
		if (err)
			goto err_rx_listener_register;

		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
				    mlxsw_sp_rx_listener[i].trap_id);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
		if (err)
			goto err_rx_trap_set;
	}
	return 0;

err_rx_trap_set:
	/* Listener i was registered but its trap action failed; unregister
	 * it before unwinding the fully set-up entries below.
	 */
	mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
					  &mlxsw_sp_rx_listener[i],
					  mlxsw_sp);
err_rx_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
	return err;
}
2251
/* Revert every trap action to discard and unregister all RX listeners.
 * Errors from the register writes are ignored on this teardown path.
 */
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
}
2267
2268 static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
2269                                  enum mlxsw_reg_sfgc_type type,
2270                                  enum mlxsw_reg_sfgc_bridge_type bridge_type)
2271 {
2272         enum mlxsw_flood_table_type table_type;
2273         enum mlxsw_sp_flood_table flood_table;
2274         char sfgc_pl[MLXSW_REG_SFGC_LEN];
2275
2276         if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
2277                 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
2278         else
2279                 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
2280
2281         if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
2282                 flood_table = MLXSW_SP_FLOOD_TABLE_UC;
2283         else
2284                 flood_table = MLXSW_SP_FLOOD_TABLE_BM;
2285
2286         mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
2287                             flood_table);
2288         return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
2289 }
2290
2291 static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
2292 {
2293         int type, err;
2294
2295         for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
2296                 if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
2297                         continue;
2298
2299                 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
2300                                             MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
2301                 if (err)
2302                         return err;
2303
2304                 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
2305                                             MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
2306                 if (err)
2307                         return err;
2308         }
2309
2310         return 0;
2311 }
2312
/* Configure LAG hashing to use the full set of L2/L3/L4 header fields
 * supported by the SLCR register.
 */
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];

	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
}
2328
/* Driver init entry point called by the mlxsw core once the bus is up.
 * Initializes the switch-wide subsystems in dependency order (events,
 * traps, flooding, buffers, LAG, switchdev, router) and finally creates
 * the ports. Each failure unwinds everything initialized before it.
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;
	INIT_LIST_HEAD(&mlxsw_sp->fids);
	INIT_LIST_HEAD(&mlxsw_sp->vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
		return err;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
		goto err_rx_listener_register;
	}

	err = mlxsw_sp_flood_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	/* Ports come last: they depend on all of the above being ready. */
	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

	/* Unwind in reverse initialization order. Note that flood and LAG
	 * configuration have no dedicated fini step.
	 */
err_ports_create:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
err_flood_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_rx_listener_register:
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	return err;
}
2411
/* Driver teardown callback: undo mlxsw_sp_init() in reverse order and
 * sanity-check that all FIDs, vFIDs and RIFs were released by then.
 */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int i;

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	/* All references should have been dropped by the fini calls above;
	 * anything left behind indicates a leak.
	 */
	WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
	WARN_ON(!list_empty(&mlxsw_sp->fids));
	for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
		WARN_ON_ONCE(mlxsw_sp->rifs[i]);
}
2428
/* Device configuration profile passed to the core at init time. Each
 * used_* flag tells firmware whether the corresponding max_* value is
 * to be applied. Limits come from driver-wide MLXSW_SP_* constants.
 */
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels		= 1,
	.max_vepa_channels		= 0,
	.used_max_lag			= 1,
	.max_lag			= MLXSW_SP_LAG_MAX,
	.used_max_port_per_lag		= 1,
	.max_port_per_lag		= MLXSW_SP_PORT_PER_LAG_MAX,
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_max_pgt			= 1,
	.max_pgt			= 0,
	.used_max_system_port		= 1,
	.max_system_port		= 64,
	.used_max_vlan_groups		= 1,
	.max_vlan_groups		= 127,
	.used_max_regions		= 1,
	.max_regions			= 400,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	/* Two tables each for FID-offset and FID flooding; sizes follow
	 * the VLAN space and the vFID space respectively.
	 */
	.max_fid_offset_flood_tables	= 2,
	.fid_offset_flood_table_size	= VLAN_N_VID - 1,
	.max_fid_flood_tables		= 2,
	.fid_flood_table_size		= MLXSW_SP_VFID_MAX,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	/* KVD (key-value database) partitioning between linear, hash
	 * single and hash double regions.
	 */
	.used_kvd_sizes			= 1,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.kvd_hash_single_size		= MLXSW_SP_KVD_HASH_SINGLE_SIZE,
	.kvd_hash_double_size		= MLXSW_SP_KVD_HASH_DOUBLE_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
2468
/* mlxsw core driver ops for Spectrum: lifecycle, port split, shared
 * buffer (sb_*) configuration/occupancy and TX header construction.
 */
static struct mlxsw_driver mlxsw_sp_driver = {
	.kind				= MLXSW_DEVICE_KIND_SPECTRUM,
	.owner				= THIS_MODULE,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp_init,
	.fini				= mlxsw_sp_fini,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp_config_profile,
};
2491
2492 static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
2493 {
2494         return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
2495 }
2496
2497 static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
2498 {
2499         struct net_device *lower_dev;
2500         struct list_head *iter;
2501
2502         if (mlxsw_sp_port_dev_check(dev))
2503                 return netdev_priv(dev);
2504
2505         netdev_for_each_all_lower_dev(dev, lower_dev, iter) {
2506                 if (mlxsw_sp_port_dev_check(lower_dev))
2507                         return netdev_priv(lower_dev);
2508         }
2509         return NULL;
2510 }
2511
2512 static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
2513 {
2514         struct mlxsw_sp_port *mlxsw_sp_port;
2515
2516         mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
2517         return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
2518 }
2519
2520 static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
2521 {
2522         struct net_device *lower_dev;
2523         struct list_head *iter;
2524
2525         if (mlxsw_sp_port_dev_check(dev))
2526                 return netdev_priv(dev);
2527
2528         netdev_for_each_all_lower_dev_rcu(dev, lower_dev, iter) {
2529                 if (mlxsw_sp_port_dev_check(lower_dev))
2530                         return netdev_priv(lower_dev);
2531         }
2532         return NULL;
2533 }
2534
2535 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
2536 {
2537         struct mlxsw_sp_port *mlxsw_sp_port;
2538
2539         rcu_read_lock();
2540         mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
2541         if (mlxsw_sp_port)
2542                 dev_hold(mlxsw_sp_port->dev);
2543         rcu_read_unlock();
2544         return mlxsw_sp_port;
2545 }
2546
/* Drop the netdev reference taken by mlxsw_sp_port_lower_dev_hold(). */
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}
2551
2552 static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r,
2553                                        unsigned long event)
2554 {
2555         switch (event) {
2556         case NETDEV_UP:
2557                 if (!r)
2558                         return true;
2559                 r->ref_count++;
2560                 return false;
2561         case NETDEV_DOWN:
2562                 if (r && --r->ref_count == 0)
2563                         return true;
2564                 /* It is possible we already removed the RIF ourselves
2565                  * if it was assigned to a netdev that is now a bridge
2566                  * or LAG slave.
2567                  */
2568                 return false;
2569         }
2570
2571         return false;
2572 }
2573
2574 static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
2575 {
2576         int i;
2577
2578         for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
2579                 if (!mlxsw_sp->rifs[i])
2580                         return i;
2581
2582         return MLXSW_SP_RIF_MAX;
2583 }
2584
2585 static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
2586                                            bool *p_lagged, u16 *p_system_port)
2587 {
2588         u8 local_port = mlxsw_sp_vport->local_port;
2589
2590         *p_lagged = mlxsw_sp_vport->lagged;
2591         *p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
2592 }
2593
2594 static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
2595                                     struct net_device *l3_dev, u16 rif,
2596                                     bool create)
2597 {
2598         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
2599         bool lagged = mlxsw_sp_vport->lagged;
2600         char ritr_pl[MLXSW_REG_RITR_LEN];
2601         u16 system_port;
2602
2603         mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif,
2604                             l3_dev->mtu, l3_dev->dev_addr);
2605
2606         mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
2607         mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
2608                                   mlxsw_sp_vport_vid_get(mlxsw_sp_vport));
2609
2610         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
2611 }
2612
2613 static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
2614
2615 static struct mlxsw_sp_fid *
2616 mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
2617 {
2618         struct mlxsw_sp_fid *f;
2619
2620         f = kzalloc(sizeof(*f), GFP_KERNEL);
2621         if (!f)
2622                 return NULL;
2623
2624         f->leave = mlxsw_sp_vport_rif_sp_leave;
2625         f->ref_count = 0;
2626         f->dev = l3_dev;
2627         f->fid = fid;
2628
2629         return f;
2630 }
2631
2632 static struct mlxsw_sp_rif *
2633 mlxsw_sp_rif_alloc(u16 rif, struct net_device *l3_dev, struct mlxsw_sp_fid *f)
2634 {
2635         struct mlxsw_sp_rif *r;
2636
2637         r = kzalloc(sizeof(*r), GFP_KERNEL);
2638         if (!r)
2639                 return NULL;
2640
2641         ether_addr_copy(r->addr, l3_dev->dev_addr);
2642         r->mtu = l3_dev->mtu;
2643         r->ref_count = 1;
2644         r->dev = l3_dev;
2645         r->rif = rif;
2646         r->f = f;
2647
2648         return r;
2649 }
2650
/* Create a sub-port RIF for @mlxsw_sp_vport on top of @l3_dev:
 * reserve a RIF index, program it in hardware, install the FDB entry
 * for the router MAC, then allocate the software rFID/RIF state.
 * Returns the new RIF or an ERR_PTR; the goto ladder unwinds the
 * hardware state in reverse on failure.
 */
static struct mlxsw_sp_rif *
mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
			     struct net_device *l3_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct mlxsw_sp_fid *f;
	struct mlxsw_sp_rif *r;
	u16 fid, rif;
	int err;

	rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
	if (rif == MLXSW_SP_RIF_MAX)
		return ERR_PTR(-ERANGE);

	err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true);
	if (err)
		return ERR_PTR(err);

	/* The FID for a sub-port RIF is derived from the RIF index. */
	fid = mlxsw_sp_rif_sp_to_fid(rif);
	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
	if (err)
		goto err_rif_fdb_op;

	f = mlxsw_sp_rfid_alloc(fid, l3_dev);
	if (!f) {
		err = -ENOMEM;
		goto err_rfid_alloc;
	}

	r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
	if (!r) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}

	/* Link FID and RIF and publish the RIF in the table. */
	f->r = r;
	mlxsw_sp->rifs[rif] = r;

	return r;

err_rif_alloc:
	kfree(f);
err_rfid_alloc:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
err_rif_fdb_op:
	mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
	return ERR_PTR(err);
}
2699
/* Tear down a sub-port RIF: unpublish and free the software state,
 * then remove the router MAC FDB entry and the hardware RIF —
 * the exact reverse of mlxsw_sp_vport_rif_sp_create().
 */
static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
					  struct mlxsw_sp_rif *r)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct net_device *l3_dev = r->dev;
	struct mlxsw_sp_fid *f = r->f;
	/* Cache identifiers before freeing the structures they live in. */
	u16 fid = f->fid;
	u16 rif = r->rif;

	mlxsw_sp->rifs[rif] = NULL;
	f->r = NULL;

	kfree(r);

	kfree(f);

	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);

	mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
}
2720
2721 static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
2722                                       struct net_device *l3_dev)
2723 {
2724         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
2725         struct mlxsw_sp_rif *r;
2726
2727         r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
2728         if (!r) {
2729                 r = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
2730                 if (IS_ERR(r))
2731                         return PTR_ERR(r);
2732         }
2733
2734         mlxsw_sp_vport_fid_set(mlxsw_sp_vport, r->f);
2735         r->f->ref_count++;
2736
2737         netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", r->f->fid);
2738
2739         return 0;
2740 }
2741
/* Detach the vPort from its FID and destroy the underlying RIF when
 * this was the last reference.
 */
static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
	if (--f->ref_count == 0)
		mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->r);
}
2752
2753 static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
2754                                          struct net_device *port_dev,
2755                                          unsigned long event, u16 vid)
2756 {
2757         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
2758         struct mlxsw_sp_port *mlxsw_sp_vport;
2759
2760         mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
2761         if (WARN_ON(!mlxsw_sp_vport))
2762                 return -EINVAL;
2763
2764         switch (event) {
2765         case NETDEV_UP:
2766                 return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
2767         case NETDEV_DOWN:
2768                 mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
2769                 break;
2770         }
2771
2772         return 0;
2773 }
2774
2775 static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
2776                                         unsigned long event)
2777 {
2778         if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev))
2779                 return 0;
2780
2781         return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
2782 }
2783
2784 static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
2785                                          struct net_device *lag_dev,
2786                                          unsigned long event, u16 vid)
2787 {
2788         struct net_device *port_dev;
2789         struct list_head *iter;
2790         int err;
2791
2792         netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
2793                 if (mlxsw_sp_port_dev_check(port_dev)) {
2794                         err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
2795                                                             event, vid);
2796                         if (err)
2797                                 return err;
2798                 }
2799         }
2800
2801         return 0;
2802 }
2803
/* Inetaddr event on a LAG device itself: use VID 1, unless the LAG is
 * enslaved to a bridge, in which case the bridge handles the event.
 */
static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
				       unsigned long event)
{
	return netif_is_bridge_port(lag_dev) ? 0 :
	       __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
}
2812
2813 static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
2814                                                     struct net_device *l3_dev)
2815 {
2816         u16 fid;
2817
2818         if (is_vlan_dev(l3_dev))
2819                 fid = vlan_dev_vlan_id(l3_dev);
2820         else if (mlxsw_sp->master_bridge.dev == l3_dev)
2821                 fid = 1;
2822         else
2823                 return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);
2824
2825         return mlxsw_sp_fid_find(mlxsw_sp, fid);
2826 }
2827
2828 static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
2829 {
2830         if (mlxsw_sp_fid_is_vfid(fid))
2831                 return MLXSW_REG_RITR_FID_IF;
2832         else
2833                 return MLXSW_REG_RITR_VLAN_IF;
2834 }
2835
/* Create or destroy (per @create) a bridge RIF for @fid at index @rif,
 * programming the RITR register with the interface type derived from
 * the FID kind. Returns the register write result.
 */
static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *l3_dev,
				  u16 fid, u16 rif,
				  bool create)
{
	enum mlxsw_reg_ritr_if_type rif_type;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	rif_type = mlxsw_sp_rif_type_get(fid);
	mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, l3_dev->mtu,
			    l3_dev->dev_addr);
	/* The FID field location inside RITR depends on the chosen type. */
	mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
2851
/* Create a bridge RIF for FID @f on @l3_dev: reserve a RIF index,
 * program the hardware RIF and router MAC FDB entry, then allocate and
 * publish the software RIF. The goto ladder unwinds hardware state in
 * reverse on failure. Returns 0 or a negative errno.
 */
static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
				      struct net_device *l3_dev,
				      struct mlxsw_sp_fid *f)
{
	struct mlxsw_sp_rif *r;
	u16 rif;
	int err;

	rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
	if (rif == MLXSW_SP_RIF_MAX)
		return -ERANGE;

	err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true);
	if (err)
		return err;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
	if (err)
		goto err_rif_fdb_op;

	r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
	if (!r) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}

	/* Link FID and RIF and publish the RIF in the table. */
	f->r = r;
	mlxsw_sp->rifs[rif] = r;

	netdev_dbg(l3_dev, "RIF=%d created\n", rif);

	return 0;

err_rif_alloc:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
err_rif_fdb_op:
	mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
	return err;
}
2891
/* Tear down a bridge RIF: unpublish and free the software state, then
 * remove the router MAC FDB entry and the hardware RIF — the exact
 * reverse of mlxsw_sp_rif_bridge_create(). Unlike the sub-port path,
 * the FID is owned elsewhere and is not freed here.
 */
void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_rif *r)
{
	struct net_device *l3_dev = r->dev;
	struct mlxsw_sp_fid *f = r->f;
	/* Cache the index before freeing the structure it lives in. */
	u16 rif = r->rif;

	mlxsw_sp->rifs[rif] = NULL;
	f->r = NULL;

	kfree(r);

	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);

	mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);

	netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif);
}
2910
/* Handle an inetaddr UP/DOWN event on a bridge (or a VLAN device on
 * top of it): create or destroy the corresponding bridge RIF.
 * Returns -EINVAL (with a warning) when no FID maps to @l3_dev.
 */
static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
					  struct net_device *br_dev,
					  unsigned long event)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
	struct mlxsw_sp_fid *f;

	/* FID can either be an actual FID if the L3 device is the
	 * VLAN-aware bridge or a VLAN device on top. Otherwise, the
	 * L3 device is a VLAN-unaware bridge and we get a vFID.
	 */
	f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
	if (WARN_ON(!f))
		return -EINVAL;

	switch (event) {
	case NETDEV_UP:
		return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
	case NETDEV_DOWN:
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
		break;
	}

	return 0;
}
2936
2937 static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
2938                                         unsigned long event)
2939 {
2940         struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
2941         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
2942         u16 vid = vlan_dev_vlan_id(vlan_dev);
2943
2944         if (mlxsw_sp_port_dev_check(real_dev))
2945                 return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
2946                                                      vid);
2947         else if (netif_is_lag_master(real_dev))
2948                 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
2949                                                      vid);
2950         else if (netif_is_bridge_master(real_dev) &&
2951                  mlxsw_sp->master_bridge.dev == real_dev)
2952                 return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
2953                                                       event);
2954
2955         return 0;
2956 }
2957
/* inetaddr notifier callback: on IPv4 address add/remove, configure or
 * tear down a RIF on the affected device if it is (or sits above) a
 * Spectrum port. Errors are reported back through the notifier chain.
 */
static int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
				   unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *r;
	int err = 0;

	/* Ignore devices unrelated to this ASIC instance. */
	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		goto out;

	/* Maintains the RIF reference count; only the first UP and the
	 * last DOWN actually (de)configure hardware.
	 */
	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(r, event))
		goto out;

	if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_inetaddr_port_event(dev, event);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_inetaddr_lag_event(dev, event);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_inetaddr_vlan_event(dev, event);

out:
	return notifier_from_errno(err);
}
2987
/* Update the MAC address and MTU of an existing RIF via a RITR
 * query-modify-write sequence. Returns 0 or a negative errno.
 */
static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif,
			     const char *mac, int mtu)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	/* Read the current RIF configuration so unrelated fields are
	 * preserved by the write below.
	 */
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
	/* NOTE(review): the CREATE opcode appears to double as "update"
	 * when the RIF already exists — confirm against RITR semantics.
	 */
	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
3004
/* React to a MAC/MTU change on a netdev that backs a RIF: retire the
 * old router MAC FDB entry, reprogram the RIF, install the new FDB
 * entry, then sync the cached values. On failure the goto ladder rolls
 * back to the previous MAC/MTU. Returns 0 or a negative errno.
 */
static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
{
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *r;
	int err;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return 0;

	/* Nothing to do for devices without a RIF. */
	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!r)
		return 0;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, false);
	if (err)
		return err;

	err = mlxsw_sp_rif_edit(mlxsw_sp, r->rif, dev->dev_addr, dev->mtu);
	if (err)
		goto err_rif_edit;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, r->f->fid, true);
	if (err)
		goto err_rif_fdb_op;

	/* Commit the new values to the cached RIF state. */
	ether_addr_copy(r->addr, dev->dev_addr);
	r->mtu = dev->mtu;

	netdev_dbg(dev, "Updated RIF=%d\n", r->rif);

	return 0;

err_rif_fdb_op:
	mlxsw_sp_rif_edit(mlxsw_sp, r->rif, r->addr, r->mtu);
err_rif_edit:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, true);
	return err;
}
3044
3045 static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
3046                                          u16 fid)
3047 {
3048         if (mlxsw_sp_fid_is_vfid(fid))
3049                 return mlxsw_sp_port_vport_find_by_fid(lag_port, fid);
3050         else
3051                 return test_bit(fid, lag_port->active_vlans);
3052 }
3053
3054 static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
3055                                            u16 fid)
3056 {
3057         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3058         u8 local_port = mlxsw_sp_port->local_port;
3059         u16 lag_id = mlxsw_sp_port->lag_id;
3060         int i, count = 0;
3061
3062         if (!mlxsw_sp_port->lagged)
3063                 return true;
3064
3065         for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
3066                 struct mlxsw_sp_port *lag_port;
3067
3068                 lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
3069                 if (!lag_port || lag_port->local_port == local_port)
3070                         continue;
3071                 if (mlxsw_sp_lag_port_fid_member(lag_port, fid))
3072                         count++;
3073         }
3074
3075         return !count;
3076 }
3077
/* Flush all FDB entries learned on this local port for @fid, via the
 * SFDF register. Returns the register write result.
 */
static int
mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
						mlxsw_sp_port->local_port);

	netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n",
		   mlxsw_sp_port->local_port, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
3095
/* Flush all FDB entries learned on the port's LAG for @fid, via the
 * SFDF register. Returns the register write result.
 */
static int
mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);

	netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n",
		   mlxsw_sp_port->lag_id, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
3112
3113 int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
3114 {
3115         if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid))
3116                 return 0;
3117
3118         if (mlxsw_sp_port->lagged)
3119                 return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port,
3120                                                              fid);
3121         else
3122                 return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid);
3123 }
3124
/* Called when the master bridge is gone: drop the remaining reference
 * on each FID and destroy it. Any FID whose count does not reach zero
 * here indicates a reference-counting bug, hence the warning.
 */
static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_fid *f, *tmp;

	list_for_each_entry_safe(f, tmp, &mlxsw_sp->fids, list)
		if (--f->ref_count == 0)
			mlxsw_sp_fid_destroy(mlxsw_sp, f);
		else
			WARN_ON_ONCE(1);
}
3135
3136 static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
3137                                          struct net_device *br_dev)
3138 {
3139         return !mlxsw_sp->master_bridge.dev ||
3140                mlxsw_sp->master_bridge.dev == br_dev;
3141 }
3142
/* Record @br_dev as the master bridge and take a reference on it. */
static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	mlxsw_sp->master_bridge.dev = br_dev;
	mlxsw_sp->master_bridge.ref_count++;
}
3149
/* Drop a reference on the master bridge; when the last one goes, clear
 * the device pointer and release any FIDs still tied to it.
 */
static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
{
	if (--mlxsw_sp->master_bridge.ref_count == 0) {
		mlxsw_sp->master_bridge.dev = NULL;
		/* It's possible upper VLAN devices are still holding
		 * references to underlying FIDs. Drop the reference
		 * and release the resources if it was the last one.
		 * If it wasn't, then something bad happened.
		 */
		mlxsw_sp_master_bridge_gone_sync(mlxsw_sp);
	}
}
3162
/* Enslave @mlxsw_sp_port to bridge @br_dev: remove the implicit VID 1
 * interface, account the bridge reference and flip the port into
 * bridged mode with learning and unicast flooding enabled.
 * Returns 0 or a negative errno.
 */
static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct net_device *br_dev)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* When port is not bridged untagged packets are tagged with
	 * PVID=VID=1, thereby creating an implicit VLAN interface in
	 * the device. Remove it and let bridge code take care of its
	 * own VLANs.
	 */
	err = mlxsw_sp_port_kill_vid(dev, 0, 1);
	if (err)
		return err;

	mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev);

	mlxsw_sp_port->learning = 1;
	mlxsw_sp_port->learning_sync = 1;
	mlxsw_sp_port->uc_flood = 1;
	mlxsw_sp_port->bridged = 1;

	return 0;
}
3187
/* Undo mlxsw_sp_port_bridge_join(): restore PVID 1, drop the bridge
 * reference, clear the bridged-mode flags and re-add the implicit
 * VID 1 interface.
 */
static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;

	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);

	mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp);

	mlxsw_sp_port->learning = 0;
	mlxsw_sp_port->learning_sync = 0;
	mlxsw_sp_port->uc_flood = 0;
	mlxsw_sp_port->bridged = 0;

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	mlxsw_sp_port_add_vid(dev, 0, 1);
}
3206
3207 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3208 {
3209         char sldr_pl[MLXSW_REG_SLDR_LEN];
3210
3211         mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
3212         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3213 }
3214
3215 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3216 {
3217         char sldr_pl[MLXSW_REG_SLDR_LEN];
3218
3219         mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
3220         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3221 }
3222
3223 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3224                                      u16 lag_id, u8 port_index)
3225 {
3226         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3227         char slcor_pl[MLXSW_REG_SLCOR_LEN];
3228
3229         mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
3230                                       lag_id, port_index);
3231         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3232 }
3233
3234 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3235                                         u16 lag_id)
3236 {
3237         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3238         char slcor_pl[MLXSW_REG_SLCOR_LEN];
3239
3240         mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
3241                                          lag_id);
3242         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3243 }
3244
3245 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
3246                                         u16 lag_id)
3247 {
3248         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3249         char slcor_pl[MLXSW_REG_SLCOR_LEN];
3250
3251         mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
3252                                         lag_id);
3253         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3254 }
3255
3256 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
3257                                          u16 lag_id)
3258 {
3259         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3260         char slcor_pl[MLXSW_REG_SLCOR_LEN];
3261
3262         mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
3263                                          lag_id);
3264         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3265 }
3266
3267 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3268                                   struct net_device *lag_dev,
3269                                   u16 *p_lag_id)
3270 {
3271         struct mlxsw_sp_upper *lag;
3272         int free_lag_id = -1;
3273         int i;
3274
3275         for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
3276                 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
3277                 if (lag->ref_count) {
3278                         if (lag->dev == lag_dev) {
3279                                 *p_lag_id = i;
3280                                 return 0;
3281                         }
3282                 } else if (free_lag_id < 0) {
3283                         free_lag_id = i;
3284                 }
3285         }
3286         if (free_lag_id < 0)
3287                 return -EBUSY;
3288         *p_lag_id = free_lag_id;
3289         return 0;
3290 }
3291
3292 static bool
3293 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
3294                           struct net_device *lag_dev,
3295                           struct netdev_lag_upper_info *lag_upper_info)
3296 {
3297         u16 lag_id;
3298
3299         if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
3300                 return false;
3301         if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
3302                 return false;
3303         return true;
3304 }
3305
3306 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3307                                        u16 lag_id, u8 *p_port_index)
3308 {
3309         int i;
3310
3311         for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
3312                 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
3313                         *p_port_index = i;
3314                         return 0;
3315                 }
3316         }
3317         return -EBUSY;
3318 }
3319
/* Move the PVID (VID 1) vPort of @mlxsw_sp_port under LAG @lag_id. */
static void
mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  u16 lag_id)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	/* The PVID vPort is expected to always exist; its absence is a
	 * driver bug.
	 */
	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	/* If vPort is assigned a RIF, then leave it since it's no
	 * longer valid.
	 */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f)
		f->leave(mlxsw_sp_vport);

	mlxsw_sp_vport->lag_id = lag_id;
	mlxsw_sp_vport->lagged = 1;
}
3341
/* Detach the PVID (VID 1) vPort of @mlxsw_sp_port from its LAG. */
static void
mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	/* If the vPort is assigned a FID, leave it, since the FID was
	 * joined in the LAG context and is no longer valid.
	 */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f)
		f->leave(mlxsw_sp_vport);

	mlxsw_sp_vport->lagged = 0;
}
3358
3359 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
3360                                   struct net_device *lag_dev)
3361 {
3362         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3363         struct mlxsw_sp_upper *lag;
3364         u16 lag_id;
3365         u8 port_index;
3366         int err;
3367
3368         err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
3369         if (err)
3370                 return err;
3371         lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
3372         if (!lag->ref_count) {
3373                 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
3374                 if (err)
3375                         return err;
3376                 lag->dev = lag_dev;
3377         }
3378
3379         err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
3380         if (err)
3381                 return err;
3382         err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
3383         if (err)
3384                 goto err_col_port_add;
3385         err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
3386         if (err)
3387                 goto err_col_port_enable;
3388
3389         mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
3390                                    mlxsw_sp_port->local_port);
3391         mlxsw_sp_port->lag_id = lag_id;
3392         mlxsw_sp_port->lagged = 1;
3393         lag->ref_count++;
3394
3395         mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_id);
3396
3397         return 0;
3398
3399 err_col_port_enable:
3400         mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
3401 err_col_port_add:
3402         if (!lag->ref_count)
3403                 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
3404         return err;
3405 }
3406
/* Remove @mlxsw_sp_port from the LAG it is a member of, destroying the
 * LAG in the device when the last member leaves.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	/* Disable collection before removing the port from the LAG. */
	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* A port leaving a LAG also leaves any bridge the LAG is in. */
	if (mlxsw_sp_port->bridged) {
		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
	}

	/* Last member; tear down the LAG in the device. */
	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port);
}
3437
3438 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3439                                       u16 lag_id)
3440 {
3441         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3442         char sldr_pl[MLXSW_REG_SLDR_LEN];
3443
3444         mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
3445                                          mlxsw_sp_port->local_port);
3446         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3447 }
3448
3449 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3450                                          u16 lag_id)
3451 {
3452         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3453         char sldr_pl[MLXSW_REG_SLDR_LEN];
3454
3455         mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
3456                                             mlxsw_sp_port->local_port);
3457         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3458 }
3459
3460 static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
3461                                        bool lag_tx_enabled)
3462 {
3463         if (lag_tx_enabled)
3464                 return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
3465                                                   mlxsw_sp_port->lag_id);
3466         else
3467                 return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
3468                                                      mlxsw_sp_port->lag_id);
3469 }
3470
/* Reflect a LAG lower state change (tx_enabled) into the device. */
static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}
3476
3477 static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
3478                                    struct net_device *vlan_dev)
3479 {
3480         struct mlxsw_sp_port *mlxsw_sp_vport;
3481         u16 vid = vlan_dev_vlan_id(vlan_dev);
3482
3483         mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
3484         if (WARN_ON(!mlxsw_sp_vport))
3485                 return -EINVAL;
3486
3487         mlxsw_sp_vport->dev = vlan_dev;
3488
3489         return 0;
3490 }
3491
3492 static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
3493                                       struct net_device *vlan_dev)
3494 {
3495         struct mlxsw_sp_port *mlxsw_sp_vport;
3496         u16 vid = vlan_dev_vlan_id(vlan_dev);
3497
3498         mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
3499         if (WARN_ON(!mlxsw_sp_vport))
3500                 return;
3501
3502         mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
3503 }
3504
/* Validate (PRECHANGEUPPER) and reflect (CHANGEUPPER) topology changes
 * involving an upper device (VLAN, LAG or bridge) of a front panel port.
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only VLAN, LAG and bridge uppers are supported. */
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* HW limitation forbids to put ports to multiple bridges. */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return -EINVAL;
		/* LAG must have a free ID and use hash-based TX. */
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return -EINVAL;
		/* A port with VLAN uppers may not be put under a LAG. */
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		/* A LAG member may only carry VLAN uppers of the LAG. */
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
							      upper_dev);
			else
				 mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
							   upper_dev);
		} else if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								upper_dev);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			else
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
		} else {
			/* PRECHANGEUPPER should have rejected this. */
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}
3572
3573 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
3574                                                unsigned long event, void *ptr)
3575 {
3576         struct netdev_notifier_changelowerstate_info *info;
3577         struct mlxsw_sp_port *mlxsw_sp_port;
3578         int err;
3579
3580         mlxsw_sp_port = netdev_priv(dev);
3581         info = ptr;
3582
3583         switch (event) {
3584         case NETDEV_CHANGELOWERSTATE:
3585                 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
3586                         err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
3587                                                         info->lower_state_info);
3588                         if (err)
3589                                 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
3590                 }
3591                 break;
3592         }
3593
3594         return 0;
3595 }
3596
3597 static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
3598                                          unsigned long event, void *ptr)
3599 {
3600         switch (event) {
3601         case NETDEV_PRECHANGEUPPER:
3602         case NETDEV_CHANGEUPPER:
3603                 return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
3604         case NETDEV_CHANGELOWERSTATE:
3605                 return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
3606         }
3607
3608         return 0;
3609 }
3610
3611 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
3612                                         unsigned long event, void *ptr)
3613 {
3614         struct net_device *dev;
3615         struct list_head *iter;
3616         int ret;
3617
3618         netdev_for_each_lower_dev(lag_dev, dev, iter) {
3619                 if (mlxsw_sp_port_dev_check(dev)) {
3620                         ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
3621                         if (ret)
3622                                 return ret;
3623                 }
3624         }
3625
3626         return 0;
3627 }
3628
/* Take a reference on the FID backing the VLAN upper @vlan_dev of the
 * master bridge, creating the FID on first use.
 */
static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp *mlxsw_sp,
					    struct net_device *vlan_dev)
{
	/* The VLAN ID of the bridge upper doubles as the FID index. */
	u16 fid = vlan_dev_vlan_id(vlan_dev);
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp, fid);
	if (!f) {
		f = mlxsw_sp_fid_create(mlxsw_sp, fid);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	f->ref_count++;

	return 0;
}
3646
3647 static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
3648                                                struct net_device *vlan_dev)
3649 {
3650         u16 fid = vlan_dev_vlan_id(vlan_dev);
3651         struct mlxsw_sp_fid *f;
3652
3653         f = mlxsw_sp_fid_find(mlxsw_sp, fid);
3654         if (f && f->r)
3655                 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
3656         if (f && --f->ref_count == 0)
3657                 mlxsw_sp_fid_destroy(mlxsw_sp, f);
3658 }
3659
3660 static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
3661                                            unsigned long event, void *ptr)
3662 {
3663         struct netdev_notifier_changeupper_info *info;
3664         struct net_device *upper_dev;
3665         struct mlxsw_sp *mlxsw_sp;
3666         int err;
3667
3668         mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3669         if (!mlxsw_sp)
3670                 return 0;
3671         if (br_dev != mlxsw_sp->master_bridge.dev)
3672                 return 0;
3673
3674         info = ptr;
3675
3676         switch (event) {
3677         case NETDEV_CHANGEUPPER:
3678                 upper_dev = info->upper_dev;
3679                 if (!is_vlan_dev(upper_dev))
3680                         break;
3681                 if (info->linking) {
3682                         err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
3683                                                                upper_dev);
3684                         if (err)
3685                                 return err;
3686                 } else {
3687                         mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp, upper_dev);
3688                 }
3689                 break;
3690         }
3691
3692         return 0;
3693 }
3694
/* Return the number of the first unused vFID, or MLXSW_SP_VFID_MAX when
 * all vFIDs are mapped.
 */
static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->vfids.mapped,
				   MLXSW_SP_VFID_MAX);
}
3700
3701 static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
3702 {
3703         char sfmr_pl[MLXSW_REG_SFMR_LEN];
3704
3705         mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
3706         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
3707 }
3708
3709 static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
3710
/* Allocate a vFID for the bridge device @br_dev: reserve a free vFID
 * number, create the corresponding FID in the device and track it on
 * the driver's vFID list. Returns the new FID struct or an ERR_PTR.
 */
static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
						 struct net_device *br_dev)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_fid *f;
	u16 vfid, fid;
	int err;

	vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
	if (vfid == MLXSW_SP_VFID_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	fid = mlxsw_sp_vfid_to_fid(vfid);
	err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
	if (err) {
		dev_err(dev, "Failed to create FID=%d\n", fid);
		return ERR_PTR(err);
	}

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		goto err_allocate_vfid;

	f->leave = mlxsw_sp_vport_vfid_leave;
	f->fid = fid;
	f->dev = br_dev;

	list_add(&f->list, &mlxsw_sp->vfids.list);
	set_bit(vfid, mlxsw_sp->vfids.mapped);

	return f;

err_allocate_vfid:
	/* Roll back the FID created in the device above. */
	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
	return ERR_PTR(-ENOMEM);
}
3749
/* Free @f and its vFID: unlink it from the driver's tracking state,
 * destroy any router interface on it and remove the FID from the device.
 */
static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fid *f)
{
	u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
	/* Copy the FID number out since f is freed before the device op. */
	u16 fid = f->fid;

	clear_bit(vfid, mlxsw_sp->vfids.mapped);
	list_del(&f->list);

	if (f->r)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);

	kfree(f);

	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
}
3766
3767 static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
3768                                   bool valid)
3769 {
3770         enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
3771         u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
3772
3773         return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
3774                                             vid);
3775 }
3776
/* Join the vPort to the vFID associated with bridge @br_dev, creating
 * the vFID on first use. Sets up flooding and the {port, VID}->FID
 * mapping. On failure, a vFID created here with no other users is
 * destroyed again.
 */
static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				    struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f;
	int err;

	f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
	if (!f) {
		f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_flood_set;

	err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_fid_map;

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
	f->ref_count++;

	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);

	return 0;

err_vport_fid_map:
	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
err_vport_flood_set:
	/* Don't leak a vFID created above with no other users. */
	if (!f->ref_count)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
	return err;
}
3812
/* Undo mlxsw_sp_vport_vfid_join(): unmap the vPort from its FID, stop
 * flooding, flush learned FDB entries and drop the vFID reference
 * (destroying the vFID when it was the last one).
 */
static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);

	mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
	if (--f->ref_count == 0)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
}
3829
/* Enslave the vPort to bridge @br_dev: move it from its current FID
 * (if any) to the bridge's vFID and enable learning on its VID.
 */
static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	int err;

	/* Leave the FID the vPort is currently mapped to, if any. */
	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev);
	if (err) {
		netdev_err(dev, "Failed to join vFID\n");
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning\n");
		goto err_port_vid_learning_set;
	}

	mlxsw_sp_vport->learning = 1;
	mlxsw_sp_vport->learning_sync = 1;
	mlxsw_sp_vport->uc_flood = 1;
	mlxsw_sp_vport->bridged = 1;

	return 0;

err_port_vid_learning_set:
	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
	return err;
}
3864
3865 static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
3866 {
3867         u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
3868
3869         mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
3870
3871         mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
3872
3873         mlxsw_sp_vport->learning = 0;
3874         mlxsw_sp_vport->learning_sync = 0;
3875         mlxsw_sp_vport->uc_flood = 0;
3876         mlxsw_sp_vport->bridged = 0;
3877 }
3878
3879 static bool
3880 mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
3881                                   const struct net_device *br_dev)
3882 {
3883         struct mlxsw_sp_port *mlxsw_sp_vport;
3884
3885         list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
3886                             vport.list) {
3887                 struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport);
3888
3889                 if (dev && dev == br_dev)
3890                         return false;
3891         }
3892
3893         return true;
3894 }
3895
/* Handle upper device events for the vPort of @dev with VLAN @vid.
 * Only bridge uppers are supported for VLAN interfaces.
 */
static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
					  unsigned long event, void *ptr,
					  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct net_device *upper_dev;
	int err = 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* We can't have multiple VLAN interfaces configured on
		 * the same port and being members in the same bridge.
		 */
		if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
						       upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking) {
			if (WARN_ON(!mlxsw_sp_vport))
				return -EINVAL;
			err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
							 upper_dev);
		} else {
			/* The vPort may already be gone; nothing to do. */
			if (!mlxsw_sp_vport)
				return 0;
			mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
		}
	}

	return err;
}
3938
3939 static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
3940                                               unsigned long event, void *ptr,
3941                                               u16 vid)
3942 {
3943         struct net_device *dev;
3944         struct list_head *iter;
3945         int ret;
3946
3947         netdev_for_each_lower_dev(lag_dev, dev, iter) {
3948                 if (mlxsw_sp_port_dev_check(dev)) {
3949                         ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
3950                                                              vid);
3951                         if (ret)
3952                                 return ret;
3953                 }
3954         }
3955
3956         return 0;
3957 }
3958
3959 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
3960                                          unsigned long event, void *ptr)
3961 {
3962         struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
3963         u16 vid = vlan_dev_vlan_id(vlan_dev);
3964
3965         if (mlxsw_sp_port_dev_check(real_dev))
3966                 return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
3967                                                       vid);
3968         else if (netif_is_lag_master(real_dev))
3969                 return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
3970                                                           vid);
3971
3972         return 0;
3973 }
3974
/* Top-level netdev notifier callback: dispatch by event and device
 * type, converting errnos into notifier return codes.
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err = 0;

	/* Address/MTU changes go to the router code regardless of the
	 * device type; everything else is dispatched by type.
	 */
	if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}
3994
/* Netdevice notifier: reflects bridge/LAG/VLAN topology changes. */
static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};
3998
/* IPv4 address notifier for router interface management. */
static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_event,
	.priority = 10, /* Must be called before FIB notifier block */
};
4003
4004 static int __init mlxsw_sp_module_init(void)
4005 {
4006         int err;
4007
4008         register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
4009         register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
4010         err = mlxsw_core_driver_register(&mlxsw_sp_driver);
4011         if (err)
4012                 goto err_core_driver_register;
4013         return 0;
4014
4015 err_core_driver_register:
4016         unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
4017         return err;
4018 }
4019
/* Module teardown: unregister in reverse order of registration. */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}
4026
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
/* Bind this module to devices probed by mlxsw_core as "spectrum". */
MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);