mlxsw: spectrum: Add debug prints
[cascardo/linux.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum.c
1 /*
2  * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
3  * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4  * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5  * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6  * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/netdevice.h>
41 #include <linux/etherdevice.h>
42 #include <linux/ethtool.h>
43 #include <linux/slab.h>
44 #include <linux/device.h>
45 #include <linux/skbuff.h>
46 #include <linux/if_vlan.h>
47 #include <linux/if_bridge.h>
48 #include <linux/workqueue.h>
49 #include <linux/jiffies.h>
50 #include <linux/bitops.h>
51 #include <linux/list.h>
52 #include <linux/notifier.h>
53 #include <linux/dcbnl.h>
54 #include <net/switchdev.h>
55 #include <generated/utsrelease.h>
56
57 #include "spectrum.h"
58 #include "core.h"
59 #include "reg.h"
60 #include "port.h"
61 #include "trap.h"
62 #include "txheader.h"
63
64 static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
65 static const char mlxsw_sp_driver_version[] = "1.0";
66
67 /* tx_hdr_version
68  * Tx header version.
69  * Must be set to 1.
70  */
71 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
72
73 /* tx_hdr_ctl
74  * Packet control type.
75  * 0 - Ethernet control (e.g. EMADs, LACP)
76  * 1 - Ethernet data
77  */
78 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
79
80 /* tx_hdr_proto
81  * Packet protocol type. Must be set to 1 (Ethernet).
82  */
83 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
84
85 /* tx_hdr_rx_is_router
86  * Packet is sent from the router. Valid for data packets only.
87  */
88 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
89
90 /* tx_hdr_fid_valid
91  * Indicates if the 'fid' field is valid and should be used for
92  * forwarding lookup. Valid for data packets only.
93  */
94 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
95
96 /* tx_hdr_swid
97  * Switch partition ID. Must be set to 0.
98  */
99 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
100
101 /* tx_hdr_control_tclass
102  * Indicates if the packet should use the control TClass and not one
103  * of the data TClasses.
104  */
105 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
106
107 /* tx_hdr_etclass
108  * Egress TClass to be used on the egress device on the egress port.
109  */
110 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
111
112 /* tx_hdr_port_mid
113  * Destination local port for unicast packets.
114  * Destination multicast ID for multicast packets.
115  *
116  * Control packets are directed to a specific egress port, while data
117  * packets are transmitted through the CPU port (0) into the switch partition,
118  * where forwarding rules are applied.
119  */
120 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
121
122 /* tx_hdr_fid
123  * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
124  * set, otherwise calculated based on the packet's VID using VID to FID mapping.
125  * Valid for data packets only.
126  */
127 MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
128
129 /* tx_hdr_type
130  * 0 - Data packets
131  * 6 - Control packets
132  */
133 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
134
135 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
136                                      const struct mlxsw_tx_info *tx_info)
137 {
138         char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
139
140         memset(txhdr, 0, MLXSW_TXHDR_LEN);
141
142         mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
143         mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
144         mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
145         mlxsw_tx_hdr_swid_set(txhdr, 0);
146         mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
147         mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
148         mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
149 }
150
151 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
152 {
153         char spad_pl[MLXSW_REG_SPAD_LEN];
154         int err;
155
156         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
157         if (err)
158                 return err;
159         mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
160         return 0;
161 }
162
163 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
164                                           bool is_up)
165 {
166         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
167         char paos_pl[MLXSW_REG_PAOS_LEN];
168
169         mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
170                             is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
171                             MLXSW_PORT_ADMIN_STATUS_DOWN);
172         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
173 }
174
175 static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
176                                          bool *p_is_up)
177 {
178         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
179         char paos_pl[MLXSW_REG_PAOS_LEN];
180         u8 oper_status;
181         int err;
182
183         mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
184         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
185         if (err)
186                 return err;
187         oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
188         *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
189         return 0;
190 }
191
192 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
193                                       unsigned char *addr)
194 {
195         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
196         char ppad_pl[MLXSW_REG_PPAD_LEN];
197
198         mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
199         mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
200         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
201 }
202
203 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
204 {
205         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
206         unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
207
208         ether_addr_copy(addr, mlxsw_sp->base_mac);
209         addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
210         return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
211 }
212
213 static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
214                                        u16 vid, enum mlxsw_reg_spms_state state)
215 {
216         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
217         char *spms_pl;
218         int err;
219
220         spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
221         if (!spms_pl)
222                 return -ENOMEM;
223         mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
224         mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
225         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
226         kfree(spms_pl);
227         return err;
228 }
229
230 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
231 {
232         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
233         char pmtu_pl[MLXSW_REG_PMTU_LEN];
234         int max_mtu;
235         int err;
236
237         mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
238         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
239         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
240         if (err)
241                 return err;
242         max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
243
244         if (mtu > max_mtu)
245                 return -EINVAL;
246
247         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
248         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
249 }
250
251 static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
252                                     u8 swid)
253 {
254         char pspa_pl[MLXSW_REG_PSPA_LEN];
255
256         mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
257         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
258 }
259
260 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
261 {
262         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
263
264         return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
265                                         swid);
266 }
267
268 static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
269                                      bool enable)
270 {
271         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
272         char svpe_pl[MLXSW_REG_SVPE_LEN];
273
274         mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
275         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
276 }
277
278 int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
279                                  enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
280                                  u16 vid)
281 {
282         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
283         char svfa_pl[MLXSW_REG_SVFA_LEN];
284
285         mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
286                             fid, vid);
287         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
288 }
289
290 static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
291                                           u16 vid, bool learn_enable)
292 {
293         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
294         char *spvmlr_pl;
295         int err;
296
297         spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
298         if (!spvmlr_pl)
299                 return -ENOMEM;
300         mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
301                               learn_enable);
302         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
303         kfree(spvmlr_pl);
304         return err;
305 }
306
307 static int
308 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
309 {
310         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
311         char sspr_pl[MLXSW_REG_SSPR_LEN];
312
313         mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
314         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
315 }
316
317 static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
318                                          u8 local_port, u8 *p_module,
319                                          u8 *p_width, u8 *p_lane)
320 {
321         char pmlp_pl[MLXSW_REG_PMLP_LEN];
322         int err;
323
324         mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
325         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
326         if (err)
327                 return err;
328         *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
329         *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
330         *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
331         return 0;
332 }
333
334 static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
335                                     u8 module, u8 width, u8 lane)
336 {
337         char pmlp_pl[MLXSW_REG_PMLP_LEN];
338         int i;
339
340         mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
341         mlxsw_reg_pmlp_width_set(pmlp_pl, width);
342         for (i = 0; i < width; i++) {
343                 mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
344                 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
345         }
346
347         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
348 }
349
350 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
351 {
352         char pmlp_pl[MLXSW_REG_PMLP_LEN];
353
354         mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
355         mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
356         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
357 }
358
359 static int mlxsw_sp_port_open(struct net_device *dev)
360 {
361         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
362         int err;
363
364         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
365         if (err)
366                 return err;
367         netif_start_queue(dev);
368         return 0;
369 }
370
371 static int mlxsw_sp_port_stop(struct net_device *dev)
372 {
373         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
374
375         netif_stop_queue(dev);
376         return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
377 }
378
/* ndo_start_xmit: transmit an skb as a control packet out of the port.
 * The device requires MLXSW_TXHDR_LEN bytes of headroom for the Tx header;
 * if the skb lacks it, it is reallocated (the original is freed). Short
 * frames are padded to the minimum Ethernet length before transmission.
 * Per-CPU stats are updated on success; drops are counted in tx_dropped.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	/* Back-pressure: ask the core to retry later if it cannot accept
	 * the skb right now.
	 */
	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			/* Reallocation failed; drop the original skb. */
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
	}

	/* eth_skb_pad() frees the skb on failure, so just count the drop. */
	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* Record the length now - the skb is owned by the core (and may be
	 * freed) once transmitted.
	 */
	len = skb->len;
	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
430
/* ndo_set_rx_mode: intentionally empty. No Rx filtering is configured
 * here; presumably the device's forwarding pipeline makes it unnecessary
 * for this driver - NOTE(review): confirm against the ndo contract.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
434
435 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
436 {
437         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
438         struct sockaddr *addr = p;
439         int err;
440
441         if (!is_valid_ether_addr(addr->sa_data))
442                 return -EADDRNOTAVAIL;
443
444         err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
445         if (err)
446                 return err;
447         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
448         return 0;
449 }
450
/* Pack a single priority group buffer entry into an already-queried
 * PBMC payload. The buffer is sized at two MTUs (in cells); lossless
 * buffers (pause or PFC enabled) get extra @delay headroom above the
 * xoff threshold, lossy ones are packed with just the size.
 */
static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
				 bool pause_en, bool pfc_en, u16 delay)
{
	u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);

	/* PFC uses its own delay calculation; plain pause uses a fixed one. */
	delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
			 MLXSW_SP_PAUSE_DELAY;

	if (pause_en || pfc_en)
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
						    pg_size + delay, pg_size);
	else
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
}
465
/* Configure the port's headroom buffers (PBMC register) for @mtu.
 *
 * @prio_tc:  priority to traffic class mapping (IEEE_8021QAZ_MAX_TCS
 *            entries).
 * @pause_en: global pause enabled on the port.
 * @my_pfc:   PFC configuration, or NULL when PFC is not in use.
 *
 * Read-modify-write: the current PBMC contents are queried first, and
 * only buffers that some priority actually maps to are re-packed.
 * Returns 0 on success or a negative errno.
 */
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	/* For each buffer (traffic class) i, find whether any priority j
	 * maps to it; if so, the buffer is configured, lossless when that
	 * priority has PFC enabled.
	 */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;

		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}
500
501 static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
502                                       int mtu, bool pause_en)
503 {
504         u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
505         bool dcb_en = !!mlxsw_sp_port->dcb.ets;
506         struct ieee_pfc *my_pfc;
507         u8 *prio_tc;
508
509         prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
510         my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
511
512         return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
513                                             pause_en, my_pfc);
514 }
515
516 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
517 {
518         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
519         bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
520         int err;
521
522         err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
523         if (err)
524                 return err;
525         err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
526         if (err)
527                 goto err_port_mtu_set;
528         dev->mtu = mtu;
529         return 0;
530
531 err_port_mtu_set:
532         mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
533         return err;
534 }
535
/* ndo_get_stats64: aggregate the per-CPU software counters into @stats.
 * The u64 counters are read under the u64_stats seqcount so a consistent
 * snapshot is obtained even on 32-bit hosts; tx_dropped is a plain u32
 * updated without that protection and is simply summed.
 */
static struct rtnl_link_stats64 *
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		/* Retry the read if a writer updated the counters meanwhile. */
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped	+= p->tx_dropped;
	}
	stats->tx_dropped	= tx_dropped;
	return stats;
}
567
568 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
569                            u16 vid_end, bool is_member, bool untagged)
570 {
571         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
572         char *spvm_pl;
573         int err;
574
575         spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
576         if (!spvm_pl)
577                 return -ENOMEM;
578
579         mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
580                             vid_end, is_member, untagged);
581         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
582         kfree(spvm_pl);
583         return err;
584 }
585
/* Transition the port to Virtual mode: install an explicit {Port, VID}
 * to FID mapping for every active VLAN, then enable Virtual Port mode.
 * On failure, the mappings installed so far are rolled back:
 * last_visited_vid bounds the rollback loop - it is the failing VID
 * (mappings for VIDs before it were installed), or VLAN_N_VID when all
 * mappings succeeded but enabling VP mode itself failed.
 */
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	/* Invalidate the mappings installed before the failure point. */
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}
615
616 static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
617 {
618         enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
619         u16 vid;
620         int err;
621
622         err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
623         if (err)
624                 return err;
625
626         for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
627                 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
628                                                    vid, vid);
629                 if (err)
630                         return err;
631         }
632
633         return 0;
634 }
635
636 static struct mlxsw_sp_fid *
637 mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid)
638 {
639         struct mlxsw_sp_fid *f;
640
641         list_for_each_entry(f, &mlxsw_sp->port_vfids.list, list) {
642                 if (f->vid == vid)
643                         return f;
644         }
645
646         return NULL;
647 }
648
/* Return the first unused vFID index, or MLXSW_SP_VFID_PORT_MAX when
 * the bitmap is full (caller must check for that sentinel).
 */
static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->port_vfids.mapped,
				   MLXSW_SP_VFID_PORT_MAX);
}
654
655 static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
656 {
657         char sfmr_pl[MLXSW_REG_SFMR_LEN];
658
659         mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
660         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
661 }
662
663 static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
664
/* Allocate a free vFID for @vid, create the corresponding FID in the
 * device, and track it in the per-switch vFID list/bitmap.
 * Returns the new FID descriptor, or an ERR_PTR: -ERANGE when no vFID
 * is free, -ENOMEM on allocation failure (the device FID is destroyed
 * again in that case), or the device error from FID creation.
 */
static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
						 u16 vid)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_fid *f;
	u16 vfid, fid;
	int err;

	vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
	if (vfid == MLXSW_SP_VFID_PORT_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	fid = mlxsw_sp_vfid_to_fid(vfid);
	/* Create the FID in the device before any software state. */
	err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
	if (err) {
		dev_err(dev, "Failed to create FID=%d\n", fid);
		return ERR_PTR(err);
	}

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		goto err_allocate_vfid;

	/* Called when the last reference to the vFID is dropped. */
	f->leave = mlxsw_sp_vport_vfid_leave;
	f->fid = fid;
	f->vid = vid;

	list_add(&f->list, &mlxsw_sp->port_vfids.list);
	set_bit(vfid, mlxsw_sp->port_vfids.mapped);

	return f;

err_allocate_vfid:
	/* Undo the device-side FID creation. */
	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
	return ERR_PTR(-ENOMEM);
}
703
704 static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
705                                   struct mlxsw_sp_fid *f)
706 {
707         u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
708
709         clear_bit(vfid, mlxsw_sp->port_vfids.mapped);
710         list_del(&f->list);
711
712         mlxsw_sp_vfid_op(mlxsw_sp, f->fid, false);
713
714         kfree(f);
715 }
716
717 static struct mlxsw_sp_port *
718 mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
719 {
720         struct mlxsw_sp_port *mlxsw_sp_vport;
721
722         mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
723         if (!mlxsw_sp_vport)
724                 return NULL;
725
726         /* dev will be set correctly after the VLAN device is linked
727          * with the real device. In case of bridge SELF invocation, dev
728          * will remain as is.
729          */
730         mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
731         mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
732         mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
733         mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
734         mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
735         mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
736         mlxsw_sp_vport->vport.vid = vid;
737
738         list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);
739
740         return mlxsw_sp_vport;
741 }
742
/* Unlink a vPort from its parent port's list and free it. */
static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	list_del(&mlxsw_sp_vport->vport.list);
	kfree(mlxsw_sp_vport);
}
748
749 static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
750                                   bool valid)
751 {
752         enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
753         u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
754
755         return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
756                                             vid);
757 }
758
/* Attach the vPort to the vFID matching its VID, creating the vFID if
 * it does not exist yet. The first user of a vFID (ref_count == 0) also
 * enables flooding for it. On error, flood setup and the vFID itself
 * are only rolled back when this vPort was about to be the first user -
 * an existing vFID with users is left untouched.
 */
static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct mlxsw_sp_fid *f;
	int err;

	f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, vid);
	if (!f) {
		f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, vid);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	/* First user of this vFID: enable flooding for it. */
	if (!f->ref_count) {
		err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
		if (err)
			goto err_vport_flood_set;
	}

	err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_fid_map;

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
	f->ref_count++;

	return 0;

err_vport_fid_map:
	if (!f->ref_count)
		mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
err_vport_flood_set:
	if (!f->ref_count)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
	return err;
}
795
/* Detach the vPort from its vFID: clear the vPort's FID pointer, remove
 * the {Port, VID} to FID mapping, and - when this was the last user -
 * disable flooding and destroy the vFID.
 */
static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);

	mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);

	if (--f->ref_count == 0) {
		mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
	}
}
809
/* ndo_vlan_rx_add_vid: create a vPort for @vid on the port and wire it
 * up in hardware - transition the port to Virtual mode if this is its
 * first vPort, join the matching vFID, disable learning, add VLAN
 * membership, and set the STP state to forwarding. Each failure point
 * unwinds all the preceding steps in reverse order.
 */
int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
			  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	/* Adding an already-present VID is not an error; warn and return. */
	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
		netdev_warn(dev, "VID=%d already configured\n", vid);
		return 0;
	}

	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
		return -ENOMEM;
	}

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to Virtual mode\n");
			goto err_port_vp_mode_trans;
		}
	}

	err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport);
	if (err) {
		netdev_err(dev, "Failed to join vFID\n");
		goto err_vport_vfid_join;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
	if (err) {
		netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
		goto err_port_vid_learning_set;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		goto err_port_add_vid;
	}

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		goto err_port_stp_state_set;
	}

	return 0;

	/* Error unwinding: each label undoes the step whose failure jumps
	 * past it, in reverse order of setup.
	 */
err_port_stp_state_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
err_port_add_vid:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
err_port_vid_learning_set:
	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
err_vport_vfid_join:
	/* Only this vPort exists, so the port can go back to VLAN mode. */
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
	return err;
}
887
/* .ndo_vlan_rx_kill_vid handler: tear down the vPort created for @vid by
 * mlxsw_sp_port_add_vid(). Steps are the reverse of the add path: discard
 * STP state, drop VLAN membership, re-enable learning, release the FID and
 * finally destroy the vPort. Returns 0 or a negative errno; mid-way failures
 * leave the remaining state in place (no unwind on the removal path).
 */
int mlxsw_sp_port_kill_vid(struct net_device *dev,
			   __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;
	int err;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	/* Removing a VID that was never added is benign; warn and succeed. */
	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		netdev_warn(dev, "VID=%d does not exist\n", vid);
		return 0;
	}

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_DISCARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		return err;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
		return err;
	}

	/* Drop FID reference. If this was the last reference the
	 * resources will be freed.
	 */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set port's mode to VLAN mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to VLAN mode\n");
			return err;
		}
	}

	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);

	return 0;
}
951
952 static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
953                                             size_t len)
954 {
955         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
956         u8 module = mlxsw_sp_port->mapping.module;
957         u8 width = mlxsw_sp_port->mapping.width;
958         u8 lane = mlxsw_sp_port->mapping.lane;
959         int err;
960
961         if (!mlxsw_sp_port->split)
962                 err = snprintf(name, len, "p%d", module + 1);
963         else
964                 err = snprintf(name, len, "p%ds%d", module + 1,
965                                lane / width);
966
967         if (err >= len)
968                 return -EINVAL;
969
970         return 0;
971 }
972
/* Netdevice operations for a Spectrum port. VLAN add/kill create and
 * destroy vPorts; FDB and bridge operations are delegated to switchdev.
 */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open               = mlxsw_sp_port_open,
	.ndo_stop               = mlxsw_sp_port_stop,
	.ndo_start_xmit         = mlxsw_sp_port_xmit,
	.ndo_set_rx_mode        = mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address    = mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu         = mlxsw_sp_port_change_mtu,
	.ndo_get_stats64        = mlxsw_sp_port_get_stats64,
	.ndo_vlan_rx_add_vid    = mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid   = mlxsw_sp_port_kill_vid,
	.ndo_fdb_add            = switchdev_port_fdb_add,
	.ndo_fdb_del            = switchdev_port_fdb_del,
	.ndo_fdb_dump           = switchdev_port_fdb_dump,
	.ndo_bridge_setlink     = switchdev_port_bridge_setlink,
	.ndo_bridge_getlink     = switchdev_port_bridge_getlink,
	.ndo_bridge_dellink     = switchdev_port_bridge_dellink,
	.ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name,
};
991
/* ethtool .get_drvinfo handler: report driver name/version, firmware
 * revision ("major.minor.subminor") and the underlying bus device name.
 */
static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}
1009
1010 static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
1011                                          struct ethtool_pauseparam *pause)
1012 {
1013         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1014
1015         pause->rx_pause = mlxsw_sp_port->link.rx_pause;
1016         pause->tx_pause = mlxsw_sp_port->link.tx_pause;
1017 }
1018
1019 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
1020                                    struct ethtool_pauseparam *pause)
1021 {
1022         char pfcc_pl[MLXSW_REG_PFCC_LEN];
1023
1024         mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
1025         mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
1026         mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
1027
1028         return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
1029                                pfcc_pl);
1030 }
1031
/* ethtool .set_pauseparam handler. PAUSE is mutually exclusive with PFC
 * and autonegotiated PAUSE is not supported. Headroom must be sized for
 * the new PAUSE setting before the PFCC register is written; on failure
 * the headroom is restored to match the previously cached state.
 */
static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	/* PFC and global PAUSE cannot be enabled at the same time. */
	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	/* Size the port's buffer headroom for the new PAUSE setting first. */
	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	/* Cache the applied configuration for .get_pauseparam. */
	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	/* Roll the headroom back to match the still-cached old setting. */
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}
1071
/* One ethtool statistic: its display name and a getter that extracts the
 * counter value from a PPCNT register payload.
 */
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];	/* name shown by `ethtool -S` */
	u64 (*getter)(char *payload);	/* extracts counter from PPCNT payload */
};
1076
/* IEEE 802.3 counter group exposed through ethtool, in reporting order.
 * Each entry maps a string name to the matching PPCNT payload getter.
 */
static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};
1155
1156 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
1157
1158 static void mlxsw_sp_port_get_strings(struct net_device *dev,
1159                                       u32 stringset, u8 *data)
1160 {
1161         u8 *p = data;
1162         int i;
1163
1164         switch (stringset) {
1165         case ETH_SS_STATS:
1166                 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
1167                         memcpy(p, mlxsw_sp_port_hw_stats[i].str,
1168                                ETH_GSTRING_LEN);
1169                         p += ETH_GSTRING_LEN;
1170                 }
1171                 break;
1172         }
1173 }
1174
1175 static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
1176                                      enum ethtool_phys_id_state state)
1177 {
1178         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1179         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1180         char mlcr_pl[MLXSW_REG_MLCR_LEN];
1181         bool active;
1182
1183         switch (state) {
1184         case ETHTOOL_ID_ACTIVE:
1185                 active = true;
1186                 break;
1187         case ETHTOOL_ID_INACTIVE:
1188                 active = false;
1189                 break;
1190         default:
1191                 return -EOPNOTSUPP;
1192         }
1193
1194         mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
1195         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
1196 }
1197
1198 static void mlxsw_sp_port_get_stats(struct net_device *dev,
1199                                     struct ethtool_stats *stats, u64 *data)
1200 {
1201         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1202         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1203         char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
1204         int i;
1205         int err;
1206
1207         mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port,
1208                              MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
1209         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
1210         for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
1211                 data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
1212 }
1213
1214 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
1215 {
1216         switch (sset) {
1217         case ETH_SS_STATS:
1218                 return MLXSW_SP_PORT_HW_STATS_LEN;
1219         default:
1220                 return -EOPNOTSUPP;
1221         }
1222 }
1223
/* One link-mode translation entry between the device's PTYS protocol
 * bitmask and the ethtool representation.
 */
struct mlxsw_sp_port_link_mode {
	u32 mask;	/* PTYS ethernet protocol bit(s) */
	u32 supported;	/* matching ethtool SUPPORTED_* flag, 0 if none */
	u32 advertised;	/* matching ethtool ADVERTISED_* flag, 0 if none */
	u32 speed;	/* link speed in Mb/s */
};
1230
/* PTYS <-> ethtool link-mode translation table. Entries whose .supported
 * and .advertised are zero have no ethtool flag for their speed/medium but
 * still contribute to speed lookup.
 */
static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported	= SUPPORTED_100baseT_Full,
		.advertised	= ADVERTISED_100baseT_Full,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported	= SUPPORTED_1000baseKX_Full,
		.advertised	= ADVERTISED_1000baseKX_Full,
		.speed		= 1000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported	= SUPPORTED_10000baseT_Full,
		.advertised	= ADVERTISED_10000baseT_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported	= SUPPORTED_10000baseKX4_Full,
		.advertised	= ADVERTISED_10000baseKX4_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported	= SUPPORTED_10000baseKR_Full,
		.advertised	= ADVERTISED_10000baseKR_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported	= SUPPORTED_20000baseKR2_Full,
		.advertised	= ADVERTISED_20000baseKR2_Full,
		.speed		= 20000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported	= SUPPORTED_40000baseCR4_Full,
		.advertised	= ADVERTISED_40000baseCR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported	= SUPPORTED_40000baseKR4_Full,
		.advertised	= ADVERTISED_40000baseKR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported	= SUPPORTED_40000baseSR4_Full,
		.advertised	= ADVERTISED_40000baseSR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported	= SUPPORTED_40000baseLR4_Full,
		.advertised	= ADVERTISED_40000baseLR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed		= 25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed		= 50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.supported	= SUPPORTED_56000baseKR4_Full,
		.advertised	= ADVERTISED_56000baseKR4_Full,
		.speed		= 56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed		= 100000,
	},
};
1327
1328 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
1329
1330 static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
1331 {
1332         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1333                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1334                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1335                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1336                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1337                               MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1338                 return SUPPORTED_FIBRE;
1339
1340         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1341                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1342                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1343                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
1344                               MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
1345                 return SUPPORTED_Backplane;
1346         return 0;
1347 }
1348
1349 static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
1350 {
1351         u32 modes = 0;
1352         int i;
1353
1354         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1355                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1356                         modes |= mlxsw_sp_port_link_mode[i].supported;
1357         }
1358         return modes;
1359 }
1360
1361 static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
1362 {
1363         u32 modes = 0;
1364         int i;
1365
1366         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1367                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1368                         modes |= mlxsw_sp_port_link_mode[i].advertised;
1369         }
1370         return modes;
1371 }
1372
1373 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
1374                                             struct ethtool_cmd *cmd)
1375 {
1376         u32 speed = SPEED_UNKNOWN;
1377         u8 duplex = DUPLEX_UNKNOWN;
1378         int i;
1379
1380         if (!carrier_ok)
1381                 goto out;
1382
1383         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1384                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
1385                         speed = mlxsw_sp_port_link_mode[i].speed;
1386                         duplex = DUPLEX_FULL;
1387                         break;
1388                 }
1389         }
1390 out:
1391         ethtool_cmd_speed_set(cmd, speed);
1392         cmd->duplex = duplex;
1393 }
1394
1395 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
1396 {
1397         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1398                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1399                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1400                               MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1401                 return PORT_FIBRE;
1402
1403         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1404                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1405                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
1406                 return PORT_DA;
1407
1408         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1409                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1410                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1411                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
1412                 return PORT_NONE;
1413
1414         return PORT_OTHER;
1415 }
1416
1417 static int mlxsw_sp_port_get_settings(struct net_device *dev,
1418                                       struct ethtool_cmd *cmd)
1419 {
1420         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1421         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1422         char ptys_pl[MLXSW_REG_PTYS_LEN];
1423         u32 eth_proto_cap;
1424         u32 eth_proto_admin;
1425         u32 eth_proto_oper;
1426         int err;
1427
1428         mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
1429         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1430         if (err) {
1431                 netdev_err(dev, "Failed to get proto");
1432                 return err;
1433         }
1434         mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
1435                               &eth_proto_admin, &eth_proto_oper);
1436
1437         cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
1438                          mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
1439                          SUPPORTED_Pause | SUPPORTED_Asym_Pause;
1440         cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
1441         mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
1442                                         eth_proto_oper, cmd);
1443
1444         eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
1445         cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
1446         cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);
1447
1448         cmd->transceiver = XCVR_INTERNAL;
1449         return 0;
1450 }
1451
1452 static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
1453 {
1454         u32 ptys_proto = 0;
1455         int i;
1456
1457         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1458                 if (advertising & mlxsw_sp_port_link_mode[i].advertised)
1459                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1460         }
1461         return ptys_proto;
1462 }
1463
1464 static u32 mlxsw_sp_to_ptys_speed(u32 speed)
1465 {
1466         u32 ptys_proto = 0;
1467         int i;
1468
1469         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1470                 if (speed == mlxsw_sp_port_link_mode[i].speed)
1471                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1472         }
1473         return ptys_proto;
1474 }
1475
1476 static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
1477 {
1478         u32 ptys_proto = 0;
1479         int i;
1480
1481         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1482                 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
1483                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1484         }
1485         return ptys_proto;
1486 }
1487
1488 static int mlxsw_sp_port_set_settings(struct net_device *dev,
1489                                       struct ethtool_cmd *cmd)
1490 {
1491         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1492         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1493         char ptys_pl[MLXSW_REG_PTYS_LEN];
1494         u32 speed;
1495         u32 eth_proto_new;
1496         u32 eth_proto_cap;
1497         u32 eth_proto_admin;
1498         bool is_up;
1499         int err;
1500
1501         speed = ethtool_cmd_speed(cmd);
1502
1503         eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
1504                 mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
1505                 mlxsw_sp_to_ptys_speed(speed);
1506
1507         mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
1508         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1509         if (err) {
1510                 netdev_err(dev, "Failed to get proto");
1511                 return err;
1512         }
1513         mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);
1514
1515         eth_proto_new = eth_proto_new & eth_proto_cap;
1516         if (!eth_proto_new) {
1517                 netdev_err(dev, "Not supported proto admin requested");
1518                 return -EINVAL;
1519         }
1520         if (eth_proto_new == eth_proto_admin)
1521                 return 0;
1522
1523         mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
1524         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1525         if (err) {
1526                 netdev_err(dev, "Failed to set proto admin");
1527                 return err;
1528         }
1529
1530         err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
1531         if (err) {
1532                 netdev_err(dev, "Failed to get oper status");
1533                 return err;
1534         }
1535         if (!is_up)
1536                 return 0;
1537
1538         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1539         if (err) {
1540                 netdev_err(dev, "Failed to set admin status");
1541                 return err;
1542         }
1543
1544         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
1545         if (err) {
1546                 netdev_err(dev, "Failed to set admin status");
1547                 return err;
1548         }
1549
1550         return 0;
1551 }
1552
/* ethtool operations for a Spectrum port. */
static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= mlxsw_sp_port_get_pauseparam,
	.set_pauseparam		= mlxsw_sp_port_set_pauseparam,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_settings		= mlxsw_sp_port_get_settings,
	.set_settings		= mlxsw_sp_port_set_settings,
};
1565
1566 static int
1567 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
1568 {
1569         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1570         u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
1571         char ptys_pl[MLXSW_REG_PTYS_LEN];
1572         u32 eth_proto_admin;
1573
1574         eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
1575         mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port,
1576                             eth_proto_admin);
1577         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1578 }
1579
1580 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
1581                           enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
1582                           bool dwrr, u8 dwrr_weight)
1583 {
1584         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1585         char qeec_pl[MLXSW_REG_QEEC_LEN];
1586
1587         mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1588                             next_index);
1589         mlxsw_reg_qeec_de_set(qeec_pl, true);
1590         mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
1591         mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
1592         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1593 }
1594
1595 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
1596                                   enum mlxsw_reg_qeec_hr hr, u8 index,
1597                                   u8 next_index, u32 maxrate)
1598 {
1599         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1600         char qeec_pl[MLXSW_REG_QEEC_LEN];
1601
1602         mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1603                             next_index);
1604         mlxsw_reg_qeec_mase_set(qeec_pl, true);
1605         mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
1606         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1607 }
1608
1609 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
1610                               u8 switch_prio, u8 tclass)
1611 {
1612         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1613         char qtct_pl[MLXSW_REG_QTCT_LEN];
1614
1615         mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
1616                             tclass);
1617         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
1618 }
1619
/* Initialize the port's ETS scheduling elements to defaults: build the
 * element hierarchy, disable every max shaper that supports one and map
 * all switch priorities to traffic class 0. Returns 0 or a negative
 * errno from the first failing register write.
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all member in the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
				    0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
					    false, 0);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that
	 * support it.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
1681
/* Create and register the netdev for @local_port.
 *
 * Allocates the netdev and per-port state, programs the basic device
 * settings (system port mapping, SWID, speeds, MTU), initializes
 * buffers, ETS and DCB, and finally registers the netdev and the core
 * port. On failure everything done so far is unwound in reverse order
 * through the error labels below.
 */
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				bool split, u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	size_t bytes;
	int err;

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev)
		return -ENOMEM;
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping.module = module;
	mlxsw_sp_port->mapping.width = width;
	mlxsw_sp_port->mapping.lane = lane;
	/* One bit per possible VLAN ID for each bitmap. */
	bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
	mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->active_vlans) {
		err = -ENOMEM;
		goto err_port_active_vlans_alloc;
	}
	mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->untagged_vlans) {
		err = -ENOMEM;
		goto err_port_untagged_vlans_alloc;
	}
	INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->hard_header_len += MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	err = mlxsw_core_port_init(mlxsw_sp->core, &mlxsw_sp_port->core_port,
				   mlxsw_sp_port->local_port, dev,
				   mlxsw_sp_port->split, module);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			mlxsw_sp_port->local_port);
		goto err_core_port_init;
	}

	err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
	if (err)
		goto err_port_vlan_init;

	/* Publish the port only after it is fully initialized. */
	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	return 0;

err_port_vlan_init:
	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
err_core_port_init:
	unregister_netdev(dev);
err_register_netdev:
err_port_dcb_init:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
err_port_swid_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	kfree(mlxsw_sp_port->untagged_vlans);
err_port_untagged_vlans_alloc:
	kfree(mlxsw_sp_port->active_vlans);
err_port_active_vlans_alloc:
	free_netdev(dev);
	return err;
}
1842
/* Destroy any vPorts still hanging off the port by killing their VIDs.
 * Uses the _safe list iterator because killing a VID removes the vPort
 * from vports_list.
 */
static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_port *mlxsw_sp_vport, *tmp;

	list_for_each_entry_safe(mlxsw_sp_vport, tmp,
				 &mlxsw_sp_port->vports_list, vport.list) {
		u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

		/* vPorts created for VLAN devices should already be gone
		 * by now, since we unregistered the port netdev.
		 */
		WARN_ON(is_vlan_dev(mlxsw_sp_vport->dev));
		mlxsw_sp_port_kill_vid(dev, 0, vid);
	}
}
1859
/* Tear down the port created by mlxsw_sp_port_create(), roughly in
 * reverse creation order. A NULL slot means the port was never created
 * (or was already removed) and is silently ignored, which lets callers
 * iterate over all possible local ports.
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	if (!mlxsw_sp_port)
		return;
	/* Unpublish the port before dismantling it. */
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_vports_fini(mlxsw_sp_port);
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	kfree(mlxsw_sp_port->untagged_vlans);
	kfree(mlxsw_sp_port->active_vlans);
	free_netdev(mlxsw_sp_port->dev);
}
1879
1880 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
1881 {
1882         int i;
1883
1884         for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
1885                 mlxsw_sp_port_remove(mlxsw_sp, i);
1886         kfree(mlxsw_sp->ports);
1887 }
1888
/* Allocate the ports array and create a netdev for every mapped local
 * port. Ports whose module info reports zero width are skipped
 * (presumably unconnected — confirm against the PMLP semantics). On
 * failure, all ports created so far are removed and the array is freed.
 */
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	u8 module, width, lane;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
						    &width, &lane);
		if (err)
			goto err_port_module_info_get;
		if (!width)
			continue;
		/* Remember the mapping for later re-creation on unsplit. */
		mlxsw_sp->port_to_module[i] = module;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width,
					   lane);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
err_port_module_info_get:
	/* Unwind: remove all ports created in previous iterations. */
	for (i--; i >= 1; i--)
		mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->ports);
	return err;
}
1923
1924 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
1925 {
1926         u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
1927
1928         return local_port - offset;
1929 }
1930
/* Create @count split ports starting at @base_port, all sharing
 * @module. Each split port gets an equal share of the module's lanes.
 *
 * The three phases (module map, SWID set, port create) are unwound
 * independently on failure: the `i = count` assignments reset the loop
 * counter so that a failure in a later phase fully unwinds all earlier
 * phases, not just the iterations of the failing one.
 */
static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
				      u8 module, unsigned int count)
{
	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
	int err, i;

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
					       width, i * width);
		if (err)
			goto err_port_module_map;
	}

	for (i = 0; i < count; i++) {
		err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
		if (err)
			goto err_port_swid_set;
	}

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
					   module, width, i * width);
		if (err)
			goto err_port_create;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
	i = count;
err_port_swid_set:
	for (i--; i >= 0; i--)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
					 MLXSW_PORT_SWID_DISABLED_PORT);
	i = count;
err_port_module_map:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
	return err;
}
1973
/* Re-create the original full-width (non-split) ports after a split is
 * undone. Since each full-width port occupies two split slots, only
 * every second local port starting at @base_port is re-created. Return
 * values are intentionally ignored — this is best-effort restoration
 * on an already-failed or teardown path.
 */
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port, unsigned int count)
{
	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
	int i;

	/* Split by four means we need to re-create two ports, otherwise
	 * only one.
	 */
	count = count / 2;

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
					 0);
	}

	for (i = 0; i < count; i++)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
				     width, 0);
	}
}
2004
/* Split @local_port into @count (2 or 4) ports.
 *
 * Validates that the port exists, is currently at full width, and that
 * the sibling slots needed by the split are free, then removes the
 * affected ports and creates the split ports in their place. On
 * failure the original full-width ports are restored.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 module, cur_width, base_port;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	module = mlxsw_sp_port->mapping.module;
	cur_width = mlxsw_sp_port->mapping.width;

	if (count != 2 && count != 4) {
		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
		return -EINVAL;
	}

	/* A port that is not at full width is itself a split port. */
	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		return -EINVAL;
	}

	/* Make sure we have enough slave (even) ports for the split. */
	if (count == 2) {
		base_port = local_port;
		if (mlxsw_sp->ports[base_port + 1]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	} else {
		base_port = mlxsw_sp_cluster_base_port_get(local_port);
		if (mlxsw_sp->ports[base_port + 1] ||
		    mlxsw_sp->ports[base_port + 3]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	}

	for (i = 0; i < count; i++)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	/* Best-effort restore of the original full-width ports. */
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
	return err;
}
2065
/* Undo a previous split of @local_port: remove the split ports and
 * re-create the original full-width port(s). The split count is
 * inferred from the current width (width 1 implies a 4-way split).
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 cur_width, base_port;
	unsigned int count;
	int i;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
		return -EINVAL;
	}

	cur_width = mlxsw_sp_port->mapping.width;
	count = cur_width == 1 ? 4 : 2;

	base_port = mlxsw_sp_cluster_base_port_get(local_port);

	/* Determine which ports to remove. */
	if (count == 2 && local_port >= base_port + 2)
		base_port = base_port + 2;

	for (i = 0; i < count; i++)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);

	return 0;
}
2102
2103 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2104                                      char *pude_pl, void *priv)
2105 {
2106         struct mlxsw_sp *mlxsw_sp = priv;
2107         struct mlxsw_sp_port *mlxsw_sp_port;
2108         enum mlxsw_reg_pude_oper_status status;
2109         u8 local_port;
2110
2111         local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2112         mlxsw_sp_port = mlxsw_sp->ports[local_port];
2113         if (!mlxsw_sp_port) {
2114                 dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n",
2115                          local_port);
2116                 return;
2117         }
2118
2119         status = mlxsw_reg_pude_oper_status_get(pude_pl);
2120         if (status == MLXSW_PORT_OPER_STATUS_UP) {
2121                 netdev_info(mlxsw_sp_port->dev, "link up\n");
2122                 netif_carrier_on(mlxsw_sp_port->dev);
2123         } else {
2124                 netdev_info(mlxsw_sp_port->dev, "link down\n");
2125                 netif_carrier_off(mlxsw_sp_port->dev);
2126         }
2127 }
2128
/* Event listener for the PUDE (port up/down event) trap. */
static struct mlxsw_event_listener mlxsw_sp_pude_event = {
	.func = mlxsw_sp_pude_event_func,
	.trap_id = MLXSW_TRAP_ID_PUDE,
};
2133
2134 static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
2135                                    enum mlxsw_event_trap_id trap_id)
2136 {
2137         struct mlxsw_event_listener *el;
2138         char hpkt_pl[MLXSW_REG_HPKT_LEN];
2139         int err;
2140
2141         switch (trap_id) {
2142         case MLXSW_TRAP_ID_PUDE:
2143                 el = &mlxsw_sp_pude_event;
2144                 break;
2145         }
2146         err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
2147         if (err)
2148                 return err;
2149
2150         mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
2151         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2152         if (err)
2153                 goto err_event_trap_set;
2154
2155         return 0;
2156
2157 err_event_trap_set:
2158         mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
2159         return err;
2160 }
2161
2162 static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
2163                                       enum mlxsw_event_trap_id trap_id)
2164 {
2165         struct mlxsw_event_listener *el;
2166
2167         switch (trap_id) {
2168         case MLXSW_TRAP_ID_PUDE:
2169                 el = &mlxsw_sp_pude_event;
2170                 break;
2171         }
2172         mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
2173 }
2174
/* RX handler for trapped packets: account the packet in the per-CPU
 * statistics of the ingress port and inject it into the network stack.
 */
static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
				      void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	/* u64_stats syncp guards the 64-bit counters on 32-bit hosts. */
	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}
2199
/* Packet types trapped to the CPU; all are delivered through the
 * generic RX handler above, regardless of ingress port.
 */
static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_FDB_MC,
	},
	/* Traps for specific L2 packet types, not trapped as FDB MC */
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_STP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LACP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_EAPOL,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LLDP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MMRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MVRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_RPVST,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_DHCP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
	},
};
2273
/* Set up the RX and CTRL trap groups and register a listener plus a
 * trap-to-CPU action for every entry in mlxsw_sp_rx_listener[]. On
 * failure, all listeners registered so far have their trap action
 * restored to forward and are unregistered.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
						      &mlxsw_sp_rx_listener[i],
						      mlxsw_sp);
		if (err)
			goto err_rx_listener_register;

		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
				    mlxsw_sp_rx_listener[i].trap_id);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
		if (err)
			goto err_rx_trap_set;
	}
	return 0;

err_rx_trap_set:
	/* Listener i was registered but its trap action failed. */
	mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
					  &mlxsw_sp_rx_listener[i],
					  mlxsw_sp);
err_rx_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
	return err;
}
2322
2323 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
2324 {
2325         char hpkt_pl[MLXSW_REG_HPKT_LEN];
2326         int i;
2327
2328         for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
2329                 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
2330                                     mlxsw_sp_rx_listener[i].trap_id);
2331                 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2332
2333                 mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
2334                                                   &mlxsw_sp_rx_listener[i],
2335                                                   mlxsw_sp);
2336         }
2337 }
2338
2339 static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
2340                                  enum mlxsw_reg_sfgc_type type,
2341                                  enum mlxsw_reg_sfgc_bridge_type bridge_type)
2342 {
2343         enum mlxsw_flood_table_type table_type;
2344         enum mlxsw_sp_flood_table flood_table;
2345         char sfgc_pl[MLXSW_REG_SFGC_LEN];
2346
2347         if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
2348                 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
2349         else
2350                 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
2351
2352         if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
2353                 flood_table = MLXSW_SP_FLOOD_TABLE_UC;
2354         else
2355                 flood_table = MLXSW_SP_FLOOD_TABLE_BM;
2356
2357         mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
2358                             flood_table);
2359         return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
2360 }
2361
2362 static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
2363 {
2364         int type, err;
2365
2366         for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
2367                 if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
2368                         continue;
2369
2370                 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
2371                                             MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
2372                 if (err)
2373                         return err;
2374
2375                 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
2376                                             MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
2377                 if (err)
2378                         return err;
2379         }
2380
2381         return 0;
2382 }
2383
/* Configure the LAG hash seed: member-port selection is based on the
 * L2 (MACs, ethertype, VLAN), L3 (IPs, IP proto) and L4 (ports) fields
 * enabled below.
 */
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];

	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
}
2399
/* Driver init callback: set up all switch-wide resources in order.
 * On failure, already-initialized stages are torn down in reverse via
 * the shared error labels; stages without a fini of their own (flood,
 * LAG) fall through to the next label's teardown.
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;
	INIT_LIST_HEAD(&mlxsw_sp->fids);
	INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		return err;
	}

	/* PUDE = port up/down events. */
	err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
		goto err_event_register;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
		goto err_rx_listener_register;
	}

	err = mlxsw_sp_flood_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	return 0;

err_switchdev_init:
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
err_flood_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_rx_listener_register:
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
err_event_register:
	mlxsw_sp_ports_remove(mlxsw_sp);
	return err;
}
2475
/* Driver fini callback: tear down switch-wide resources in the exact
 * reverse order of mlxsw_sp_init().
 */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	mlxsw_sp_ports_remove(mlxsw_sp);
	/* All FIDs should have been released by the teardown above. */
	WARN_ON(!list_empty(&mlxsw_sp->fids));
}
2487
/* Device configuration profile handed to the core at driver init.
 * Each used_* flag marks the corresponding capability field as valid.
 */
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels		= 1,
	.max_vepa_channels		= 0,
	.used_max_lag			= 1,
	.max_lag			= MLXSW_SP_LAG_MAX,
	.used_max_port_per_lag		= 1,
	.max_port_per_lag		= MLXSW_SP_PORT_PER_LAG_MAX,
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_max_pgt			= 1,
	.max_pgt			= 0,
	.used_max_system_port		= 1,
	.max_system_port		= 64,
	.used_max_vlan_groups		= 1,
	.max_vlan_groups		= 127,
	.used_max_regions		= 1,
	.max_regions			= 400,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	/* NOTE(review): magic value — presumably per-FID controlled flood
	 * mode per the Spectrum PRM; confirm before changing.
	 */
	.flood_mode			= 3,
	/* Two tables each (UC and BM, see flood init) for FID-offset
	 * (802.1Q) and FID (vFID) based bridges.
	 */
	.max_fid_offset_flood_tables	= 2,
	.fid_offset_flood_table_size	= VLAN_N_VID - 1,
	.max_fid_flood_tables		= 2,
	.fid_flood_table_size		= MLXSW_SP_VFID_MAX,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.swid_config			= {
		{
			/* Single Ethernet switch partition. */
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
2523
/* mlxsw core registration for the Spectrum ASIC: lifecycle callbacks,
 * port split and shared-buffer (sb_*) operations, TX header handling
 * and the device configuration profile above.
 */
static struct mlxsw_driver mlxsw_sp_driver = {
	.kind				= MLXSW_DEVICE_KIND_SPECTRUM,
	.owner				= THIS_MODULE,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp_init,
	.fini				= mlxsw_sp_fini,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp_config_profile,
};
2546
2547 static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
2548                                          u16 fid)
2549 {
2550         if (mlxsw_sp_fid_is_vfid(fid))
2551                 return mlxsw_sp_port_vport_find_by_fid(lag_port, fid);
2552         else
2553                 return test_bit(fid, lag_port->active_vlans);
2554 }
2555
2556 static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
2557                                            u16 fid)
2558 {
2559         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2560         u8 local_port = mlxsw_sp_port->local_port;
2561         u16 lag_id = mlxsw_sp_port->lag_id;
2562         int i, count = 0;
2563
2564         if (!mlxsw_sp_port->lagged)
2565                 return true;
2566
2567         for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
2568                 struct mlxsw_sp_port *lag_port;
2569
2570                 lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
2571                 if (!lag_port || lag_port->local_port == local_port)
2572                         continue;
2573                 if (mlxsw_sp_lag_port_fid_member(lag_port, fid))
2574                         count++;
2575         }
2576
2577         return !count;
2578 }
2579
2580 static int
2581 mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
2582                                     u16 fid)
2583 {
2584         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2585         char sfdf_pl[MLXSW_REG_SFDF_LEN];
2586
2587         mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
2588         mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
2589         mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
2590                                                 mlxsw_sp_port->local_port);
2591
2592         netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n",
2593                    mlxsw_sp_port->local_port, fid);
2594
2595         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2596 }
2597
2598 static int
2599 mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
2600                                       u16 fid)
2601 {
2602         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2603         char sfdf_pl[MLXSW_REG_SFDF_LEN];
2604
2605         mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
2606         mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
2607         mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
2608
2609         netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n",
2610                    mlxsw_sp_port->lag_id, fid);
2611
2612         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2613 }
2614
2615 int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
2616 {
2617         if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid))
2618                 return 0;
2619
2620         if (mlxsw_sp_port->lagged)
2621                 return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port,
2622                                                              fid);
2623         else
2624                 return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid);
2625 }
2626
2627 static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
2628 {
2629         return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
2630 }
2631
2632 static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
2633                                          struct net_device *br_dev)
2634 {
2635         return !mlxsw_sp->master_bridge.dev ||
2636                mlxsw_sp->master_bridge.dev == br_dev;
2637 }
2638
2639 static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
2640                                        struct net_device *br_dev)
2641 {
2642         mlxsw_sp->master_bridge.dev = br_dev;
2643         mlxsw_sp->master_bridge.ref_count++;
2644 }
2645
2646 static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
2647 {
2648         if (--mlxsw_sp->master_bridge.ref_count == 0)
2649                 mlxsw_sp->master_bridge.dev = NULL;
2650 }
2651
/* Enslave the port to a bridge: remove the implicit VLAN 1 interface
 * and enable the bridge-related port attributes.
 */
static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct net_device *br_dev)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* When port is not bridged untagged packets are tagged with
	 * PVID=VID=1, thereby creating an implicit VLAN interface in
	 * the device. Remove it and let bridge code take care of its
	 * own VLANs.
	 */
	err = mlxsw_sp_port_kill_vid(dev, 0, 1);
	if (err)
		return err;

	mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev);

	/* The bridge takes over learning and unknown-unicast flooding. */
	mlxsw_sp_port->learning = 1;
	mlxsw_sp_port->learning_sync = 1;
	mlxsw_sp_port->uc_flood = 1;
	mlxsw_sp_port->bridged = 1;

	return 0;
}
2676
/* Release the port from its bridge: restore the default PVID, clear
 * bridge port attributes and recreate the implicit VLAN 1 interface.
 */
static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;

	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);

	mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp);

	mlxsw_sp_port->learning = 0;
	mlxsw_sp_port->learning_sync = 0;
	mlxsw_sp_port->uc_flood = 0;
	mlxsw_sp_port->bridged = 0;

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	mlxsw_sp_port_add_vid(dev, 0, 1);
}
2695
2696 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
2697 {
2698         char sldr_pl[MLXSW_REG_SLDR_LEN];
2699
2700         mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
2701         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2702 }
2703
2704 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
2705 {
2706         char sldr_pl[MLXSW_REG_SLDR_LEN];
2707
2708         mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
2709         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2710 }
2711
2712 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
2713                                      u16 lag_id, u8 port_index)
2714 {
2715         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2716         char slcor_pl[MLXSW_REG_SLCOR_LEN];
2717
2718         mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
2719                                       lag_id, port_index);
2720         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
2721 }
2722
2723 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
2724                                         u16 lag_id)
2725 {
2726         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2727         char slcor_pl[MLXSW_REG_SLCOR_LEN];
2728
2729         mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
2730                                          lag_id);
2731         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
2732 }
2733
2734 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
2735                                         u16 lag_id)
2736 {
2737         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2738         char slcor_pl[MLXSW_REG_SLCOR_LEN];
2739
2740         mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
2741                                         lag_id);
2742         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
2743 }
2744
2745 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
2746                                          u16 lag_id)
2747 {
2748         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2749         char slcor_pl[MLXSW_REG_SLCOR_LEN];
2750
2751         mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
2752                                          lag_id);
2753         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
2754 }
2755
2756 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
2757                                   struct net_device *lag_dev,
2758                                   u16 *p_lag_id)
2759 {
2760         struct mlxsw_sp_upper *lag;
2761         int free_lag_id = -1;
2762         int i;
2763
2764         for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
2765                 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
2766                 if (lag->ref_count) {
2767                         if (lag->dev == lag_dev) {
2768                                 *p_lag_id = i;
2769                                 return 0;
2770                         }
2771                 } else if (free_lag_id < 0) {
2772                         free_lag_id = i;
2773                 }
2774         }
2775         if (free_lag_id < 0)
2776                 return -EBUSY;
2777         *p_lag_id = free_lag_id;
2778         return 0;
2779 }
2780
2781 static bool
2782 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
2783                           struct net_device *lag_dev,
2784                           struct netdev_lag_upper_info *lag_upper_info)
2785 {
2786         u16 lag_id;
2787
2788         if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
2789                 return false;
2790         if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
2791                 return false;
2792         return true;
2793 }
2794
2795 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
2796                                        u16 lag_id, u8 *p_port_index)
2797 {
2798         int i;
2799
2800         for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
2801                 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
2802                         *p_port_index = i;
2803                         return 0;
2804                 }
2805         }
2806         return -EBUSY;
2807 }
2808
/* Enslave the port to the LAG mapped to lag_dev. The LAG is created in
 * the device on first use; the port is then added to the LAG's
 * collector and collection is enabled. On failure a LAG this port just
 * created is destroyed again (its ref_count is still zero).
 */
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	/* First port joining: create the LAG in HW and bind it to lag_dev. */
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	/* NOTE(review): a failure here leaves a freshly created LAG in the
	 * device; since ref_count stays zero the index is considered free
	 * and reused later — confirm this is intended.
	 */
	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;
	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
	if (err)
		goto err_col_port_enable;

	/* Let the core resolve {lag_id, port_index} to the local port. */
	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;
	return 0;

err_col_port_enable:
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}
2853
/* Release the port from its LAG: disable collection, remove it from
 * the collector, drop bridge state inherited through the LAG, and
 * destroy the LAG in the device when the last port leaves.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* A port bridged through the LAG leaves the bridge as well. */
	if (mlxsw_sp_port->bridged) {
		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
	}

	/* Last member: remove the LAG record from the device. */
	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;
}
2882
2883 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
2884                                       u16 lag_id)
2885 {
2886         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2887         char sldr_pl[MLXSW_REG_SLDR_LEN];
2888
2889         mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
2890                                          mlxsw_sp_port->local_port);
2891         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2892 }
2893
2894 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
2895                                          u16 lag_id)
2896 {
2897         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2898         char sldr_pl[MLXSW_REG_SLDR_LEN];
2899
2900         mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
2901                                             mlxsw_sp_port->local_port);
2902         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2903 }
2904
2905 static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
2906                                        bool lag_tx_enabled)
2907 {
2908         if (lag_tx_enabled)
2909                 return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
2910                                                   mlxsw_sp_port->lag_id);
2911         else
2912                 return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
2913                                                      mlxsw_sp_port->lag_id);
2914 }
2915
2916 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
2917                                      struct netdev_lag_lower_state_info *info)
2918 {
2919         return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
2920 }
2921
2922 static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
2923                                    struct net_device *vlan_dev)
2924 {
2925         struct mlxsw_sp_port *mlxsw_sp_vport;
2926         u16 vid = vlan_dev_vlan_id(vlan_dev);
2927
2928         mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
2929         if (WARN_ON(!mlxsw_sp_vport))
2930                 return -EINVAL;
2931
2932         mlxsw_sp_vport->dev = vlan_dev;
2933
2934         return 0;
2935 }
2936
2937 static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
2938                                       struct net_device *vlan_dev)
2939 {
2940         struct mlxsw_sp_port *mlxsw_sp_vport;
2941         u16 vid = vlan_dev_vlan_id(vlan_dev);
2942
2943         mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
2944         if (WARN_ON(!mlxsw_sp_vport))
2945                 return;
2946
2947         mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
2948 }
2949
/* Handle PRECHANGEUPPER/CHANGEUPPER notifications for a physical port:
 * first validate the requested topology change, then reflect
 * enslavement to (or release from) a VLAN device, bridge or LAG.
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only VLAN, bridge and LAG uppers can be offloaded. */
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* HW limitation forbids to put ports to multiple bridges. */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return -EINVAL;
		/* A port with VLAN uppers cannot be enslaved to a LAG. */
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		/* VLANs over a LAG port must be configured on the LAG. */
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
							      upper_dev);
			else
				 mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
							   upper_dev);
		} else if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								upper_dev);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			else
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
		} else {
			/* PRECHANGEUPPER should have rejected other uppers. */
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}
3017
3018 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
3019                                                unsigned long event, void *ptr)
3020 {
3021         struct netdev_notifier_changelowerstate_info *info;
3022         struct mlxsw_sp_port *mlxsw_sp_port;
3023         int err;
3024
3025         mlxsw_sp_port = netdev_priv(dev);
3026         info = ptr;
3027
3028         switch (event) {
3029         case NETDEV_CHANGELOWERSTATE:
3030                 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
3031                         err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
3032                                                         info->lower_state_info);
3033                         if (err)
3034                                 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
3035                 }
3036                 break;
3037         }
3038
3039         return 0;
3040 }
3041
3042 static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
3043                                          unsigned long event, void *ptr)
3044 {
3045         switch (event) {
3046         case NETDEV_PRECHANGEUPPER:
3047         case NETDEV_CHANGEUPPER:
3048                 return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
3049         case NETDEV_CHANGELOWERSTATE:
3050                 return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
3051         }
3052
3053         return 0;
3054 }
3055
3056 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
3057                                         unsigned long event, void *ptr)
3058 {
3059         struct net_device *dev;
3060         struct list_head *iter;
3061         int ret;
3062
3063         netdev_for_each_lower_dev(lag_dev, dev, iter) {
3064                 if (mlxsw_sp_port_dev_check(dev)) {
3065                         ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
3066                         if (ret)
3067                                 return ret;
3068                 }
3069         }
3070
3071         return 0;
3072 }
3073
3074 static struct mlxsw_sp_fid *
3075 mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp,
3076                       const struct net_device *br_dev)
3077 {
3078         struct mlxsw_sp_fid *f;
3079
3080         list_for_each_entry(f, &mlxsw_sp->br_vfids.list, list) {
3081                 if (f->dev == br_dev)
3082                         return f;
3083         }
3084
3085         return NULL;
3086 }
3087
/* Convert a global vFID index to an index in the bridge vFID space,
 * which starts right after the per-port vFID range.
 */
static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid)
{
	return vfid - MLXSW_SP_VFID_PORT_MAX;
}
3092
/* Convert a bridge vFID index back to its global vFID index. */
static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid)
{
	return MLXSW_SP_VFID_PORT_MAX + br_vfid;
}
3097
/* Find the first unused bridge vFID index. Per find_first_zero_bit()
 * convention, returns MLXSW_SP_VFID_BR_MAX when all are in use.
 */
static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->br_vfids.mapped,
				   MLXSW_SP_VFID_BR_MAX);
}
3103
3104 static void mlxsw_sp_vport_br_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
3105
/* Allocate a vFID for bridge device br_dev: pick a free slot in the
 * bridge vFID bitmap, create the FID in the device and track it on the
 * br_vfids list. Returns the new FID struct or an ERR_PTR.
 */
static struct mlxsw_sp_fid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp,
						    struct net_device *br_dev)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_fid *f;
	u16 vfid, fid;
	int err;

	/* When the bitmap is full the search yields MLXSW_SP_VFID_BR_MAX,
	 * which presumably converts to MLXSW_SP_VFID_MAX here — confirm
	 * MLXSW_SP_VFID_MAX == VFID_PORT_MAX + VFID_BR_MAX.
	 */
	vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp));
	if (vfid == MLXSW_SP_VFID_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	fid = mlxsw_sp_vfid_to_fid(vfid);
	err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
	if (err) {
		dev_err(dev, "Failed to create FID=%d\n", fid);
		return ERR_PTR(err);
	}

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		goto err_allocate_vfid;

	/* vPorts mapped to this vFID leave it via the bridge leave op. */
	f->leave = mlxsw_sp_vport_br_vfid_leave;
	f->fid = fid;
	f->dev = br_dev;

	list_add(&f->list, &mlxsw_sp->br_vfids.list);
	set_bit(mlxsw_sp_vfid_to_br_vfid(vfid), mlxsw_sp->br_vfids.mapped);

	return f;

err_allocate_vfid:
	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
	return ERR_PTR(-ENOMEM);
}
3144
3145 static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
3146                                      struct mlxsw_sp_fid *f)
3147 {
3148         u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
3149         u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid);
3150
3151         clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped);
3152         list_del(&f->list);
3153
3154         mlxsw_sp_vfid_op(mlxsw_sp, f->fid, false);
3155
3156         kfree(f);
3157 }
3158
/* Bind a vPort to the vFID of the bridge it is joining, creating the
 * vFID on first use. Enables flooding and installs the {Port, VID}->FID
 * mapping before taking a reference on the FID.
 */
static int mlxsw_sp_vport_br_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				       struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f;
	int err;

	f = mlxsw_sp_br_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
	if (!f) {
		f = mlxsw_sp_br_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_flood_set;

	err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_fid_map;

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
	f->ref_count++;

	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);

	return 0;

err_vport_fid_map:
	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
err_vport_flood_set:
	/* Destroy a vFID we created above but never referenced. */
	if (!f->ref_count)
		mlxsw_sp_br_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
	return err;
}
3194
/* Undo mlxsw_sp_vport_br_vfid_join(): unmap the vPort from its FID, disable
 * flooding, flush FDB entries for the FID and drop the vFID reference,
 * destroying the vFID when the last user is gone.
 */
static void mlxsw_sp_vport_br_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);

	mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);

	/* Clear the vPort's FID pointer before potentially freeing f. */
	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
	if (--f->ref_count == 0)
		mlxsw_sp_br_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
}
3211
/* Enslave a vPort to a bridge: move it from its default vFID to the
 * bridge's vFID and enable learning on its VID. On failure the vPort is
 * rolled back to its default vFID so it is never left unmapped.
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *br_dev)
{
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	int err;

	/* Leave the default vFID before joining the bridge's vFID. */
	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);

	err = mlxsw_sp_vport_br_vfid_join(mlxsw_sp_vport, br_dev);
	if (err) {
		netdev_err(dev, "Failed to join vFID\n");
		goto err_vport_br_vfid_join;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning\n");
		goto err_port_vid_learning_set;
	}

	/* Record the bridge-port flags in the driver's per-vPort state. */
	mlxsw_sp_vport->learning = 1;
	mlxsw_sp_vport->learning_sync = 1;
	mlxsw_sp_vport->uc_flood = 1;
	mlxsw_sp_vport->bridged = 1;

	return 0;

err_port_vid_learning_set:
	mlxsw_sp_vport_br_vfid_leave(mlxsw_sp_vport);
err_vport_br_vfid_join:
	/* Restore the default vFID left at the top of the function. */
	mlxsw_sp_vport_vfid_join(mlxsw_sp_vport);
	return err;
}
3246
/* Undo mlxsw_sp_vport_bridge_join(): disable learning, move the vPort back
 * to its default vFID and clear the bridge-port flags.
 */
static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);

	mlxsw_sp_vport_br_vfid_leave(mlxsw_sp_vport);

	mlxsw_sp_vport_vfid_join(mlxsw_sp_vport);

	/* Without a bridge there is no STP control over the vPort, so
	 * force its state back to forwarding.
	 */
	mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
				    MLXSW_REG_SPMS_STATE_FORWARDING);

	mlxsw_sp_vport->learning = 0;
	mlxsw_sp_vport->learning_sync = 0;
	mlxsw_sp_vport->uc_flood = 0;
	mlxsw_sp_vport->bridged = 0;
}
3265
3266 static bool
3267 mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
3268                                   const struct net_device *br_dev)
3269 {
3270         struct mlxsw_sp_port *mlxsw_sp_vport;
3271
3272         list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
3273                             vport.list) {
3274                 struct net_device *dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
3275
3276                 if (dev && dev == br_dev)
3277                         return false;
3278         }
3279
3280         return true;
3281 }
3282
/* Handle netdevice notifier events for a VLAN upper (vPort) of a port.
 * NETDEV_PRECHANGEUPPER vetoes unsupported topologies; NETDEV_CHANGEUPPER
 * performs the actual bridge join/leave on the matching vPort.
 * Returns 0 or a negative errno (converted by the notifier caller).
 */
static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
					  unsigned long event, void *ptr,
					  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct net_device *upper_dev;
	int err = 0;

	/* May be NULL if no vPort exists for this VID on the port. */
	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only a bridge master is a valid upper for a vPort. */
		if (!netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* We can't have multiple VLAN interfaces configured on
		 * the same port and being members in the same bridge.
		 */
		if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
						       upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking) {
			/* Linking was validated at PRECHANGEUPPER, so the
			 * vPort is expected to exist here.
			 */
			if (WARN_ON(!mlxsw_sp_vport))
				return -EINVAL;
			err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
							 upper_dev);
		} else {
			/* Nothing to unwind if the vPort is already gone. */
			if (!mlxsw_sp_vport)
				return 0;
			mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
		}
	}

	return err;
}
3325
3326 static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
3327                                               unsigned long event, void *ptr,
3328                                               u16 vid)
3329 {
3330         struct net_device *dev;
3331         struct list_head *iter;
3332         int ret;
3333
3334         netdev_for_each_lower_dev(lag_dev, dev, iter) {
3335                 if (mlxsw_sp_port_dev_check(dev)) {
3336                         ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
3337                                                              vid);
3338                         if (ret)
3339                                 return ret;
3340                 }
3341         }
3342
3343         return 0;
3344 }
3345
3346 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
3347                                          unsigned long event, void *ptr)
3348 {
3349         struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
3350         u16 vid = vlan_dev_vlan_id(vlan_dev);
3351
3352         if (mlxsw_sp_port_dev_check(real_dev))
3353                 return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
3354                                                       vid);
3355         else if (netif_is_lag_master(real_dev))
3356                 return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
3357                                                           vid);
3358
3359         return 0;
3360 }
3361
/* Top-level netdevice notifier callback: route the event to the handler
 * matching the device type (mlxsw port, LAG master or VLAN device) and
 * translate the errno into a notifier return value.
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err;

	if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else
		err = 0;

	return notifier_from_errno(err);
}
3377
/* Netdevice notifier; dispatches events to mlxsw_sp_netdevice_event(). */
static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};
3381
3382 static int __init mlxsw_sp_module_init(void)
3383 {
3384         int err;
3385
3386         register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3387         err = mlxsw_core_driver_register(&mlxsw_sp_driver);
3388         if (err)
3389                 goto err_core_driver_register;
3390         return 0;
3391
3392 err_core_driver_register:
3393         unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3394         return err;
3395 }
3396
/* Module teardown: unregister in reverse order of registration. */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}
3402
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
/* Bind this module to the Spectrum device kind handled by mlxsw_core. */
MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);