mlxsw: spectrum: Use FID instead of vFID to setup flooding
[cascardo/linux.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum.c
1 /*
2  * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
3  * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4  * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5  * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6  * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/netdevice.h>
41 #include <linux/etherdevice.h>
42 #include <linux/ethtool.h>
43 #include <linux/slab.h>
44 #include <linux/device.h>
45 #include <linux/skbuff.h>
46 #include <linux/if_vlan.h>
47 #include <linux/if_bridge.h>
48 #include <linux/workqueue.h>
49 #include <linux/jiffies.h>
50 #include <linux/bitops.h>
51 #include <linux/list.h>
52 #include <linux/notifier.h>
53 #include <linux/dcbnl.h>
54 #include <net/switchdev.h>
55 #include <generated/utsrelease.h>
56
57 #include "spectrum.h"
58 #include "core.h"
59 #include "reg.h"
60 #include "port.h"
61 #include "trap.h"
62 #include "txheader.h"
63
64 static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
65 static const char mlxsw_sp_driver_version[] = "1.0";
66
67 /* tx_hdr_version
68  * Tx header version.
69  * Must be set to 1.
70  */
71 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
72
73 /* tx_hdr_ctl
74  * Packet control type.
75  * 0 - Ethernet control (e.g. EMADs, LACP)
76  * 1 - Ethernet data
77  */
78 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
79
80 /* tx_hdr_proto
81  * Packet protocol type. Must be set to 1 (Ethernet).
82  */
83 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
84
85 /* tx_hdr_rx_is_router
86  * Packet is sent from the router. Valid for data packets only.
87  */
88 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
89
90 /* tx_hdr_fid_valid
91  * Indicates if the 'fid' field is valid and should be used for
92  * forwarding lookup. Valid for data packets only.
93  */
94 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
95
96 /* tx_hdr_swid
97  * Switch partition ID. Must be set to 0.
98  */
99 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
100
101 /* tx_hdr_control_tclass
102  * Indicates if the packet should use the control TClass and not one
103  * of the data TClasses.
104  */
105 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
106
107 /* tx_hdr_etclass
108  * Egress TClass to be used on the egress device on the egress port.
109  */
110 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
111
112 /* tx_hdr_port_mid
113  * Destination local port for unicast packets.
114  * Destination multicast ID for multicast packets.
115  *
116  * Control packets are directed to a specific egress port, while data
117  * packets are transmitted through the CPU port (0) into the switch partition,
118  * where forwarding rules are applied.
119  */
120 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
121
122 /* tx_hdr_fid
123  * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
124  * set, otherwise calculated based on the packet's VID using VID to FID mapping.
125  * Valid for data packets only.
126  */
127 MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
128
129 /* tx_hdr_type
130  * 0 - Data packets
131  * 6 - Control packets
132  */
133 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
134
135 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
136                                      const struct mlxsw_tx_info *tx_info)
137 {
138         char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
139
140         memset(txhdr, 0, MLXSW_TXHDR_LEN);
141
142         mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
143         mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
144         mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
145         mlxsw_tx_hdr_swid_set(txhdr, 0);
146         mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
147         mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
148         mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
149 }
150
151 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
152 {
153         char spad_pl[MLXSW_REG_SPAD_LEN];
154         int err;
155
156         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
157         if (err)
158                 return err;
159         mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
160         return 0;
161 }
162
163 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
164                                           bool is_up)
165 {
166         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
167         char paos_pl[MLXSW_REG_PAOS_LEN];
168
169         mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
170                             is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
171                             MLXSW_PORT_ADMIN_STATUS_DOWN);
172         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
173 }
174
175 static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
176                                          bool *p_is_up)
177 {
178         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
179         char paos_pl[MLXSW_REG_PAOS_LEN];
180         u8 oper_status;
181         int err;
182
183         mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
184         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
185         if (err)
186                 return err;
187         oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
188         *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
189         return 0;
190 }
191
/* Program the port's MAC address into the device (PPAD register). */
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}
202
/* Derive the port's MAC address from the switch's base MAC by adding the
 * local port number to the last byte, then program it into the device.
 * NOTE(review): the byte addition can wrap for large local_port values if
 * the base MAC's last byte is near 0xff — presumably the base MAC is
 * allocated with enough headroom; confirm against firmware allocation.
 */
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}
212
213 static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
214                                        u16 vid, enum mlxsw_reg_spms_state state)
215 {
216         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
217         char *spms_pl;
218         int err;
219
220         spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
221         if (!spms_pl)
222                 return -ENOMEM;
223         mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
224         mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
225         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
226         kfree(spms_pl);
227         return err;
228 }
229
230 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
231 {
232         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
233         char pmtu_pl[MLXSW_REG_PMTU_LEN];
234         int max_mtu;
235         int err;
236
237         mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
238         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
239         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
240         if (err)
241                 return err;
242         max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
243
244         if (mtu > max_mtu)
245                 return -EINVAL;
246
247         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
248         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
249 }
250
/* Assign a local port to a switch partition (PSPA register). */
static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}
259
260 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
261 {
262         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
263
264         return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
265                                         swid);
266 }
267
/* Enable or disable Virtual Port (VP) mode on the port (SVPE register). */
static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}
277
/* Create or remove a VID-to-FID mapping of type 'mt' on the port (SVFA
 * register). 'valid' selects whether the mapping is installed or removed.
 */
int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
			    fid, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}
289
290 static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
291                                           u16 vid, bool learn_enable)
292 {
293         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
294         char *spvmlr_pl;
295         int err;
296
297         spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
298         if (!spvmlr_pl)
299                 return -ENOMEM;
300         mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
301                               learn_enable);
302         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
303         kfree(spvmlr_pl);
304         return err;
305 }
306
/* Configure the port's system port mapping (SSPR register). */
static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}
316
317 static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
318                                          u8 local_port, u8 *p_module,
319                                          u8 *p_width, u8 *p_lane)
320 {
321         char pmlp_pl[MLXSW_REG_PMLP_LEN];
322         int err;
323
324         mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
325         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
326         if (err)
327                 return err;
328         *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
329         *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
330         *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
331         return 0;
332 }
333
334 static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
335                                     u8 module, u8 width, u8 lane)
336 {
337         char pmlp_pl[MLXSW_REG_PMLP_LEN];
338         int i;
339
340         mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
341         mlxsw_reg_pmlp_width_set(pmlp_pl, width);
342         for (i = 0; i < width; i++) {
343                 mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
344                 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
345         }
346
347         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
348 }
349
/* Unmap a local port from its module by setting its width to zero (PMLP
 * register).
 */
static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}
358
359 static int mlxsw_sp_port_open(struct net_device *dev)
360 {
361         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
362         int err;
363
364         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
365         if (err)
366                 return err;
367         netif_start_queue(dev);
368         return 0;
369 }
370
/* ndo_stop: stop the Tx queue first, then bring the port admin down. */
static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}
378
379 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
380                                       struct net_device *dev)
381 {
382         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
383         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
384         struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
385         const struct mlxsw_tx_info tx_info = {
386                 .local_port = mlxsw_sp_port->local_port,
387                 .is_emad = false,
388         };
389         u64 len;
390         int err;
391
392         if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
393                 return NETDEV_TX_BUSY;
394
395         if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
396                 struct sk_buff *skb_orig = skb;
397
398                 skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
399                 if (!skb) {
400                         this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
401                         dev_kfree_skb_any(skb_orig);
402                         return NETDEV_TX_OK;
403                 }
404         }
405
406         if (eth_skb_pad(skb)) {
407                 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
408                 return NETDEV_TX_OK;
409         }
410
411         mlxsw_sp_txhdr_construct(skb, &tx_info);
412         len = skb->len;
413         /* Due to a race we might fail here because of a full queue. In that
414          * unlikely case we simply drop the packet.
415          */
416         err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);
417
418         if (!err) {
419                 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
420                 u64_stats_update_begin(&pcpu_stats->syncp);
421                 pcpu_stats->tx_packets++;
422                 pcpu_stats->tx_bytes += len;
423                 u64_stats_update_end(&pcpu_stats->syncp);
424         } else {
425                 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
426                 dev_kfree_skb_any(skb);
427         }
428         return NETDEV_TX_OK;
429 }
430
/* ndo_set_rx_mode: intentionally empty — the driver does not implement
 * Rx-mode (promiscuity/multicast list) handling here.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
434
435 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
436 {
437         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
438         struct sockaddr *addr = p;
439         int err;
440
441         if (!is_valid_ether_addr(addr->sa_data))
442                 return -EADDRNOTAVAIL;
443
444         err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
445         if (err)
446                 return err;
447         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
448         return 0;
449 }
450
451 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
452                                  bool pause_en, bool pfc_en, u16 delay)
453 {
454         u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);
455
456         delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
457                          MLXSW_SP_PAUSE_DELAY;
458
459         if (pause_en || pfc_en)
460                 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
461                                                     pg_size + delay, pg_size);
462         else
463                 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
464 }
465
/* Configure the port's headroom buffers (PBMC register) for the given MTU.
 * prio_tc maps each IEEE priority to a traffic class; a PG buffer is only
 * (re)configured if at least one priority maps to it. PFC state per
 * priority comes from my_pfc (may be NULL when PFC is not configured).
 */
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	/* Read current configuration so untouched PGs keep their settings. */
	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;

		/* PG 'i' needs configuration iff some priority maps to it;
		 * it is lossless if that priority has PFC enabled.
		 */
		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}
500
501 static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
502                                       int mtu, bool pause_en)
503 {
504         u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
505         bool dcb_en = !!mlxsw_sp_port->dcb.ets;
506         struct ieee_pfc *my_pfc;
507         u8 *prio_tc;
508
509         prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
510         my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
511
512         return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
513                                             pause_en, my_pfc);
514 }
515
516 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
517 {
518         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
519         bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
520         int err;
521
522         err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
523         if (err)
524                 return err;
525         err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
526         if (err)
527                 goto err_port_mtu_set;
528         dev->mtu = mtu;
529         return 0;
530
531 err_port_mtu_set:
532         mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
533         return err;
534 }
535
/* ndo_get_stats64: aggregate the per-CPU Rx/Tx counters into 'stats'.
 * The u64 counters are read under the u64_stats seqcount retry loop so a
 * consistent snapshot is obtained on 32-bit hosts as well.
 */
static struct rtnl_link_stats64 *
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped	+= p->tx_dropped;
	}
	stats->tx_dropped	= tx_dropped;
	return stats;
}
567
568 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
569                            u16 vid_end, bool is_member, bool untagged)
570 {
571         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
572         char *spvm_pl;
573         int err;
574
575         spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
576         if (!spvm_pl)
577                 return -ENOMEM;
578
579         mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
580                             vid_end, is_member, untagged);
581         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
582         kfree(spvm_pl);
583         return err;
584 }
585
/* Transition the port to Virtual mode: install an explicit {Port, VID} to
 * FID mapping for every active VLAN, then enable VP mode. On failure, the
 * mappings installed so far (those with VID below last_visited_vid) are
 * removed again.
 */
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		/* All mappings were installed; undo every active VLAN. */
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	/* Tear down only the mappings installed before the failure. */
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}
615
/* Transition the port back to VLAN mode: disable VP mode first, then
 * remove the explicit {Port, VID} to FID mapping of every active VLAN.
 * NOTE(review): an error mid-loop returns without undoing earlier
 * removals — presumably acceptable for this teardown path; confirm.
 */
static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	if (err)
		return err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
						   vid, vid);
		if (err)
			return err;
	}

	return 0;
}
635
636 static struct mlxsw_sp_vfid *
637 mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid)
638 {
639         struct mlxsw_sp_vfid *vfid;
640
641         list_for_each_entry(vfid, &mlxsw_sp->port_vfids.list, list) {
642                 if (vfid->vid == vid)
643                         return vfid;
644         }
645
646         return NULL;
647 }
648
649 static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
650 {
651         return find_first_zero_bit(mlxsw_sp->port_vfids.mapped,
652                                    MLXSW_SP_VFID_PORT_MAX);
653 }
654
/* Create or destroy a FID in the device (SFMR register). The first pack
 * argument selects the operation: !create maps to the destroy opcode.
 */
static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
662
/* Allocate a free vFID index, create the corresponding FID in the device,
 * and track it in the port vFID list/bitmap. Returns the new vFID or an
 * ERR_PTR: -ERANGE when no vFID is free, -ENOMEM on allocation failure
 * (in which case the device FID is destroyed again).
 */
static struct mlxsw_sp_vfid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
						  u16 vid)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_vfid *f;
	u16 vfid, fid;
	int err;

	vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
	if (vfid == MLXSW_SP_VFID_PORT_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	/* Create the FID in the device before tracking it in software. */
	fid = mlxsw_sp_vfid_to_fid(vfid);
	err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
	if (err) {
		dev_err(dev, "Failed to create FID=%d\n", fid);
		return ERR_PTR(err);
	}

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		goto err_allocate_vfid;

	f->vfid = vfid;
	f->vid = vid;

	list_add(&f->list, &mlxsw_sp->port_vfids.list);
	set_bit(vfid, mlxsw_sp->port_vfids.mapped);

	return f;

err_allocate_vfid:
	/* Undo the device-side FID creation. */
	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
	return ERR_PTR(-ENOMEM);
}
700
701 static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
702                                   struct mlxsw_sp_vfid *vfid)
703 {
704         u16 fid = mlxsw_sp_vfid_to_fid(vfid->vfid);
705
706         clear_bit(vfid->vfid, mlxsw_sp->port_vfids.mapped);
707         list_del(&vfid->list);
708
709         mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
710
711         kfree(vfid);
712 }
713
/* Allocate a vPort for the given vFID and link it into the parent port's
 * vports list. The vPort mirrors the parent port's identity (local port,
 * LAG state) and starts in STP forwarding state. Returns NULL on
 * allocation failure.
 */
static struct mlxsw_sp_port *
mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_vfid *vfid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
	if (!mlxsw_sp_vport)
		return NULL;

	/* dev will be set correctly after the VLAN device is linked
	 * with the real device. In case of bridge SELF invocation, dev
	 * will remain as is.
	 */
	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
	mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
	mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
	mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
	mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
	mlxsw_sp_vport->vport.vfid = vfid;
	mlxsw_sp_vport->vport.vid = vfid->vid;

	list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);

	return mlxsw_sp_vport;
}
741
/* Unlink a vPort from its parent port's vports list and free it. */
static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	list_del(&mlxsw_sp_vport->vport.list);
	kfree(mlxsw_sp_vport);
}
747
/* Install or remove the {Port, VID} to FID mapping for a vPort, using the
 * vPort's own VID.
 */
static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
				  bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
					    vid);
}
757
/* ndo_vlan_rx_add_vid: create a vPort for 'vid' on the port, backed by a
 * (possibly shared) vFID. Sets up flooding on first use of the vFID,
 * transitions the port to Virtual mode on its first vPort, maps the
 * {Port, VID} to the FID, disables learning, and installs the VLAN as an
 * egress-tagged member in forwarding STP state. On any failure the steps
 * already performed are unwound in reverse order.
 */
int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
			  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_vfid *vfid;
	u16 fid;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
		netdev_warn(dev, "VID=%d already configured\n", vid);
		return 0;
	}

	/* Reuse an existing vFID for this VID or create a new one. */
	vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
	if (!vfid) {
		vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
		if (IS_ERR(vfid)) {
			netdev_err(dev, "Failed to create vFID for VID=%d\n",
				   vid);
			return PTR_ERR(vfid);
		}
	}

	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vfid);
	if (!mlxsw_sp_vport) {
		netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
		err = -ENOMEM;
		goto err_port_vport_create;
	}

	fid = mlxsw_sp_vfid_to_fid(vfid->vfid);
	/* Flooding is configured only for the first vPort using this vFID. */
	if (!vfid->nr_vports) {
		err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, fid, true);
		if (err) {
			netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
				   vfid->vfid);
			goto err_vport_flood_set;
		}
	}

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to Virtual mode\n");
			goto err_port_vp_mode_trans;
		}
	}

	err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, fid, true);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n",
			   vid, vfid->vfid);
		goto err_vport_fid_map;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
	if (err) {
		netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
		goto err_port_vid_learning_set;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		goto err_port_add_vid;
	}

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		goto err_port_stp_state_set;
	}

	vfid->nr_vports++;

	return 0;

	/* Unwind in reverse order of the setup steps above. */
err_port_stp_state_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
err_port_add_vid:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
err_port_vid_learning_set:
	mlxsw_sp_vport_fid_map(mlxsw_sp_vport, fid, false);
err_vport_fid_map:
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
	if (!vfid->nr_vports)
		mlxsw_sp_vport_flood_set(mlxsw_sp_vport, fid, false);
err_vport_flood_set:
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
err_port_vport_create:
	if (!vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
	return err;
}
868
/* ndo_vlan_rx_kill_vid handler: tear down the vPort that was created for
 * @vid by mlxsw_sp_port_add_vid() and release its vFID when no vPorts
 * reference it anymore. Steps undo add_vid in reverse order.
 */
int mlxsw_sp_port_kill_vid(struct net_device *dev,
			   __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_vfid *vfid;
	u16 fid;
	int err;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		netdev_warn(dev, "VID=%d does not exist\n", vid);
		return 0;
	}

	vfid = mlxsw_sp_vport->vport.vfid;

	/* Block traffic on the vPort first, before dismantling its
	 * configuration.
	 * NOTE(review): on a mid-sequence failure below the function
	 * returns without rolling back the already-completed steps,
	 * leaving the vPort partially configured — confirm intended.
	 */
	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_DISCARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		return err;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		return err;
	}

	/* Learning was disabled in add_vid; restore the default. */
	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
		return err;
	}

	/* Remove the {Port, VID} to FID mapping installed for the vFID. */
	fid = mlxsw_sp_vfid_to_fid(vfid->vfid);
	err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, fid, false);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
			   vid, vfid->vfid);
		return err;
	}

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set port's mode to VLAN mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to VLAN mode\n");
			return err;
		}
	}

	vfid->nr_vports--;
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);

	/* Destroy the vFID if no vPorts are assigned to it anymore. */
	if (!vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp_port->mlxsw_sp, vfid);

	return 0;
}
941
942 static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
943                                             size_t len)
944 {
945         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
946         u8 module = mlxsw_sp_port->mapping.module;
947         u8 width = mlxsw_sp_port->mapping.width;
948         u8 lane = mlxsw_sp_port->mapping.lane;
949         int err;
950
951         if (!mlxsw_sp_port->split)
952                 err = snprintf(name, len, "p%d", module + 1);
953         else
954                 err = snprintf(name, len, "p%ds%d", module + 1,
955                                lane / width);
956
957         if (err >= len)
958                 return -EINVAL;
959
960         return 0;
961 }
962
/* Netdev ops for a Spectrum front-panel port. FDB and bridge operations
 * are delegated to the generic switchdev helpers.
 */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
};
981
/* ethtool get_drvinfo handler: report driver name/version, firmware
 * revision and the underlying bus device name.
 */
static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	/* Firmware revision is cached in bus_info at probe time. */
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}
999
1000 static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
1001                                          struct ethtool_pauseparam *pause)
1002 {
1003         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1004
1005         pause->rx_pause = mlxsw_sp_port->link.rx_pause;
1006         pause->tx_pause = mlxsw_sp_port->link.tx_pause;
1007 }
1008
1009 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
1010                                    struct ethtool_pauseparam *pause)
1011 {
1012         char pfcc_pl[MLXSW_REG_PFCC_LEN];
1013
1014         mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
1015         mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
1016         mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
1017
1018         return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
1019                                pfcc_pl);
1020 }
1021
1022 static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
1023                                         struct ethtool_pauseparam *pause)
1024 {
1025         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1026         bool pause_en = pause->tx_pause || pause->rx_pause;
1027         int err;
1028
1029         if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
1030                 netdev_err(dev, "PFC already enabled on port\n");
1031                 return -EINVAL;
1032         }
1033
1034         if (pause->autoneg) {
1035                 netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
1036                 return -EINVAL;
1037         }
1038
1039         err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1040         if (err) {
1041                 netdev_err(dev, "Failed to configure port's headroom\n");
1042                 return err;
1043         }
1044
1045         err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
1046         if (err) {
1047                 netdev_err(dev, "Failed to set PAUSE parameters\n");
1048                 goto err_port_pause_configure;
1049         }
1050
1051         mlxsw_sp_port->link.rx_pause = pause->rx_pause;
1052         mlxsw_sp_port->link.tx_pause = pause->tx_pause;
1053
1054         return 0;
1055
1056 err_port_pause_configure:
1057         pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
1058         mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1059         return err;
1060 }
1061
/* Maps an ethtool statistic name to the PPCNT register field getter
 * that extracts its 64-bit counter from the register payload.
 */
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];	/* name shown by ethtool -S */
	u64 (*getter)(char *payload);	/* extracts counter from PPCNT payload */
};
1066
/* IEEE 802.3 counters exposed via ethtool -S. Order here defines the
 * order of both the strings and the values reported to user space, so
 * the two must stay in sync (see mlxsw_sp_port_get_strings() and
 * mlxsw_sp_port_get_stats()).
 */
static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};
1145
1146 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
1147
1148 static void mlxsw_sp_port_get_strings(struct net_device *dev,
1149                                       u32 stringset, u8 *data)
1150 {
1151         u8 *p = data;
1152         int i;
1153
1154         switch (stringset) {
1155         case ETH_SS_STATS:
1156                 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
1157                         memcpy(p, mlxsw_sp_port_hw_stats[i].str,
1158                                ETH_GSTRING_LEN);
1159                         p += ETH_GSTRING_LEN;
1160                 }
1161                 break;
1162         }
1163 }
1164
1165 static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
1166                                      enum ethtool_phys_id_state state)
1167 {
1168         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1169         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1170         char mlcr_pl[MLXSW_REG_MLCR_LEN];
1171         bool active;
1172
1173         switch (state) {
1174         case ETHTOOL_ID_ACTIVE:
1175                 active = true;
1176                 break;
1177         case ETHTOOL_ID_INACTIVE:
1178                 active = false;
1179                 break;
1180         default:
1181                 return -EOPNOTSUPP;
1182         }
1183
1184         mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
1185         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
1186 }
1187
1188 static void mlxsw_sp_port_get_stats(struct net_device *dev,
1189                                     struct ethtool_stats *stats, u64 *data)
1190 {
1191         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1192         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1193         char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
1194         int i;
1195         int err;
1196
1197         mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port,
1198                              MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
1199         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
1200         for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
1201                 data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
1202 }
1203
1204 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
1205 {
1206         switch (sset) {
1207         case ETH_SS_STATS:
1208                 return MLXSW_SP_PORT_HW_STATS_LEN;
1209         default:
1210                 return -EOPNOTSUPP;
1211         }
1212 }
1213
/* Describes one PTYS link mode: the protocol bit(s) in the register and
 * the corresponding ethtool flags and speed.
 */
struct mlxsw_sp_port_link_mode {
	u32 mask;	/* PTYS eth_proto bit(s) for this mode */
	u32 supported;	/* SUPPORTED_* ethtool flag, 0 if none */
	u32 advertised;	/* ADVERTISED_* ethtool flag, 0 if none */
	u32 speed;	/* link speed in Mb/s */
};
1220
/* Translation table between PTYS protocol bits and ethtool link modes.
 * Entries with no supported/advertised flag are speeds the (old-style)
 * ethtool API cannot express; they still contribute to speed lookups.
 */
static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported	= SUPPORTED_100baseT_Full,
		.advertised	= ADVERTISED_100baseT_Full,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported	= SUPPORTED_1000baseKX_Full,
		.advertised	= ADVERTISED_1000baseKX_Full,
		.speed		= 1000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported	= SUPPORTED_10000baseT_Full,
		.advertised	= ADVERTISED_10000baseT_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported	= SUPPORTED_10000baseKX4_Full,
		.advertised	= ADVERTISED_10000baseKX4_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported	= SUPPORTED_10000baseKR_Full,
		.advertised	= ADVERTISED_10000baseKR_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported	= SUPPORTED_20000baseKR2_Full,
		.advertised	= ADVERTISED_20000baseKR2_Full,
		.speed		= 20000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported	= SUPPORTED_40000baseCR4_Full,
		.advertised	= ADVERTISED_40000baseCR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported	= SUPPORTED_40000baseKR4_Full,
		.advertised	= ADVERTISED_40000baseKR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported	= SUPPORTED_40000baseSR4_Full,
		.advertised	= ADVERTISED_40000baseSR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported	= SUPPORTED_40000baseLR4_Full,
		.advertised	= ADVERTISED_40000baseLR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed		= 25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed		= 50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.supported	= SUPPORTED_56000baseKR4_Full,
		.advertised	= ADVERTISED_56000baseKR4_Full,
		.speed		= 56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed		= 100000,
	},
};
1317
1318 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
1319
1320 static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
1321 {
1322         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1323                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1324                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1325                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1326                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1327                               MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1328                 return SUPPORTED_FIBRE;
1329
1330         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1331                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1332                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1333                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
1334                               MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
1335                 return SUPPORTED_Backplane;
1336         return 0;
1337 }
1338
1339 static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
1340 {
1341         u32 modes = 0;
1342         int i;
1343
1344         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1345                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1346                         modes |= mlxsw_sp_port_link_mode[i].supported;
1347         }
1348         return modes;
1349 }
1350
1351 static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
1352 {
1353         u32 modes = 0;
1354         int i;
1355
1356         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1357                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1358                         modes |= mlxsw_sp_port_link_mode[i].advertised;
1359         }
1360         return modes;
1361 }
1362
1363 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
1364                                             struct ethtool_cmd *cmd)
1365 {
1366         u32 speed = SPEED_UNKNOWN;
1367         u8 duplex = DUPLEX_UNKNOWN;
1368         int i;
1369
1370         if (!carrier_ok)
1371                 goto out;
1372
1373         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1374                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
1375                         speed = mlxsw_sp_port_link_mode[i].speed;
1376                         duplex = DUPLEX_FULL;
1377                         break;
1378                 }
1379         }
1380 out:
1381         ethtool_cmd_speed_set(cmd, speed);
1382         cmd->duplex = duplex;
1383 }
1384
1385 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
1386 {
1387         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1388                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1389                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1390                               MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1391                 return PORT_FIBRE;
1392
1393         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1394                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1395                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
1396                 return PORT_DA;
1397
1398         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1399                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1400                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1401                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
1402                 return PORT_NONE;
1403
1404         return PORT_OTHER;
1405 }
1406
/* ethtool get_settings handler: query the PTYS register once and
 * translate its capability, admin and operational protocol masks into
 * the ethtool_cmd fields.
 */
static int mlxsw_sp_port_get_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 eth_proto_oper;
	int err;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
			      &eth_proto_admin, &eth_proto_oper);

	cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
			 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
			 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
					eth_proto_oper, cmd);

	/* Fall back to the capability mask when there is no operational
	 * protocol (e.g. link down), so a connector type and link-partner
	 * modes are still reported.
	 */
	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
	cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
	cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);

	cmd->transceiver = XCVR_INTERNAL;
	return 0;
}
1441
1442 static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
1443 {
1444         u32 ptys_proto = 0;
1445         int i;
1446
1447         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1448                 if (advertising & mlxsw_sp_port_link_mode[i].advertised)
1449                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1450         }
1451         return ptys_proto;
1452 }
1453
1454 static u32 mlxsw_sp_to_ptys_speed(u32 speed)
1455 {
1456         u32 ptys_proto = 0;
1457         int i;
1458
1459         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1460                 if (speed == mlxsw_sp_port_link_mode[i].speed)
1461                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1462         }
1463         return ptys_proto;
1464 }
1465
1466 static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
1467 {
1468         u32 ptys_proto = 0;
1469         int i;
1470
1471         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1472                 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
1473                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1474         }
1475         return ptys_proto;
1476 }
1477
/* ethtool set_settings handler: compute the requested PTYS admin mask
 * (from advertised modes with autoneg, or from the fixed speed without),
 * write it, and flap the port so the new configuration takes effect.
 */
static int mlxsw_sp_port_set_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 speed;
	u32 eth_proto_new;
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	bool is_up;
	int err;

	speed = ethtool_cmd_speed(cmd);

	eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
		mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
		mlxsw_sp_to_ptys_speed(speed);

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);

	/* Only modes the port is actually capable of may be requested. */
	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "Not supported proto admin requested");
		return -EINVAL;
	}
	/* Nothing to do when the requested mask is already programmed. */
	if (eth_proto_new == eth_proto_admin)
		return 0;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to set proto admin");
		return err;
	}

	err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
	if (err) {
		netdev_err(dev, "Failed to get oper status");
		return err;
	}
	if (!is_up)
		return 0;

	/* Toggle the port administratively down and back up so that the
	 * new protocol admin mask is applied by the hardware.
	 */
	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	return 0;
}
1542
/* ethtool ops for a Spectrum front-panel port. */
static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= mlxsw_sp_port_get_pauseparam,
	.set_pauseparam		= mlxsw_sp_port_set_pauseparam,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_settings		= mlxsw_sp_port_get_settings,
	.set_settings		= mlxsw_sp_port_set_settings,
};
1555
1556 static int
1557 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
1558 {
1559         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1560         u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
1561         char ptys_pl[MLXSW_REG_PTYS_LEN];
1562         u32 eth_proto_admin;
1563
1564         eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
1565         mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port,
1566                             eth_proto_admin);
1567         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1568 }
1569
1570 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
1571                           enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
1572                           bool dwrr, u8 dwrr_weight)
1573 {
1574         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1575         char qeec_pl[MLXSW_REG_QEEC_LEN];
1576
1577         mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1578                             next_index);
1579         mlxsw_reg_qeec_de_set(qeec_pl, true);
1580         mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
1581         mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
1582         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1583 }
1584
1585 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
1586                                   enum mlxsw_reg_qeec_hr hr, u8 index,
1587                                   u8 next_index, u32 maxrate)
1588 {
1589         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1590         char qeec_pl[MLXSW_REG_QEEC_LEN];
1591
1592         mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1593                             next_index);
1594         mlxsw_reg_qeec_mase_set(qeec_pl, true);
1595         mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
1596         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1597 }
1598
1599 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
1600                               u8 switch_prio, u8 tclass)
1601 {
1602         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1603         char qtct_pl[MLXSW_REG_QTCT_LEN];
1604
1605         mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
1606                             tclass);
1607         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
1608 }
1609
/* Build the port's default ETS element hierarchy, disable all max
 * shapers and map every priority to traffic class 0.
 * (The MLXSW_REG_QEEC_HIERARCY_* spelling follows the register
 * definitions.)
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Set up the element hierarchy, so that each traffic class is
	 * linked to one subgroup, and all subgroups are members of the
	 * same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
				    0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
					    false, 0);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that
	 * support it.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
1671
1672 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1673                                 bool split, u8 module, u8 width, u8 lane)
1674 {
1675         struct mlxsw_sp_port *mlxsw_sp_port;
1676         struct net_device *dev;
1677         size_t bytes;
1678         int err;
1679
1680         dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
1681         if (!dev)
1682                 return -ENOMEM;
1683         mlxsw_sp_port = netdev_priv(dev);
1684         mlxsw_sp_port->dev = dev;
1685         mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1686         mlxsw_sp_port->local_port = local_port;
1687         mlxsw_sp_port->split = split;
1688         mlxsw_sp_port->mapping.module = module;
1689         mlxsw_sp_port->mapping.width = width;
1690         mlxsw_sp_port->mapping.lane = lane;
1691         bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
1692         mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
1693         if (!mlxsw_sp_port->active_vlans) {
1694                 err = -ENOMEM;
1695                 goto err_port_active_vlans_alloc;
1696         }
1697         mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
1698         if (!mlxsw_sp_port->untagged_vlans) {
1699                 err = -ENOMEM;
1700                 goto err_port_untagged_vlans_alloc;
1701         }
1702         INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
1703
1704         mlxsw_sp_port->pcpu_stats =
1705                 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
1706         if (!mlxsw_sp_port->pcpu_stats) {
1707                 err = -ENOMEM;
1708                 goto err_alloc_stats;
1709         }
1710
1711         dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
1712         dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
1713
1714         err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
1715         if (err) {
1716                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
1717                         mlxsw_sp_port->local_port);
1718                 goto err_dev_addr_init;
1719         }
1720
1721         netif_carrier_off(dev);
1722
1723         dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
1724                          NETIF_F_HW_VLAN_CTAG_FILTER;
1725
1726         /* Each packet needs to have a Tx header (metadata) on top all other
1727          * headers.
1728          */
1729         dev->hard_header_len += MLXSW_TXHDR_LEN;
1730
1731         err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
1732         if (err) {
1733                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
1734                         mlxsw_sp_port->local_port);
1735                 goto err_port_system_port_mapping_set;
1736         }
1737
1738         err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
1739         if (err) {
1740                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
1741                         mlxsw_sp_port->local_port);
1742                 goto err_port_swid_set;
1743         }
1744
1745         err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
1746         if (err) {
1747                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
1748                         mlxsw_sp_port->local_port);
1749                 goto err_port_speed_by_width_set;
1750         }
1751
1752         err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
1753         if (err) {
1754                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
1755                         mlxsw_sp_port->local_port);
1756                 goto err_port_mtu_set;
1757         }
1758
1759         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1760         if (err)
1761                 goto err_port_admin_status_set;
1762
1763         err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
1764         if (err) {
1765                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
1766                         mlxsw_sp_port->local_port);
1767                 goto err_port_buffers_init;
1768         }
1769
1770         err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
1771         if (err) {
1772                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
1773                         mlxsw_sp_port->local_port);
1774                 goto err_port_ets_init;
1775         }
1776
1777         /* ETS and buffers must be initialized before DCB. */
1778         err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
1779         if (err) {
1780                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
1781                         mlxsw_sp_port->local_port);
1782                 goto err_port_dcb_init;
1783         }
1784
1785         mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
1786         err = register_netdev(dev);
1787         if (err) {
1788                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
1789                         mlxsw_sp_port->local_port);
1790                 goto err_register_netdev;
1791         }
1792
1793         err = mlxsw_core_port_init(mlxsw_sp->core, &mlxsw_sp_port->core_port,
1794                                    mlxsw_sp_port->local_port, dev,
1795                                    mlxsw_sp_port->split, module);
1796         if (err) {
1797                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
1798                         mlxsw_sp_port->local_port);
1799                 goto err_core_port_init;
1800         }
1801
1802         err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
1803         if (err)
1804                 goto err_port_vlan_init;
1805
1806         mlxsw_sp->ports[local_port] = mlxsw_sp_port;
1807         return 0;
1808
1809 err_port_vlan_init:
1810         mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
1811 err_core_port_init:
1812         unregister_netdev(dev);
1813 err_register_netdev:
1814 err_port_dcb_init:
1815 err_port_ets_init:
1816 err_port_buffers_init:
1817 err_port_admin_status_set:
1818 err_port_mtu_set:
1819 err_port_speed_by_width_set:
1820 err_port_swid_set:
1821 err_port_system_port_mapping_set:
1822 err_dev_addr_init:
1823         free_percpu(mlxsw_sp_port->pcpu_stats);
1824 err_alloc_stats:
1825         kfree(mlxsw_sp_port->untagged_vlans);
1826 err_port_untagged_vlans_alloc:
1827         kfree(mlxsw_sp_port->active_vlans);
1828 err_port_active_vlans_alloc:
1829         free_netdev(dev);
1830         return err;
1831 }
1832
1833 static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
1834 {
1835         struct net_device *dev = mlxsw_sp_port->dev;
1836         struct mlxsw_sp_port *mlxsw_sp_vport, *tmp;
1837
1838         list_for_each_entry_safe(mlxsw_sp_vport, tmp,
1839                                  &mlxsw_sp_port->vports_list, vport.list) {
1840                 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
1841
1842                 /* vPorts created for VLAN devices should already be gone
1843                  * by now, since we unregistered the port netdev.
1844                  */
1845                 WARN_ON(is_vlan_dev(mlxsw_sp_vport->dev));
1846                 mlxsw_sp_port_kill_vid(dev, 0, vid);
1847         }
1848 }
1849
/* Tear down a port created by mlxsw_sp_port_create(). Safe to call for
 * local ports that were never created (entry is NULL).
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	if (!mlxsw_sp_port)
		return;
	/* Clear the entry first, so handlers that look ports up by local
	 * port (e.g. the RX listener and PUDE event handler) treat this
	 * port as non-existent during teardown.
	 */
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_vports_fini(mlxsw_sp_port);
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	/* Detach the port from its switch partition and release its
	 * module lane mapping before freeing software state.
	 */
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	kfree(mlxsw_sp_port->untagged_vlans);
	kfree(mlxsw_sp_port->active_vlans);
	free_netdev(mlxsw_sp_port->dev);
}
1869
1870 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
1871 {
1872         int i;
1873
1874         for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
1875                 mlxsw_sp_port_remove(mlxsw_sp, i);
1876         kfree(mlxsw_sp->ports);
1877 }
1878
1879 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
1880 {
1881         u8 module, width, lane;
1882         size_t alloc_size;
1883         int i;
1884         int err;
1885
1886         alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
1887         mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
1888         if (!mlxsw_sp->ports)
1889                 return -ENOMEM;
1890
1891         for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
1892                 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
1893                                                     &width, &lane);
1894                 if (err)
1895                         goto err_port_module_info_get;
1896                 if (!width)
1897                         continue;
1898                 mlxsw_sp->port_to_module[i] = module;
1899                 err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width,
1900                                            lane);
1901                 if (err)
1902                         goto err_port_create;
1903         }
1904         return 0;
1905
1906 err_port_create:
1907 err_port_module_info_get:
1908         for (i--; i >= 1; i--)
1909                 mlxsw_sp_port_remove(mlxsw_sp, i);
1910         kfree(mlxsw_sp->ports);
1911         return err;
1912 }
1913
1914 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
1915 {
1916         u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
1917
1918         return local_port - offset;
1919 }
1920
/* Create @count equal-width split ports starting at @base_port, all
 * fed by module @module.
 */
static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
				      u8 module, unsigned int count)
{
	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
	int err, i;

	/* Phase 1: map each new port to its share of the module lanes. */
	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
					       width, i * width);
		if (err)
			goto err_port_module_map;
	}

	/* Phase 2: assign SWID 0 to the new ports. */
	for (i = 0; i < count; i++) {
		err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
		if (err)
			goto err_port_swid_set;
	}

	/* Phase 3: create the port netdevs, marked as split. */
	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
					   module, width, i * width);
		if (err)
			goto err_port_create;
	}

	return 0;

	/* Unwind: each label first rolls back the partially completed
	 * phase for ports 0..i-1, then resets i to count so the whole
	 * of the preceding phase is rolled back as well.
	 */
err_port_create:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
	i = count;
err_port_swid_set:
	for (i--; i >= 0; i--)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
					 MLXSW_PORT_SWID_DISABLED_PORT);
	i = count;
err_port_module_map:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
	return err;
}
1963
1964 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
1965                                          u8 base_port, unsigned int count)
1966 {
1967         u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
1968         int i;
1969
1970         /* Split by four means we need to re-create two ports, otherwise
1971          * only one.
1972          */
1973         count = count / 2;
1974
1975         for (i = 0; i < count; i++) {
1976                 local_port = base_port + i * 2;
1977                 module = mlxsw_sp->port_to_module[local_port];
1978
1979                 mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
1980                                          0);
1981         }
1982
1983         for (i = 0; i < count; i++)
1984                 __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);
1985
1986         for (i = 0; i < count; i++) {
1987                 local_port = base_port + i * 2;
1988                 module = mlxsw_sp->port_to_module[local_port];
1989
1990                 mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
1991                                      width, 0);
1992         }
1993 }
1994
/* Split handler: split full-width port @local_port into @count (2 or
 * 4) narrower ports.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 module, cur_width, base_port;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	module = mlxsw_sp_port->mapping.module;
	cur_width = mlxsw_sp_port->mapping.width;

	if (count != 2 && count != 4) {
		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
		return -EINVAL;
	}

	/* Only a full-width port can be split. */
	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		return -EINVAL;
	}

	/* Make sure we have enough slave (even) ports for the split. */
	if (count == 2) {
		base_port = local_port;
		if (mlxsw_sp->ports[base_port + 1]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	} else {
		base_port = mlxsw_sp_cluster_base_port_get(local_port);
		if (mlxsw_sp->ports[base_port + 1] ||
		    mlxsw_sp->ports[base_port + 3]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	}

	/* Remove the ports currently occupying the cluster before
	 * re-creating them with the narrower width.
	 */
	for (i = 0; i < count; i++)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	/* Restore the original unsplit port(s). */
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
	return err;
}
2055
/* Unsplit handler: remove the split ports around @local_port and
 * re-create the original full-width one(s).
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 cur_width, base_port;
	unsigned int count;
	int i;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
		return -EINVAL;
	}

	/* A single-lane port implies an earlier split by four; a wider
	 * one, a split by two.
	 */
	cur_width = mlxsw_sp_port->mapping.width;
	count = cur_width == 1 ? 4 : 2;

	base_port = mlxsw_sp_cluster_base_port_get(local_port);

	/* Determine which ports to remove. */
	if (count == 2 && local_port >= base_port + 2)
		base_port = base_port + 2;

	for (i = 0; i < count; i++)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);

	return 0;
}
2092
2093 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2094                                      char *pude_pl, void *priv)
2095 {
2096         struct mlxsw_sp *mlxsw_sp = priv;
2097         struct mlxsw_sp_port *mlxsw_sp_port;
2098         enum mlxsw_reg_pude_oper_status status;
2099         u8 local_port;
2100
2101         local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2102         mlxsw_sp_port = mlxsw_sp->ports[local_port];
2103         if (!mlxsw_sp_port) {
2104                 dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n",
2105                          local_port);
2106                 return;
2107         }
2108
2109         status = mlxsw_reg_pude_oper_status_get(pude_pl);
2110         if (status == MLXSW_PORT_OPER_STATUS_UP) {
2111                 netdev_info(mlxsw_sp_port->dev, "link up\n");
2112                 netif_carrier_on(mlxsw_sp_port->dev);
2113         } else {
2114                 netdev_info(mlxsw_sp_port->dev, "link down\n");
2115                 netif_carrier_off(mlxsw_sp_port->dev);
2116         }
2117 }
2118
/* Listener binding the PUDE trap to the link-state handler above. */
static struct mlxsw_event_listener mlxsw_sp_pude_event = {
	.func = mlxsw_sp_pude_event_func,
	.trap_id = MLXSW_TRAP_ID_PUDE,
};
2123
2124 static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
2125                                    enum mlxsw_event_trap_id trap_id)
2126 {
2127         struct mlxsw_event_listener *el;
2128         char hpkt_pl[MLXSW_REG_HPKT_LEN];
2129         int err;
2130
2131         switch (trap_id) {
2132         case MLXSW_TRAP_ID_PUDE:
2133                 el = &mlxsw_sp_pude_event;
2134                 break;
2135         }
2136         err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
2137         if (err)
2138                 return err;
2139
2140         mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
2141         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2142         if (err)
2143                 goto err_event_trap_set;
2144
2145         return 0;
2146
2147 err_event_trap_set:
2148         mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
2149         return err;
2150 }
2151
2152 static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
2153                                       enum mlxsw_event_trap_id trap_id)
2154 {
2155         struct mlxsw_event_listener *el;
2156
2157         switch (trap_id) {
2158         case MLXSW_TRAP_ID_PUDE:
2159                 el = &mlxsw_sp_pude_event;
2160                 break;
2161         }
2162         mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
2163 }
2164
/* RX handler for packets trapped to the CPU: account them in the
 * port's per-CPU stats and hand them to the network stack through the
 * port netdev.
 */
static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
				      void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	/* skb->len is read before eth_type_trans() below adjusts the
	 * skb, so rx_bytes counts the full frame including the
	 * Ethernet header.
	 */
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}
2189
2190 static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
2191         {
2192                 .func = mlxsw_sp_rx_listener_func,
2193                 .local_port = MLXSW_PORT_DONT_CARE,
2194                 .trap_id = MLXSW_TRAP_ID_FDB_MC,
2195         },
2196         /* Traps for specific L2 packet types, not trapped as FDB MC */
2197         {
2198                 .func = mlxsw_sp_rx_listener_func,
2199                 .local_port = MLXSW_PORT_DONT_CARE,
2200                 .trap_id = MLXSW_TRAP_ID_STP,
2201         },
2202         {
2203                 .func = mlxsw_sp_rx_listener_func,
2204                 .local_port = MLXSW_PORT_DONT_CARE,
2205                 .trap_id = MLXSW_TRAP_ID_LACP,
2206         },
2207         {
2208                 .func = mlxsw_sp_rx_listener_func,
2209                 .local_port = MLXSW_PORT_DONT_CARE,
2210                 .trap_id = MLXSW_TRAP_ID_EAPOL,
2211         },
2212         {
2213                 .func = mlxsw_sp_rx_listener_func,
2214                 .local_port = MLXSW_PORT_DONT_CARE,
2215                 .trap_id = MLXSW_TRAP_ID_LLDP,
2216         },
2217         {
2218                 .func = mlxsw_sp_rx_listener_func,
2219                 .local_port = MLXSW_PORT_DONT_CARE,
2220                 .trap_id = MLXSW_TRAP_ID_MMRP,
2221         },
2222         {
2223                 .func = mlxsw_sp_rx_listener_func,
2224                 .local_port = MLXSW_PORT_DONT_CARE,
2225                 .trap_id = MLXSW_TRAP_ID_MVRP,
2226         },
2227         {
2228                 .func = mlxsw_sp_rx_listener_func,
2229                 .local_port = MLXSW_PORT_DONT_CARE,
2230                 .trap_id = MLXSW_TRAP_ID_RPVST,
2231         },
2232         {
2233                 .func = mlxsw_sp_rx_listener_func,
2234                 .local_port = MLXSW_PORT_DONT_CARE,
2235                 .trap_id = MLXSW_TRAP_ID_DHCP,
2236         },
2237         {
2238                 .func = mlxsw_sp_rx_listener_func,
2239                 .local_port = MLXSW_PORT_DONT_CARE,
2240                 .trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
2241         },
2242         {
2243                 .func = mlxsw_sp_rx_listener_func,
2244                 .local_port = MLXSW_PORT_DONT_CARE,
2245                 .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
2246         },
2247         {
2248                 .func = mlxsw_sp_rx_listener_func,
2249                 .local_port = MLXSW_PORT_DONT_CARE,
2250                 .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
2251         },
2252         {
2253                 .func = mlxsw_sp_rx_listener_func,
2254                 .local_port = MLXSW_PORT_DONT_CARE,
2255                 .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
2256         },
2257         {
2258                 .func = mlxsw_sp_rx_listener_func,
2259                 .local_port = MLXSW_PORT_DONT_CARE,
2260                 .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
2261         },
2262 };
2263
/* Create the RX and control trap groups, then register an RX listener
 * and a trap-to-CPU policy for every entry in mlxsw_sp_rx_listener[].
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
						      &mlxsw_sp_rx_listener[i],
						      mlxsw_sp);
		if (err)
			goto err_rx_listener_register;

		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
				    mlxsw_sp_rx_listener[i].trap_id);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
		if (err)
			goto err_rx_trap_set;
	}
	return 0;

err_rx_trap_set:
	/* Entry i was registered but its trap action failed; drop that
	 * listener before unwinding the fully set-up entries below.
	 */
	mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
					  &mlxsw_sp_rx_listener[i],
					  mlxsw_sp);
err_rx_listener_register:
	/* Entries 0..i-1 completed both steps: restore forwarding and
	 * unregister their listeners.
	 */
	for (i--; i >= 0; i--) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
	return err;
}
2312
2313 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
2314 {
2315         char hpkt_pl[MLXSW_REG_HPKT_LEN];
2316         int i;
2317
2318         for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
2319                 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
2320                                     mlxsw_sp_rx_listener[i].trap_id);
2321                 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2322
2323                 mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
2324                                                   &mlxsw_sp_rx_listener[i],
2325                                                   mlxsw_sp);
2326         }
2327 }
2328
2329 static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
2330                                  enum mlxsw_reg_sfgc_type type,
2331                                  enum mlxsw_reg_sfgc_bridge_type bridge_type)
2332 {
2333         enum mlxsw_flood_table_type table_type;
2334         enum mlxsw_sp_flood_table flood_table;
2335         char sfgc_pl[MLXSW_REG_SFGC_LEN];
2336
2337         if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
2338                 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
2339         else
2340                 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
2341
2342         if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
2343                 flood_table = MLXSW_SP_FLOOD_TABLE_UC;
2344         else
2345                 flood_table = MLXSW_SP_FLOOD_TABLE_BM;
2346
2347         mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
2348                             flood_table);
2349         return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
2350 }
2351
2352 static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
2353 {
2354         int type, err;
2355
2356         for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
2357                 if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
2358                         continue;
2359
2360                 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
2361                                             MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
2362                 if (err)
2363                         return err;
2364
2365                 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
2366                                             MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
2367                 if (err)
2368                         return err;
2369         }
2370
2371         return 0;
2372 }
2373
2374 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
2375 {
2376         char slcr_pl[MLXSW_REG_SLCR_LEN];
2377
2378         mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
2379                                      MLXSW_REG_SLCR_LAG_HASH_DMAC |
2380                                      MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
2381                                      MLXSW_REG_SLCR_LAG_HASH_VLANID |
2382                                      MLXSW_REG_SLCR_LAG_HASH_SIP |
2383                                      MLXSW_REG_SLCR_LAG_HASH_DIP |
2384                                      MLXSW_REG_SLCR_LAG_HASH_SPORT |
2385                                      MLXSW_REG_SLCR_LAG_HASH_DPORT |
2386                                      MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
2387         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
2388 }
2389
/* Driver init callback invoked by the mlxsw core. Sets up ports, PUDE
 * events, RX traps, flood tables, shared buffers, LAG and switchdev
 * support. On failure, previously initialized parts are torn down in
 * reverse order via the goto ladder below.
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;
	INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		return err;
	}

	/* PUDE events report physical port up/down transitions. */
	err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
		goto err_event_register;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
		goto err_rx_listener_register;
	}

	err = mlxsw_sp_flood_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	return 0;

	/* Flood and LAG setup have no explicit teardown, hence the
	 * shared labels on the unwind path.
	 */
err_switchdev_init:
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
err_flood_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_rx_listener_register:
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
err_event_register:
	mlxsw_sp_ports_remove(mlxsw_sp);
	return err;
}
2464
/* Driver fini callback; undoes mlxsw_sp_init() in reverse order. */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	mlxsw_sp_ports_remove(mlxsw_sp);
}
2475
/* Resource/configuration profile handed to the device at init time.
 * Each "used_*" flag set to 1 marks the corresponding value field as
 * valid to configure; everything else keeps firmware defaults.
 */
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels		= 1,
	.max_vepa_channels		= 0,
	.used_max_lag			= 1,
	.max_lag			= MLXSW_SP_LAG_MAX,
	.used_max_port_per_lag		= 1,
	.max_port_per_lag		= MLXSW_SP_PORT_PER_LAG_MAX,
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_max_pgt			= 1,
	.max_pgt			= 0,
	.used_max_system_port		= 1,
	.max_system_port		= 64,
	.used_max_vlan_groups		= 1,
	.max_vlan_groups		= 127,
	.used_max_regions		= 1,
	.max_regions			= 400,
	/* Two FID-offset flood tables plus two FID flood tables.
	 * NOTE(review): flood_mode 3 presumably selects the controlled
	 * flood mode - confirm against the device's register spec.
	 */
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_offset_flood_tables	= 2,
	.fid_offset_flood_table_size	= VLAN_N_VID - 1,
	.max_fid_flood_tables		= 2,
	.fid_flood_table_size		= MLXSW_SP_VFID_MAX,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.swid_config			= {
		{
			/* Single switch partition, Ethernet type. */
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
2511
/* mlxsw core driver entry points for the Spectrum ASIC. */
static struct mlxsw_driver mlxsw_sp_driver = {
	.kind				= MLXSW_DEVICE_KIND_SPECTRUM,
	.owner				= THIS_MODULE,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp_init,
	.fini				= mlxsw_sp_fini,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	/* devlink shared buffer (sb_*) hooks. */
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp_config_profile,
};
2534
2535 static int
2536 mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port)
2537 {
2538         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2539         char sfdf_pl[MLXSW_REG_SFDF_LEN];
2540
2541         mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT);
2542         mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port);
2543
2544         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2545 }
2546
2547 static int
2548 mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
2549                                     u16 fid)
2550 {
2551         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2552         char sfdf_pl[MLXSW_REG_SFDF_LEN];
2553
2554         mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
2555         mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
2556         mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
2557                                                 mlxsw_sp_port->local_port);
2558
2559         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2560 }
2561
2562 static int
2563 mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port)
2564 {
2565         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2566         char sfdf_pl[MLXSW_REG_SFDF_LEN];
2567
2568         mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG);
2569         mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
2570
2571         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2572 }
2573
2574 static int
2575 mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
2576                                       u16 fid)
2577 {
2578         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2579         char sfdf_pl[MLXSW_REG_SFDF_LEN];
2580
2581         mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
2582         mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
2583         mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
2584
2585         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2586 }
2587
2588 static int
2589 __mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port)
2590 {
2591         int err, last_err = 0;
2592         u16 vid;
2593
2594         for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
2595                 err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid);
2596                 if (err)
2597                         last_err = err;
2598         }
2599
2600         return last_err;
2601 }
2602
2603 static int
2604 __mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port)
2605 {
2606         int err, last_err = 0;
2607         u16 vid;
2608
2609         for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
2610                 err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid);
2611                 if (err)
2612                         last_err = err;
2613         }
2614
2615         return last_err;
2616 }
2617
2618 static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port)
2619 {
2620         if (!list_empty(&mlxsw_sp_port->vports_list))
2621                 if (mlxsw_sp_port->lagged)
2622                         return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port);
2623                 else
2624                         return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port);
2625         else
2626                 if (mlxsw_sp_port->lagged)
2627                         return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port);
2628                 else
2629                         return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port);
2630 }
2631
2632 static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport)
2633 {
2634         u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport);
2635         u16 fid = mlxsw_sp_vfid_to_fid(vfid);
2636
2637         if (mlxsw_sp_vport->lagged)
2638                 return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport,
2639                                                              fid);
2640         else
2641                 return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid);
2642 }
2643
2644 static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
2645 {
2646         return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
2647 }
2648
2649 static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
2650                                          struct net_device *br_dev)
2651 {
2652         return !mlxsw_sp->master_bridge.dev ||
2653                mlxsw_sp->master_bridge.dev == br_dev;
2654 }
2655
/* Account another port joining the (single) offloaded bridge. */
static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	mlxsw_sp->master_bridge.dev = br_dev;
	mlxsw_sp->master_bridge.ref_count++;
}
2662
2663 static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
2664 {
2665         if (--mlxsw_sp->master_bridge.ref_count == 0)
2666                 mlxsw_sp->master_bridge.dev = NULL;
2667 }
2668
2669 static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
2670                                      struct net_device *br_dev)
2671 {
2672         struct net_device *dev = mlxsw_sp_port->dev;
2673         int err;
2674
2675         /* When port is not bridged untagged packets are tagged with
2676          * PVID=VID=1, thereby creating an implicit VLAN interface in
2677          * the device. Remove it and let bridge code take care of its
2678          * own VLANs.
2679          */
2680         err = mlxsw_sp_port_kill_vid(dev, 0, 1);
2681         if (err)
2682                 return err;
2683
2684         mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev);
2685
2686         mlxsw_sp_port->learning = 1;
2687         mlxsw_sp_port->learning_sync = 1;
2688         mlxsw_sp_port->uc_flood = 1;
2689         mlxsw_sp_port->bridged = 1;
2690
2691         return 0;
2692 }
2693
2694 static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2695                                        bool flush_fdb)
2696 {
2697         struct net_device *dev = mlxsw_sp_port->dev;
2698
2699         if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port))
2700                 netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
2701
2702         mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
2703
2704         mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp);
2705
2706         mlxsw_sp_port->learning = 0;
2707         mlxsw_sp_port->learning_sync = 0;
2708         mlxsw_sp_port->uc_flood = 0;
2709         mlxsw_sp_port->bridged = 0;
2710
2711         /* Add implicit VLAN interface in the device, so that untagged
2712          * packets will be classified to the default vFID.
2713          */
2714         mlxsw_sp_port_add_vid(dev, 0, 1);
2715 }
2716
/* Create the LAG with the given ID in the device (SLDR register). */
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
2724
/* Destroy the LAG with the given ID in the device (SLDR register). */
static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
2732
/* Add the port to the LAG's collector at the given index (SLCOR). */
static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
2743
/* Remove the port from the LAG's collector (SLCOR). */
static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
2754
/* Enable collection on the port within the LAG (SLCOR). */
static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
2765
/* Disable collection on the port within the LAG (SLCOR). */
static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
2776
2777 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
2778                                   struct net_device *lag_dev,
2779                                   u16 *p_lag_id)
2780 {
2781         struct mlxsw_sp_upper *lag;
2782         int free_lag_id = -1;
2783         int i;
2784
2785         for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
2786                 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
2787                 if (lag->ref_count) {
2788                         if (lag->dev == lag_dev) {
2789                                 *p_lag_id = i;
2790                                 return 0;
2791                         }
2792                 } else if (free_lag_id < 0) {
2793                         free_lag_id = i;
2794                 }
2795         }
2796         if (free_lag_id < 0)
2797                 return -EBUSY;
2798         *p_lag_id = free_lag_id;
2799         return 0;
2800 }
2801
2802 static bool
2803 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
2804                           struct net_device *lag_dev,
2805                           struct netdev_lag_upper_info *lag_upper_info)
2806 {
2807         u16 lag_id;
2808
2809         if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
2810                 return false;
2811         if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
2812                 return false;
2813         return true;
2814 }
2815
2816 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
2817                                        u16 lag_id, u8 *p_port_index)
2818 {
2819         int i;
2820
2821         for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
2822                 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
2823                         *p_port_index = i;
2824                         return 0;
2825                 }
2826         }
2827         return -EBUSY;
2828 }
2829
2830 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
2831                                   struct net_device *lag_dev)
2832 {
2833         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2834         struct mlxsw_sp_upper *lag;
2835         u16 lag_id;
2836         u8 port_index;
2837         int err;
2838
2839         err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
2840         if (err)
2841                 return err;
2842         lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
2843         if (!lag->ref_count) {
2844                 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
2845                 if (err)
2846                         return err;
2847                 lag->dev = lag_dev;
2848         }
2849
2850         err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
2851         if (err)
2852                 return err;
2853         err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
2854         if (err)
2855                 goto err_col_port_add;
2856         err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
2857         if (err)
2858                 goto err_col_port_enable;
2859
2860         mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
2861                                    mlxsw_sp_port->local_port);
2862         mlxsw_sp_port->lag_id = lag_id;
2863         mlxsw_sp_port->lagged = 1;
2864         lag->ref_count++;
2865         return 0;
2866
2867 err_col_port_enable:
2868         mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
2869 err_col_port_add:
2870         if (!lag->ref_count)
2871                 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
2872         return err;
2873 }
2874
2875 static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
2876                                         struct net_device *br_dev,
2877                                         bool flush_fdb);
2878
/* Remove a port from its LAG. Bridges stacked on top of the LAG are
 * cleaned up here because their teardown notifications are never
 * delivered to us once the LAG is left. The LAG itself is destroyed
 * in the device when the last port leaves.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_upper *lag;
	u16 lag_id = mlxsw_sp_port->lag_id;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* In case we leave a LAG device that has bridges built on top,
	 * then their teardown sequence is never issued and we need to
	 * invoke the necessary cleanup routines ourselves.
	 */
	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct net_device *br_dev;

		if (!mlxsw_sp_vport->bridged)
			continue;

		br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
		/* flush_fdb=false: presumably covered by the LAG-wide
		 * flush below - confirm.
		 */
		mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false);
	}

	if (mlxsw_sp_port->bridged) {
		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false);
	}

	/* Last port out: flush the LAG's FDB and destroy it in HW. */
	if (lag->ref_count == 1) {
		if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port))
			netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	}

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;
}
2926
/* Add the port to the LAG's distributor, enabling TX via it (SLDR). */
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
2937
/* Remove the port from the LAG's distributor (SLDR). */
static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
2948
2949 static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
2950                                        bool lag_tx_enabled)
2951 {
2952         if (lag_tx_enabled)
2953                 return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
2954                                                   mlxsw_sp_port->lag_id);
2955         else
2956                 return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
2957                                                      mlxsw_sp_port->lag_id);
2958 }
2959
/* Reflect a bond/team lower state change (tx_enabled) into the device. */
static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}
2965
2966 static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
2967                                    struct net_device *vlan_dev)
2968 {
2969         struct mlxsw_sp_port *mlxsw_sp_vport;
2970         u16 vid = vlan_dev_vlan_id(vlan_dev);
2971
2972         mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
2973         if (WARN_ON(!mlxsw_sp_vport))
2974                 return -EINVAL;
2975
2976         mlxsw_sp_vport->dev = vlan_dev;
2977
2978         return 0;
2979 }
2980
2981 static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
2982                                       struct net_device *vlan_dev)
2983 {
2984         struct mlxsw_sp_port *mlxsw_sp_vport;
2985         u16 vid = vlan_dev_vlan_id(vlan_dev);
2986
2987         mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
2988         if (WARN_ON(!mlxsw_sp_vport))
2989                 return;
2990
2991         /* When removing a VLAN device while still bridged we should first
2992          * remove it from the bridge, as we receive the bridge's notification
2993          * when the vPort is already gone.
2994          */
2995         if (mlxsw_sp_vport->bridged) {
2996                 struct net_device *br_dev;
2997
2998                 br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
2999                 mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true);
3000         }
3001
3002         mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
3003 }
3004
/* Handle upper-device (VLAN / bridge / LAG) topology changes for one of
 * our port netdevs: NETDEV_PRECHANGEUPPER vetoes configurations the
 * device cannot offload, NETDEV_CHANGEUPPER applies the change.
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only VLAN, LAG and bridge uppers are supported. */
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev))
			return -EINVAL;
		/* Unlinking needs no validation. */
		if (!info->linking)
			break;
		/* HW limitation forbids to put ports to multiple bridges. */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return -EINVAL;
		/* A port carrying VLAN uppers cannot join a LAG. */
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		/* VLAN uppers of a LAG member must sit on the LAG itself. */
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
							      upper_dev);
			else
				 mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
							   upper_dev);
		} else if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								upper_dev);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port, true);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			else
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
		} else {
			/* PRECHANGEUPPER should have vetoed anything else. */
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}
3072
3073 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
3074                                                unsigned long event, void *ptr)
3075 {
3076         struct netdev_notifier_changelowerstate_info *info;
3077         struct mlxsw_sp_port *mlxsw_sp_port;
3078         int err;
3079
3080         mlxsw_sp_port = netdev_priv(dev);
3081         info = ptr;
3082
3083         switch (event) {
3084         case NETDEV_CHANGELOWERSTATE:
3085                 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
3086                         err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
3087                                                         info->lower_state_info);
3088                         if (err)
3089                                 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
3090                 }
3091                 break;
3092         }
3093
3094         return 0;
3095 }
3096
3097 static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
3098                                          unsigned long event, void *ptr)
3099 {
3100         switch (event) {
3101         case NETDEV_PRECHANGEUPPER:
3102         case NETDEV_CHANGEUPPER:
3103                 return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
3104         case NETDEV_CHANGELOWERSTATE:
3105                 return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
3106         }
3107
3108         return 0;
3109 }
3110
3111 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
3112                                         unsigned long event, void *ptr)
3113 {
3114         struct net_device *dev;
3115         struct list_head *iter;
3116         int ret;
3117
3118         netdev_for_each_lower_dev(lag_dev, dev, iter) {
3119                 if (mlxsw_sp_port_dev_check(dev)) {
3120                         ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
3121                         if (ret)
3122                                 return ret;
3123                 }
3124         }
3125
3126         return 0;
3127 }
3128
3129 static struct mlxsw_sp_vfid *
3130 mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp,
3131                       const struct net_device *br_dev)
3132 {
3133         struct mlxsw_sp_vfid *vfid;
3134
3135         list_for_each_entry(vfid, &mlxsw_sp->br_vfids.list, list) {
3136                 if (vfid->br_dev == br_dev)
3137                         return vfid;
3138         }
3139
3140         return NULL;
3141 }
3142
/* Bridge vFIDs come after the per-port vFIDs; convert a global vFID
 * index to its bridge-local index.
 */
static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid)
{
	return vfid - MLXSW_SP_VFID_PORT_MAX;
}
3147
/* Convert a bridge-local vFID index back to a global vFID index. */
static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid)
{
	return MLXSW_SP_VFID_PORT_MAX + br_vfid;
}
3152
/* First unused bridge vFID index; MLXSW_SP_VFID_BR_MAX when all are
 * taken (find_first_zero_bit() convention).
 */
static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->br_vfids.mapped,
				   MLXSW_SP_VFID_BR_MAX);
}
3158
/* Allocate a bridge vFID for 'br_dev': pick a free bridge vFID index,
 * create the corresponding FID in the device and record the vFID in the
 * driver's bridge vFID list. Returns an ERR_PTR() on failure.
 */
static struct mlxsw_sp_vfid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp,
						     struct net_device *br_dev)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_vfid *f;
	u16 vfid, fid;
	int err;

	/* When no bridge vFID is free the getter returns
	 * MLXSW_SP_VFID_BR_MAX, which presumably converts to
	 * MLXSW_SP_VFID_MAX here — TODO confirm against the constants.
	 */
	vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp));
	if (vfid == MLXSW_SP_VFID_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	/* Create the FID in the device before committing any host state,
	 * so failure here needs no host-side rollback.
	 */
	fid = mlxsw_sp_vfid_to_fid(vfid);
	err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
	if (err) {
		dev_err(dev, "Failed to create FID=%d\n", fid);
		return ERR_PTR(err);
	}

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		goto err_allocate_vfid;

	f->vfid = vfid;
	f->br_dev = br_dev;

	/* Commit: track the vFID and mark its index as in use. */
	list_add(&f->list, &mlxsw_sp->br_vfids.list);
	set_bit(mlxsw_sp_vfid_to_br_vfid(vfid), mlxsw_sp->br_vfids.mapped);

	return f;

err_allocate_vfid:
	/* Roll back the device-side FID created above. */
	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
	return ERR_PTR(-ENOMEM);
}
3196
3197 static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
3198                                      struct mlxsw_sp_vfid *vfid)
3199 {
3200         u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid->vfid);
3201         u16 fid = mlxsw_sp_vfid_to_fid(vfid->vfid);
3202
3203         clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped);
3204         list_del(&vfid->list);
3205
3206         mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
3207
3208         kfree(vfid);
3209 }
3210
3211 static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
3212                                         struct net_device *br_dev,
3213                                         bool flush_fdb)
3214 {
3215         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
3216         u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
3217         struct net_device *dev = mlxsw_sp_vport->dev;
3218         struct mlxsw_sp_vfid *vfid, *new_vfid;
3219         u16 fid, new_fid;
3220         int err;
3221
3222         vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
3223         if (WARN_ON(!vfid))
3224                 return;
3225
3226         /* We need a vFID to go back to after leaving the bridge's vFID. */
3227         new_vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
3228         if (!new_vfid) {
3229                 new_vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
3230                 if (IS_ERR(new_vfid)) {
3231                         netdev_err(dev, "Failed to create vFID for VID=%d\n",
3232                                    vid);
3233                         return;
3234                 }
3235         }
3236
3237         /* Invalidate existing {Port, VID} to vFID mapping and create a new
3238          * one for the new vFID.
3239          */
3240         fid = mlxsw_sp_vfid_to_fid(vfid->vfid);
3241         err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, fid, false);
3242         if (err) {
3243                 netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
3244                            vfid->vfid);
3245                 goto err_vport_fid_unmap;
3246         }
3247
3248         new_fid = mlxsw_sp_vfid_to_fid(new_vfid->vfid);
3249         err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, new_fid, true);
3250         if (err) {
3251                 netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
3252                            new_vfid->vfid);
3253                 goto err_vport_fid_map;
3254         }
3255
3256         err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
3257         if (err) {
3258                 netdev_err(dev, "Failed to disable learning\n");
3259                 goto err_port_vid_learning_set;
3260         }
3261
3262         err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, fid, false);
3263         if (err) {
3264                 netdev_err(dev, "Failed clear to clear flooding\n");
3265                 goto err_vport_flood_set;
3266         }
3267
3268         err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
3269                                           MLXSW_REG_SPMS_STATE_FORWARDING);
3270         if (err) {
3271                 netdev_err(dev, "Failed to set STP state\n");
3272                 goto err_port_stp_state_set;
3273         }
3274
3275         if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport))
3276                 netdev_err(dev, "Failed to flush FDB\n");
3277
3278         /* Switch between the vFIDs and destroy the old one if needed. */
3279         new_vfid->nr_vports++;
3280         mlxsw_sp_vport->vport.vfid = new_vfid;
3281         vfid->nr_vports--;
3282         if (!vfid->nr_vports)
3283                 mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
3284
3285         mlxsw_sp_vport->learning = 0;
3286         mlxsw_sp_vport->learning_sync = 0;
3287         mlxsw_sp_vport->uc_flood = 0;
3288         mlxsw_sp_vport->bridged = 0;
3289
3290         return;
3291
3292 err_port_stp_state_set:
3293 err_vport_flood_set:
3294 err_port_vid_learning_set:
3295 err_vport_fid_map:
3296 err_vport_fid_unmap:
3297         /* Rollback vFID only if new. */
3298         if (!new_vfid->nr_vports)
3299                 mlxsw_sp_vfid_destroy(mlxsw_sp, new_vfid);
3300 }
3301
/* Attach a vPort to 'br_dev': look up (or create) the bridge's vFID,
 * enable flooding and learning, and remap the {Port, VID} pair from the
 * old per-VID vFID to the bridge's vFID. On success the vPort's
 * bookkeeping flags are set; on failure all device state is unwound in
 * reverse order and a negative errno is returned.
 */
static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *br_dev)
{
	struct mlxsw_sp_vfid *old_vfid = mlxsw_sp_vport->vport.vfid;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	struct mlxsw_sp_vfid *vfid;
	u16 fid, old_fid;
	int err;

	/* First vPort of this bridge creates the shared bridge vFID. */
	vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
	if (!vfid) {
		vfid = mlxsw_sp_br_vfid_create(mlxsw_sp, br_dev);
		if (IS_ERR(vfid)) {
			netdev_err(dev, "Failed to create bridge vFID\n");
			return PTR_ERR(vfid);
		}
	}

	fid = mlxsw_sp_vfid_to_fid(vfid->vfid);
	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, fid, true);
	if (err) {
		netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
			   vfid->vfid);
		goto err_port_flood_set;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning\n");
		goto err_port_vid_learning_set;
	}

	/* We need to invalidate existing {Port, VID} to vFID mapping and
	 * create a new one for the bridge's vFID.
	 */
	old_fid = mlxsw_sp_vfid_to_fid(old_vfid->vfid);
	err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, old_fid, false);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
			   old_vfid->vfid);
		goto err_vport_fid_unmap;
	}

	err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, fid, true);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
			   vfid->vfid);
		goto err_vport_fid_map;
	}

	/* Switch between the vFIDs and destroy the old one if needed. */
	vfid->nr_vports++;
	mlxsw_sp_vport->vport.vfid = vfid;
	old_vfid->nr_vports--;
	if (!old_vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp, old_vfid);

	mlxsw_sp_vport->learning = 1;
	mlxsw_sp_vport->learning_sync = 1;
	mlxsw_sp_vport->uc_flood = 1;
	mlxsw_sp_vport->bridged = 1;

	return 0;

	/* Unwind in reverse order of the setup steps above. */
err_vport_fid_map:
	mlxsw_sp_vport_fid_map(mlxsw_sp_vport, old_fid, true);
err_vport_fid_unmap:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
err_port_vid_learning_set:
	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, fid, false);
err_port_flood_set:
	/* Destroy the bridge vFID only if we created it above. */
	if (!vfid->nr_vports)
		mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
	return err;
}
3379
3380 static bool
3381 mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
3382                                   const struct net_device *br_dev)
3383 {
3384         struct mlxsw_sp_port *mlxsw_sp_vport;
3385
3386         list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
3387                             vport.list) {
3388                 if (mlxsw_sp_vport_br_get(mlxsw_sp_vport) == br_dev)
3389                         return false;
3390         }
3391
3392         return true;
3393 }
3394
/* Handle upper-device notifier events for a VLAN interface on a port:
 * veto invalid topologies at PRECHANGEUPPER, and join/leave the bridge
 * vFID at CHANGEUPPER. 'vid' identifies the vPort on the port.
 */
static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
					  unsigned long event, void *ptr,
					  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct net_device *upper_dev;
	int err = 0;

	/* May be NULL; tolerated on unlink (see below). */
	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only bridge masters are supported as uppers of a vPort. */
		if (!netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* We can't have multiple VLAN interfaces configured on
		 * the same port and being members in the same bridge.
		 */
		if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
						       upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking) {
			if (WARN_ON(!mlxsw_sp_vport))
				return -EINVAL;
			err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
							 upper_dev);
		} else {
			/* We ignore bridge's unlinking notifications if vPort
			 * is gone, since we already left the bridge when the
			 * VLAN device was unlinked from the real device.
			 */
			if (!mlxsw_sp_vport)
				return 0;
			mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, upper_dev,
						    true);
		}
	}

	return err;
}
3442
3443 static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
3444                                               unsigned long event, void *ptr,
3445                                               u16 vid)
3446 {
3447         struct net_device *dev;
3448         struct list_head *iter;
3449         int ret;
3450
3451         netdev_for_each_lower_dev(lag_dev, dev, iter) {
3452                 if (mlxsw_sp_port_dev_check(dev)) {
3453                         ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
3454                                                              vid);
3455                         if (ret)
3456                                 return ret;
3457                 }
3458         }
3459
3460         return 0;
3461 }
3462
3463 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
3464                                          unsigned long event, void *ptr)
3465 {
3466         struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
3467         u16 vid = vlan_dev_vlan_id(vlan_dev);
3468
3469         if (mlxsw_sp_port_dev_check(real_dev))
3470                 return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
3471                                                       vid);
3472         else if (netif_is_lag_master(real_dev))
3473                 return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
3474                                                           vid);
3475
3476         return 0;
3477 }
3478
/* Top-level netdev notifier callback: classify the device (port, LAG
 * or VLAN upper) and dispatch. Unrelated devices yield NOTIFY_DONE via
 * notifier_from_errno(0).
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err;

	if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else
		err = 0;

	return notifier_from_errno(err);
}
3494
/* Netdev notifier block; registered in mlxsw_sp_module_init() and
 * unregistered in mlxsw_sp_module_exit().
 */
static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};
3498
3499 static int __init mlxsw_sp_module_init(void)
3500 {
3501         int err;
3502
3503         register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3504         err = mlxsw_core_driver_register(&mlxsw_sp_driver);
3505         if (err)
3506                 goto err_core_driver_register;
3507         return 0;
3508
3509 err_core_driver_register:
3510         unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3511         return err;
3512 }
3513
/* Module exit: tear down in reverse order of mlxsw_sp_module_init() —
 * core driver first, then the netdev notifier.
 */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}
3519
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
/* Binds this module to the Spectrum device kind advertised by mlxsw core. */
MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);