mlxsw: spectrum: Force link training according to admin state
[cascardo/linux.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum.c
1 /*
2  * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
3  * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4  * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5  * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6  * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/netdevice.h>
41 #include <linux/etherdevice.h>
42 #include <linux/ethtool.h>
43 #include <linux/slab.h>
44 #include <linux/device.h>
45 #include <linux/skbuff.h>
46 #include <linux/if_vlan.h>
47 #include <linux/if_bridge.h>
48 #include <linux/workqueue.h>
49 #include <linux/jiffies.h>
50 #include <linux/bitops.h>
51 #include <linux/list.h>
52 #include <linux/dcbnl.h>
53 #include <net/switchdev.h>
54 #include <generated/utsrelease.h>
55
56 #include "spectrum.h"
57 #include "core.h"
58 #include "reg.h"
59 #include "port.h"
60 #include "trap.h"
61 #include "txheader.h"
62
static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

/* Tx header field accessors, generated by MLXSW_ITEM32(). The header is
 * prepended to packets handed to the device (see mlxsw_sp_txhdr_construct()).
 */

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
133
134 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
135                                      const struct mlxsw_tx_info *tx_info)
136 {
137         char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
138
139         memset(txhdr, 0, MLXSW_TXHDR_LEN);
140
141         mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
142         mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
143         mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
144         mlxsw_tx_hdr_swid_set(txhdr, 0);
145         mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
146         mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
147         mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
148 }
149
150 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
151 {
152         char spad_pl[MLXSW_REG_SPAD_LEN];
153         int err;
154
155         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
156         if (err)
157                 return err;
158         mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
159         return 0;
160 }
161
162 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
163                                           bool is_up)
164 {
165         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
166         char paos_pl[MLXSW_REG_PAOS_LEN];
167
168         mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
169                             is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
170                             MLXSW_PORT_ADMIN_STATUS_DOWN);
171         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
172 }
173
174 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
175                                       unsigned char *addr)
176 {
177         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
178         char ppad_pl[MLXSW_REG_PPAD_LEN];
179
180         mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
181         mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
182         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
183 }
184
185 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
186 {
187         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
188         unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
189
190         ether_addr_copy(addr, mlxsw_sp->base_mac);
191         addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
192         return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
193 }
194
195 static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
196                                        u16 vid, enum mlxsw_reg_spms_state state)
197 {
198         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
199         char *spms_pl;
200         int err;
201
202         spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
203         if (!spms_pl)
204                 return -ENOMEM;
205         mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
206         mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
207         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
208         kfree(spms_pl);
209         return err;
210 }
211
212 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
213 {
214         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
215         char pmtu_pl[MLXSW_REG_PMTU_LEN];
216         int max_mtu;
217         int err;
218
219         mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
220         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
221         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
222         if (err)
223                 return err;
224         max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
225
226         if (mtu > max_mtu)
227                 return -EINVAL;
228
229         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
230         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
231 }
232
233 static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
234                                     u8 swid)
235 {
236         char pspa_pl[MLXSW_REG_PSPA_LEN];
237
238         mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
239         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
240 }
241
242 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
243 {
244         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
245
246         return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
247                                         swid);
248 }
249
250 static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
251                                      bool enable)
252 {
253         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
254         char svpe_pl[MLXSW_REG_SVPE_LEN];
255
256         mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
257         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
258 }
259
260 int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
261                                  enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
262                                  u16 vid)
263 {
264         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
265         char svfa_pl[MLXSW_REG_SVFA_LEN];
266
267         mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
268                             fid, vid);
269         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
270 }
271
272 static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
273                                           u16 vid, bool learn_enable)
274 {
275         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
276         char *spvmlr_pl;
277         int err;
278
279         spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
280         if (!spvmlr_pl)
281                 return -ENOMEM;
282         mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
283                               learn_enable);
284         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
285         kfree(spvmlr_pl);
286         return err;
287 }
288
289 static int
290 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
291 {
292         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
293         char sspr_pl[MLXSW_REG_SSPR_LEN];
294
295         mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
296         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
297 }
298
299 static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
300                                          u8 local_port, u8 *p_module,
301                                          u8 *p_width, u8 *p_lane)
302 {
303         char pmlp_pl[MLXSW_REG_PMLP_LEN];
304         int err;
305
306         mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
307         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
308         if (err)
309                 return err;
310         *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
311         *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
312         *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
313         return 0;
314 }
315
316 static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
317                                     u8 module, u8 width, u8 lane)
318 {
319         char pmlp_pl[MLXSW_REG_PMLP_LEN];
320         int i;
321
322         mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
323         mlxsw_reg_pmlp_width_set(pmlp_pl, width);
324         for (i = 0; i < width; i++) {
325                 mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
326                 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
327         }
328
329         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
330 }
331
332 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
333 {
334         char pmlp_pl[MLXSW_REG_PMLP_LEN];
335
336         mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
337         mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
338         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
339 }
340
341 static int mlxsw_sp_port_open(struct net_device *dev)
342 {
343         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
344         int err;
345
346         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
347         if (err)
348                 return err;
349         netif_start_queue(dev);
350         return 0;
351 }
352
353 static int mlxsw_sp_port_stop(struct net_device *dev)
354 {
355         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
356
357         netif_stop_queue(dev);
358         return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
359 }
360
/* ndo_start_xmit handler: prepend the Tx header and hand the packet to the
 * core for transmission. Returns NETDEV_TX_BUSY only when the core reports
 * it cannot accept the packet up front; all later failures consume the skb
 * and return NETDEV_TX_OK, bumping the per-CPU tx_dropped counter.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	/* Check for room before touching the skb so we can return BUSY
	 * without having modified it.
	 */
	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	/* Ensure there is headroom for the Tx header; reallocate if the
	 * skb arrived without enough.
	 */
	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
	}

	/* eth_skb_pad() frees the skb on failure, so no explicit free here. */
	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
416
/* ndo_set_rx_mode handler: intentionally empty. Presumably Rx filtering
 * changes need no per-port action on this device — confirm against the
 * device documentation before extending.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
420
421 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
422 {
423         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
424         struct sockaddr *addr = p;
425         int err;
426
427         if (!is_valid_ether_addr(addr->sa_data))
428                 return -EADDRNOTAVAIL;
429
430         err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
431         if (err)
432                 return err;
433         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
434         return 0;
435 }
436
437 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
438                                  bool pause_en, bool pfc_en, u16 delay)
439 {
440         u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);
441
442         delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
443                          MLXSW_SP_PAUSE_DELAY;
444
445         if (pause_en || pfc_en)
446                 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
447                                                     pg_size + delay, pg_size);
448         else
449                 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
450 }
451
/* Configure the port's headroom buffers (PBMC register) for @mtu. Each
 * priority group (PG) that has at least one priority mapped to it via
 * @prio_tc is (re)configured; a PG is made lossless when global pause is
 * enabled or when any priority mapped to it has PFC enabled in @my_pfc.
 * @my_pfc may be NULL, in which case PFC is treated as disabled.
 */
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	/* Read the current configuration so untouched PGs are preserved. */
	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;

		/* Only configure PG i if some priority maps to it; the PG
		 * is lossless if any such priority has PFC enabled.
		 */
		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}
486
487 static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
488                                       int mtu, bool pause_en)
489 {
490         u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
491         bool dcb_en = !!mlxsw_sp_port->dcb.ets;
492         struct ieee_pfc *my_pfc;
493         u8 *prio_tc;
494
495         prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
496         my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
497
498         return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
499                                             pause_en, my_pfc);
500 }
501
502 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
503 {
504         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
505         bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
506         int err;
507
508         err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
509         if (err)
510                 return err;
511         err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
512         if (err)
513                 goto err_port_mtu_set;
514         dev->mtu = mtu;
515         return 0;
516
517 err_port_mtu_set:
518         mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
519         return err;
520 }
521
/* ndo_get_stats64 handler: sum the per-CPU software counters into @stats.
 * The 64-bit counters are read under the u64_stats seqcount to get a
 * consistent snapshot per CPU; tx_dropped is a plain u32 and is read
 * without it (a torn read is impossible for a 32-bit value).
 */
static struct rtnl_link_stats64 *
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped	+= p->tx_dropped;
	}
	stats->tx_dropped	= tx_dropped;
	return stats;
}
553
554 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
555                            u16 vid_end, bool is_member, bool untagged)
556 {
557         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
558         char *spvm_pl;
559         int err;
560
561         spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
562         if (!spvm_pl)
563                 return -ENOMEM;
564
565         mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
566                             vid_end, is_member, untagged);
567         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
568         kfree(spvm_pl);
569         return err;
570 }
571
/* Transition the port to Virtual mode: create an explicit {Port, VID} to
 * FID mapping for every active VLAN and then enable Virtual Port mode.
 * On failure, already-created mappings are removed. last_visited_vid
 * bounds the rollback loop: the failing VID itself was not mapped, and
 * VLAN_N_VID is used when the final mode-set step fails (all VIDs mapped).
 */
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}
601
602 static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
603 {
604         enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
605         u16 vid;
606         int err;
607
608         err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
609         if (err)
610                 return err;
611
612         for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
613                 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
614                                                    vid, vid);
615                 if (err)
616                         return err;
617         }
618
619         return 0;
620 }
621
622 static struct mlxsw_sp_vfid *
623 mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid)
624 {
625         struct mlxsw_sp_vfid *vfid;
626
627         list_for_each_entry(vfid, &mlxsw_sp->port_vfids.list, list) {
628                 if (vfid->vid == vid)
629                         return vfid;
630         }
631
632         return NULL;
633 }
634
635 static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
636 {
637         return find_first_zero_bit(mlxsw_sp->port_vfids.mapped,
638                                    MLXSW_SP_VFID_PORT_MAX);
639 }
640
641 static int __mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid)
642 {
643         u16 fid = mlxsw_sp_vfid_to_fid(vfid);
644         char sfmr_pl[MLXSW_REG_SFMR_LEN];
645
646         mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, 0);
647         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
648 }
649
650 static void __mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid)
651 {
652         u16 fid = mlxsw_sp_vfid_to_fid(vfid);
653         char sfmr_pl[MLXSW_REG_SFMR_LEN];
654
655         mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, fid, 0);
656         mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
657 }
658
/* Allocate a free vFID, create its backing FID in the device, and track
 * it in the port_vfids list/bitmap. Returns the new vFID object or an
 * ERR_PTR: -ERANGE when no vFID is free, -ENOMEM on allocation failure
 * (in which case the device FID is destroyed again), or the device error.
 */
static struct mlxsw_sp_vfid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
						  u16 vid)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_vfid *vfid;
	u16 n_vfid;
	int err;

	n_vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
	if (n_vfid == MLXSW_SP_VFID_PORT_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	/* Create the FID in the device before committing any host state. */
	err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
	if (err) {
		dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
		return ERR_PTR(err);
	}

	vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
	if (!vfid)
		goto err_allocate_vfid;

	vfid->vfid = n_vfid;
	vfid->vid = vid;

	list_add(&vfid->list, &mlxsw_sp->port_vfids.list);
	set_bit(n_vfid, mlxsw_sp->port_vfids.mapped);

	return vfid;

err_allocate_vfid:
	/* Undo the device-side FID creation. */
	__mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
	return ERR_PTR(-ENOMEM);
}
695
696 static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
697                                   struct mlxsw_sp_vfid *vfid)
698 {
699         clear_bit(vfid->vfid, mlxsw_sp->port_vfids.mapped);
700         list_del(&vfid->list);
701
702         __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);
703
704         kfree(vfid);
705 }
706
707 static struct mlxsw_sp_port *
708 mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port,
709                            struct mlxsw_sp_vfid *vfid)
710 {
711         struct mlxsw_sp_port *mlxsw_sp_vport;
712
713         mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
714         if (!mlxsw_sp_vport)
715                 return NULL;
716
717         /* dev will be set correctly after the VLAN device is linked
718          * with the real device. In case of bridge SELF invocation, dev
719          * will remain as is.
720          */
721         mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
722         mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
723         mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
724         mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
725         mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
726         mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
727         mlxsw_sp_vport->vport.vfid = vfid;
728         mlxsw_sp_vport->vport.vid = vfid->vid;
729
730         list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);
731
732         return mlxsw_sp_vport;
733 }
734
735 static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
736 {
737         list_del(&mlxsw_sp_vport->vport.list);
738         kfree(mlxsw_sp_vport);
739 }
740
/* ndo_vlan_rx_add_vid handler: create a vPort for @vid on the port,
 * backed by a (possibly shared) vFID. On any failure, all previously
 * completed steps are undone in reverse order via the goto chain below.
 */
int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
			  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_vfid *vfid;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
		netdev_warn(dev, "VID=%d already configured\n", vid);
		return 0;
	}

	/* Reuse an existing vFID for this VID or create a new one. */
	vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
	if (!vfid) {
		vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
		if (IS_ERR(vfid)) {
			netdev_err(dev, "Failed to create vFID for VID=%d\n",
				   vid);
			return PTR_ERR(vfid);
		}
	}

	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vfid);
	if (!mlxsw_sp_vport) {
		netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
		err = -ENOMEM;
		goto err_port_vport_create;
	}

	/* First vPort on this vFID: set up flooding for it. */
	if (!vfid->nr_vports) {
		err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid,
					       true, false);
		if (err) {
			netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
				   vfid->vfid);
			goto err_vport_flood_set;
		}
	}

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to Virtual mode\n");
			goto err_port_vp_mode_trans;
		}
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   true,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n",
			   vid, vfid->vfid);
		goto err_port_vid_to_fid_set;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
	if (err) {
		netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
		goto err_port_vid_learning_set;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		goto err_port_add_vid;
	}

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		goto err_port_stp_state_set;
	}

	vfid->nr_vports++;

	return 0;

	/* Error path: undo the steps above in reverse order. */
err_port_stp_state_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
err_port_add_vid:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
err_port_vid_learning_set:
	mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
				     MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
				     mlxsw_sp_vfid_to_fid(vfid->vfid), vid);
err_port_vid_to_fid_set:
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
	if (!vfid->nr_vports)
		mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
					 false);
err_vport_flood_set:
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
err_port_vport_create:
	if (!vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
	return err;
}
857
/* .ndo_vlan_rx_kill_vid handler: undo mlxsw_sp_port_add_vid() in reverse
 * order - block the vPort, drop its VLAN membership, re-enable learning,
 * invalidate the {Port, VID} to vFID mapping, and finally destroy the
 * vPort (and the vFID itself once its last vPort is gone).
 */
int mlxsw_sp_port_kill_vid(struct net_device *dev,
			   __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_vfid *vfid;
	int err;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		netdev_warn(dev, "VID=%d does not exist\n", vid);
		return 0;
	}

	vfid = mlxsw_sp_vport->vport.vfid;

	/* Stop traffic on the vPort before tearing down its mappings. */
	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_DISCARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		return err;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		return err;
	}

	/* Learning was disabled by the add path; restore it. */
	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
		return err;
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   false,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
			   vid, vfid->vfid);
		return err;
	}

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set port's mode to VLAN mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to VLAN mode\n");
			return err;
		}
	}

	/* NOTE(review): failures above return without rolling back the
	 * steps already performed, leaving the vPort partially torn down.
	 */
	vfid->nr_vports--;
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);

	/* Destroy the vFID if no vPorts are assigned to it anymore. */
	if (!vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp_port->mlxsw_sp, vfid);

	return 0;
}
932
933 static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
934                                             size_t len)
935 {
936         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
937         u8 module = mlxsw_sp_port->mapping.module;
938         u8 width = mlxsw_sp_port->mapping.width;
939         u8 lane = mlxsw_sp_port->mapping.lane;
940         int err;
941
942         if (!mlxsw_sp_port->split)
943                 err = snprintf(name, len, "p%d", module + 1);
944         else
945                 err = snprintf(name, len, "p%ds%d", module + 1,
946                                lane / width);
947
948         if (err >= len)
949                 return -EINVAL;
950
951         return 0;
952 }
953
/* Network device operations for a Spectrum front-panel port. FDB and
 * bridge link configuration are delegated to the common switchdev helpers.
 */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open               = mlxsw_sp_port_open,
	.ndo_stop               = mlxsw_sp_port_stop,
	.ndo_start_xmit         = mlxsw_sp_port_xmit,
	.ndo_set_rx_mode        = mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address    = mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu         = mlxsw_sp_port_change_mtu,
	.ndo_get_stats64        = mlxsw_sp_port_get_stats64,
	.ndo_vlan_rx_add_vid    = mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid   = mlxsw_sp_port_kill_vid,
	.ndo_fdb_add            = switchdev_port_fdb_add,
	.ndo_fdb_del            = switchdev_port_fdb_del,
	.ndo_fdb_dump           = switchdev_port_fdb_dump,
	.ndo_bridge_setlink     = switchdev_port_bridge_setlink,
	.ndo_bridge_getlink     = switchdev_port_bridge_getlink,
	.ndo_bridge_dellink     = switchdev_port_bridge_dellink,
	.ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name,
};
972
973 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
974                                       struct ethtool_drvinfo *drvinfo)
975 {
976         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
977         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
978
979         strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
980         strlcpy(drvinfo->version, mlxsw_sp_driver_version,
981                 sizeof(drvinfo->version));
982         snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
983                  "%d.%d.%d",
984                  mlxsw_sp->bus_info->fw_rev.major,
985                  mlxsw_sp->bus_info->fw_rev.minor,
986                  mlxsw_sp->bus_info->fw_rev.subminor);
987         strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
988                 sizeof(drvinfo->bus_info));
989 }
990
991 static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
992                                          struct ethtool_pauseparam *pause)
993 {
994         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
995
996         pause->rx_pause = mlxsw_sp_port->link.rx_pause;
997         pause->tx_pause = mlxsw_sp_port->link.tx_pause;
998 }
999
1000 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
1001                                    struct ethtool_pauseparam *pause)
1002 {
1003         char pfcc_pl[MLXSW_REG_PFCC_LEN];
1004
1005         mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
1006         mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
1007         mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
1008
1009         return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
1010                                pfcc_pl);
1011 }
1012
1013 static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
1014                                         struct ethtool_pauseparam *pause)
1015 {
1016         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1017         bool pause_en = pause->tx_pause || pause->rx_pause;
1018         int err;
1019
1020         if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
1021                 netdev_err(dev, "PFC already enabled on port\n");
1022                 return -EINVAL;
1023         }
1024
1025         if (pause->autoneg) {
1026                 netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
1027                 return -EINVAL;
1028         }
1029
1030         err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1031         if (err) {
1032                 netdev_err(dev, "Failed to configure port's headroom\n");
1033                 return err;
1034         }
1035
1036         err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
1037         if (err) {
1038                 netdev_err(dev, "Failed to set PAUSE parameters\n");
1039                 goto err_port_pause_configure;
1040         }
1041
1042         mlxsw_sp_port->link.rx_pause = pause->rx_pause;
1043         mlxsw_sp_port->link.tx_pause = pause->tx_pause;
1044
1045         return 0;
1046
1047 err_port_pause_configure:
1048         pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
1049         mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1050         return err;
1051 }
1052
/* Descriptor of a single HW counter: the string reported to ethtool and
 * a getter extracting the counter value from a PPCNT register payload.
 */
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];	/* name shown by ethtool -S */
	u64 (*getter)(char *payload);	/* reads value from PPCNT payload */
};
1057
/* IEEE 802.3 counter group read via the PPCNT register. The entry order
 * defines the ethtool statistics layout and must stay in sync between
 * mlxsw_sp_port_get_strings() and mlxsw_sp_port_get_stats(), which both
 * iterate this array by index.
 */
static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
1138
1139 static void mlxsw_sp_port_get_strings(struct net_device *dev,
1140                                       u32 stringset, u8 *data)
1141 {
1142         u8 *p = data;
1143         int i;
1144
1145         switch (stringset) {
1146         case ETH_SS_STATS:
1147                 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
1148                         memcpy(p, mlxsw_sp_port_hw_stats[i].str,
1149                                ETH_GSTRING_LEN);
1150                         p += ETH_GSTRING_LEN;
1151                 }
1152                 break;
1153         }
1154 }
1155
1156 static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
1157                                      enum ethtool_phys_id_state state)
1158 {
1159         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1160         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1161         char mlcr_pl[MLXSW_REG_MLCR_LEN];
1162         bool active;
1163
1164         switch (state) {
1165         case ETHTOOL_ID_ACTIVE:
1166                 active = true;
1167                 break;
1168         case ETHTOOL_ID_INACTIVE:
1169                 active = false;
1170                 break;
1171         default:
1172                 return -EOPNOTSUPP;
1173         }
1174
1175         mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
1176         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
1177 }
1178
1179 static void mlxsw_sp_port_get_stats(struct net_device *dev,
1180                                     struct ethtool_stats *stats, u64 *data)
1181 {
1182         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1183         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1184         char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
1185         int i;
1186         int err;
1187
1188         mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port,
1189                              MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
1190         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
1191         for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
1192                 data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
1193 }
1194
1195 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
1196 {
1197         switch (sset) {
1198         case ETH_SS_STATS:
1199                 return MLXSW_SP_PORT_HW_STATS_LEN;
1200         default:
1201                 return -EOPNOTSUPP;
1202         }
1203 }
1204
/* Mapping between PTYS Ethernet protocol bits and the corresponding
 * ethtool link-mode bits and speed.
 */
struct mlxsw_sp_port_link_mode {
	u32 mask;	/* PTYS Ethernet protocol bit(s) for this mode */
	u32 supported;	/* ethtool SUPPORTED_* bit, 0 when none matches */
	u32 advertised;	/* ethtool ADVERTISED_* bit, 0 when none matches */
	u32 speed;	/* link speed in ethtool units (Mb/s) */
};
1211
/* PTYS protocol <-> ethtool link-mode translation table. Entries whose
 * speed has no legacy ethtool SUPPORTED_/ADVERTISED_ bit (e.g. 25G, 50G,
 * 100G) leave those fields zero and only contribute a speed.
 */
static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported	= SUPPORTED_100baseT_Full,
		.advertised	= ADVERTISED_100baseT_Full,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported	= SUPPORTED_1000baseKX_Full,
		.advertised	= ADVERTISED_1000baseKX_Full,
		.speed		= 1000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported	= SUPPORTED_10000baseT_Full,
		.advertised	= ADVERTISED_10000baseT_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported	= SUPPORTED_10000baseKX4_Full,
		.advertised	= ADVERTISED_10000baseKX4_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported	= SUPPORTED_10000baseKR_Full,
		.advertised	= ADVERTISED_10000baseKR_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported	= SUPPORTED_20000baseKR2_Full,
		.advertised	= ADVERTISED_20000baseKR2_Full,
		.speed		= 20000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported	= SUPPORTED_40000baseCR4_Full,
		.advertised	= ADVERTISED_40000baseCR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported	= SUPPORTED_40000baseKR4_Full,
		.advertised	= ADVERTISED_40000baseKR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported	= SUPPORTED_40000baseSR4_Full,
		.advertised	= ADVERTISED_40000baseSR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported	= SUPPORTED_40000baseLR4_Full,
		.advertised	= ADVERTISED_40000baseLR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed		= 25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed		= 50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.supported	= SUPPORTED_56000baseKR4_Full,
		.advertised	= ADVERTISED_56000baseKR4_Full,
		.speed		= 56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed		= 100000,
	},
};

#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
1310
1311 static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
1312 {
1313         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1314                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1315                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1316                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1317                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1318                               MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1319                 return SUPPORTED_FIBRE;
1320
1321         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1322                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1323                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1324                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
1325                               MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
1326                 return SUPPORTED_Backplane;
1327         return 0;
1328 }
1329
1330 static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
1331 {
1332         u32 modes = 0;
1333         int i;
1334
1335         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1336                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1337                         modes |= mlxsw_sp_port_link_mode[i].supported;
1338         }
1339         return modes;
1340 }
1341
1342 static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
1343 {
1344         u32 modes = 0;
1345         int i;
1346
1347         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1348                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1349                         modes |= mlxsw_sp_port_link_mode[i].advertised;
1350         }
1351         return modes;
1352 }
1353
1354 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
1355                                             struct ethtool_cmd *cmd)
1356 {
1357         u32 speed = SPEED_UNKNOWN;
1358         u8 duplex = DUPLEX_UNKNOWN;
1359         int i;
1360
1361         if (!carrier_ok)
1362                 goto out;
1363
1364         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1365                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
1366                         speed = mlxsw_sp_port_link_mode[i].speed;
1367                         duplex = DUPLEX_FULL;
1368                         break;
1369                 }
1370         }
1371 out:
1372         ethtool_cmd_speed_set(cmd, speed);
1373         cmd->duplex = duplex;
1374 }
1375
1376 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
1377 {
1378         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1379                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1380                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1381                               MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1382                 return PORT_FIBRE;
1383
1384         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1385                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1386                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
1387                 return PORT_DA;
1388
1389         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1390                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1391                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1392                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
1393                 return PORT_NONE;
1394
1395         return PORT_OTHER;
1396 }
1397
/* ethtool .get_settings handler: query the port's PTYS register and
 * translate the capability/admin/operational protocol masks into the
 * ethtool_cmd fields.
 */
static int mlxsw_sp_port_get_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 eth_proto_oper;
	int err;

	/* Pack with a zero admin mask: this is a read-only query. */
	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
			      &eth_proto_admin, &eth_proto_oper);

	cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
			 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
			 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
					eth_proto_oper, cmd);

	/* With no operational speed (link down) fall back to the
	 * capability mask for connector type / link partner reporting.
	 */
	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
	cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
	cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);

	cmd->transceiver = XCVR_INTERNAL;
	return 0;
}
1432
1433 static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
1434 {
1435         u32 ptys_proto = 0;
1436         int i;
1437
1438         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1439                 if (advertising & mlxsw_sp_port_link_mode[i].advertised)
1440                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1441         }
1442         return ptys_proto;
1443 }
1444
1445 static u32 mlxsw_sp_to_ptys_speed(u32 speed)
1446 {
1447         u32 ptys_proto = 0;
1448         int i;
1449
1450         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1451                 if (speed == mlxsw_sp_port_link_mode[i].speed)
1452                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1453         }
1454         return ptys_proto;
1455 }
1456
1457 static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
1458 {
1459         u32 ptys_proto = 0;
1460         int i;
1461
1462         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1463                 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
1464                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1465         }
1466         return ptys_proto;
1467 }
1468
/* ethtool .set_settings handler: compute the new PTYS admin protocol
 * mask (advertised modes when autoneg, all modes of the forced speed
 * otherwise), validate it against capabilities and write it, then bounce
 * the port's admin state so the new setting takes effect on a running
 * port.
 */
static int mlxsw_sp_port_set_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 speed;
	u32 eth_proto_new;
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	int err;

	speed = ethtool_cmd_speed(cmd);

	eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
		mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
		mlxsw_sp_to_ptys_speed(speed);

	/* Read the current capability and admin masks. */
	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);

	/* Only modes the port is capable of may be requested. */
	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "Not supported proto admin requested");
		return -EINVAL;
	}
	if (eth_proto_new == eth_proto_admin)
		return 0;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to set proto admin");
		return err;
	}

	/* Don't flap a port that is administratively down; the new
	 * setting will apply when it is brought up.
	 */
	if (!netif_running(dev))
		return 0;

	/* Toggle the port down and back up to apply the new protocol
	 * admin state (forces link re-training).
	 */
	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	return 0;
}
1527
/* ethtool operations for a Spectrum front-panel port. */
static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= mlxsw_sp_port_get_pauseparam,
	.set_pauseparam		= mlxsw_sp_port_set_pauseparam,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_settings		= mlxsw_sp_port_get_settings,
	.set_settings		= mlxsw_sp_port_set_settings,
};
1540
1541 static int
1542 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
1543 {
1544         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1545         u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
1546         char ptys_pl[MLXSW_REG_PTYS_LEN];
1547         u32 eth_proto_admin;
1548
1549         eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
1550         mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port,
1551                             eth_proto_admin);
1552         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1553 }
1554
1555 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
1556                           enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
1557                           bool dwrr, u8 dwrr_weight)
1558 {
1559         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1560         char qeec_pl[MLXSW_REG_QEEC_LEN];
1561
1562         mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1563                             next_index);
1564         mlxsw_reg_qeec_de_set(qeec_pl, true);
1565         mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
1566         mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
1567         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1568 }
1569
1570 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
1571                                   enum mlxsw_reg_qeec_hr hr, u8 index,
1572                                   u8 next_index, u32 maxrate)
1573 {
1574         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1575         char qeec_pl[MLXSW_REG_QEEC_LEN];
1576
1577         mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1578                             next_index);
1579         mlxsw_reg_qeec_mase_set(qeec_pl, true);
1580         mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
1581         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1582 }
1583
1584 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
1585                               u8 switch_prio, u8 tclass)
1586 {
1587         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1588         char qtct_pl[MLXSW_REG_QTCT_LEN];
1589
1590         mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
1591                             tclass);
1592         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
1593 }
1594
/* Initialize the port's egress scheduling: build the QoS element
 * hierarchy (group -> subgroups -> TCs), disable all max shapers and
 * map every switch priority to traffic class 0.
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Set up the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all members in the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
				    0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
					    false, 0);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that
	 * support it.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
1656
1657 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1658                                 bool split, u8 module, u8 width, u8 lane)
1659 {
1660         struct mlxsw_sp_port *mlxsw_sp_port;
1661         struct net_device *dev;
1662         size_t bytes;
1663         int err;
1664
1665         dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
1666         if (!dev)
1667                 return -ENOMEM;
1668         mlxsw_sp_port = netdev_priv(dev);
1669         mlxsw_sp_port->dev = dev;
1670         mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1671         mlxsw_sp_port->local_port = local_port;
1672         mlxsw_sp_port->split = split;
1673         mlxsw_sp_port->mapping.module = module;
1674         mlxsw_sp_port->mapping.width = width;
1675         mlxsw_sp_port->mapping.lane = lane;
1676         bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
1677         mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
1678         if (!mlxsw_sp_port->active_vlans) {
1679                 err = -ENOMEM;
1680                 goto err_port_active_vlans_alloc;
1681         }
1682         mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
1683         if (!mlxsw_sp_port->untagged_vlans) {
1684                 err = -ENOMEM;
1685                 goto err_port_untagged_vlans_alloc;
1686         }
1687         INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
1688
1689         mlxsw_sp_port->pcpu_stats =
1690                 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
1691         if (!mlxsw_sp_port->pcpu_stats) {
1692                 err = -ENOMEM;
1693                 goto err_alloc_stats;
1694         }
1695
1696         dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
1697         dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
1698
1699         err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
1700         if (err) {
1701                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
1702                         mlxsw_sp_port->local_port);
1703                 goto err_dev_addr_init;
1704         }
1705
1706         netif_carrier_off(dev);
1707
1708         dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
1709                          NETIF_F_HW_VLAN_CTAG_FILTER;
1710
1711         /* Each packet needs to have a Tx header (metadata) on top all other
1712          * headers.
1713          */
1714         dev->hard_header_len += MLXSW_TXHDR_LEN;
1715
1716         err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
1717         if (err) {
1718                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
1719                         mlxsw_sp_port->local_port);
1720                 goto err_port_system_port_mapping_set;
1721         }
1722
1723         err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
1724         if (err) {
1725                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
1726                         mlxsw_sp_port->local_port);
1727                 goto err_port_swid_set;
1728         }
1729
1730         err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
1731         if (err) {
1732                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
1733                         mlxsw_sp_port->local_port);
1734                 goto err_port_speed_by_width_set;
1735         }
1736
1737         err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
1738         if (err) {
1739                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
1740                         mlxsw_sp_port->local_port);
1741                 goto err_port_mtu_set;
1742         }
1743
1744         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1745         if (err)
1746                 goto err_port_admin_status_set;
1747
1748         err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
1749         if (err) {
1750                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
1751                         mlxsw_sp_port->local_port);
1752                 goto err_port_buffers_init;
1753         }
1754
1755         err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
1756         if (err) {
1757                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
1758                         mlxsw_sp_port->local_port);
1759                 goto err_port_ets_init;
1760         }
1761
1762         /* ETS and buffers must be initialized before DCB. */
1763         err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
1764         if (err) {
1765                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
1766                         mlxsw_sp_port->local_port);
1767                 goto err_port_dcb_init;
1768         }
1769
1770         mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
1771         err = register_netdev(dev);
1772         if (err) {
1773                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
1774                         mlxsw_sp_port->local_port);
1775                 goto err_register_netdev;
1776         }
1777
1778         err = mlxsw_core_port_init(mlxsw_sp->core, &mlxsw_sp_port->core_port,
1779                                    mlxsw_sp_port->local_port, dev,
1780                                    mlxsw_sp_port->split, module);
1781         if (err) {
1782                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
1783                         mlxsw_sp_port->local_port);
1784                 goto err_core_port_init;
1785         }
1786
1787         err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
1788         if (err)
1789                 goto err_port_vlan_init;
1790
1791         mlxsw_sp->ports[local_port] = mlxsw_sp_port;
1792         return 0;
1793
1794 err_port_vlan_init:
1795         mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
1796 err_core_port_init:
1797         unregister_netdev(dev);
1798 err_register_netdev:
1799 err_port_dcb_init:
1800 err_port_ets_init:
1801 err_port_buffers_init:
1802 err_port_admin_status_set:
1803 err_port_mtu_set:
1804 err_port_speed_by_width_set:
1805 err_port_swid_set:
1806 err_port_system_port_mapping_set:
1807 err_dev_addr_init:
1808         free_percpu(mlxsw_sp_port->pcpu_stats);
1809 err_alloc_stats:
1810         kfree(mlxsw_sp_port->untagged_vlans);
1811 err_port_untagged_vlans_alloc:
1812         kfree(mlxsw_sp_port->active_vlans);
1813 err_port_active_vlans_alloc:
1814         free_netdev(dev);
1815         return err;
1816 }
1817
1818 static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
1819 {
1820         struct net_device *dev = mlxsw_sp_port->dev;
1821         struct mlxsw_sp_port *mlxsw_sp_vport, *tmp;
1822
1823         list_for_each_entry_safe(mlxsw_sp_vport, tmp,
1824                                  &mlxsw_sp_port->vports_list, vport.list) {
1825                 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
1826
1827                 /* vPorts created for VLAN devices should already be gone
1828                  * by now, since we unregistered the port netdev.
1829                  */
1830                 WARN_ON(is_vlan_dev(mlxsw_sp_vport->dev));
1831                 mlxsw_sp_port_kill_vid(dev, 0, vid);
1832         }
1833 }
1834
/* Unregister @local_port's netdev and release everything acquired by
 * mlxsw_sp_port_create(). Safe to call for ports that were never
 * created (e.g. unmapped local ports); it returns early in that case.
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	if (!mlxsw_sp_port)
		return;
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_vports_fini(mlxsw_sp_port);
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	/* Detach the port from its switch partition and module lanes. */
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	kfree(mlxsw_sp_port->untagged_vlans);
	kfree(mlxsw_sp_port->active_vlans);
	free_netdev(mlxsw_sp_port->dev);
}
1854
1855 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
1856 {
1857         int i;
1858
1859         for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
1860                 mlxsw_sp_port_remove(mlxsw_sp, i);
1861         kfree(mlxsw_sp->ports);
1862 }
1863
1864 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
1865 {
1866         u8 module, width, lane;
1867         size_t alloc_size;
1868         int i;
1869         int err;
1870
1871         alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
1872         mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
1873         if (!mlxsw_sp->ports)
1874                 return -ENOMEM;
1875
1876         for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
1877                 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
1878                                                     &width, &lane);
1879                 if (err)
1880                         goto err_port_module_info_get;
1881                 if (!width)
1882                         continue;
1883                 mlxsw_sp->port_to_module[i] = module;
1884                 err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width,
1885                                            lane);
1886                 if (err)
1887                         goto err_port_create;
1888         }
1889         return 0;
1890
1891 err_port_create:
1892 err_port_module_info_get:
1893         for (i--; i >= 1; i--)
1894                 mlxsw_sp_port_remove(mlxsw_sp, i);
1895         kfree(mlxsw_sp->ports);
1896         return err;
1897 }
1898
1899 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
1900 {
1901         u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
1902
1903         return local_port - offset;
1904 }
1905
/* Create @count split ports starting at @base_port, each taking an
 * equal share of @module's lanes. The unwind below is staged: i is
 * reset to count between labels so each stage reverts every iteration
 * of its corresponding setup loop, not just the ones before the
 * failure point.
 */
static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
				      u8 module, unsigned int count)
{
	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
	int err, i;

	/* Map each new port to its share of the module's lanes. */
	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
					       width, i * width);
		if (err)
			goto err_port_module_map;
	}

	/* Attach all new ports to switch partition 0. */
	for (i = 0; i < count; i++) {
		err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
		if (err)
			goto err_port_swid_set;
	}

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
					   module, width, i * width);
		if (err)
			goto err_port_create;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
	i = count;
err_port_swid_set:
	for (i--; i >= 0; i--)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
					 MLXSW_PORT_SWID_DISABLED_PORT);
	i = count;
err_port_module_map:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
	return err;
}
1948
/* Re-create the original full-width port(s) after a split is undone.
 * Return values are intentionally ignored; this is a best-effort
 * restoration path.
 */
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port, unsigned int count)
{
	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
	int i;

	/* Split by four means we need to re-create two ports, otherwise
	 * only one.
	 */
	count = count / 2;

	/* Re-map the full lane width to every other local port. */
	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
					 0);
	}

	for (i = 0; i < count; i++)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
				     width, 0);
	}
}
1979
/* devlink port-split handler: split @local_port into @count (2 or 4)
 * ports. The port must currently use the full module width, and the
 * sibling local ports the split would occupy must be free.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 module, cur_width, base_port;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	module = mlxsw_sp_port->mapping.module;
	cur_width = mlxsw_sp_port->mapping.width;

	if (count != 2 && count != 4) {
		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
		return -EINVAL;
	}

	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		return -EINVAL;
	}

	/* Make sure we have enough slave (even) ports for the split. */
	if (count == 2) {
		base_port = local_port;
		if (mlxsw_sp->ports[base_port + 1]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	} else {
		base_port = mlxsw_sp_cluster_base_port_get(local_port);
		if (mlxsw_sp->ports[base_port + 1] ||
		    mlxsw_sp->ports[base_port + 3]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	}

	/* Remove the existing port(s) before re-creating them split. */
	for (i = 0; i < count; i++)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	/* Best-effort restore of the original unsplit port(s). */
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
	return err;
}
2040
/* devlink port-unsplit handler: remove the split ports derived from
 * @local_port and re-create the original full-width port(s). The split
 * count is inferred from the current width (width 1 => split by four).
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 cur_width, base_port;
	unsigned int count;
	int i;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
		return -EINVAL;
	}

	cur_width = mlxsw_sp_port->mapping.width;
	count = cur_width == 1 ? 4 : 2;

	base_port = mlxsw_sp_cluster_base_port_get(local_port);

	/* Determine which ports to remove. */
	if (count == 2 && local_port >= base_port + 2)
		base_port = base_port + 2;

	for (i = 0; i < count; i++)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);

	return 0;
}
2077
2078 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2079                                      char *pude_pl, void *priv)
2080 {
2081         struct mlxsw_sp *mlxsw_sp = priv;
2082         struct mlxsw_sp_port *mlxsw_sp_port;
2083         enum mlxsw_reg_pude_oper_status status;
2084         u8 local_port;
2085
2086         local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2087         mlxsw_sp_port = mlxsw_sp->ports[local_port];
2088         if (!mlxsw_sp_port) {
2089                 dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n",
2090                          local_port);
2091                 return;
2092         }
2093
2094         status = mlxsw_reg_pude_oper_status_get(pude_pl);
2095         if (status == MLXSW_PORT_OPER_STATUS_UP) {
2096                 netdev_info(mlxsw_sp_port->dev, "link up\n");
2097                 netif_carrier_on(mlxsw_sp_port->dev);
2098         } else {
2099                 netdev_info(mlxsw_sp_port->dev, "link down\n");
2100                 netif_carrier_off(mlxsw_sp_port->dev);
2101         }
2102 }
2103
/* Listener for PUDE (port up/down) events trapped to the CPU. */
static struct mlxsw_event_listener mlxsw_sp_pude_event = {
	.func = mlxsw_sp_pude_event_func,
	.trap_id = MLXSW_TRAP_ID_PUDE,
};
2108
2109 static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
2110                                    enum mlxsw_event_trap_id trap_id)
2111 {
2112         struct mlxsw_event_listener *el;
2113         char hpkt_pl[MLXSW_REG_HPKT_LEN];
2114         int err;
2115
2116         switch (trap_id) {
2117         case MLXSW_TRAP_ID_PUDE:
2118                 el = &mlxsw_sp_pude_event;
2119                 break;
2120         }
2121         err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
2122         if (err)
2123                 return err;
2124
2125         mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
2126         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2127         if (err)
2128                 goto err_event_trap_set;
2129
2130         return 0;
2131
2132 err_event_trap_set:
2133         mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
2134         return err;
2135 }
2136
2137 static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
2138                                       enum mlxsw_event_trap_id trap_id)
2139 {
2140         struct mlxsw_event_listener *el;
2141
2142         switch (trap_id) {
2143         case MLXSW_TRAP_ID_PUDE:
2144                 el = &mlxsw_sp_pude_event;
2145                 break;
2146         }
2147         mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
2148 }
2149
2150 static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
2151                                       void *priv)
2152 {
2153         struct mlxsw_sp *mlxsw_sp = priv;
2154         struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2155         struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
2156
2157         if (unlikely(!mlxsw_sp_port)) {
2158                 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
2159                                      local_port);
2160                 return;
2161         }
2162
2163         skb->dev = mlxsw_sp_port->dev;
2164
2165         pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
2166         u64_stats_update_begin(&pcpu_stats->syncp);
2167         pcpu_stats->rx_packets++;
2168         pcpu_stats->rx_bytes += skb->len;
2169         u64_stats_update_end(&pcpu_stats->syncp);
2170
2171         skb->protocol = eth_type_trans(skb, skb->dev);
2172         netif_receive_skb(skb);
2173 }
2174
2175 static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
2176         {
2177                 .func = mlxsw_sp_rx_listener_func,
2178                 .local_port = MLXSW_PORT_DONT_CARE,
2179                 .trap_id = MLXSW_TRAP_ID_FDB_MC,
2180         },
2181         /* Traps for specific L2 packet types, not trapped as FDB MC */
2182         {
2183                 .func = mlxsw_sp_rx_listener_func,
2184                 .local_port = MLXSW_PORT_DONT_CARE,
2185                 .trap_id = MLXSW_TRAP_ID_STP,
2186         },
2187         {
2188                 .func = mlxsw_sp_rx_listener_func,
2189                 .local_port = MLXSW_PORT_DONT_CARE,
2190                 .trap_id = MLXSW_TRAP_ID_LACP,
2191         },
2192         {
2193                 .func = mlxsw_sp_rx_listener_func,
2194                 .local_port = MLXSW_PORT_DONT_CARE,
2195                 .trap_id = MLXSW_TRAP_ID_EAPOL,
2196         },
2197         {
2198                 .func = mlxsw_sp_rx_listener_func,
2199                 .local_port = MLXSW_PORT_DONT_CARE,
2200                 .trap_id = MLXSW_TRAP_ID_LLDP,
2201         },
2202         {
2203                 .func = mlxsw_sp_rx_listener_func,
2204                 .local_port = MLXSW_PORT_DONT_CARE,
2205                 .trap_id = MLXSW_TRAP_ID_MMRP,
2206         },
2207         {
2208                 .func = mlxsw_sp_rx_listener_func,
2209                 .local_port = MLXSW_PORT_DONT_CARE,
2210                 .trap_id = MLXSW_TRAP_ID_MVRP,
2211         },
2212         {
2213                 .func = mlxsw_sp_rx_listener_func,
2214                 .local_port = MLXSW_PORT_DONT_CARE,
2215                 .trap_id = MLXSW_TRAP_ID_RPVST,
2216         },
2217         {
2218                 .func = mlxsw_sp_rx_listener_func,
2219                 .local_port = MLXSW_PORT_DONT_CARE,
2220                 .trap_id = MLXSW_TRAP_ID_DHCP,
2221         },
2222         {
2223                 .func = mlxsw_sp_rx_listener_func,
2224                 .local_port = MLXSW_PORT_DONT_CARE,
2225                 .trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
2226         },
2227         {
2228                 .func = mlxsw_sp_rx_listener_func,
2229                 .local_port = MLXSW_PORT_DONT_CARE,
2230                 .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
2231         },
2232         {
2233                 .func = mlxsw_sp_rx_listener_func,
2234                 .local_port = MLXSW_PORT_DONT_CARE,
2235                 .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
2236         },
2237         {
2238                 .func = mlxsw_sp_rx_listener_func,
2239                 .local_port = MLXSW_PORT_DONT_CARE,
2240                 .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
2241         },
2242         {
2243                 .func = mlxsw_sp_rx_listener_func,
2244                 .local_port = MLXSW_PORT_DONT_CARE,
2245                 .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
2246         },
2247 };
2248
/* Configure the RX and CTRL trap groups, then register every RX
 * listener and set its trap action to TRAP_TO_CPU. On failure,
 * already-configured traps are reverted to FORWARD and their listeners
 * unregistered.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
						      &mlxsw_sp_rx_listener[i],
						      mlxsw_sp);
		if (err)
			goto err_rx_listener_register;

		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
				    mlxsw_sp_rx_listener[i].trap_id);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
		if (err)
			goto err_rx_trap_set;
	}
	return 0;

err_rx_trap_set:
	/* Listener i was registered but its trap was not set; unregister
	 * it here before unwinding the fully-configured entries below.
	 */
	mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
					  &mlxsw_sp_rx_listener[i],
					  mlxsw_sp);
err_rx_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
	return err;
}
2297
2298 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
2299 {
2300         char hpkt_pl[MLXSW_REG_HPKT_LEN];
2301         int i;
2302
2303         for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
2304                 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
2305                                     mlxsw_sp_rx_listener[i].trap_id);
2306                 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2307
2308                 mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
2309                                                   &mlxsw_sp_rx_listener[i],
2310                                                   mlxsw_sp);
2311         }
2312 }
2313
2314 static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
2315                                  enum mlxsw_reg_sfgc_type type,
2316                                  enum mlxsw_reg_sfgc_bridge_type bridge_type)
2317 {
2318         enum mlxsw_flood_table_type table_type;
2319         enum mlxsw_sp_flood_table flood_table;
2320         char sfgc_pl[MLXSW_REG_SFGC_LEN];
2321
2322         if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
2323                 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
2324         else
2325                 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
2326
2327         if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
2328                 flood_table = MLXSW_SP_FLOOD_TABLE_UC;
2329         else
2330                 flood_table = MLXSW_SP_FLOOD_TABLE_BM;
2331
2332         mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
2333                             flood_table);
2334         return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
2335 }
2336
2337 static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
2338 {
2339         int type, err;
2340
2341         for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
2342                 if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
2343                         continue;
2344
2345                 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
2346                                             MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
2347                 if (err)
2348                         return err;
2349
2350                 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
2351                                             MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
2352                 if (err)
2353                         return err;
2354         }
2355
2356         return 0;
2357 }
2358
2359 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
2360 {
2361         char slcr_pl[MLXSW_REG_SLCR_LEN];
2362
2363         mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
2364                                      MLXSW_REG_SLCR_LAG_HASH_DMAC |
2365                                      MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
2366                                      MLXSW_REG_SLCR_LAG_HASH_VLANID |
2367                                      MLXSW_REG_SLCR_LAG_HASH_SIP |
2368                                      MLXSW_REG_SLCR_LAG_HASH_DIP |
2369                                      MLXSW_REG_SLCR_LAG_HASH_SPORT |
2370                                      MLXSW_REG_SLCR_LAG_HASH_DPORT |
2371                                      MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
2372         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
2373 }
2374
/* Main device init callback invoked by the mlxsw core once the bus is up.
 * Each step that fails unwinds every previously completed step via the
 * goto ladder below, in reverse order of initialization.
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;
	INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);

	/* Base MAC is needed before ports are created; nothing to unwind
	 * if it fails.
	 */
	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		return err;
	}

	/* PUDE: port up/down events from the device. */
	err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
		goto err_event_register;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
		goto err_rx_listener_register;
	}

	/* Flood tables have no dedicated fini; failure unwinds traps. */
	err = mlxsw_sp_flood_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	/* LAG setup is a single register write; no dedicated fini. */
	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	return 0;

err_switchdev_init:
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
err_flood_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_rx_listener_register:
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
err_event_register:
	mlxsw_sp_ports_remove(mlxsw_sp);
	return err;
}
2449
/* Device teardown; releases resources in the reverse order of
 * mlxsw_sp_init().
 */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	mlxsw_sp_ports_remove(mlxsw_sp);
}
2460
/* Resource profile applied to the device at init time. For each pair,
 * the "used_*" flag set to 1 indicates the accompanying value below is
 * valid and should be programmed.
 */
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels		= 1,
	.max_vepa_channels		= 0,	/* VEPA not used */
	.used_max_lag			= 1,
	.max_lag			= MLXSW_SP_LAG_MAX,
	.used_max_port_per_lag		= 1,
	.max_port_per_lag		= MLXSW_SP_PORT_PER_LAG_MAX,
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_max_pgt			= 1,
	.max_pgt			= 0,
	.used_max_system_port		= 1,
	.max_system_port		= 64,
	.used_max_vlan_groups		= 1,
	.max_vlan_groups		= 127,
	.used_max_regions		= 1,
	.max_regions			= 400,
	/* Two flood tables of each kind: one for UC, one for BC/MC. */
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_offset_flood_tables	= 2,
	.fid_offset_flood_table_size	= VLAN_N_VID - 1,
	.max_fid_flood_tables		= 2,
	.fid_flood_table_size		= MLXSW_SP_VFID_MAX,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,	/* no InfiniBand multicast */
	.used_max_pkey			= 1,
	.max_pkey			= 0,	/* no InfiniBand pkeys */
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
2496
/* mlxsw core driver operations for the Spectrum ASIC: lifecycle, port
 * split, shared-buffer (sb_*) configuration/occupancy and TX header
 * construction.
 */
static struct mlxsw_driver mlxsw_sp_driver = {
	.kind				= MLXSW_DEVICE_KIND_SPECTRUM,
	.owner				= THIS_MODULE,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp_init,
	.fini				= mlxsw_sp_fini,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp_config_profile,
};
2519
2520 static int
2521 mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port)
2522 {
2523         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2524         char sfdf_pl[MLXSW_REG_SFDF_LEN];
2525
2526         mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT);
2527         mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port);
2528
2529         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2530 }
2531
2532 static int
2533 mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
2534                                     u16 fid)
2535 {
2536         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2537         char sfdf_pl[MLXSW_REG_SFDF_LEN];
2538
2539         mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
2540         mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
2541         mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
2542                                                 mlxsw_sp_port->local_port);
2543
2544         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2545 }
2546
2547 static int
2548 mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port)
2549 {
2550         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2551         char sfdf_pl[MLXSW_REG_SFDF_LEN];
2552
2553         mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG);
2554         mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
2555
2556         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2557 }
2558
2559 static int
2560 mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
2561                                       u16 fid)
2562 {
2563         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2564         char sfdf_pl[MLXSW_REG_SFDF_LEN];
2565
2566         mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
2567         mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
2568         mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
2569
2570         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2571 }
2572
2573 static int
2574 __mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port)
2575 {
2576         int err, last_err = 0;
2577         u16 vid;
2578
2579         for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
2580                 err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid);
2581                 if (err)
2582                         last_err = err;
2583         }
2584
2585         return last_err;
2586 }
2587
2588 static int
2589 __mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port)
2590 {
2591         int err, last_err = 0;
2592         u16 vid;
2593
2594         for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
2595                 err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid);
2596                 if (err)
2597                         last_err = err;
2598         }
2599
2600         return last_err;
2601 }
2602
2603 static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port)
2604 {
2605         if (!list_empty(&mlxsw_sp_port->vports_list))
2606                 if (mlxsw_sp_port->lagged)
2607                         return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port);
2608                 else
2609                         return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port);
2610         else
2611                 if (mlxsw_sp_port->lagged)
2612                         return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port);
2613                 else
2614                         return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port);
2615 }
2616
2617 static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport)
2618 {
2619         u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport);
2620         u16 fid = mlxsw_sp_vfid_to_fid(vfid);
2621
2622         if (mlxsw_sp_vport->lagged)
2623                 return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport,
2624                                                              fid);
2625         else
2626                 return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid);
2627 }
2628
2629 static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
2630 {
2631         return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
2632 }
2633
2634 static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
2635 {
2636         struct net_device *dev = mlxsw_sp_port->dev;
2637         int err;
2638
2639         /* When port is not bridged untagged packets are tagged with
2640          * PVID=VID=1, thereby creating an implicit VLAN interface in
2641          * the device. Remove it and let bridge code take care of its
2642          * own VLANs.
2643          */
2644         err = mlxsw_sp_port_kill_vid(dev, 0, 1);
2645         if (err)
2646                 return err;
2647
2648         mlxsw_sp_port->learning = 1;
2649         mlxsw_sp_port->learning_sync = 1;
2650         mlxsw_sp_port->uc_flood = 1;
2651         mlxsw_sp_port->bridged = 1;
2652
2653         return 0;
2654 }
2655
2656 static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2657                                       bool flush_fdb)
2658 {
2659         struct net_device *dev = mlxsw_sp_port->dev;
2660
2661         if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port))
2662                 netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
2663
2664         mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
2665
2666         mlxsw_sp_port->learning = 0;
2667         mlxsw_sp_port->learning_sync = 0;
2668         mlxsw_sp_port->uc_flood = 0;
2669         mlxsw_sp_port->bridged = 0;
2670
2671         /* Add implicit VLAN interface in the device, so that untagged
2672          * packets will be classified to the default vFID.
2673          */
2674         return mlxsw_sp_port_add_vid(dev, 0, 1);
2675 }
2676
2677 static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
2678                                          struct net_device *br_dev)
2679 {
2680         return !mlxsw_sp->master_bridge.dev ||
2681                mlxsw_sp->master_bridge.dev == br_dev;
2682 }
2683
2684 static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
2685                                        struct net_device *br_dev)
2686 {
2687         mlxsw_sp->master_bridge.dev = br_dev;
2688         mlxsw_sp->master_bridge.ref_count++;
2689 }
2690
2691 static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp,
2692                                        struct net_device *br_dev)
2693 {
2694         if (--mlxsw_sp->master_bridge.ref_count == 0)
2695                 mlxsw_sp->master_bridge.dev = NULL;
2696 }
2697
2698 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
2699 {
2700         char sldr_pl[MLXSW_REG_SLDR_LEN];
2701
2702         mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
2703         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2704 }
2705
2706 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
2707 {
2708         char sldr_pl[MLXSW_REG_SLDR_LEN];
2709
2710         mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
2711         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2712 }
2713
2714 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
2715                                      u16 lag_id, u8 port_index)
2716 {
2717         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2718         char slcor_pl[MLXSW_REG_SLCOR_LEN];
2719
2720         mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
2721                                       lag_id, port_index);
2722         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
2723 }
2724
2725 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
2726                                         u16 lag_id)
2727 {
2728         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2729         char slcor_pl[MLXSW_REG_SLCOR_LEN];
2730
2731         mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
2732                                          lag_id);
2733         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
2734 }
2735
2736 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
2737                                         u16 lag_id)
2738 {
2739         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2740         char slcor_pl[MLXSW_REG_SLCOR_LEN];
2741
2742         mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
2743                                         lag_id);
2744         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
2745 }
2746
2747 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
2748                                          u16 lag_id)
2749 {
2750         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2751         char slcor_pl[MLXSW_REG_SLCOR_LEN];
2752
2753         mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
2754                                          lag_id);
2755         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
2756 }
2757
2758 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
2759                                   struct net_device *lag_dev,
2760                                   u16 *p_lag_id)
2761 {
2762         struct mlxsw_sp_upper *lag;
2763         int free_lag_id = -1;
2764         int i;
2765
2766         for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
2767                 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
2768                 if (lag->ref_count) {
2769                         if (lag->dev == lag_dev) {
2770                                 *p_lag_id = i;
2771                                 return 0;
2772                         }
2773                 } else if (free_lag_id < 0) {
2774                         free_lag_id = i;
2775                 }
2776         }
2777         if (free_lag_id < 0)
2778                 return -EBUSY;
2779         *p_lag_id = free_lag_id;
2780         return 0;
2781 }
2782
2783 static bool
2784 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
2785                           struct net_device *lag_dev,
2786                           struct netdev_lag_upper_info *lag_upper_info)
2787 {
2788         u16 lag_id;
2789
2790         if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
2791                 return false;
2792         if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
2793                 return false;
2794         return true;
2795 }
2796
2797 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
2798                                        u16 lag_id, u8 *p_port_index)
2799 {
2800         int i;
2801
2802         for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
2803                 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
2804                         *p_port_index = i;
2805                         return 0;
2806                 }
2807         }
2808         return -EBUSY;
2809 }
2810
/* Join the port to the LAG represented by lag_dev: create the LAG in the
 * device if this is its first member, add the port to the collector and
 * enable collection. On failure the partial state is unwound in reverse.
 */
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	/* First member creates the LAG in the device and binds lag_dev. */
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;
	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
	if (err)
		goto err_col_port_enable;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;
	return 0;

err_col_port_enable:
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	/* ref_count was not yet incremented, so a still-zero count means
	 * this call created the LAG and must destroy it again.
	 */
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}
2855
2856 static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
2857                                        struct net_device *br_dev,
2858                                        bool flush_fdb);
2859
/* Remove the port from its LAG, tearing down any bridge state built on
 * top of the LAG on this port's behalf, and destroy the LAG in the
 * device when the last member leaves.
 */
static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_upper *lag;
	u16 lag_id = mlxsw_sp_port->lag_id;
	int err;

	if (!mlxsw_sp_port->lagged)
		return 0;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
	if (err)
		return err;

	/* In case we leave a LAG device that has bridges built on top,
	 * then their teardown sequence is never issued and we need to
	 * invoke the necessary cleanup routines ourselves.
	 */
	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct net_device *br_dev;

		if (!mlxsw_sp_vport->bridged)
			continue;

		br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
		/* flush_fdb=false: the FDB is flushed per-LAG below. */
		mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false);
	}

	if (mlxsw_sp_port->bridged) {
		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false);
		mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL);
	}

	/* Last member: flush remaining LAG FDB entries and free the LAG. */
	if (lag->ref_count == 1) {
		if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port))
			netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
		err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
		if (err)
			return err;
	}

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;
	return 0;
}
2916
2917 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
2918                                       u16 lag_id)
2919 {
2920         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2921         char sldr_pl[MLXSW_REG_SLDR_LEN];
2922
2923         mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
2924                                          mlxsw_sp_port->local_port);
2925         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2926 }
2927
2928 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
2929                                          u16 lag_id)
2930 {
2931         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2932         char sldr_pl[MLXSW_REG_SLDR_LEN];
2933
2934         mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
2935                                             mlxsw_sp_port->local_port);
2936         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2937 }
2938
2939 static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
2940                                        bool lag_tx_enabled)
2941 {
2942         if (lag_tx_enabled)
2943                 return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
2944                                                   mlxsw_sp_port->lag_id);
2945         else
2946                 return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
2947                                                      mlxsw_sp_port->lag_id);
2948 }
2949
2950 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
2951                                      struct netdev_lag_lower_state_info *info)
2952 {
2953         return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
2954 }
2955
2956 static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
2957                                    struct net_device *vlan_dev)
2958 {
2959         struct mlxsw_sp_port *mlxsw_sp_vport;
2960         u16 vid = vlan_dev_vlan_id(vlan_dev);
2961
2962         mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
2963         if (!mlxsw_sp_vport) {
2964                 WARN_ON(!mlxsw_sp_vport);
2965                 return -EINVAL;
2966         }
2967
2968         mlxsw_sp_vport->dev = vlan_dev;
2969
2970         return 0;
2971 }
2972
2973 static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
2974                                      struct net_device *vlan_dev)
2975 {
2976         struct mlxsw_sp_port *mlxsw_sp_vport;
2977         u16 vid = vlan_dev_vlan_id(vlan_dev);
2978
2979         mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
2980         if (!mlxsw_sp_vport) {
2981                 WARN_ON(!mlxsw_sp_vport);
2982                 return -EINVAL;
2983         }
2984
2985         /* When removing a VLAN device while still bridged we should first
2986          * remove it from the bridge, as we receive the bridge's notification
2987          * when the vPort is already gone.
2988          */
2989         if (mlxsw_sp_vport->bridged) {
2990                 struct net_device *br_dev;
2991
2992                 br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
2993                 mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true);
2994         }
2995
2996         mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
2997
2998         return 0;
2999 }
3000
/* Handle PRECHANGEUPPER/CHANGEUPPER notifications for an mlxsw port:
 * veto unsupported topologies in the PRE phase and apply VLAN, bridge
 * and LAG join/leave in the CHANGE phase.
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only master-link events are vetted here. */
		if (!info->master || !info->linking)
			break;
		/* HW limitation forbids to put ports to multiple bridges. */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return NOTIFY_BAD;
		/* LAG uppers must have a free index and hash TX policy. */
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return NOTIFY_BAD;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
							      upper_dev);
				if (err) {
					netdev_err(dev, "Failed to link VLAN device\n");
					return NOTIFY_BAD;
				}
			} else {
				err = mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
								upper_dev);
				if (err) {
					netdev_err(dev, "Failed to unlink VLAN device\n");
					return NOTIFY_BAD;
				}
			}
		} else if (netif_is_bridge_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port);
				if (err) {
					netdev_err(dev, "Failed to join bridge\n");
					return NOTIFY_BAD;
				}
				mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
			} else {
				/* Drop the bridge reference even if the
				 * leave itself failed.
				 */
				err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
								 true);
				mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
				if (err) {
					netdev_err(dev, "Failed to leave bridge\n");
					return NOTIFY_BAD;
				}
			}
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
				if (err) {
					netdev_err(dev, "Failed to join link aggregation\n");
					return NOTIFY_BAD;
				}
			} else {
				err = mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							      upper_dev);
				if (err) {
					netdev_err(dev, "Failed to leave link aggregation\n");
					return NOTIFY_BAD;
				}
			}
		}
		break;
	}

	return NOTIFY_DONE;
}
3085
/* Handle CHANGELOWERSTATE notifications for an mlxsw port: when the port
 * is a LAG member, mirror the bond/team lower-state (TX enabled) into
 * the device. Errors are logged but never veto the notifier chain.
 */
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return NOTIFY_DONE;
}
3109
3110 static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
3111                                          unsigned long event, void *ptr)
3112 {
3113         switch (event) {
3114         case NETDEV_PRECHANGEUPPER:
3115         case NETDEV_CHANGEUPPER:
3116                 return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
3117         case NETDEV_CHANGELOWERSTATE:
3118                 return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
3119         }
3120
3121         return NOTIFY_DONE;
3122 }
3123
3124 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
3125                                         unsigned long event, void *ptr)
3126 {
3127         struct net_device *dev;
3128         struct list_head *iter;
3129         int ret;
3130
3131         netdev_for_each_lower_dev(lag_dev, dev, iter) {
3132                 if (mlxsw_sp_port_dev_check(dev)) {
3133                         ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
3134                         if (ret == NOTIFY_BAD)
3135                                 return ret;
3136                 }
3137         }
3138
3139         return NOTIFY_DONE;
3140 }
3141
3142 static struct mlxsw_sp_vfid *
3143 mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp,
3144                       const struct net_device *br_dev)
3145 {
3146         struct mlxsw_sp_vfid *vfid;
3147
3148         list_for_each_entry(vfid, &mlxsw_sp->br_vfids.list, list) {
3149                 if (vfid->br_dev == br_dev)
3150                         return vfid;
3151         }
3152
3153         return NULL;
3154 }
3155
3156 static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid)
3157 {
3158         return vfid - MLXSW_SP_VFID_PORT_MAX;
3159 }
3160
3161 static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid)
3162 {
3163         return MLXSW_SP_VFID_PORT_MAX + br_vfid;
3164 }
3165
3166 static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp)
3167 {
3168         return find_first_zero_bit(mlxsw_sp->br_vfids.mapped,
3169                                    MLXSW_SP_VFID_BR_MAX);
3170 }
3171
3172 static struct mlxsw_sp_vfid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp,
3173                                                      struct net_device *br_dev)
3174 {
3175         struct device *dev = mlxsw_sp->bus_info->dev;
3176         struct mlxsw_sp_vfid *vfid;
3177         u16 n_vfid;
3178         int err;
3179
3180         n_vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp));
3181         if (n_vfid == MLXSW_SP_VFID_MAX) {
3182                 dev_err(dev, "No available vFIDs\n");
3183                 return ERR_PTR(-ERANGE);
3184         }
3185
3186         err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
3187         if (err) {
3188                 dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
3189                 return ERR_PTR(err);
3190         }
3191
3192         vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
3193         if (!vfid)
3194                 goto err_allocate_vfid;
3195
3196         vfid->vfid = n_vfid;
3197         vfid->br_dev = br_dev;
3198
3199         list_add(&vfid->list, &mlxsw_sp->br_vfids.list);
3200         set_bit(mlxsw_sp_vfid_to_br_vfid(n_vfid), mlxsw_sp->br_vfids.mapped);
3201
3202         return vfid;
3203
3204 err_allocate_vfid:
3205         __mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
3206         return ERR_PTR(-ENOMEM);
3207 }
3208
3209 static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
3210                                      struct mlxsw_sp_vfid *vfid)
3211 {
3212         u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid->vfid);
3213
3214         clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped);
3215         list_del(&vfid->list);
3216
3217         __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);
3218
3219         kfree(vfid);
3220 }
3221
3222 static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
3223                                        struct net_device *br_dev,
3224                                        bool flush_fdb)
3225 {
3226         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
3227         u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
3228         struct net_device *dev = mlxsw_sp_vport->dev;
3229         struct mlxsw_sp_vfid *vfid, *new_vfid;
3230         int err;
3231
3232         vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
3233         if (!vfid) {
3234                 WARN_ON(!vfid);
3235                 return -EINVAL;
3236         }
3237
3238         /* We need a vFID to go back to after leaving the bridge's vFID. */
3239         new_vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
3240         if (!new_vfid) {
3241                 new_vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
3242                 if (IS_ERR(new_vfid)) {
3243                         netdev_err(dev, "Failed to create vFID for VID=%d\n",
3244                                    vid);
3245                         return PTR_ERR(new_vfid);
3246                 }
3247         }
3248
3249         /* Invalidate existing {Port, VID} to vFID mapping and create a new
3250          * one for the new vFID.
3251          */
3252         err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
3253                                            MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
3254                                            false,
3255                                            mlxsw_sp_vfid_to_fid(vfid->vfid),
3256                                            vid);
3257         if (err) {
3258                 netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
3259                            vfid->vfid);
3260                 goto err_port_vid_to_fid_invalidate;
3261         }
3262
3263         err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
3264                                            MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
3265                                            true,
3266                                            mlxsw_sp_vfid_to_fid(new_vfid->vfid),
3267                                            vid);
3268         if (err) {
3269                 netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
3270                            new_vfid->vfid);
3271                 goto err_port_vid_to_fid_validate;
3272         }
3273
3274         err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
3275         if (err) {
3276                 netdev_err(dev, "Failed to disable learning\n");
3277                 goto err_port_vid_learning_set;
3278         }
3279
3280         err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
3281                                        false);
3282         if (err) {
3283                 netdev_err(dev, "Failed clear to clear flooding\n");
3284                 goto err_vport_flood_set;
3285         }
3286
3287         err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
3288                                           MLXSW_REG_SPMS_STATE_FORWARDING);
3289         if (err) {
3290                 netdev_err(dev, "Failed to set STP state\n");
3291                 goto err_port_stp_state_set;
3292         }
3293
3294         if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport))
3295                 netdev_err(dev, "Failed to flush FDB\n");
3296
3297         /* Switch between the vFIDs and destroy the old one if needed. */
3298         new_vfid->nr_vports++;
3299         mlxsw_sp_vport->vport.vfid = new_vfid;
3300         vfid->nr_vports--;
3301         if (!vfid->nr_vports)
3302                 mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
3303
3304         mlxsw_sp_vport->learning = 0;
3305         mlxsw_sp_vport->learning_sync = 0;
3306         mlxsw_sp_vport->uc_flood = 0;
3307         mlxsw_sp_vport->bridged = 0;
3308
3309         return 0;
3310
3311 err_port_stp_state_set:
3312 err_vport_flood_set:
3313 err_port_vid_learning_set:
3314 err_port_vid_to_fid_validate:
3315 err_port_vid_to_fid_invalidate:
3316         /* Rollback vFID only if new. */
3317         if (!new_vfid->nr_vports)
3318                 mlxsw_sp_vfid_destroy(mlxsw_sp, new_vfid);
3319         return err;
3320 }
3321
3322 static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
3323                                       struct net_device *br_dev)
3324 {
3325         struct mlxsw_sp_vfid *old_vfid = mlxsw_sp_vport->vport.vfid;
3326         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
3327         u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
3328         struct net_device *dev = mlxsw_sp_vport->dev;
3329         struct mlxsw_sp_vfid *vfid;
3330         int err;
3331
3332         vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
3333         if (!vfid) {
3334                 vfid = mlxsw_sp_br_vfid_create(mlxsw_sp, br_dev);
3335                 if (IS_ERR(vfid)) {
3336                         netdev_err(dev, "Failed to create bridge vFID\n");
3337                         return PTR_ERR(vfid);
3338                 }
3339         }
3340
3341         err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, true, false);
3342         if (err) {
3343                 netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
3344                            vfid->vfid);
3345                 goto err_port_flood_set;
3346         }
3347
3348         err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
3349         if (err) {
3350                 netdev_err(dev, "Failed to enable learning\n");
3351                 goto err_port_vid_learning_set;
3352         }
3353
3354         /* We need to invalidate existing {Port, VID} to vFID mapping and
3355          * create a new one for the bridge's vFID.
3356          */
3357         err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
3358                                            MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
3359                                            false,
3360                                            mlxsw_sp_vfid_to_fid(old_vfid->vfid),
3361                                            vid);
3362         if (err) {
3363                 netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
3364                            old_vfid->vfid);
3365                 goto err_port_vid_to_fid_invalidate;
3366         }
3367
3368         err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
3369                                            MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
3370                                            true,
3371                                            mlxsw_sp_vfid_to_fid(vfid->vfid),
3372                                            vid);
3373         if (err) {
3374                 netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
3375                            vfid->vfid);
3376                 goto err_port_vid_to_fid_validate;
3377         }
3378
3379         /* Switch between the vFIDs and destroy the old one if needed. */
3380         vfid->nr_vports++;
3381         mlxsw_sp_vport->vport.vfid = vfid;
3382         old_vfid->nr_vports--;
3383         if (!old_vfid->nr_vports)
3384                 mlxsw_sp_vfid_destroy(mlxsw_sp, old_vfid);
3385
3386         mlxsw_sp_vport->learning = 1;
3387         mlxsw_sp_vport->learning_sync = 1;
3388         mlxsw_sp_vport->uc_flood = 1;
3389         mlxsw_sp_vport->bridged = 1;
3390
3391         return 0;
3392
3393 err_port_vid_to_fid_validate:
3394         mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
3395                                      MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
3396                                      mlxsw_sp_vfid_to_fid(old_vfid->vfid), vid);
3397 err_port_vid_to_fid_invalidate:
3398         mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
3399 err_port_vid_learning_set:
3400         mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, false);
3401 err_port_flood_set:
3402         if (!vfid->nr_vports)
3403                 mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
3404         return err;
3405 }
3406
3407 static bool
3408 mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
3409                                   const struct net_device *br_dev)
3410 {
3411         struct mlxsw_sp_port *mlxsw_sp_vport;
3412
3413         list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
3414                             vport.list) {
3415                 if (mlxsw_sp_vport_br_get(mlxsw_sp_vport) == br_dev)
3416                         return false;
3417         }
3418
3419         return true;
3420 }
3421
3422 static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
3423                                           unsigned long event, void *ptr,
3424                                           u16 vid)
3425 {
3426         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
3427         struct netdev_notifier_changeupper_info *info = ptr;
3428         struct mlxsw_sp_port *mlxsw_sp_vport;
3429         struct net_device *upper_dev;
3430         int err;
3431
3432         mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
3433
3434         switch (event) {
3435         case NETDEV_PRECHANGEUPPER:
3436                 upper_dev = info->upper_dev;
3437                 if (!info->master || !info->linking)
3438                         break;
3439                 if (!netif_is_bridge_master(upper_dev))
3440                         return NOTIFY_BAD;
3441                 /* We can't have multiple VLAN interfaces configured on
3442                  * the same port and being members in the same bridge.
3443                  */
3444                 if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
3445                                                        upper_dev))
3446                         return NOTIFY_BAD;
3447                 break;
3448         case NETDEV_CHANGEUPPER:
3449                 upper_dev = info->upper_dev;
3450                 if (!info->master)
3451                         break;
3452                 if (info->linking) {
3453                         if (!mlxsw_sp_vport) {
3454                                 WARN_ON(!mlxsw_sp_vport);
3455                                 return NOTIFY_BAD;
3456                         }
3457                         err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
3458                                                          upper_dev);
3459                         if (err) {
3460                                 netdev_err(dev, "Failed to join bridge\n");
3461                                 return NOTIFY_BAD;
3462                         }
3463                 } else {
3464                         /* We ignore bridge's unlinking notifications if vPort
3465                          * is gone, since we already left the bridge when the
3466                          * VLAN device was unlinked from the real device.
3467                          */
3468                         if (!mlxsw_sp_vport)
3469                                 return NOTIFY_DONE;
3470                         err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport,
3471                                                           upper_dev, true);
3472                         if (err) {
3473                                 netdev_err(dev, "Failed to leave bridge\n");
3474                                 return NOTIFY_BAD;
3475                         }
3476                 }
3477         }
3478
3479         return NOTIFY_DONE;
3480 }
3481
3482 static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
3483                                               unsigned long event, void *ptr,
3484                                               u16 vid)
3485 {
3486         struct net_device *dev;
3487         struct list_head *iter;
3488         int ret;
3489
3490         netdev_for_each_lower_dev(lag_dev, dev, iter) {
3491                 if (mlxsw_sp_port_dev_check(dev)) {
3492                         ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
3493                                                              vid);
3494                         if (ret == NOTIFY_BAD)
3495                                 return ret;
3496                 }
3497         }
3498
3499         return NOTIFY_DONE;
3500 }
3501
3502 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
3503                                          unsigned long event, void *ptr)
3504 {
3505         struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
3506         u16 vid = vlan_dev_vlan_id(vlan_dev);
3507
3508         if (mlxsw_sp_port_dev_check(real_dev))
3509                 return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
3510                                                       vid);
3511         else if (netif_is_lag_master(real_dev))
3512                 return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
3513                                                           vid);
3514
3515         return NOTIFY_DONE;
3516 }
3517
3518 static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
3519                                     unsigned long event, void *ptr)
3520 {
3521         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3522
3523         if (mlxsw_sp_port_dev_check(dev))
3524                 return mlxsw_sp_netdevice_port_event(dev, event, ptr);
3525
3526         if (netif_is_lag_master(dev))
3527                 return mlxsw_sp_netdevice_lag_event(dev, event, ptr);
3528
3529         if (is_vlan_dev(dev))
3530                 return mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
3531
3532         return NOTIFY_DONE;
3533 }
3534
3535 static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
3536         .notifier_call = mlxsw_sp_netdevice_event,
3537 };
3538
3539 static int __init mlxsw_sp_module_init(void)
3540 {
3541         int err;
3542
3543         register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3544         err = mlxsw_core_driver_register(&mlxsw_sp_driver);
3545         if (err)
3546                 goto err_core_driver_register;
3547         return 0;
3548
3549 err_core_driver_register:
3550         unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3551         return err;
3552 }
3553
3554 static void __exit mlxsw_sp_module_exit(void)
3555 {
3556         mlxsw_core_driver_unregister(&mlxsw_sp_driver);
3557         unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3558 }
3559
3560 module_init(mlxsw_sp_module_init);
3561 module_exit(mlxsw_sp_module_exit);
3562
3563 MODULE_LICENSE("Dual BSD/GPL");
3564 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
3565 MODULE_DESCRIPTION("Mellanox Spectrum driver");
3566 MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);