mlxsw: spectrum: Create a function to map vPort's FID
[cascardo/linux.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum.c
1 /*
2  * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
3  * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4  * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5  * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6  * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/netdevice.h>
41 #include <linux/etherdevice.h>
42 #include <linux/ethtool.h>
43 #include <linux/slab.h>
44 #include <linux/device.h>
45 #include <linux/skbuff.h>
46 #include <linux/if_vlan.h>
47 #include <linux/if_bridge.h>
48 #include <linux/workqueue.h>
49 #include <linux/jiffies.h>
50 #include <linux/bitops.h>
51 #include <linux/list.h>
52 #include <linux/notifier.h>
53 #include <linux/dcbnl.h>
54 #include <net/switchdev.h>
55 #include <generated/utsrelease.h>
56
57 #include "spectrum.h"
58 #include "core.h"
59 #include "reg.h"
60 #include "port.h"
61 #include "trap.h"
62 #include "txheader.h"
63
64 static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
65 static const char mlxsw_sp_driver_version[] = "1.0";
66
67 /* tx_hdr_version
68  * Tx header version.
69  * Must be set to 1.
70  */
71 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
72
73 /* tx_hdr_ctl
74  * Packet control type.
75  * 0 - Ethernet control (e.g. EMADs, LACP)
76  * 1 - Ethernet data
77  */
78 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
79
80 /* tx_hdr_proto
81  * Packet protocol type. Must be set to 1 (Ethernet).
82  */
83 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
84
85 /* tx_hdr_rx_is_router
86  * Packet is sent from the router. Valid for data packets only.
87  */
88 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
89
90 /* tx_hdr_fid_valid
91  * Indicates if the 'fid' field is valid and should be used for
92  * forwarding lookup. Valid for data packets only.
93  */
94 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
95
96 /* tx_hdr_swid
97  * Switch partition ID. Must be set to 0.
98  */
99 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
100
101 /* tx_hdr_control_tclass
102  * Indicates if the packet should use the control TClass and not one
103  * of the data TClasses.
104  */
105 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
106
107 /* tx_hdr_etclass
108  * Egress TClass to be used on the egress device on the egress port.
109  */
110 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
111
112 /* tx_hdr_port_mid
113  * Destination local port for unicast packets.
114  * Destination multicast ID for multicast packets.
115  *
116  * Control packets are directed to a specific egress port, while data
117  * packets are transmitted through the CPU port (0) into the switch partition,
118  * where forwarding rules are applied.
119  */
120 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
121
122 /* tx_hdr_fid
123  * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
124  * set, otherwise calculated based on the packet's VID using VID to FID mapping.
125  * Valid for data packets only.
126  */
127 MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
128
129 /* tx_hdr_type
130  * 0 - Data packets
131  * 6 - Control packets
132  */
133 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
134
135 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
136                                      const struct mlxsw_tx_info *tx_info)
137 {
138         char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
139
140         memset(txhdr, 0, MLXSW_TXHDR_LEN);
141
142         mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
143         mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
144         mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
145         mlxsw_tx_hdr_swid_set(txhdr, 0);
146         mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
147         mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
148         mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
149 }
150
151 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
152 {
153         char spad_pl[MLXSW_REG_SPAD_LEN];
154         int err;
155
156         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
157         if (err)
158                 return err;
159         mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
160         return 0;
161 }
162
163 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
164                                           bool is_up)
165 {
166         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
167         char paos_pl[MLXSW_REG_PAOS_LEN];
168
169         mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
170                             is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
171                             MLXSW_PORT_ADMIN_STATUS_DOWN);
172         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
173 }
174
175 static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
176                                          bool *p_is_up)
177 {
178         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
179         char paos_pl[MLXSW_REG_PAOS_LEN];
180         u8 oper_status;
181         int err;
182
183         mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
184         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
185         if (err)
186                 return err;
187         oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
188         *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
189         return 0;
190 }
191
192 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
193                                       unsigned char *addr)
194 {
195         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
196         char ppad_pl[MLXSW_REG_PPAD_LEN];
197
198         mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
199         mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
200         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
201 }
202
203 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
204 {
205         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
206         unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
207
208         ether_addr_copy(addr, mlxsw_sp->base_mac);
209         addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
210         return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
211 }
212
213 static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
214                                        u16 vid, enum mlxsw_reg_spms_state state)
215 {
216         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
217         char *spms_pl;
218         int err;
219
220         spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
221         if (!spms_pl)
222                 return -ENOMEM;
223         mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
224         mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
225         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
226         kfree(spms_pl);
227         return err;
228 }
229
230 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
231 {
232         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
233         char pmtu_pl[MLXSW_REG_PMTU_LEN];
234         int max_mtu;
235         int err;
236
237         mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
238         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
239         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
240         if (err)
241                 return err;
242         max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
243
244         if (mtu > max_mtu)
245                 return -EINVAL;
246
247         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
248         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
249 }
250
/* Assign a local port to a switch partition (PSPA register). */
static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}
259
/* Convenience wrapper around __mlxsw_sp_port_swid_set() for a port
 * structure instead of a raw local port number.
 */
static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
					swid);
}
267
/* Enable or disable virtual port mode for the port (SVPE register). */
static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}
277
/* Install (valid) or remove (!valid) a VID to FID mapping of type 'mt'
 * for the port, using the SVFA register.
 */
int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
			    fid, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}
289
290 static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
291                                           u16 vid, bool learn_enable)
292 {
293         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
294         char *spvmlr_pl;
295         int err;
296
297         spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
298         if (!spvmlr_pl)
299                 return -ENOMEM;
300         mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
301                               learn_enable);
302         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
303         kfree(spvmlr_pl);
304         return err;
305 }
306
/* Configure the port's system port mapping via the SSPR register. */
static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}
316
317 static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
318                                          u8 local_port, u8 *p_module,
319                                          u8 *p_width, u8 *p_lane)
320 {
321         char pmlp_pl[MLXSW_REG_PMLP_LEN];
322         int err;
323
324         mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
325         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
326         if (err)
327                 return err;
328         *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
329         *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
330         *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
331         return 0;
332 }
333
334 static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
335                                     u8 module, u8 width, u8 lane)
336 {
337         char pmlp_pl[MLXSW_REG_PMLP_LEN];
338         int i;
339
340         mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
341         mlxsw_reg_pmlp_width_set(pmlp_pl, width);
342         for (i = 0; i < width; i++) {
343                 mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
344                 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
345         }
346
347         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
348 }
349
/* Unmap the port's module by writing a zero width to PMLP. */
static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}
358
359 static int mlxsw_sp_port_open(struct net_device *dev)
360 {
361         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
362         int err;
363
364         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
365         if (err)
366                 return err;
367         netif_start_queue(dev);
368         return 0;
369 }
370
371 static int mlxsw_sp_port_stop(struct net_device *dev)
372 {
373         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
374
375         netif_stop_queue(dev);
376         return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
377 }
378
/* ndo_start_xmit handler. Prepends the device Tx header and hands the
 * skb to the core for transmission. Per-CPU Tx counters are updated on
 * success; drops are counted in tx_dropped. Always consumes the skb
 * except when returning NETDEV_TX_BUSY.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	/* Make room for the Tx header; reallocate the headroom if it is
	 * too small, dropping the packet when reallocation fails.
	 */
	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
	}

	/* eth_skb_pad() frees the skb on failure, so only the counter
	 * needs updating here.
	 */
	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* NOTE(review): len is sampled after the Tx header was pushed,
	 * so tx_bytes below includes MLXSW_TXHDR_LEN per packet.
	 */
	len = skb->len;
	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
430
/* ndo_set_rx_mode handler. Intentionally empty: no driver action is
 * taken when the netdev Rx mode changes.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
434
435 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
436 {
437         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
438         struct sockaddr *addr = p;
439         int err;
440
441         if (!is_valid_ether_addr(addr->sa_data))
442                 return -EADDRNOTAVAIL;
443
444         err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
445         if (err)
446                 return err;
447         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
448         return 0;
449 }
450
451 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
452                                  bool pause_en, bool pfc_en, u16 delay)
453 {
454         u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);
455
456         delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
457                          MLXSW_SP_PAUSE_DELAY;
458
459         if (pause_en || pfc_en)
460                 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
461                                                     pg_size + delay, pg_size);
462         else
463                 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
464 }
465
/* Configure the port's headroom buffers (PBMC register).
 *
 * Every priority group (PG) that has at least one priority mapped to it
 * via prio_tc is (re)sized for the given MTU; whether it is lossless is
 * decided by pause_en and by the PFC bit of the first priority found
 * mapped to that PG (see mlxsw_sp_pg_buf_pack()). my_pfc may be NULL,
 * in which case PFC is treated as disabled.
 */
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	/* Read the current configuration so unused PGs keep their
	 * existing settings.
	 */
	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;

		/* Find the first priority mapped to this PG, if any. */
		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}
500
501 static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
502                                       int mtu, bool pause_en)
503 {
504         u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
505         bool dcb_en = !!mlxsw_sp_port->dcb.ets;
506         struct ieee_pfc *my_pfc;
507         u8 *prio_tc;
508
509         prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
510         my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
511
512         return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
513                                             pause_en, my_pfc);
514 }
515
516 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
517 {
518         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
519         bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
520         int err;
521
522         err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
523         if (err)
524                 return err;
525         err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
526         if (err)
527                 goto err_port_mtu_set;
528         dev->mtu = mtu;
529         return 0;
530
531 err_port_mtu_set:
532         mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
533         return err;
534 }
535
/* ndo_get_stats64 handler. Aggregates the per-CPU Rx/Tx packet and
 * byte counters into 'stats', using the u64_stats seqcount to get a
 * consistent snapshot of each CPU's 64-bit counters.
 */
static struct rtnl_link_stats64 *
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped	+= p->tx_dropped;
	}
	stats->tx_dropped	= tx_dropped;
	return stats;
}
567
568 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
569                            u16 vid_end, bool is_member, bool untagged)
570 {
571         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
572         char *spvm_pl;
573         int err;
574
575         spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
576         if (!spvm_pl)
577                 return -ENOMEM;
578
579         mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
580                             vid_end, is_member, untagged);
581         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
582         kfree(spvm_pl);
583         return err;
584 }
585
/* Transition the port to Virtual mode: install an explicit {Port, VID}
 * to FID mapping for every active VLAN, then enable virtual port mode.
 * On failure, the mappings installed so far are removed again;
 * last_visited_vid bounds the rollback loop (VLAN_N_VID means all
 * mappings were installed and only the mode switch failed).
 */
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}
615
616 static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
617 {
618         enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
619         u16 vid;
620         int err;
621
622         err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
623         if (err)
624                 return err;
625
626         for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
627                 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
628                                                    vid, vid);
629                 if (err)
630                         return err;
631         }
632
633         return 0;
634 }
635
636 static struct mlxsw_sp_vfid *
637 mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid)
638 {
639         struct mlxsw_sp_vfid *vfid;
640
641         list_for_each_entry(vfid, &mlxsw_sp->port_vfids.list, list) {
642                 if (vfid->vid == vid)
643                         return vfid;
644         }
645
646         return NULL;
647 }
648
/* Find the first unused port vFID. Returns MLXSW_SP_VFID_PORT_MAX when
 * all vFIDs are in use.
 */
static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->port_vfids.mapped,
				   MLXSW_SP_VFID_PORT_MAX);
}
654
/* Create or destroy a FID in the device via the SFMR register. Note
 * the inverted flag expected by mlxsw_reg_sfmr_pack().
 */
static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
662
/* Allocate a free vFID for 'vid', create the corresponding FID in the
 * device and track the vFID on the switch's port_vfids list.
 * Returns ERR_PTR() on failure; the FID is destroyed again if the
 * tracking structure cannot be allocated.
 */
static struct mlxsw_sp_vfid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
						  u16 vid)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_vfid *f;
	u16 vfid, fid;
	int err;

	vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
	if (vfid == MLXSW_SP_VFID_PORT_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	/* Create the FID in the device before any software tracking. */
	fid = mlxsw_sp_vfid_to_fid(vfid);
	err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
	if (err) {
		dev_err(dev, "Failed to create FID=%d\n", fid);
		return ERR_PTR(err);
	}

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		goto err_allocate_vfid;

	f->vfid = vfid;
	f->vid = vid;

	list_add(&f->list, &mlxsw_sp->port_vfids.list);
	set_bit(vfid, mlxsw_sp->port_vfids.mapped);

	return f;

err_allocate_vfid:
	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
	return ERR_PTR(-ENOMEM);
}
700
701 static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
702                                   struct mlxsw_sp_vfid *vfid)
703 {
704         u16 fid = mlxsw_sp_vfid_to_fid(vfid->vfid);
705
706         clear_bit(vfid->vfid, mlxsw_sp->port_vfids.mapped);
707         list_del(&vfid->list);
708
709         mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
710
711         kfree(vfid);
712 }
713
/* Create a vPort on top of a port for the VID tracked by 'vfid'. The
 * vPort inherits the port's identity (local port, LAG state) and is
 * linked into the port's vports list. Returns NULL on allocation
 * failure.
 */
static struct mlxsw_sp_port *
mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_vfid *vfid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
	if (!mlxsw_sp_vport)
		return NULL;

	/* dev will be set correctly after the VLAN device is linked
	 * with the real device. In case of bridge SELF invocation, dev
	 * will remain as is.
	 */
	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
	mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
	mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
	mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
	mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
	mlxsw_sp_vport->vport.vfid = vfid;
	mlxsw_sp_vport->vport.vid = vfid->vid;

	list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);

	return mlxsw_sp_vport;
}
741
/* Unlink the vPort from its parent port's vports list and free it. */
static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	list_del(&mlxsw_sp_vport->vport.list);
	kfree(mlxsw_sp_vport);
}
747
/* Map (valid) or unmap (!valid) the vPort's {Port, VID} to the given
 * FID using a port-VID-to-FID SVFA entry.
 */
static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
				  bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
					    vid);
}
757
/* ndo_vlan_rx_add_vid handler. Creates a vPort (backed by a vFID) for
 * the VID on the port and configures flooding, {Port, VID} to FID
 * mapping, learning, VLAN membership and STP state for it. Everything
 * is unwound in reverse order on failure; the vFID itself is only
 * destroyed if this vPort was the first (and only) user.
 */
int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
			  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_vfid *vfid;
	u16 fid;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
		netdev_warn(dev, "VID=%d already configured\n", vid);
		return 0;
	}

	/* Reuse an existing vFID for this VID or create a new one. */
	vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
	if (!vfid) {
		vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
		if (IS_ERR(vfid)) {
			netdev_err(dev, "Failed to create vFID for VID=%d\n",
				   vid);
			return PTR_ERR(vfid);
		}
	}

	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vfid);
	if (!mlxsw_sp_vport) {
		netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
		err = -ENOMEM;
		goto err_port_vport_create;
	}

	/* Only the first vPort using the vFID sets up its flooding. */
	if (!vfid->nr_vports) {
		err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid,
					       true);
		if (err) {
			netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
				   vfid->vfid);
			goto err_vport_flood_set;
		}
	}

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to Virtual mode\n");
			goto err_port_vp_mode_trans;
		}
	}

	fid = mlxsw_sp_vfid_to_fid(vfid->vfid);
	err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, fid, true);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n",
			   vid, vfid->vfid);
		goto err_vport_fid_map;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
	if (err) {
		netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
		goto err_port_vid_learning_set;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		goto err_port_add_vid;
	}

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		goto err_port_stp_state_set;
	}

	vfid->nr_vports++;

	return 0;

err_port_stp_state_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
err_port_add_vid:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
err_port_vid_learning_set:
	mlxsw_sp_vport_fid_map(mlxsw_sp_vport, fid, false);
err_vport_fid_map:
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
	if (!vfid->nr_vports)
		mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false);
err_vport_flood_set:
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
err_port_vport_create:
	if (!vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
	return err;
}
869
/* ndo_vlan_rx_kill_vid handler: tear down the vPort that was created for
 * @vid by mlxsw_sp_port_add_vid() and release its vFID if this vPort was
 * the vFID's last user.
 *
 * NOTE(review): failures mid-teardown simply return, without re-doing the
 * steps already undone; the port may be left in a partially torn-down
 * state in that case.
 */
int mlxsw_sp_port_kill_vid(struct net_device *dev,
                           __be16 __always_unused proto, u16 vid)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp_port *mlxsw_sp_vport;
        struct mlxsw_sp_vfid *vfid;
        u16 fid;
        int err;

        /* VLAN 0 is removed from HW filter when device goes down, but
         * it is reserved in our case, so simply return.
         */
        if (!vid)
                return 0;

        mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
        if (!mlxsw_sp_vport) {
                netdev_warn(dev, "VID=%d does not exist\n", vid);
                return 0;
        }

        vfid = mlxsw_sp_vport->vport.vfid;

        /* Stop forwarding on the vPort before dismantling it. */
        err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
                                          MLXSW_REG_SPMS_STATE_DISCARDING);
        if (err) {
                netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
                return err;
        }

        err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
        if (err) {
                netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
                           vid);
                return err;
        }

        /* Learning was disabled on vPort creation; restore the default. */
        err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
        if (err) {
                netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
                return err;
        }

        /* Remove the explicit {Port, VID} to FID mapping. */
        fid = mlxsw_sp_vfid_to_fid(vfid->vfid);
        err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, fid, false);
        if (err) {
                netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
                           vid, vfid->vfid);
                return err;
        }

        /* When removing the last VLAN interface on a bridged port we need to
         * transition all active 802.1Q bridge VLANs to use VID to FID
         * mappings and set port's mode to VLAN mode.
         */
        if (list_is_singular(&mlxsw_sp_port->vports_list)) {
                err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
                if (err) {
                        netdev_err(dev, "Failed to set to VLAN mode\n");
                        return err;
                }
        }

        vfid->nr_vports--;
        mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);

        /* Destroy the vFID if no vPorts are assigned to it anymore. */
        if (!vfid->nr_vports)
                mlxsw_sp_vfid_destroy(mlxsw_sp_port->mlxsw_sp, vfid);

        return 0;
}
942
943 static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
944                                             size_t len)
945 {
946         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
947         u8 module = mlxsw_sp_port->mapping.module;
948         u8 width = mlxsw_sp_port->mapping.width;
949         u8 lane = mlxsw_sp_port->mapping.lane;
950         int err;
951
952         if (!mlxsw_sp_port->split)
953                 err = snprintf(name, len, "p%d", module + 1);
954         else
955                 err = snprintf(name, len, "p%ds%d", module + 1,
956                                lane / width);
957
958         if (err >= len)
959                 return -EINVAL;
960
961         return 0;
962 }
963
/* net_device callbacks for a Spectrum port netdev.  FDB and bridge
 * operations are delegated to the generic switchdev helpers.
 */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
        .ndo_open               = mlxsw_sp_port_open,
        .ndo_stop               = mlxsw_sp_port_stop,
        .ndo_start_xmit         = mlxsw_sp_port_xmit,
        .ndo_set_rx_mode        = mlxsw_sp_set_rx_mode,
        .ndo_set_mac_address    = mlxsw_sp_port_set_mac_address,
        .ndo_change_mtu         = mlxsw_sp_port_change_mtu,
        .ndo_get_stats64        = mlxsw_sp_port_get_stats64,
        .ndo_vlan_rx_add_vid    = mlxsw_sp_port_add_vid,
        .ndo_vlan_rx_kill_vid   = mlxsw_sp_port_kill_vid,
        .ndo_fdb_add            = switchdev_port_fdb_add,
        .ndo_fdb_del            = switchdev_port_fdb_del,
        .ndo_fdb_dump           = switchdev_port_fdb_dump,
        .ndo_bridge_setlink     = switchdev_port_bridge_setlink,
        .ndo_bridge_getlink     = switchdev_port_bridge_getlink,
        .ndo_bridge_dellink     = switchdev_port_bridge_dellink,
        .ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name,
};
982
983 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
984                                       struct ethtool_drvinfo *drvinfo)
985 {
986         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
987         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
988
989         strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
990         strlcpy(drvinfo->version, mlxsw_sp_driver_version,
991                 sizeof(drvinfo->version));
992         snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
993                  "%d.%d.%d",
994                  mlxsw_sp->bus_info->fw_rev.major,
995                  mlxsw_sp->bus_info->fw_rev.minor,
996                  mlxsw_sp->bus_info->fw_rev.subminor);
997         strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
998                 sizeof(drvinfo->bus_info));
999 }
1000
1001 static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
1002                                          struct ethtool_pauseparam *pause)
1003 {
1004         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1005
1006         pause->rx_pause = mlxsw_sp_port->link.rx_pause;
1007         pause->tx_pause = mlxsw_sp_port->link.tx_pause;
1008 }
1009
1010 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
1011                                    struct ethtool_pauseparam *pause)
1012 {
1013         char pfcc_pl[MLXSW_REG_PFCC_LEN];
1014
1015         mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
1016         mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
1017         mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
1018
1019         return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
1020                                pfcc_pl);
1021 }
1022
1023 static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
1024                                         struct ethtool_pauseparam *pause)
1025 {
1026         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1027         bool pause_en = pause->tx_pause || pause->rx_pause;
1028         int err;
1029
1030         if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
1031                 netdev_err(dev, "PFC already enabled on port\n");
1032                 return -EINVAL;
1033         }
1034
1035         if (pause->autoneg) {
1036                 netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
1037                 return -EINVAL;
1038         }
1039
1040         err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1041         if (err) {
1042                 netdev_err(dev, "Failed to configure port's headroom\n");
1043                 return err;
1044         }
1045
1046         err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
1047         if (err) {
1048                 netdev_err(dev, "Failed to set PAUSE parameters\n");
1049                 goto err_port_pause_configure;
1050         }
1051
1052         mlxsw_sp_port->link.rx_pause = pause->rx_pause;
1053         mlxsw_sp_port->link.tx_pause = pause->tx_pause;
1054
1055         return 0;
1056
1057 err_port_pause_configure:
1058         pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
1059         mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1060         return err;
1061 }
1062
/* One ethtool statistic: its string name and the getter used to extract
 * the 64-bit counter value from a PPCNT register payload.
 */
struct mlxsw_sp_port_hw_stats {
        char str[ETH_GSTRING_LEN];
        u64 (*getter)(char *payload);
};
1067
/* IEEE 802.3 counters exposed through ethtool -S, in reporting order.
 * Each entry pairs the user-visible string with the PPCNT field getter.
 */
static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
        {
                .str = "a_frames_transmitted_ok",
                .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
        },
        {
                .str = "a_frames_received_ok",
                .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
        },
        {
                .str = "a_frame_check_sequence_errors",
                .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
        },
        {
                .str = "a_alignment_errors",
                .getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
        },
        {
                .str = "a_octets_transmitted_ok",
                .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
        },
        {
                .str = "a_octets_received_ok",
                .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
        },
        {
                .str = "a_multicast_frames_xmitted_ok",
                .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
        },
        {
                .str = "a_broadcast_frames_xmitted_ok",
                .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
        },
        {
                .str = "a_multicast_frames_received_ok",
                .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
        },
        {
                .str = "a_broadcast_frames_received_ok",
                .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
        },
        {
                .str = "a_in_range_length_errors",
                .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
        },
        {
                .str = "a_out_of_range_length_field",
                .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
        },
        {
                .str = "a_frame_too_long_errors",
                .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
        },
        {
                .str = "a_symbol_error_during_carrier",
                .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
        },
        {
                .str = "a_mac_control_frames_transmitted",
                .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
        },
        {
                .str = "a_mac_control_frames_received",
                .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
        },
        {
                .str = "a_unsupported_opcodes_received",
                .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
        },
        {
                .str = "a_pause_mac_ctrl_frames_received",
                .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
        },
        {
                .str = "a_pause_mac_ctrl_frames_xmitted",
                .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
        },
};

/* Number of entries in the table above; also the sset count. */
#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
1148
1149 static void mlxsw_sp_port_get_strings(struct net_device *dev,
1150                                       u32 stringset, u8 *data)
1151 {
1152         u8 *p = data;
1153         int i;
1154
1155         switch (stringset) {
1156         case ETH_SS_STATS:
1157                 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
1158                         memcpy(p, mlxsw_sp_port_hw_stats[i].str,
1159                                ETH_GSTRING_LEN);
1160                         p += ETH_GSTRING_LEN;
1161                 }
1162                 break;
1163         }
1164 }
1165
1166 static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
1167                                      enum ethtool_phys_id_state state)
1168 {
1169         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1170         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1171         char mlcr_pl[MLXSW_REG_MLCR_LEN];
1172         bool active;
1173
1174         switch (state) {
1175         case ETHTOOL_ID_ACTIVE:
1176                 active = true;
1177                 break;
1178         case ETHTOOL_ID_INACTIVE:
1179                 active = false;
1180                 break;
1181         default:
1182                 return -EOPNOTSUPP;
1183         }
1184
1185         mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
1186         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
1187 }
1188
1189 static void mlxsw_sp_port_get_stats(struct net_device *dev,
1190                                     struct ethtool_stats *stats, u64 *data)
1191 {
1192         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1193         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1194         char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
1195         int i;
1196         int err;
1197
1198         mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port,
1199                              MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
1200         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
1201         for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
1202                 data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
1203 }
1204
1205 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
1206 {
1207         switch (sset) {
1208         case ETH_SS_STATS:
1209                 return MLXSW_SP_PORT_HW_STATS_LEN;
1210         default:
1211                 return -EOPNOTSUPP;
1212         }
1213 }
1214
/* Mapping between a PTYS protocol bitmask and the corresponding ethtool
 * SUPPORTED_*/ADVERTISED_* bits and link speed (in Mb/s).
 */
struct mlxsw_sp_port_link_mode {
        u32 mask;
        u32 supported;
        u32 advertised;
        u32 speed;
};
1221
/* PTYS protocol bit to ethtool link-mode translation table.  Entries
 * with no .supported/.advertised bits have no ethtool representation
 * and contribute only their speed.
 */
static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
                .supported      = SUPPORTED_100baseT_Full,
                .advertised     = ADVERTISED_100baseT_Full,
                .speed          = 100,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
                .speed          = 100,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
                                  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
                .supported      = SUPPORTED_1000baseKX_Full,
                .advertised     = ADVERTISED_1000baseKX_Full,
                .speed          = 1000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
                .supported      = SUPPORTED_10000baseT_Full,
                .advertised     = ADVERTISED_10000baseT_Full,
                .speed          = 10000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
                                  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
                .supported      = SUPPORTED_10000baseKX4_Full,
                .advertised     = ADVERTISED_10000baseKX4_Full,
                .speed          = 10000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
                                  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
                                  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
                                  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
                .supported      = SUPPORTED_10000baseKR_Full,
                .advertised     = ADVERTISED_10000baseKR_Full,
                .speed          = 10000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
                .supported      = SUPPORTED_20000baseKR2_Full,
                .advertised     = ADVERTISED_20000baseKR2_Full,
                .speed          = 20000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
                .supported      = SUPPORTED_40000baseCR4_Full,
                .advertised     = ADVERTISED_40000baseCR4_Full,
                .speed          = 40000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
                .supported      = SUPPORTED_40000baseKR4_Full,
                .advertised     = ADVERTISED_40000baseKR4_Full,
                .speed          = 40000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
                .supported      = SUPPORTED_40000baseSR4_Full,
                .advertised     = ADVERTISED_40000baseSR4_Full,
                .speed          = 40000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
                .supported      = SUPPORTED_40000baseLR4_Full,
                .advertised     = ADVERTISED_40000baseLR4_Full,
                .speed          = 40000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
                                  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
                                  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
                .speed          = 25000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
                                  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
                                  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
                .speed          = 50000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
                .supported      = SUPPORTED_56000baseKR4_Full,
                .advertised     = ADVERTISED_56000baseKR4_Full,
                .speed          = 56000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
                                  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
                                  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
                                  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
                .speed          = 100000,
        },
};

/* Number of entries in the translation table above. */
#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
1320
1321 static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
1322 {
1323         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1324                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1325                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1326                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1327                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1328                               MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1329                 return SUPPORTED_FIBRE;
1330
1331         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1332                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1333                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1334                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
1335                               MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
1336                 return SUPPORTED_Backplane;
1337         return 0;
1338 }
1339
1340 static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
1341 {
1342         u32 modes = 0;
1343         int i;
1344
1345         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1346                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1347                         modes |= mlxsw_sp_port_link_mode[i].supported;
1348         }
1349         return modes;
1350 }
1351
1352 static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
1353 {
1354         u32 modes = 0;
1355         int i;
1356
1357         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1358                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1359                         modes |= mlxsw_sp_port_link_mode[i].advertised;
1360         }
1361         return modes;
1362 }
1363
1364 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
1365                                             struct ethtool_cmd *cmd)
1366 {
1367         u32 speed = SPEED_UNKNOWN;
1368         u8 duplex = DUPLEX_UNKNOWN;
1369         int i;
1370
1371         if (!carrier_ok)
1372                 goto out;
1373
1374         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1375                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
1376                         speed = mlxsw_sp_port_link_mode[i].speed;
1377                         duplex = DUPLEX_FULL;
1378                         break;
1379                 }
1380         }
1381 out:
1382         ethtool_cmd_speed_set(cmd, speed);
1383         cmd->duplex = duplex;
1384 }
1385
1386 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
1387 {
1388         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1389                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1390                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1391                               MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1392                 return PORT_FIBRE;
1393
1394         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1395                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1396                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
1397                 return PORT_DA;
1398
1399         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1400                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1401                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1402                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
1403                 return PORT_NONE;
1404
1405         return PORT_OTHER;
1406 }
1407
/* ethtool .get_settings: query the PTYS register and translate its
 * capability/admin/operational protocol masks into ethtool fields.
 */
static int mlxsw_sp_port_get_settings(struct net_device *dev,
                                      struct ethtool_cmd *cmd)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char ptys_pl[MLXSW_REG_PTYS_LEN];
        u32 eth_proto_cap;
        u32 eth_proto_admin;
        u32 eth_proto_oper;
        int err;

        mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
        if (err) {
                netdev_err(dev, "Failed to get proto");
                return err;
        }
        mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
                              &eth_proto_admin, &eth_proto_oper);

        /* Capabilities come from what the hardware can do, advertising
         * from what is administratively enabled.
         */
        cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
                         mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
                         SUPPORTED_Pause | SUPPORTED_Asym_Pause;
        cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
        mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
                                        eth_proto_oper, cmd);

        /* With no operational link (e.g. carrier down), fall back to the
         * capability mask for connector-type classification.
         */
        eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
        cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
        cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);

        cmd->transceiver = XCVR_INTERNAL;
        return 0;
}
1442
1443 static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
1444 {
1445         u32 ptys_proto = 0;
1446         int i;
1447
1448         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1449                 if (advertising & mlxsw_sp_port_link_mode[i].advertised)
1450                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1451         }
1452         return ptys_proto;
1453 }
1454
1455 static u32 mlxsw_sp_to_ptys_speed(u32 speed)
1456 {
1457         u32 ptys_proto = 0;
1458         int i;
1459
1460         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1461                 if (speed == mlxsw_sp_port_link_mode[i].speed)
1462                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1463         }
1464         return ptys_proto;
1465 }
1466
1467 static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
1468 {
1469         u32 ptys_proto = 0;
1470         int i;
1471
1472         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1473                 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
1474                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1475         }
1476         return ptys_proto;
1477 }
1478
/* ethtool .set_settings: compute the requested PTYS protocol mask (from
 * the advertised modes when autoneg is on, otherwise from the forced
 * speed), program it, and toggle the port down/up so the new setting
 * takes effect -- but only if the port is currently operationally up.
 */
static int mlxsw_sp_port_set_settings(struct net_device *dev,
                                      struct ethtool_cmd *cmd)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char ptys_pl[MLXSW_REG_PTYS_LEN];
        u32 speed;
        u32 eth_proto_new;
        u32 eth_proto_cap;
        u32 eth_proto_admin;
        bool is_up;
        int err;

        speed = ethtool_cmd_speed(cmd);

        eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
                mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
                mlxsw_sp_to_ptys_speed(speed);

        mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
        if (err) {
                netdev_err(dev, "Failed to get proto");
                return err;
        }
        mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);

        /* Restrict the request to what the hardware can actually do. */
        eth_proto_new = eth_proto_new & eth_proto_cap;
        if (!eth_proto_new) {
                netdev_err(dev, "Not supported proto admin requested");
                return -EINVAL;
        }
        /* Nothing to do if the admin mask is already as requested. */
        if (eth_proto_new == eth_proto_admin)
                return 0;

        mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
        if (err) {
                netdev_err(dev, "Failed to set proto admin");
                return err;
        }

        err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
        if (err) {
                netdev_err(dev, "Failed to get oper status");
                return err;
        }
        if (!is_up)
                return 0;

        /* Bounce the port so the new protocol mask is renegotiated.
         * NOTE(review): if the second call fails the port is left
         * administratively down.
         */
        err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
        if (err) {
                netdev_err(dev, "Failed to set admin status");
                return err;
        }

        err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
        if (err) {
                netdev_err(dev, "Failed to set admin status");
                return err;
        }

        return 0;
}
1543
/* ethtool callbacks for a Spectrum port netdev. */
static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
        .get_drvinfo            = mlxsw_sp_port_get_drvinfo,
        .get_link               = ethtool_op_get_link,
        .get_pauseparam         = mlxsw_sp_port_get_pauseparam,
        .set_pauseparam         = mlxsw_sp_port_set_pauseparam,
        .get_strings            = mlxsw_sp_port_get_strings,
        .set_phys_id            = mlxsw_sp_port_set_phys_id,
        .get_ethtool_stats      = mlxsw_sp_port_get_stats,
        .get_sset_count         = mlxsw_sp_port_get_sset_count,
        .get_settings           = mlxsw_sp_port_get_settings,
        .set_settings           = mlxsw_sp_port_set_settings,
};
1556
1557 static int
1558 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
1559 {
1560         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1561         u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
1562         char ptys_pl[MLXSW_REG_PTYS_LEN];
1563         u32 eth_proto_admin;
1564
1565         eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
1566         mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port,
1567                             eth_proto_admin);
1568         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1569 }
1570
1571 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
1572                           enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
1573                           bool dwrr, u8 dwrr_weight)
1574 {
1575         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1576         char qeec_pl[MLXSW_REG_QEEC_LEN];
1577
1578         mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1579                             next_index);
1580         mlxsw_reg_qeec_de_set(qeec_pl, true);
1581         mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
1582         mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
1583         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1584 }
1585
/* Configure the maximum shaper rate of one ETS element.
 * @hr: hierarchy level of the element.
 * @index: element index within that level.
 * @next_index: index of the parent element in the next level up.
 * @maxrate: shaper rate; MLXSW_REG_QEEC_MAS_DIS disables the shaper.
 *
 * Returns 0 on success or a negative error from the register write.
 */
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	/* mase ("max shaper enable" — confirm) must be set for the rate
	 * field to take effect.
	 */
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
1599
/* Map a switch priority to a traffic class on the given port via the
 * QTCT register. Returns 0 on success or a negative error.
 */
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}
1610
/* Initialize the port's egress scheduling (ETS) hierarchy: link each
 * traffic class to its own subgroup, all subgroups to a single group,
 * disable all max shapers, and map every switch priority to TC 0.
 * Returns 0 on success or the first error encountered (no unwinding;
 * the caller tears the port down on failure).
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all member in the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
				    0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		/* TC i feeds subgroup i (next_index == i). */
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
					    false, 0);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that
	 * support it.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
1672
1673 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1674                                 bool split, u8 module, u8 width, u8 lane)
1675 {
1676         struct mlxsw_sp_port *mlxsw_sp_port;
1677         struct net_device *dev;
1678         size_t bytes;
1679         int err;
1680
1681         dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
1682         if (!dev)
1683                 return -ENOMEM;
1684         mlxsw_sp_port = netdev_priv(dev);
1685         mlxsw_sp_port->dev = dev;
1686         mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1687         mlxsw_sp_port->local_port = local_port;
1688         mlxsw_sp_port->split = split;
1689         mlxsw_sp_port->mapping.module = module;
1690         mlxsw_sp_port->mapping.width = width;
1691         mlxsw_sp_port->mapping.lane = lane;
1692         bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
1693         mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
1694         if (!mlxsw_sp_port->active_vlans) {
1695                 err = -ENOMEM;
1696                 goto err_port_active_vlans_alloc;
1697         }
1698         mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
1699         if (!mlxsw_sp_port->untagged_vlans) {
1700                 err = -ENOMEM;
1701                 goto err_port_untagged_vlans_alloc;
1702         }
1703         INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
1704
1705         mlxsw_sp_port->pcpu_stats =
1706                 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
1707         if (!mlxsw_sp_port->pcpu_stats) {
1708                 err = -ENOMEM;
1709                 goto err_alloc_stats;
1710         }
1711
1712         dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
1713         dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
1714
1715         err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
1716         if (err) {
1717                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
1718                         mlxsw_sp_port->local_port);
1719                 goto err_dev_addr_init;
1720         }
1721
1722         netif_carrier_off(dev);
1723
1724         dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
1725                          NETIF_F_HW_VLAN_CTAG_FILTER;
1726
1727         /* Each packet needs to have a Tx header (metadata) on top all other
1728          * headers.
1729          */
1730         dev->hard_header_len += MLXSW_TXHDR_LEN;
1731
1732         err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
1733         if (err) {
1734                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
1735                         mlxsw_sp_port->local_port);
1736                 goto err_port_system_port_mapping_set;
1737         }
1738
1739         err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
1740         if (err) {
1741                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
1742                         mlxsw_sp_port->local_port);
1743                 goto err_port_swid_set;
1744         }
1745
1746         err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
1747         if (err) {
1748                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
1749                         mlxsw_sp_port->local_port);
1750                 goto err_port_speed_by_width_set;
1751         }
1752
1753         err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
1754         if (err) {
1755                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
1756                         mlxsw_sp_port->local_port);
1757                 goto err_port_mtu_set;
1758         }
1759
1760         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1761         if (err)
1762                 goto err_port_admin_status_set;
1763
1764         err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
1765         if (err) {
1766                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
1767                         mlxsw_sp_port->local_port);
1768                 goto err_port_buffers_init;
1769         }
1770
1771         err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
1772         if (err) {
1773                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
1774                         mlxsw_sp_port->local_port);
1775                 goto err_port_ets_init;
1776         }
1777
1778         /* ETS and buffers must be initialized before DCB. */
1779         err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
1780         if (err) {
1781                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
1782                         mlxsw_sp_port->local_port);
1783                 goto err_port_dcb_init;
1784         }
1785
1786         mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
1787         err = register_netdev(dev);
1788         if (err) {
1789                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
1790                         mlxsw_sp_port->local_port);
1791                 goto err_register_netdev;
1792         }
1793
1794         err = mlxsw_core_port_init(mlxsw_sp->core, &mlxsw_sp_port->core_port,
1795                                    mlxsw_sp_port->local_port, dev,
1796                                    mlxsw_sp_port->split, module);
1797         if (err) {
1798                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
1799                         mlxsw_sp_port->local_port);
1800                 goto err_core_port_init;
1801         }
1802
1803         err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
1804         if (err)
1805                 goto err_port_vlan_init;
1806
1807         mlxsw_sp->ports[local_port] = mlxsw_sp_port;
1808         return 0;
1809
1810 err_port_vlan_init:
1811         mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
1812 err_core_port_init:
1813         unregister_netdev(dev);
1814 err_register_netdev:
1815 err_port_dcb_init:
1816 err_port_ets_init:
1817 err_port_buffers_init:
1818 err_port_admin_status_set:
1819 err_port_mtu_set:
1820 err_port_speed_by_width_set:
1821 err_port_swid_set:
1822 err_port_system_port_mapping_set:
1823 err_dev_addr_init:
1824         free_percpu(mlxsw_sp_port->pcpu_stats);
1825 err_alloc_stats:
1826         kfree(mlxsw_sp_port->untagged_vlans);
1827 err_port_untagged_vlans_alloc:
1828         kfree(mlxsw_sp_port->active_vlans);
1829 err_port_active_vlans_alloc:
1830         free_netdev(dev);
1831         return err;
1832 }
1833
/* Destroy any vPorts still hanging off the port. Called from
 * mlxsw_sp_port_remove() after the port netdev was unregistered, so only
 * non-VLAN-device vPorts are expected to remain; a surviving VLAN-device
 * vPort indicates a cleanup bug and triggers the WARN_ON.
 */
static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_port *mlxsw_sp_vport, *tmp;

	/* _safe iteration: mlxsw_sp_port_kill_vid() removes the vPort
	 * from the list we are walking.
	 */
	list_for_each_entry_safe(mlxsw_sp_vport, tmp,
				 &mlxsw_sp_port->vports_list, vport.list) {
		u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

		/* vPorts created for VLAN devices should already be gone
		 * by now, since we unregistered the port netdev.
		 */
		WARN_ON(is_vlan_dev(mlxsw_sp_vport->dev));
		mlxsw_sp_port_kill_vid(dev, 0, vid);
	}
}
1850
/* Tear down and free a single port. Safe to call for a non-existent
 * local port (no-op). Teardown order mirrors the reverse of
 * mlxsw_sp_port_create(); the ports[] slot is cleared first so no one
 * can look the port up while it is being destroyed.
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	if (!mlxsw_sp_port)
		return;
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	/* Must run after unregister_netdev() — see mlxsw_sp_port_vports_fini(). */
	mlxsw_sp_port_vports_fini(mlxsw_sp_port);
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	kfree(mlxsw_sp_port->untagged_vlans);
	kfree(mlxsw_sp_port->active_vlans);
	free_netdev(mlxsw_sp_port->dev);
}
1870
1871 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
1872 {
1873         int i;
1874
1875         for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
1876                 mlxsw_sp_port_remove(mlxsw_sp, i);
1877         kfree(mlxsw_sp->ports);
1878 }
1879
1880 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
1881 {
1882         u8 module, width, lane;
1883         size_t alloc_size;
1884         int i;
1885         int err;
1886
1887         alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
1888         mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
1889         if (!mlxsw_sp->ports)
1890                 return -ENOMEM;
1891
1892         for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
1893                 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
1894                                                     &width, &lane);
1895                 if (err)
1896                         goto err_port_module_info_get;
1897                 if (!width)
1898                         continue;
1899                 mlxsw_sp->port_to_module[i] = module;
1900                 err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width,
1901                                            lane);
1902                 if (err)
1903                         goto err_port_create;
1904         }
1905         return 0;
1906
1907 err_port_create:
1908 err_port_module_info_get:
1909         for (i--; i >= 1; i--)
1910                 mlxsw_sp_port_remove(mlxsw_sp, i);
1911         kfree(mlxsw_sp->ports);
1912         return err;
1913 }
1914
1915 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
1916 {
1917         u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
1918
1919         return local_port - offset;
1920 }
1921
/* Create @count split ports on top of @base_port, each using an equal
 * share of the module's lanes. The three phases (module map, SWID set,
 * port create) each run over all ports before the next phase starts.
 * On failure the completed phases are unwound in reverse; the `i = count`
 * resets re-arm the loop counter so the fully-completed earlier phases
 * are unwound for every port, not just the ones before the failure.
 */
static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
				      u8 module, unsigned int count)
{
	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
	int err, i;

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
					       width, i * width);
		if (err)
			goto err_port_module_map;
	}

	for (i = 0; i < count; i++) {
		err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
		if (err)
			goto err_port_swid_set;
	}

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
					   module, width, i * width);
		if (err)
			goto err_port_create;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
	i = count;
err_port_swid_set:
	for (i--; i >= 0; i--)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
					 MLXSW_PORT_SWID_DISABLED_PORT);
	i = count;
err_port_module_map:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
	return err;
}
1964
/* Re-create the original full-width ports after a split is undone.
 * Return values of the map/create calls are ignored — this runs on the
 * unsplit/rollback path where there is no way to propagate an error;
 * presumably a best-effort recovery (failures would only be visible as
 * missing ports).
 */
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port, unsigned int count)
{
	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
	int i;

	/* Split by four means we need to re-create two ports, otherwise
	 * only one.
	 */
	count = count / 2;

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
					 0);
	}

	for (i = 0; i < count; i++)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
				     width, 0);
	}
}
1995
/* devlink port-split handler: split @local_port into @count (2 or 4)
 * ports. The original port(s) are removed first; on failure to create
 * the split ports, the original configuration is restored.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 module, cur_width, base_port;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	module = mlxsw_sp_port->mapping.module;
	cur_width = mlxsw_sp_port->mapping.width;

	if (count != 2 && count != 4) {
		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
		return -EINVAL;
	}

	/* Only a full-width port can be split further. */
	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		return -EINVAL;
	}

	/* Make sure we have enough slave (even) ports for the split. */
	if (count == 2) {
		base_port = local_port;
		if (mlxsw_sp->ports[base_port + 1]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	} else {
		/* Split by 4 consumes the whole cluster. */
		base_port = mlxsw_sp_cluster_base_port_get(local_port);
		if (mlxsw_sp->ports[base_port + 1] ||
		    mlxsw_sp->ports[base_port + 3]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	}

	for (i = 0; i < count; i++)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	/* Re-create the ports that were removed above. */
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
	return err;
}
2056
/* devlink port-unsplit handler: remove the split ports derived from
 * @local_port and re-create the original full-width port(s).
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 cur_width, base_port;
	unsigned int count;
	int i;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
		return -EINVAL;
	}

	/* Infer the original split factor from the current lane width:
	 * width 1 means a split-by-4 port, otherwise split-by-2.
	 */
	cur_width = mlxsw_sp_port->mapping.width;
	count = cur_width == 1 ? 4 : 2;

	base_port = mlxsw_sp_cluster_base_port_get(local_port);

	/* Determine which ports to remove. */
	if (count == 2 && local_port >= base_port + 2)
		base_port = base_port + 2;

	for (i = 0; i < count; i++)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);

	return 0;
}
2093
2094 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2095                                      char *pude_pl, void *priv)
2096 {
2097         struct mlxsw_sp *mlxsw_sp = priv;
2098         struct mlxsw_sp_port *mlxsw_sp_port;
2099         enum mlxsw_reg_pude_oper_status status;
2100         u8 local_port;
2101
2102         local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2103         mlxsw_sp_port = mlxsw_sp->ports[local_port];
2104         if (!mlxsw_sp_port) {
2105                 dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n",
2106                          local_port);
2107                 return;
2108         }
2109
2110         status = mlxsw_reg_pude_oper_status_get(pude_pl);
2111         if (status == MLXSW_PORT_OPER_STATUS_UP) {
2112                 netdev_info(mlxsw_sp_port->dev, "link up\n");
2113                 netif_carrier_on(mlxsw_sp_port->dev);
2114         } else {
2115                 netdev_info(mlxsw_sp_port->dev, "link down\n");
2116                 netif_carrier_off(mlxsw_sp_port->dev);
2117         }
2118 }
2119
/* Listener for Port Up/Down Events, registered with the core in
 * mlxsw_sp_event_register().
 */
static struct mlxsw_event_listener mlxsw_sp_pude_event = {
	.func = mlxsw_sp_pude_event_func,
	.trap_id = MLXSW_TRAP_ID_PUDE,
};
2124
2125 static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
2126                                    enum mlxsw_event_trap_id trap_id)
2127 {
2128         struct mlxsw_event_listener *el;
2129         char hpkt_pl[MLXSW_REG_HPKT_LEN];
2130         int err;
2131
2132         switch (trap_id) {
2133         case MLXSW_TRAP_ID_PUDE:
2134                 el = &mlxsw_sp_pude_event;
2135                 break;
2136         }
2137         err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
2138         if (err)
2139                 return err;
2140
2141         mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
2142         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2143         if (err)
2144                 goto err_event_trap_set;
2145
2146         return 0;
2147
2148 err_event_trap_set:
2149         mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
2150         return err;
2151 }
2152
2153 static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
2154                                       enum mlxsw_event_trap_id trap_id)
2155 {
2156         struct mlxsw_event_listener *el;
2157
2158         switch (trap_id) {
2159         case MLXSW_TRAP_ID_PUDE:
2160                 el = &mlxsw_sp_pude_event;
2161                 break;
2162         }
2163         mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
2164 }
2165
/* Receive handler for trapped packets: attribute the skb to the ingress
 * port's netdev, account it in the per-CPU stats and hand it to the
 * network stack.
 */
static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
				      void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		/* Ratelimited: a misbehaving device could flood us here. */
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}
2190
2191 static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
2192         {
2193                 .func = mlxsw_sp_rx_listener_func,
2194                 .local_port = MLXSW_PORT_DONT_CARE,
2195                 .trap_id = MLXSW_TRAP_ID_FDB_MC,
2196         },
2197         /* Traps for specific L2 packet types, not trapped as FDB MC */
2198         {
2199                 .func = mlxsw_sp_rx_listener_func,
2200                 .local_port = MLXSW_PORT_DONT_CARE,
2201                 .trap_id = MLXSW_TRAP_ID_STP,
2202         },
2203         {
2204                 .func = mlxsw_sp_rx_listener_func,
2205                 .local_port = MLXSW_PORT_DONT_CARE,
2206                 .trap_id = MLXSW_TRAP_ID_LACP,
2207         },
2208         {
2209                 .func = mlxsw_sp_rx_listener_func,
2210                 .local_port = MLXSW_PORT_DONT_CARE,
2211                 .trap_id = MLXSW_TRAP_ID_EAPOL,
2212         },
2213         {
2214                 .func = mlxsw_sp_rx_listener_func,
2215                 .local_port = MLXSW_PORT_DONT_CARE,
2216                 .trap_id = MLXSW_TRAP_ID_LLDP,
2217         },
2218         {
2219                 .func = mlxsw_sp_rx_listener_func,
2220                 .local_port = MLXSW_PORT_DONT_CARE,
2221                 .trap_id = MLXSW_TRAP_ID_MMRP,
2222         },
2223         {
2224                 .func = mlxsw_sp_rx_listener_func,
2225                 .local_port = MLXSW_PORT_DONT_CARE,
2226                 .trap_id = MLXSW_TRAP_ID_MVRP,
2227         },
2228         {
2229                 .func = mlxsw_sp_rx_listener_func,
2230                 .local_port = MLXSW_PORT_DONT_CARE,
2231                 .trap_id = MLXSW_TRAP_ID_RPVST,
2232         },
2233         {
2234                 .func = mlxsw_sp_rx_listener_func,
2235                 .local_port = MLXSW_PORT_DONT_CARE,
2236                 .trap_id = MLXSW_TRAP_ID_DHCP,
2237         },
2238         {
2239                 .func = mlxsw_sp_rx_listener_func,
2240                 .local_port = MLXSW_PORT_DONT_CARE,
2241                 .trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
2242         },
2243         {
2244                 .func = mlxsw_sp_rx_listener_func,
2245                 .local_port = MLXSW_PORT_DONT_CARE,
2246                 .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
2247         },
2248         {
2249                 .func = mlxsw_sp_rx_listener_func,
2250                 .local_port = MLXSW_PORT_DONT_CARE,
2251                 .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
2252         },
2253         {
2254                 .func = mlxsw_sp_rx_listener_func,
2255                 .local_port = MLXSW_PORT_DONT_CARE,
2256                 .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
2257         },
2258         {
2259                 .func = mlxsw_sp_rx_listener_func,
2260                 .local_port = MLXSW_PORT_DONT_CARE,
2261                 .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
2262         },
2263 };
2264
/* Set up the RX and CTRL trap groups, then register every RX listener
 * and configure its trap to be delivered to the CPU. On failure all
 * previously configured traps are restored to FORWARD and their
 * listeners unregistered.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
						      &mlxsw_sp_rx_listener[i],
						      mlxsw_sp);
		if (err)
			goto err_rx_listener_register;

		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
				    mlxsw_sp_rx_listener[i].trap_id);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
		if (err)
			goto err_rx_trap_set;
	}
	return 0;

err_rx_trap_set:
	/* Listener i was registered but its trap write failed; unregister
	 * it here, then fall through to unwind entries [0, i).
	 */
	mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
					  &mlxsw_sp_rx_listener[i],
					  mlxsw_sp);
err_rx_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
	return err;
}
2313
2314 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
2315 {
2316         char hpkt_pl[MLXSW_REG_HPKT_LEN];
2317         int i;
2318
2319         for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
2320                 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
2321                                     mlxsw_sp_rx_listener[i].trap_id);
2322                 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2323
2324                 mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
2325                                                   &mlxsw_sp_rx_listener[i],
2326                                                   mlxsw_sp);
2327         }
2328 }
2329
2330 static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
2331                                  enum mlxsw_reg_sfgc_type type,
2332                                  enum mlxsw_reg_sfgc_bridge_type bridge_type)
2333 {
2334         enum mlxsw_flood_table_type table_type;
2335         enum mlxsw_sp_flood_table flood_table;
2336         char sfgc_pl[MLXSW_REG_SFGC_LEN];
2337
2338         if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
2339                 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
2340         else
2341                 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
2342
2343         if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
2344                 flood_table = MLXSW_SP_FLOOD_TABLE_UC;
2345         else
2346                 flood_table = MLXSW_SP_FLOOD_TABLE_BM;
2347
2348         mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
2349                             flood_table);
2350         return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
2351 }
2352
2353 static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
2354 {
2355         int type, err;
2356
2357         for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
2358                 if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
2359                         continue;
2360
2361                 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
2362                                             MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
2363                 if (err)
2364                         return err;
2365
2366                 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
2367                                             MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
2368                 if (err)
2369                         return err;
2370         }
2371
2372         return 0;
2373 }
2374
2375 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
2376 {
2377         char slcr_pl[MLXSW_REG_SLCR_LEN];
2378
2379         mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
2380                                      MLXSW_REG_SLCR_LAG_HASH_DMAC |
2381                                      MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
2382                                      MLXSW_REG_SLCR_LAG_HASH_VLANID |
2383                                      MLXSW_REG_SLCR_LAG_HASH_SIP |
2384                                      MLXSW_REG_SLCR_LAG_HASH_DIP |
2385                                      MLXSW_REG_SLCR_LAG_HASH_SPORT |
2386                                      MLXSW_REG_SLCR_LAG_HASH_DPORT |
2387                                      MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
2388         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
2389 }
2390
/* Main init flow for a Spectrum ASIC instance, invoked by the mlxsw
 * core once the bus is up.  Order matters: ports are created first,
 * then PUDE events, traps, flood tables, shared buffers, LAG and,
 * last, switchdev support.
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;
	INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		return err;
	}

	err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
		goto err_event_register;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
		goto err_rx_listener_register;
	}

	err = mlxsw_sp_flood_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	return 0;

	/* Unwind in reverse order.  Labels with no statement of their
	 * own (err_switchdev_init, err_lag_init, err_buffers_init,
	 * err_flood_init) fall through to the next teardown step
	 * because the corresponding init has no dedicated fini.
	 */
err_switchdev_init:
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
err_flood_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_rx_listener_register:
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
err_event_register:
	mlxsw_sp_ports_remove(mlxsw_sp);
	return err;
}
2465
/* Teardown, in exact reverse order of mlxsw_sp_init(). */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	mlxsw_sp_ports_remove(mlxsw_sp);
}
2476
/* Static device provisioning profile handed to the core at init time.
 * Each "used_*" flag tells firmware to apply the accompanying value,
 * including explicit zeros for resources this driver does not use.
 */
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels         = 1,
	.max_vepa_channels              = 0,
	.used_max_lag                   = 1,
	.max_lag                        = MLXSW_SP_LAG_MAX,
	.used_max_port_per_lag          = 1,
	.max_port_per_lag               = MLXSW_SP_PORT_PER_LAG_MAX,
	.used_max_mid                   = 1,
	.max_mid                        = MLXSW_SP_MID_MAX,
	.used_max_pgt                   = 1,
	.max_pgt                        = 0,
	.used_max_system_port           = 1,
	.max_system_port                = 64,
	.used_max_vlan_groups           = 1,
	.max_vlan_groups                = 127,
	.used_max_regions               = 1,
	.max_regions                    = 400,
	.used_flood_tables              = 1,
	.used_flood_mode                = 1,
	/* NOTE(review): flood_mode 3 appears to select per-FID flood
	 * table lookup — confirm against the PGT/SFGC documentation.
	 */
	.flood_mode                     = 3,
	/* Two per-VID (FID offset) flood tables: UC and BC/MC. */
	.max_fid_offset_flood_tables    = 2,
	.fid_offset_flood_table_size    = VLAN_N_VID - 1,
	/* Two per-vFID flood tables for the VLAN-unaware bridges. */
	.max_fid_flood_tables           = 2,
	.fid_flood_table_size           = MLXSW_SP_VFID_MAX,
	.used_max_ib_mc                 = 1,
	.max_ib_mc                      = 0,
	.used_max_pkey                  = 1,
	.max_pkey                       = 0,
	.swid_config                    = {
		{
			.used_type      = 1,
			.type           = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
2512
/* Spectrum driver registration with the mlxsw core: lifecycle hooks,
 * port splitting, shared-buffer (devlink sb) operations and the Tx
 * header constructor used on the CPU port.
 */
static struct mlxsw_driver mlxsw_sp_driver = {
	.kind                           = MLXSW_DEVICE_KIND_SPECTRUM,
	.owner                          = THIS_MODULE,
	.priv_size                      = sizeof(struct mlxsw_sp),
	.init                           = mlxsw_sp_init,
	.fini                           = mlxsw_sp_fini,
	.port_split                     = mlxsw_sp_port_split,
	.port_unsplit                   = mlxsw_sp_port_unsplit,
	.sb_pool_get                    = mlxsw_sp_sb_pool_get,
	.sb_pool_set                    = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get               = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set               = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get            = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set            = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot                = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear               = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get           = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get        = mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct                = mlxsw_sp_txhdr_construct,
	.txhdr_len                      = MLXSW_TXHDR_LEN,
	.profile                        = &mlxsw_sp_config_profile,
};
2535
/* Flush all FDB entries pointing to a given local port. */
static int
mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT);
	mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
2547
/* Flush FDB entries pointing to a given local port, restricted to a
 * single FID.
 */
static int
mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
						mlxsw_sp_port->local_port);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
2562
/* Flush all FDB entries pointing to the LAG this port is member of. */
static int
mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG);
	mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
2574
/* Flush FDB entries pointing to the port's LAG, restricted to a
 * single FID.
 */
static int
mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
2588
2589 static int
2590 __mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port)
2591 {
2592         int err, last_err = 0;
2593         u16 vid;
2594
2595         for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
2596                 err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid);
2597                 if (err)
2598                         last_err = err;
2599         }
2600
2601         return last_err;
2602 }
2603
2604 static int
2605 __mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port)
2606 {
2607         int err, last_err = 0;
2608         u16 vid;
2609
2610         for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
2611                 err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid);
2612                 if (err)
2613                         last_err = err;
2614         }
2615
2616         return last_err;
2617 }
2618
2619 static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port)
2620 {
2621         if (!list_empty(&mlxsw_sp_port->vports_list))
2622                 if (mlxsw_sp_port->lagged)
2623                         return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port);
2624                 else
2625                         return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port);
2626         else
2627                 if (mlxsw_sp_port->lagged)
2628                         return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port);
2629                 else
2630                         return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port);
2631 }
2632
/* Flush the FDB entries of a vPort.  The vPort's vFID is first
 * translated to the FID it occupies in the device's FID space, then
 * a per-{port,FID} (or per-{LAG,FID}) flush is issued.
 */
static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport);
	u16 fid = mlxsw_sp_vfid_to_fid(vfid);

	if (mlxsw_sp_vport->lagged)
		return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport,
							     fid);
	else
		return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid);
}
2644
/* True if the netdev is one of our Spectrum port netdevs, identified
 * by its netdev_ops pointer.
 */
static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}
2649
/* Only one VLAN-aware bridge is supported per device: allow br_dev if
 * none is set yet, or if it is the one already in use.
 */
static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
					 struct net_device *br_dev)
{
	return !mlxsw_sp->master_bridge.dev ||
	       mlxsw_sp->master_bridge.dev == br_dev;
}
2656
/* Take a reference on the device's single master bridge. */
static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	mlxsw_sp->master_bridge.dev = br_dev;
	mlxsw_sp->master_bridge.ref_count++;
}
2663
/* Drop a reference on the master bridge; forget it on last put so a
 * different bridge may be offloaded afterwards.
 */
static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
{
	if (--mlxsw_sp->master_bridge.ref_count == 0)
		mlxsw_sp->master_bridge.dev = NULL;
}
2669
/* Enslave a port to the (single) master bridge: drop the implicit
 * VLAN 1 interface and enable learning / unknown-unicast flooding.
 */
static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct net_device *br_dev)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* When port is not bridged untagged packets are tagged with
	 * PVID=VID=1, thereby creating an implicit VLAN interface in
	 * the device. Remove it and let bridge code take care of its
	 * own VLANs.
	 */
	err = mlxsw_sp_port_kill_vid(dev, 0, 1);
	if (err)
		return err;

	mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev);

	mlxsw_sp_port->learning = 1;
	mlxsw_sp_port->learning_sync = 1;
	mlxsw_sp_port->uc_flood = 1;
	mlxsw_sp_port->bridged = 1;

	return 0;
}
2694
/* Release a port from the master bridge.  flush_fdb is false when the
 * caller (e.g. LAG teardown) has already flushed, or will flush, the
 * relevant FDB entries itself.
 */
static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				       bool flush_fdb)
{
	struct net_device *dev = mlxsw_sp_port->dev;

	if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port))
		netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");

	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);

	mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp);

	mlxsw_sp_port->learning = 0;
	mlxsw_sp_port->learning_sync = 0;
	mlxsw_sp_port->uc_flood = 0;
	mlxsw_sp_port->bridged = 0;

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	mlxsw_sp_port_add_vid(dev, 0, 1);
}
2717
/* Create a LAG record in the device for the given LAG ID. */
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
2725
/* Destroy the device's LAG record for the given LAG ID. */
static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
2733
/* Add the port to the LAG's collector at the given member index. */
static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
2744
/* Remove the port from the LAG's collector. */
static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
2755
/* Enable collection (Rx) on the port within the LAG. */
static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
2766
/* Disable collection (Rx) on the port within the LAG. */
static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
2777
2778 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
2779                                   struct net_device *lag_dev,
2780                                   u16 *p_lag_id)
2781 {
2782         struct mlxsw_sp_upper *lag;
2783         int free_lag_id = -1;
2784         int i;
2785
2786         for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
2787                 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
2788                 if (lag->ref_count) {
2789                         if (lag->dev == lag_dev) {
2790                                 *p_lag_id = i;
2791                                 return 0;
2792                         }
2793                 } else if (free_lag_id < 0) {
2794                         free_lag_id = i;
2795                 }
2796         }
2797         if (free_lag_id < 0)
2798                 return -EBUSY;
2799         *p_lag_id = free_lag_id;
2800         return 0;
2801 }
2802
2803 static bool
2804 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
2805                           struct net_device *lag_dev,
2806                           struct netdev_lag_upper_info *lag_upper_info)
2807 {
2808         u16 lag_id;
2809
2810         if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
2811                 return false;
2812         if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
2813                 return false;
2814         return true;
2815 }
2816
2817 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
2818                                        u16 lag_id, u8 *p_port_index)
2819 {
2820         int i;
2821
2822         for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
2823                 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
2824                         *p_port_index = i;
2825                         return 0;
2826                 }
2827         }
2828         return -EBUSY;
2829 }
2830
/* Enslave a port to a LAG device: create the device LAG record on
 * first member, add the port to the collector, enable collection and
 * record the lag_id <-> local_port mapping for Rx demux.
 */
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		/* First member: instantiate the LAG in the device. */
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;
	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
	if (err)
		goto err_col_port_enable;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;
	return 0;

err_col_port_enable:
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	/* ref_count is still zero if we created the LAG above. */
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}
2875
2876 static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
2877                                         struct net_device *br_dev,
2878                                         bool flush_fdb);
2879
/* Release a port from a LAG, tearing down any bridge memberships of
 * the port and its vPorts, and destroying the device LAG record when
 * the last member leaves.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_upper *lag;
	u16 lag_id = mlxsw_sp_port->lag_id;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* In case we leave a LAG device that has bridges built on top,
	 * then their teardown sequence is never issued and we need to
	 * invoke the necessary cleanup routines ourselves.
	 */
	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct net_device *br_dev;

		if (!mlxsw_sp_vport->bridged)
			continue;

		br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
		/* flush_fdb is false: the per-LAG flush below covers it. */
		mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false);
	}

	if (mlxsw_sp_port->bridged) {
		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false);
	}

	if (lag->ref_count == 1) {
		/* Last member: flush the LAG's FDB entries and destroy
		 * the LAG record in the device.
		 */
		if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port))
			netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	}

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;
}
2927
/* Add the port to the LAG's distributor (egress hashing). */
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
2938
/* Remove the port from the LAG's distributor. */
static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
2949
2950 static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
2951                                        bool lag_tx_enabled)
2952 {
2953         if (lag_tx_enabled)
2954                 return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
2955                                                   mlxsw_sp_port->lag_id);
2956         else
2957                 return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
2958                                                      mlxsw_sp_port->lag_id);
2959 }
2960
/* React to a CHANGELOWERSTATE notification for a LAG member port. */
static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}
2966
/* Link a VLAN upper device to the matching vPort, so the vPort's
 * netdev pointer reflects the VLAN device instead of the real port.
 */
static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	/* The vPort should have been created by the earlier
	 * ndo_vlan_rx_add_vid call; missing it is a driver bug.
	 */
	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return -EINVAL;

	mlxsw_sp_vport->dev = vlan_dev;

	return 0;
}
2981
/* Unlink a VLAN upper device from its vPort, restoring the vPort's
 * netdev pointer to the underlying port device.
 */
static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	/* When removing a VLAN device while still bridged we should first
	 * remove it from the bridge, as we receive the bridge's notification
	 * when the vPort is already gone.
	 */
	if (mlxsw_sp_vport->bridged) {
		struct net_device *br_dev;

		br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
		mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true);
	}

	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
}
3005
3006 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
3007                                                unsigned long event, void *ptr)
3008 {
3009         struct netdev_notifier_changeupper_info *info;
3010         struct mlxsw_sp_port *mlxsw_sp_port;
3011         struct net_device *upper_dev;
3012         struct mlxsw_sp *mlxsw_sp;
3013         int err = 0;
3014
3015         mlxsw_sp_port = netdev_priv(dev);
3016         mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3017         info = ptr;
3018
3019         switch (event) {
3020         case NETDEV_PRECHANGEUPPER:
3021                 upper_dev = info->upper_dev;
3022                 if (!is_vlan_dev(upper_dev) &&
3023                     !netif_is_lag_master(upper_dev) &&
3024                     !netif_is_bridge_master(upper_dev))
3025                         return -EINVAL;
3026                 if (!info->linking)
3027                         break;
3028                 /* HW limitation forbids to put ports to multiple bridges. */
3029                 if (netif_is_bridge_master(upper_dev) &&
3030                     !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
3031                         return -EINVAL;
3032                 if (netif_is_lag_master(upper_dev) &&
3033                     !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
3034                                                info->upper_info))
3035                         return -EINVAL;
3036                 if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
3037                         return -EINVAL;
3038                 if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
3039                     !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
3040                         return -EINVAL;
3041                 break;
3042         case NETDEV_CHANGEUPPER:
3043                 upper_dev = info->upper_dev;
3044                 if (is_vlan_dev(upper_dev)) {
3045                         if (info->linking)
3046                                 err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
3047                                                               upper_dev);
3048                         else
3049                                  mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
3050                                                            upper_dev);
3051                 } else if (netif_is_bridge_master(upper_dev)) {
3052                         if (info->linking)
3053                                 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
3054                                                                 upper_dev);
3055                         else
3056                                 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, true);
3057                 } else if (netif_is_lag_master(upper_dev)) {
3058                         if (info->linking)
3059                                 err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
3060                                                              upper_dev);
3061                         else
3062                                 mlxsw_sp_port_lag_leave(mlxsw_sp_port,
3063                                                         upper_dev);
3064                 } else {
3065                         err = -EINVAL;
3066                         WARN_ON(1);
3067                 }
3068                 break;
3069         }
3070
3071         return err;
3072 }
3073
/* Handle CHANGELOWERSTATE for one of our ports: propagate the LAG
 * lower-state (tx_enabled) change to the device.  Always returns 0 —
 * a failure here is only logged, since the notifier cannot veto it.
 */
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}
3097
3098 static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
3099                                          unsigned long event, void *ptr)
3100 {
3101         switch (event) {
3102         case NETDEV_PRECHANGEUPPER:
3103         case NETDEV_CHANGEUPPER:
3104                 return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
3105         case NETDEV_CHANGELOWERSTATE:
3106                 return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
3107         }
3108
3109         return 0;
3110 }
3111
/* Fan a notification on a LAG device out to each of its lower devices
 * that belong to us, stopping at the first error.
 */
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}
3129
3130 static struct mlxsw_sp_vfid *
3131 mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp,
3132                       const struct net_device *br_dev)
3133 {
3134         struct mlxsw_sp_vfid *vfid;
3135
3136         list_for_each_entry(vfid, &mlxsw_sp->br_vfids.list, list) {
3137                 if (vfid->br_dev == br_dev)
3138                         return vfid;
3139         }
3140
3141         return NULL;
3142 }
3143
/* Bridge vFIDs live above the per-port vFIDs; convert a global vFID
 * into a bridge-local index.
 */
static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid)
{
	return vfid - MLXSW_SP_VFID_PORT_MAX;
}
3148
/* Inverse of mlxsw_sp_vfid_to_br_vfid(): bridge-local index back to a
 * global vFID.
 */
static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid)
{
	return MLXSW_SP_VFID_PORT_MAX + br_vfid;
}
3153
/* First unused bridge vFID index; returns MLXSW_SP_VFID_BR_MAX when
 * the bitmap is full.
 */
static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->br_vfids.mapped,
				   MLXSW_SP_VFID_BR_MAX);
}
3159
3160 static struct mlxsw_sp_vfid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp,
3161                                                      struct net_device *br_dev)
3162 {
3163         struct device *dev = mlxsw_sp->bus_info->dev;
3164         struct mlxsw_sp_vfid *f;
3165         u16 vfid, fid;
3166         int err;
3167
3168         vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp));
3169         if (vfid == MLXSW_SP_VFID_MAX) {
3170                 dev_err(dev, "No available vFIDs\n");
3171                 return ERR_PTR(-ERANGE);
3172         }
3173
3174         fid = mlxsw_sp_vfid_to_fid(vfid);
3175         err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
3176         if (err) {
3177                 dev_err(dev, "Failed to create FID=%d\n", fid);
3178                 return ERR_PTR(err);
3179         }
3180
3181         f = kzalloc(sizeof(*f), GFP_KERNEL);
3182         if (!f)
3183                 goto err_allocate_vfid;
3184
3185         f->vfid = vfid;
3186         f->br_dev = br_dev;
3187
3188         list_add(&f->list, &mlxsw_sp->br_vfids.list);
3189         set_bit(mlxsw_sp_vfid_to_br_vfid(vfid), mlxsw_sp->br_vfids.mapped);
3190
3191         return f;
3192
3193 err_allocate_vfid:
3194         mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
3195         return ERR_PTR(-ENOMEM);
3196 }
3197
3198 static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
3199                                      struct mlxsw_sp_vfid *vfid)
3200 {
3201         u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid->vfid);
3202         u16 fid = mlxsw_sp_vfid_to_fid(vfid->vfid);
3203
3204         clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped);
3205         list_del(&vfid->list);
3206
3207         mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
3208
3209         kfree(vfid);
3210 }
3211
3212 static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
3213                                         struct net_device *br_dev,
3214                                         bool flush_fdb)
3215 {
3216         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
3217         u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
3218         struct net_device *dev = mlxsw_sp_vport->dev;
3219         struct mlxsw_sp_vfid *vfid, *new_vfid;
3220         u16 fid, new_fid;
3221         int err;
3222
3223         vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
3224         if (WARN_ON(!vfid))
3225                 return;
3226
3227         /* We need a vFID to go back to after leaving the bridge's vFID. */
3228         new_vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
3229         if (!new_vfid) {
3230                 new_vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
3231                 if (IS_ERR(new_vfid)) {
3232                         netdev_err(dev, "Failed to create vFID for VID=%d\n",
3233                                    vid);
3234                         return;
3235                 }
3236         }
3237
3238         /* Invalidate existing {Port, VID} to vFID mapping and create a new
3239          * one for the new vFID.
3240          */
3241         fid = mlxsw_sp_vfid_to_fid(vfid->vfid);
3242         err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, fid, false);
3243         if (err) {
3244                 netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
3245                            vfid->vfid);
3246                 goto err_vport_fid_unmap;
3247         }
3248
3249         new_fid = mlxsw_sp_vfid_to_fid(new_vfid->vfid);
3250         err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, new_fid, true);
3251         if (err) {
3252                 netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
3253                            new_vfid->vfid);
3254                 goto err_vport_fid_map;
3255         }
3256
3257         err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
3258         if (err) {
3259                 netdev_err(dev, "Failed to disable learning\n");
3260                 goto err_port_vid_learning_set;
3261         }
3262
3263         err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false);
3264         if (err) {
3265                 netdev_err(dev, "Failed clear to clear flooding\n");
3266                 goto err_vport_flood_set;
3267         }
3268
3269         err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
3270                                           MLXSW_REG_SPMS_STATE_FORWARDING);
3271         if (err) {
3272                 netdev_err(dev, "Failed to set STP state\n");
3273                 goto err_port_stp_state_set;
3274         }
3275
3276         if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport))
3277                 netdev_err(dev, "Failed to flush FDB\n");
3278
3279         /* Switch between the vFIDs and destroy the old one if needed. */
3280         new_vfid->nr_vports++;
3281         mlxsw_sp_vport->vport.vfid = new_vfid;
3282         vfid->nr_vports--;
3283         if (!vfid->nr_vports)
3284                 mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
3285
3286         mlxsw_sp_vport->learning = 0;
3287         mlxsw_sp_vport->learning_sync = 0;
3288         mlxsw_sp_vport->uc_flood = 0;
3289         mlxsw_sp_vport->bridged = 0;
3290
3291         return;
3292
3293 err_port_stp_state_set:
3294 err_vport_flood_set:
3295 err_port_vid_learning_set:
3296 err_vport_fid_map:
3297 err_vport_fid_unmap:
3298         /* Rollback vFID only if new. */
3299         if (!new_vfid->nr_vports)
3300                 mlxsw_sp_vfid_destroy(mlxsw_sp, new_vfid);
3301 }
3302
/* Attach a vPort to @br_dev: find or create the bridge's vFID, enable
 * flooding and learning for the vPort, and remap its {Port, VID} pair
 * from the old per-VID vFID to the bridge vFID. On success the old vFID
 * is destroyed if this was its last vPort. Returns 0 or a negative errno;
 * on failure each completed hardware step is undone in reverse order by
 * the goto-label chain, and a bridge vFID created here is destroyed.
 */
static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *br_dev)
{
	struct mlxsw_sp_vfid *old_vfid = mlxsw_sp_vport->vport.vfid;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	struct mlxsw_sp_vfid *vfid;
	u16 fid, old_fid;
	int err;

	/* Several vPorts may share one bridge vFID; create it only once. */
	vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
	if (!vfid) {
		vfid = mlxsw_sp_br_vfid_create(mlxsw_sp, br_dev);
		if (IS_ERR(vfid)) {
			netdev_err(dev, "Failed to create bridge vFID\n");
			return PTR_ERR(vfid);
		}
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, true);
	if (err) {
		netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
			   vfid->vfid);
		goto err_port_flood_set;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning\n");
		goto err_port_vid_learning_set;
	}

	/* We need to invalidate existing {Port, VID} to vFID mapping and
	 * create a new one for the bridge's vFID.
	 */
	old_fid = mlxsw_sp_vfid_to_fid(old_vfid->vfid);
	err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, old_fid, false);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
			   old_vfid->vfid);
		goto err_vport_fid_unmap;
	}

	fid = mlxsw_sp_vfid_to_fid(vfid->vfid);
	err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, fid, true);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
			   vfid->vfid);
		goto err_vport_fid_map;
	}

	/* Switch between the vFIDs and destroy the old one if needed. */
	vfid->nr_vports++;
	mlxsw_sp_vport->vport.vfid = vfid;
	old_vfid->nr_vports--;
	if (!old_vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp, old_vfid);

	mlxsw_sp_vport->learning = 1;
	mlxsw_sp_vport->learning_sync = 1;
	mlxsw_sp_vport->uc_flood = 1;
	mlxsw_sp_vport->bridged = 1;

	return 0;

/* Unwind in reverse order of the operations above. */
err_vport_fid_map:
	mlxsw_sp_vport_fid_map(mlxsw_sp_vport, old_fid, true);
err_vport_fid_unmap:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
err_port_vid_learning_set:
	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false);
err_port_flood_set:
	/* Destroy the bridge vFID only if no other vPort uses it. */
	if (!vfid->nr_vports)
		mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
	return err;
}
3380
3381 static bool
3382 mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
3383                                   const struct net_device *br_dev)
3384 {
3385         struct mlxsw_sp_port *mlxsw_sp_vport;
3386
3387         list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
3388                             vport.list) {
3389                 if (mlxsw_sp_vport_br_get(mlxsw_sp_vport) == br_dev)
3390                         return false;
3391         }
3392
3393         return true;
3394 }
3395
/* Handle a netdev notifier event for the VLAN upper (VID @vid) of an mlxsw
 * port @dev. PRECHANGEUPPER vetoes non-bridge masters and duplicate bridge
 * membership across the port's vPorts; CHANGEUPPER joins or leaves the
 * bridge vFID accordingly. Returns 0 or a negative errno (vetoes the
 * change on PRECHANGEUPPER).
 */
static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
					  unsigned long event, void *ptr,
					  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct net_device *upper_dev;
	int err = 0;

	/* May be NULL, e.g. after the VLAN device was unlinked. */
	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only bridge masters are supported as uppers of a vPort. */
		if (!netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* We can't have multiple VLAN interfaces configured on
		 * the same port and being members in the same bridge.
		 */
		if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
						       upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking) {
			if (WARN_ON(!mlxsw_sp_vport))
				return -EINVAL;
			err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
							 upper_dev);
		} else {
			/* We ignore bridge's unlinking notifications if vPort
			 * is gone, since we already left the bridge when the
			 * VLAN device was unlinked from the real device.
			 */
			if (!mlxsw_sp_vport)
				return 0;
			mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, upper_dev,
						    true);
		}
	}

	return err;
}
3443
3444 static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
3445                                               unsigned long event, void *ptr,
3446                                               u16 vid)
3447 {
3448         struct net_device *dev;
3449         struct list_head *iter;
3450         int ret;
3451
3452         netdev_for_each_lower_dev(lag_dev, dev, iter) {
3453                 if (mlxsw_sp_port_dev_check(dev)) {
3454                         ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
3455                                                              vid);
3456                         if (ret)
3457                                 return ret;
3458                 }
3459         }
3460
3461         return 0;
3462 }
3463
3464 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
3465                                          unsigned long event, void *ptr)
3466 {
3467         struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
3468         u16 vid = vlan_dev_vlan_id(vlan_dev);
3469
3470         if (mlxsw_sp_port_dev_check(real_dev))
3471                 return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
3472                                                       vid);
3473         else if (netif_is_lag_master(real_dev))
3474                 return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
3475                                                           vid);
3476
3477         return 0;
3478 }
3479
/* Top-level netdev notifier callback: classify the device (mlxsw port,
 * LAG master, or VLAN device) and delegate; other devices are ignored.
 * The handler's errno is translated to notifier semantics.
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err;

	if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else
		err = 0;

	return notifier_from_errno(err);
}
3495
/* Notifier block registered on the netdev notifier chain at module init. */
static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};
3499
3500 static int __init mlxsw_sp_module_init(void)
3501 {
3502         int err;
3503
3504         register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3505         err = mlxsw_core_driver_register(&mlxsw_sp_driver);
3506         if (err)
3507                 goto err_core_driver_register;
3508         return 0;
3509
3510 err_core_driver_register:
3511         unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3512         return err;
3513 }
3514
/* Module exit point: tear down in reverse order of mlxsw_sp_module_init()
 * — unregister the core driver first, then the netdev notifier.
 */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}
3520
/* Module registration and metadata. */
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);