1 /*
2  * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
3  * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4  * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5  * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6  * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/netdevice.h>
41 #include <linux/etherdevice.h>
42 #include <linux/ethtool.h>
43 #include <linux/slab.h>
44 #include <linux/device.h>
45 #include <linux/skbuff.h>
46 #include <linux/if_vlan.h>
47 #include <linux/if_bridge.h>
48 #include <linux/workqueue.h>
49 #include <linux/jiffies.h>
50 #include <linux/bitops.h>
51 #include <net/switchdev.h>
52 #include <generated/utsrelease.h>
53
54 #include "spectrum.h"
55 #include "core.h"
56 #include "reg.h"
57 #include "port.h"
58 #include "trap.h"
59 #include "txheader.h"
60
61 static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
62 static const char mlxsw_sp_driver_version[] = "1.0";
63
64 /* tx_hdr_version
65  * Tx header version.
66  * Must be set to 1.
67  */
68 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
69
70 /* tx_hdr_ctl
71  * Packet control type.
72  * 0 - Ethernet control (e.g. EMADs, LACP)
73  * 1 - Ethernet data
74  */
75 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
76
77 /* tx_hdr_proto
78  * Packet protocol type. Must be set to 1 (Ethernet).
79  */
80 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
81
82 /* tx_hdr_rx_is_router
83  * Packet is sent from the router. Valid for data packets only.
84  */
85 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
86
87 /* tx_hdr_fid_valid
88  * Indicates if the 'fid' field is valid and should be used for
89  * forwarding lookup. Valid for data packets only.
90  */
91 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
92
93 /* tx_hdr_swid
94  * Switch partition ID. Must be set to 0.
95  */
96 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
97
98 /* tx_hdr_control_tclass
99  * Indicates if the packet should use the control TClass and not one
100  * of the data TClasses.
101  */
102 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
103
104 /* tx_hdr_etclass
105  * Egress TClass to be used on the egress port of the egress device.
106  */
107 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
108
109 /* tx_hdr_port_mid
110  * Destination local port for unicast packets.
111  * Destination multicast ID for multicast packets.
112  *
113  * Control packets are directed to a specific egress port, while data
114  * packets are transmitted through the CPU port (0) into the switch partition,
115  * where forwarding rules are applied.
116  */
117 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
118
119 /* tx_hdr_fid
120  * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
121  * set, otherwise calculated based on the packet's VID using VID to FID mapping.
122  * Valid for data packets only.
123  */
124 MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
125
126 /* tx_hdr_type
127  * 0 - Data packets
128  * 6 - Control packets
129  */
130 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
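
/* Illustrative sketch, not part of the driver: roughly what one of the
 * MLXSW_ITEM32() lines above provides. For example,
 *
 *      MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
 *
 * generates a mlxsw_tx_hdr_version_set() helper that packs a 4-bit value at
 * bit offset 28 of the big-endian 32-bit word found at byte offset 0x00 of
 * the Tx header. The real helpers are generated in item.h; the function
 * below only mimics their effect for the 'version' field.
 */
static inline void example_tx_hdr_version_set(char *txhdr, u32 val)
{
        __be32 *p = (__be32 *) (txhdr + 0x00);
        u32 mask = GENMASK(31, 28);     /* 4 bits starting at bit 28 */

        *p = cpu_to_be32((be32_to_cpu(*p) & ~mask) | ((val << 28) & mask));
}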
131
132 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
133                                      const struct mlxsw_tx_info *tx_info)
134 {
135         char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
136
137         memset(txhdr, 0, MLXSW_TXHDR_LEN);
138
139         mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
140         mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
141         mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
142         mlxsw_tx_hdr_swid_set(txhdr, 0);
143         mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
144         mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
145         mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
146 }
147
148 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
149 {
150         char spad_pl[MLXSW_REG_SPAD_LEN];
151         int err;
152
153         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
154         if (err)
155                 return err;
156         mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
157         return 0;
158 }
159
160 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
161                                           bool is_up)
162 {
163         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
164         char paos_pl[MLXSW_REG_PAOS_LEN];
165
166         mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
167                             is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
168                             MLXSW_PORT_ADMIN_STATUS_DOWN);
169         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
170 }
171
172 static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
173                                          bool *p_is_up)
174 {
175         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
176         char paos_pl[MLXSW_REG_PAOS_LEN];
177         u8 oper_status;
178         int err;
179
180         mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
181         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
182         if (err)
183                 return err;
184         oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
185         *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
186         return 0;
187 }
188
189 static int mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid)
190 {
191         char sfmr_pl[MLXSW_REG_SFMR_LEN];
192         int err;
193
194         mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID,
195                             MLXSW_SP_VFID_BASE + vfid, 0);
196         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
197
198         if (err)
199                 return err;
200
201         set_bit(vfid, mlxsw_sp->active_vfids);
202         return 0;
203 }
204
205 static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid)
206 {
207         char sfmr_pl[MLXSW_REG_SFMR_LEN];
208
209         clear_bit(vfid, mlxsw_sp->active_vfids);
210
211         mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
212                             MLXSW_SP_VFID_BASE + vfid, 0);
213         mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
214 }
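
/* Illustrative sketch, not part of the driver: the vFID-to-FID relationship
 * used by the two helpers above and by the {Port, VID} to FID mappings set
 * up in mlxsw_sp_port_add_vid() below. A vFID index is simply offset by
 * MLXSW_SP_VFID_BASE to obtain the FID value programmed into the device.
 */
static inline u16 example_vfid_to_fid(u16 vfid)
{
        return MLXSW_SP_VFID_BASE + vfid;
}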
215
216 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
217                                       unsigned char *addr)
218 {
219         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
220         char ppad_pl[MLXSW_REG_PPAD_LEN];
221
222         mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
223         mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
224         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
225 }
226
227 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
228 {
229         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
230         unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
231
232         ether_addr_copy(addr, mlxsw_sp->base_mac);
233         addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
234         return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
235 }
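
/* Worked example for the address derivation above (illustrative values
 * only): with a base MAC of e4:1d:2d:a5:f1:00 read from the SPAD register,
 * local port 3 is assigned e4:1d:2d:a5:f1:03 -- the base MAC with its last
 * octet offset by the local port number.
 */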
236
237 static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
238                                        u16 vid, enum mlxsw_reg_spms_state state)
239 {
240         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
241         char *spms_pl;
242         int err;
243
244         spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
245         if (!spms_pl)
246                 return -ENOMEM;
247         mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
248         mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
249         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
250         kfree(spms_pl);
251         return err;
252 }
253
254 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
255 {
256         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
257         char pmtu_pl[MLXSW_REG_PMTU_LEN];
258         int max_mtu;
259         int err;
260
261         mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
262         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
263         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
264         if (err)
265                 return err;
266         max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
267
268         if (mtu > max_mtu)
269                 return -EINVAL;
270
271         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
272         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
273 }
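
/* Worked example for the MTU adjustment above, assuming MLXSW_TXHDR_LEN is
 * 16 bytes (see txheader.h): a requested MTU of 1500 is programmed into the
 * PMTU register as 1500 + 16 + 14 = 1530, so that the hardware limit also
 * covers the Tx header and the Ethernet header.
 */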
274
275 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
276 {
277         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
278         char pspa_pl[MLXSW_REG_PSPA_LEN];
279
280         mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
281         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
282 }
283
284 static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
285                                      bool enable)
286 {
287         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
288         char svpe_pl[MLXSW_REG_SVPE_LEN];
289
290         mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
291         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
292 }
293
294 int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
295                                  enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
296                                  u16 vid)
297 {
298         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
299         char svfa_pl[MLXSW_REG_SVFA_LEN];
300
301         mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
302                             fid, vid);
303         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
304 }
305
306 static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
307                                           u16 vid, bool learn_enable)
308 {
309         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
310         char *spvmlr_pl;
311         int err;
312
313         spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
314         if (!spvmlr_pl)
315                 return -ENOMEM;
316         mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
317                               learn_enable);
318         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
319         kfree(spvmlr_pl);
320         return err;
321 }
322
323 static int
324 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
325 {
326         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
327         char sspr_pl[MLXSW_REG_SSPR_LEN];
328
329         mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
330         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
331 }
332
333 static int mlxsw_sp_port_module_check(struct mlxsw_sp_port *mlxsw_sp_port,
334                                       bool *p_usable)
335 {
336         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
337         char pmlp_pl[MLXSW_REG_PMLP_LEN];
338         int err;
339
340         mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
341         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
342         if (err)
343                 return err;
344         *p_usable = mlxsw_reg_pmlp_width_get(pmlp_pl) ? true : false;
345         return 0;
346 }
347
348 static int mlxsw_sp_port_open(struct net_device *dev)
349 {
350         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
351         int err;
352
353         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
354         if (err)
355                 return err;
356         netif_start_queue(dev);
357         return 0;
358 }
359
360 static int mlxsw_sp_port_stop(struct net_device *dev)
361 {
362         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
363
364         netif_stop_queue(dev);
365         return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
366 }
367
368 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
369                                       struct net_device *dev)
370 {
371         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
372         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
373         struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
374         const struct mlxsw_tx_info tx_info = {
375                 .local_port = mlxsw_sp_port->local_port,
376                 .is_emad = false,
377         };
378         u64 len;
379         int err;
380
381         if (mlxsw_core_skb_transmit_busy(mlxsw_sp, &tx_info))
382                 return NETDEV_TX_BUSY;
383
384         if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
385                 struct sk_buff *skb_orig = skb;
386
387                 skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
388                 if (!skb) {
389                         this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
390                         dev_kfree_skb_any(skb_orig);
391                         return NETDEV_TX_OK;
392                 }
393         }
394
395         if (eth_skb_pad(skb)) {
396                 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
397                 return NETDEV_TX_OK;
398         }
399
400         mlxsw_sp_txhdr_construct(skb, &tx_info);
401         len = skb->len;
402         /* Due to a race we might fail here because of a full queue. In that
403          * unlikely case we simply drop the packet.
404          */
405         err = mlxsw_core_skb_transmit(mlxsw_sp, skb, &tx_info);
406
407         if (!err) {
408                 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
409                 u64_stats_update_begin(&pcpu_stats->syncp);
410                 pcpu_stats->tx_packets++;
411                 pcpu_stats->tx_bytes += len;
412                 u64_stats_update_end(&pcpu_stats->syncp);
413         } else {
414                 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
415                 dev_kfree_skb_any(skb);
416         }
417         return NETDEV_TX_OK;
418 }
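
/* Worked note for the Tx path above, assuming MLXSW_TXHDR_LEN is 16 bytes
 * (see txheader.h): mlxsw_sp_txhdr_construct() pushes the 16-byte Tx header
 * in front of the Ethernet frame, so a minimum-size 60-byte frame is handed
 * to the core as 76 bytes. Since 'len' is sampled after the push, the
 * tx_bytes counter includes the Tx header as well. The headroom reallocation
 * above is the slow path; mlxsw_sp_port_create() grows dev->hard_header_len
 * by MLXSW_TXHDR_LEN so that enough headroom is normally reserved up front.
 */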
419
420 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
421 {
422         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
423         struct sockaddr *addr = p;
424         int err;
425
426         if (!is_valid_ether_addr(addr->sa_data))
427                 return -EADDRNOTAVAIL;
428
429         err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
430         if (err)
431                 return err;
432         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
433         return 0;
434 }
435
436 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
437 {
438         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
439         int err;
440
441         err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
442         if (err)
443                 return err;
444         dev->mtu = mtu;
445         return 0;
446 }
447
448 static struct rtnl_link_stats64 *
449 mlxsw_sp_port_get_stats64(struct net_device *dev,
450                           struct rtnl_link_stats64 *stats)
451 {
452         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
453         struct mlxsw_sp_port_pcpu_stats *p;
454         u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
455         u32 tx_dropped = 0;
456         unsigned int start;
457         int i;
458
459         for_each_possible_cpu(i) {
460                 p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
461                 do {
462                         start = u64_stats_fetch_begin_irq(&p->syncp);
463                         rx_packets      = p->rx_packets;
464                         rx_bytes        = p->rx_bytes;
465                         tx_packets      = p->tx_packets;
466                         tx_bytes        = p->tx_bytes;
467                 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
468
469                 stats->rx_packets       += rx_packets;
470                 stats->rx_bytes         += rx_bytes;
471                 stats->tx_packets       += tx_packets;
472                 stats->tx_bytes         += tx_bytes;
473                 /* tx_dropped is u32, updated without syncp protection. */
474                 tx_dropped      += p->tx_dropped;
475         }
476         stats->tx_dropped       = tx_dropped;
477         return stats;
478 }
479
480 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
481                            u16 vid_end, bool is_member, bool untagged)
482 {
483         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
484         char *spvm_pl;
485         int err;
486
487         spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
488         if (!spvm_pl)
489                 return -ENOMEM;
490
491         mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
492                             vid_end, is_member, untagged);
493         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
494         kfree(spvm_pl);
495         return err;
496 }
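
/* Illustrative usage sketch, not part of the driver: make a port an
 * egress-untagged member of VID 1 (the default PVID assigned at port
 * creation) and later remove the membership again. Error handling is
 * elided for brevity.
 */
static inline void example_vlan_membership(struct mlxsw_sp_port *mlxsw_sp_port)
{
        mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, 1, true, true);
        mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, 1, false, false);
}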
497
498 static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
499 {
500         enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
501         u16 vid, last_visited_vid;
502         int err;
503
504         for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
505                 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
506                                                    vid);
507                 if (err) {
508                         last_visited_vid = vid;
509                         goto err_port_vid_to_fid_set;
510                 }
511         }
512
513         err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
514         if (err) {
515                 last_visited_vid = VLAN_N_VID;
516                 goto err_port_vid_to_fid_set;
517         }
518
519         return 0;
520
521 err_port_vid_to_fid_set:
522         for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
523                 mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
524                                              vid);
525         return err;
526 }
527
528 static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
529 {
530         enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
531         u16 vid;
532         int err;
533
534         err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
535         if (err)
536                 return err;
537
538         for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
539                 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
540                                                    vid, vid);
541                 if (err)
542                         return err;
543         }
544
545         return 0;
546 }
547
548 int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
549                           u16 vid)
550 {
551         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
552         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
553         char *sftr_pl;
554         int err;
555
556         /* VLAN 0 is added to HW filter when device goes up, but it is
557          * reserved in our case, so simply return.
558          */
559         if (!vid)
560                 return 0;
561
562         if (test_bit(vid, mlxsw_sp_port->active_vfids)) {
563                 netdev_warn(dev, "VID=%d already configured\n", vid);
564                 return 0;
565         }
566
567         if (!test_bit(vid, mlxsw_sp->active_vfids)) {
568                 err = mlxsw_sp_vfid_create(mlxsw_sp, vid);
569                 if (err) {
570                         netdev_err(dev, "Failed to create vFID=%d\n",
571                                    MLXSW_SP_VFID_BASE + vid);
572                         return err;
573                 }
574
575                 sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
576                 if (!sftr_pl) {
577                         err = -ENOMEM;
578                         goto err_flood_table_alloc;
579                 }
580                 mlxsw_reg_sftr_pack(sftr_pl, 0, vid,
581                                     MLXSW_REG_SFGC_TABLE_TYPE_FID, 0,
582                                     MLXSW_PORT_CPU_PORT, true);
583                 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
584                 kfree(sftr_pl);
585                 if (err) {
586                         netdev_err(dev, "Failed to configure flood table\n");
587                         goto err_flood_table_config;
588                 }
589         }
590
591         /* In case we fail in the following steps, we intentionally do not
592          * destroy the associated vFID.
593          */
594
595         /* When adding the first VLAN interface on a bridged port we need to
596          * transition all the active 802.1Q bridge VLANs to use explicit
597          * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
598          */
599         if (!mlxsw_sp_port->nr_vfids) {
600                 err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
601                 if (err) {
602                         netdev_err(dev, "Failed to set to Virtual mode\n");
603                         return err;
604                 }
605         }
606
607         err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
608                                            MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
609                                            true, MLXSW_SP_VFID_BASE + vid, vid);
610         if (err) {
611                 netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n",
612                            vid, MLXSW_SP_VFID_BASE + vid);
613                 goto err_port_vid_to_fid_set;
614         }
615
616         err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
617         if (err) {
618                 netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
619                 goto err_port_vid_learning_set;
620         }
621
622         err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, false);
623         if (err) {
624                 netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
625                            vid);
626                 goto err_port_add_vid;
627         }
628
629         err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid,
630                                           MLXSW_REG_SPMS_STATE_FORWARDING);
631         if (err) {
632                 netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
633                 goto err_port_stp_state_set;
634         }
635
636         mlxsw_sp_port->nr_vfids++;
637         set_bit(vid, mlxsw_sp_port->active_vfids);
638
639         return 0;
640
641 err_flood_table_config:
642 err_flood_table_alloc:
643         mlxsw_sp_vfid_destroy(mlxsw_sp, vid);
644         return err;
645
646 err_port_stp_state_set:
647         mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
648 err_port_add_vid:
649         mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
650 err_port_vid_learning_set:
651         mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
652                                      MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
653                                      MLXSW_SP_VFID_BASE + vid, vid);
654 err_port_vid_to_fid_set:
655         mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
656         return err;
657 }
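
/* Usage note (illustrative, assuming a port netdev named sw1p1): because
 * the port advertises NETIF_F_HW_VLAN_CTAG_FILTER, the function above is
 * invoked through .ndo_vlan_rx_add_vid whenever a VLAN upper device is
 * created, e.g.
 *
 *      ip link add link sw1p1 name sw1p1.100 type vlan id 100
 *
 * which maps {Port, VID=100} to FID MLXSW_SP_VFID_BASE + 100 as described
 * in the comments above.
 */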
658
659 int mlxsw_sp_port_kill_vid(struct net_device *dev,
660                            __be16 __always_unused proto, u16 vid)
661 {
662         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
663         int err;
664
665         /* VLAN 0 is removed from HW filter when device goes down, but
666          * it is reserved in our case, so simply return.
667          */
668         if (!vid)
669                 return 0;
670
671         if (!test_bit(vid, mlxsw_sp_port->active_vfids)) {
672                 netdev_warn(dev, "VID=%d does not exist\n", vid);
673                 return 0;
674         }
675
676         err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid,
677                                           MLXSW_REG_SPMS_STATE_DISCARDING);
678         if (err) {
679                 netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
680                 return err;
681         }
682
683         err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
684         if (err) {
685                 netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
686                            vid);
687                 return err;
688         }
689
690         err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
691         if (err) {
692                 netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
693                 return err;
694         }
695
696         err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
697                                            MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
698                                            false, MLXSW_SP_VFID_BASE + vid,
699                                            vid);
700         if (err) {
701                 netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
702                            vid, MLXSW_SP_VFID_BASE + vid);
703                 return err;
704         }
705
706         /* When removing the last VLAN interface on a bridged port we need to
707          * transition all active 802.1Q bridge VLANs to use VID to FID
708          * mappings and set port's mode to VLAN mode.
709          */
710         if (mlxsw_sp_port->nr_vfids == 1) {
711                 err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
712                 if (err) {
713                         netdev_err(dev, "Failed to set to VLAN mode\n");
714                         return err;
715                 }
716         }
717
718         mlxsw_sp_port->nr_vfids--;
719         clear_bit(vid, mlxsw_sp_port->active_vfids);
720
721         return 0;
722 }
723
724 static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
725         .ndo_open               = mlxsw_sp_port_open,
726         .ndo_stop               = mlxsw_sp_port_stop,
727         .ndo_start_xmit         = mlxsw_sp_port_xmit,
728         .ndo_set_mac_address    = mlxsw_sp_port_set_mac_address,
729         .ndo_change_mtu         = mlxsw_sp_port_change_mtu,
730         .ndo_get_stats64        = mlxsw_sp_port_get_stats64,
731         .ndo_vlan_rx_add_vid    = mlxsw_sp_port_add_vid,
732         .ndo_vlan_rx_kill_vid   = mlxsw_sp_port_kill_vid,
733         .ndo_fdb_add            = switchdev_port_fdb_add,
734         .ndo_fdb_del            = switchdev_port_fdb_del,
735         .ndo_fdb_dump           = switchdev_port_fdb_dump,
736         .ndo_bridge_setlink     = switchdev_port_bridge_setlink,
737         .ndo_bridge_getlink     = switchdev_port_bridge_getlink,
738         .ndo_bridge_dellink     = switchdev_port_bridge_dellink,
739 };
740
741 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
742                                       struct ethtool_drvinfo *drvinfo)
743 {
744         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
745         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
746
747         strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
748         strlcpy(drvinfo->version, mlxsw_sp_driver_version,
749                 sizeof(drvinfo->version));
750         snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
751                  "%d.%d.%d",
752                  mlxsw_sp->bus_info->fw_rev.major,
753                  mlxsw_sp->bus_info->fw_rev.minor,
754                  mlxsw_sp->bus_info->fw_rev.subminor);
755         strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
756                 sizeof(drvinfo->bus_info));
757 }
758
759 struct mlxsw_sp_port_hw_stats {
760         char str[ETH_GSTRING_LEN];
761         u64 (*getter)(char *payload);
762 };
763
764 static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
765         {
766                 .str = "a_frames_transmitted_ok",
767                 .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
768         },
769         {
770                 .str = "a_frames_received_ok",
771                 .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
772         },
773         {
774                 .str = "a_frame_check_sequence_errors",
775                 .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
776         },
777         {
778                 .str = "a_alignment_errors",
779                 .getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
780         },
781         {
782                 .str = "a_octets_transmitted_ok",
783                 .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
784         },
785         {
786                 .str = "a_octets_received_ok",
787                 .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
788         },
789         {
790                 .str = "a_multicast_frames_xmitted_ok",
791                 .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
792         },
793         {
794                 .str = "a_broadcast_frames_xmitted_ok",
795                 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
796         },
797         {
798                 .str = "a_multicast_frames_received_ok",
799                 .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
800         },
801         {
802                 .str = "a_broadcast_frames_received_ok",
803                 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
804         },
805         {
806                 .str = "a_in_range_length_errors",
807                 .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
808         },
809         {
810                 .str = "a_out_of_range_length_field",
811                 .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
812         },
813         {
814                 .str = "a_frame_too_long_errors",
815                 .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
816         },
817         {
818                 .str = "a_symbol_error_during_carrier",
819                 .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
820         },
821         {
822                 .str = "a_mac_control_frames_transmitted",
823                 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
824         },
825         {
826                 .str = "a_mac_control_frames_received",
827                 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
828         },
829         {
830                 .str = "a_unsupported_opcodes_received",
831                 .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
832         },
833         {
834                 .str = "a_pause_mac_ctrl_frames_received",
835                 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
836         },
837         {
838                 .str = "a_pause_mac_ctrl_frames_xmitted",
839                 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
840         },
841 };
842
843 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
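
/* Usage note (illustrative, assuming a port netdev named sw1p1): the table
 * above backs the ethtool statistics callbacks below, so
 *
 *      ethtool -S sw1p1
 *
 * prints one line per entry, e.g. "a_frames_transmitted_ok: <count>", with
 * each value read from the corresponding PPCNT register field.
 */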
844
845 static void mlxsw_sp_port_get_strings(struct net_device *dev,
846                                       u32 stringset, u8 *data)
847 {
848         u8 *p = data;
849         int i;
850
851         switch (stringset) {
852         case ETH_SS_STATS:
853                 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
854                         memcpy(p, mlxsw_sp_port_hw_stats[i].str,
855                                ETH_GSTRING_LEN);
856                         p += ETH_GSTRING_LEN;
857                 }
858                 break;
859         }
860 }
861
862 static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
863                                      enum ethtool_phys_id_state state)
864 {
865         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
866         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
867         char mlcr_pl[MLXSW_REG_MLCR_LEN];
868         bool active;
869
870         switch (state) {
871         case ETHTOOL_ID_ACTIVE:
872                 active = true;
873                 break;
874         case ETHTOOL_ID_INACTIVE:
875                 active = false;
876                 break;
877         default:
878                 return -EOPNOTSUPP;
879         }
880
881         mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
882         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
883 }
884
885 static void mlxsw_sp_port_get_stats(struct net_device *dev,
886                                     struct ethtool_stats *stats, u64 *data)
887 {
888         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
889         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
890         char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
891         int i;
892         int err;
893
894         mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port);
895         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
896         for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
897                 data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
898 }
899
900 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
901 {
902         switch (sset) {
903         case ETH_SS_STATS:
904                 return MLXSW_SP_PORT_HW_STATS_LEN;
905         default:
906                 return -EOPNOTSUPP;
907         }
908 }
909
910 struct mlxsw_sp_port_link_mode {
911         u32 mask;
912         u32 supported;
913         u32 advertised;
914         u32 speed;
915 };
916
917 static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
918         {
919                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
920                 .supported      = SUPPORTED_100baseT_Full,
921                 .advertised     = ADVERTISED_100baseT_Full,
922                 .speed          = 100,
923         },
924         {
925                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
926                 .speed          = 100,
927         },
928         {
929                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
930                                   MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
931                 .supported      = SUPPORTED_1000baseKX_Full,
932                 .advertised     = ADVERTISED_1000baseKX_Full,
933                 .speed          = 1000,
934         },
935         {
936                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
937                 .supported      = SUPPORTED_10000baseT_Full,
938                 .advertised     = ADVERTISED_10000baseT_Full,
939                 .speed          = 10000,
940         },
941         {
942                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
943                                   MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
944                 .supported      = SUPPORTED_10000baseKX4_Full,
945                 .advertised     = ADVERTISED_10000baseKX4_Full,
946                 .speed          = 10000,
947         },
948         {
949                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
950                                   MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
951                                   MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
952                                   MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
953                 .supported      = SUPPORTED_10000baseKR_Full,
954                 .advertised     = ADVERTISED_10000baseKR_Full,
955                 .speed          = 10000,
956         },
957         {
958                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
959                 .supported      = SUPPORTED_20000baseKR2_Full,
960                 .advertised     = ADVERTISED_20000baseKR2_Full,
961                 .speed          = 20000,
962         },
963         {
964                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
965                 .supported      = SUPPORTED_40000baseCR4_Full,
966                 .advertised     = ADVERTISED_40000baseCR4_Full,
967                 .speed          = 40000,
968         },
969         {
970                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
971                 .supported      = SUPPORTED_40000baseKR4_Full,
972                 .advertised     = ADVERTISED_40000baseKR4_Full,
973                 .speed          = 40000,
974         },
975         {
976                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
977                 .supported      = SUPPORTED_40000baseSR4_Full,
978                 .advertised     = ADVERTISED_40000baseSR4_Full,
979                 .speed          = 40000,
980         },
981         {
982                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
983                 .supported      = SUPPORTED_40000baseLR4_Full,
984                 .advertised     = ADVERTISED_40000baseLR4_Full,
985                 .speed          = 40000,
986         },
987         {
988                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
989                                   MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
990                                   MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
991                 .speed          = 25000,
992         },
993         {
994                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
995                                   MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
996                                   MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
997                 .speed          = 50000,
998         },
999         {
1000                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
1001                 .supported      = SUPPORTED_56000baseKR4_Full,
1002                 .advertised     = ADVERTISED_56000baseKR4_Full,
1003                 .speed          = 56000,
1004         },
1005         {
1006                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
1007                                   MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1008                                   MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
1009                                   MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
1010                 .speed          = 100000,
1011         },
1012 };
1013
1014 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
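
/* Worked example for the table above: a PTYS eth_proto word with the
 * MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 bit set is reported to ethtool as
 * SUPPORTED_40000baseCR4_Full / ADVERTISED_40000baseCR4_Full with a speed
 * of 40000 Mb/s by the lookup helpers that follow. Rows without .supported
 * or .advertised values (e.g. the 25G and 50G entries) only contribute a
 * speed to the reported settings.
 */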
1015
1016 static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
1017 {
1018         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1019                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1020                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1021                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1022                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1023                               MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1024                 return SUPPORTED_FIBRE;
1025
1026         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1027                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1028                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1029                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
1030                               MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
1031                 return SUPPORTED_Backplane;
1032         return 0;
1033 }
1034
1035 static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
1036 {
1037         u32 modes = 0;
1038         int i;
1039
1040         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1041                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1042                         modes |= mlxsw_sp_port_link_mode[i].supported;
1043         }
1044         return modes;
1045 }
1046
1047 static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
1048 {
1049         u32 modes = 0;
1050         int i;
1051
1052         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1053                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1054                         modes |= mlxsw_sp_port_link_mode[i].advertised;
1055         }
1056         return modes;
1057 }
1058
1059 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
1060                                             struct ethtool_cmd *cmd)
1061 {
1062         u32 speed = SPEED_UNKNOWN;
1063         u8 duplex = DUPLEX_UNKNOWN;
1064         int i;
1065
1066         if (!carrier_ok)
1067                 goto out;
1068
1069         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1070                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
1071                         speed = mlxsw_sp_port_link_mode[i].speed;
1072                         duplex = DUPLEX_FULL;
1073                         break;
1074                 }
1075         }
1076 out:
1077         ethtool_cmd_speed_set(cmd, speed);
1078         cmd->duplex = duplex;
1079 }
1080
1081 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
1082 {
1083         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1084                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1085                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1086                               MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1087                 return PORT_FIBRE;
1088
1089         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1090                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1091                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
1092                 return PORT_DA;
1093
1094         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1095                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1096                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1097                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
1098                 return PORT_NONE;
1099
1100         return PORT_OTHER;
1101 }
1102
1103 static int mlxsw_sp_port_get_settings(struct net_device *dev,
1104                                       struct ethtool_cmd *cmd)
1105 {
1106         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1107         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1108         char ptys_pl[MLXSW_REG_PTYS_LEN];
1109         u32 eth_proto_cap;
1110         u32 eth_proto_admin;
1111         u32 eth_proto_oper;
1112         int err;
1113
1114         mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
1115         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1116         if (err) {
1117                 netdev_err(dev, "Failed to get proto\n");
1118                 return err;
1119         }
1120         mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
1121                               &eth_proto_admin, &eth_proto_oper);
1122
1123         cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
1124                          mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
1125                          SUPPORTED_Pause | SUPPORTED_Asym_Pause;
1126         cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
1127         mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
1128                                         eth_proto_oper, cmd);
1129
1130         eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
1131         cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
1132         cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);
1133
1134         cmd->transceiver = XCVR_INTERNAL;
1135         return 0;
1136 }
1137
1138 static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
1139 {
1140         u32 ptys_proto = 0;
1141         int i;
1142
1143         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1144                 if (advertising & mlxsw_sp_port_link_mode[i].advertised)
1145                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1146         }
1147         return ptys_proto;
1148 }
1149
1150 static u32 mlxsw_sp_to_ptys_speed(u32 speed)
1151 {
1152         u32 ptys_proto = 0;
1153         int i;
1154
1155         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1156                 if (speed == mlxsw_sp_port_link_mode[i].speed)
1157                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1158         }
1159         return ptys_proto;
1160 }
1161
1162 static int mlxsw_sp_port_set_settings(struct net_device *dev,
1163                                       struct ethtool_cmd *cmd)
1164 {
1165         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1166         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1167         char ptys_pl[MLXSW_REG_PTYS_LEN];
1168         u32 speed;
1169         u32 eth_proto_new;
1170         u32 eth_proto_cap;
1171         u32 eth_proto_admin;
1172         bool is_up;
1173         int err;
1174
1175         speed = ethtool_cmd_speed(cmd);
1176
1177         eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
1178                 mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
1179                 mlxsw_sp_to_ptys_speed(speed);
1180
1181         mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
1182         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1183         if (err) {
1184                 netdev_err(dev, "Failed to get proto\n");
1185                 return err;
1186         }
1187         mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);
1188
1189         eth_proto_new = eth_proto_new & eth_proto_cap;
1190         if (!eth_proto_new) {
1191                 netdev_err(dev, "Not supported proto admin requested\n");
1192                 return -EINVAL;
1193         }
1194         if (eth_proto_new == eth_proto_admin)
1195                 return 0;
1196
1197         mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
1198         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1199         if (err) {
1200                 netdev_err(dev, "Failed to set proto admin\n");
1201                 return err;
1202         }
1203
1204         err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
1205         if (err) {
1206                 netdev_err(dev, "Failed to get oper status\n");
1207                 return err;
1208         }
1209         if (!is_up)
1210                 return 0;
1211
1212         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1213         if (err) {
1214                 netdev_err(dev, "Failed to set admin status\n");
1215                 return err;
1216         }
1217
1218         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
1219         if (err) {
1220                 netdev_err(dev, "Failed to set admin status\n");
1221                 return err;
1222         }
1223
1224         return 0;
1225 }
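
/* Usage note (illustrative, assuming a port netdev named sw1p1): the
 * handler above is driven by commands such as
 *
 *      ethtool -s sw1p1 speed 40000 autoneg off
 *
 * which maps the requested speed to a PTYS proto mask, writes the new
 * admin mask, and toggles the admin status to apply it if the port is
 * operationally up.
 */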
1226
1227 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
1228         .get_drvinfo            = mlxsw_sp_port_get_drvinfo,
1229         .get_link               = ethtool_op_get_link,
1230         .get_strings            = mlxsw_sp_port_get_strings,
1231         .set_phys_id            = mlxsw_sp_port_set_phys_id,
1232         .get_ethtool_stats      = mlxsw_sp_port_get_stats,
1233         .get_sset_count         = mlxsw_sp_port_get_sset_count,
1234         .get_settings           = mlxsw_sp_port_get_settings,
1235         .set_settings           = mlxsw_sp_port_set_settings,
1236 };
1237
1238 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port)
1239 {
1240         struct mlxsw_sp_port *mlxsw_sp_port;
1241         struct net_device *dev;
1242         bool usable;
1243         int err;
1244
1245         dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
1246         if (!dev)
1247                 return -ENOMEM;
1248         mlxsw_sp_port = netdev_priv(dev);
1249         mlxsw_sp_port->dev = dev;
1250         mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1251         mlxsw_sp_port->local_port = local_port;
1252         mlxsw_sp_port->learning = 1;
1253         mlxsw_sp_port->learning_sync = 1;
1254         mlxsw_sp_port->uc_flood = 1;
1255         mlxsw_sp_port->pvid = 1;
1256
1257         mlxsw_sp_port->pcpu_stats =
1258                 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
1259         if (!mlxsw_sp_port->pcpu_stats) {
1260                 err = -ENOMEM;
1261                 goto err_alloc_stats;
1262         }
1263
1264         dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
1265         dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
1266
1267         err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
1268         if (err) {
1269                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
1270                         mlxsw_sp_port->local_port);
1271                 goto err_dev_addr_init;
1272         }
1273
1274         netif_carrier_off(dev);
1275
1276         dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
1277                          NETIF_F_HW_VLAN_CTAG_FILTER;
1278
1279         /* Each packet needs to have a Tx header (metadata) on top all other
1280          * headers.
1281          */
1282         dev->hard_header_len += MLXSW_TXHDR_LEN;
1283
1284         err = mlxsw_sp_port_module_check(mlxsw_sp_port, &usable);
1285         if (err) {
1286                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to check module\n",
1287                         mlxsw_sp_port->local_port);
1288                 goto err_port_module_check;
1289         }
1290
1291         if (!usable) {
1292                 dev_dbg(mlxsw_sp->bus_info->dev, "Port %d: Not usable, skipping initialization\n",
1293                         mlxsw_sp_port->local_port);
1294                 goto port_not_usable;
1295         }
1296
1297         err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
1298         if (err) {
1299                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
1300                         mlxsw_sp_port->local_port);
1301                 goto err_port_system_port_mapping_set;
1302         }
1303
1304         err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
1305         if (err) {
1306                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
1307                         mlxsw_sp_port->local_port);
1308                 goto err_port_swid_set;
1309         }
1310
1311         err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
1312         if (err) {
1313                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
1314                         mlxsw_sp_port->local_port);
1315                 goto err_port_mtu_set;
1316         }
1317
1318         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1319         if (err)
1320                 goto err_port_admin_status_set;
1321
1322         err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
1323         if (err) {
1324                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
1325                         mlxsw_sp_port->local_port);
1326                 goto err_port_buffers_init;
1327         }
1328
1329         mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
1330         err = register_netdev(dev);
1331         if (err) {
1332                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
1333                         mlxsw_sp_port->local_port);
1334                 goto err_register_netdev;
1335         }
1336
1337         err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
1338         if (err)
1339                 goto err_port_vlan_init;
1340
1341         mlxsw_sp->ports[local_port] = mlxsw_sp_port;
1342         return 0;
1343
1344 err_port_vlan_init:
1345         unregister_netdev(dev);
1346 err_register_netdev:
1347 err_port_buffers_init:
1348 err_port_admin_status_set:
1349 err_port_mtu_set:
1350 err_port_swid_set:
1351 err_port_system_port_mapping_set:
1352 port_not_usable:
1353 err_port_module_check:
1354 err_dev_addr_init:
1355         free_percpu(mlxsw_sp_port->pcpu_stats);
1356 err_alloc_stats:
1357         free_netdev(dev);
1358         return err;
1359 }
1360
1361 static void mlxsw_sp_vfids_fini(struct mlxsw_sp *mlxsw_sp)
1362 {
1363         u16 vfid;
1364
1365         for_each_set_bit(vfid, mlxsw_sp->active_vfids, VLAN_N_VID)
1366                 mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
1367 }
1368
1369 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
1370 {
1371         struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
1372
1373         if (!mlxsw_sp_port)
1374                 return;
1375         mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
1376         unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
1377         mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
1378         free_percpu(mlxsw_sp_port->pcpu_stats);
1379         free_netdev(mlxsw_sp_port->dev);
1380 }
1381
1382 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
1383 {
1384         int i;
1385
1386         for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
1387                 mlxsw_sp_port_remove(mlxsw_sp, i);
1388         kfree(mlxsw_sp->ports);
1389 }
1390
1391 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
1392 {
1393         size_t alloc_size;
1394         int i;
1395         int err;
1396
1397         alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
1398         mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
1399         if (!mlxsw_sp->ports)
1400                 return -ENOMEM;
1401
1402         for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
1403                 err = mlxsw_sp_port_create(mlxsw_sp, i);
1404                 if (err)
1405                         goto err_port_create;
1406         }
1407         return 0;
1408
1409 err_port_create:
1410         for (i--; i >= 1; i--)
1411                 mlxsw_sp_port_remove(mlxsw_sp, i);
1412         kfree(mlxsw_sp->ports);
1413         return err;
1414 }
1415
1416 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
1417                                      char *pude_pl, void *priv)
1418 {
1419         struct mlxsw_sp *mlxsw_sp = priv;
1420         struct mlxsw_sp_port *mlxsw_sp_port;
1421         enum mlxsw_reg_pude_oper_status status;
1422         u8 local_port;
1423
1424         local_port = mlxsw_reg_pude_local_port_get(pude_pl);
1425         mlxsw_sp_port = mlxsw_sp->ports[local_port];
1426         if (!mlxsw_sp_port) {
1427                 dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n",
1428                          local_port);
1429                 return;
1430         }
1431
1432         status = mlxsw_reg_pude_oper_status_get(pude_pl);
1433         if (status == MLXSW_PORT_OPER_STATUS_UP) {
1434                 netdev_info(mlxsw_sp_port->dev, "link up\n");
1435                 netif_carrier_on(mlxsw_sp_port->dev);
1436         } else {
1437                 netdev_info(mlxsw_sp_port->dev, "link down\n");
1438                 netif_carrier_off(mlxsw_sp_port->dev);
1439         }
1440 }
1441
1442 static struct mlxsw_event_listener mlxsw_sp_pude_event = {
1443         .func = mlxsw_sp_pude_event_func,
1444         .trap_id = MLXSW_TRAP_ID_PUDE,
1445 };
1446
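/* Register the listener for the given event trap with the core and set
 * the corresponding HPKT action, unwinding the registration on failure.
 */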
1447 static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
1448                                    enum mlxsw_event_trap_id trap_id)
1449 {
1450         struct mlxsw_event_listener *el;
1451         char hpkt_pl[MLXSW_REG_HPKT_LEN];
1452         int err;
1453
1454         switch (trap_id) {
1455         case MLXSW_TRAP_ID_PUDE:
1456                 el = &mlxsw_sp_pude_event;
1457                 break;
1458         }
1459         err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
1460         if (err)
1461                 return err;
1462
1463         mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
1464         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
1465         if (err)
1466                 goto err_event_trap_set;
1467
1468         return 0;
1469
1470 err_event_trap_set:
1471         mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
1472         return err;
1473 }
1474
1475 static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
1476                                       enum mlxsw_event_trap_id trap_id)
1477 {
1478         struct mlxsw_event_listener *el;
1479
1480         switch (trap_id) {
1481         case MLXSW_TRAP_ID_PUDE:
1482                 el = &mlxsw_sp_pude_event;
1483                 break;
1484         }
1485         mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
1486 }
1487
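/* Common handler for packets trapped to the CPU: update the per-CPU RX
 * statistics and inject the skb into the stack via the ingress port's
 * netdev.
 */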
1488 static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
1489                                       void *priv)
1490 {
1491         struct mlxsw_sp *mlxsw_sp = priv;
1492         struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
1493         struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
1494
1495         if (unlikely(!mlxsw_sp_port)) {
1496                 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
1497                                      local_port);
1498                 return;
1499         }
1500
1501         skb->dev = mlxsw_sp_port->dev;
1502
1503         pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
1504         u64_stats_update_begin(&pcpu_stats->syncp);
1505         pcpu_stats->rx_packets++;
1506         pcpu_stats->rx_bytes += skb->len;
1507         u64_stats_update_end(&pcpu_stats->syncp);
1508
1509         skb->protocol = eth_type_trans(skb, skb->dev);
1510         netif_receive_skb(skb);
1511 }
1512
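/* Packet types trapped to the CPU, all served by the common RX handler. */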
1513 static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
1514         {
1515                 .func = mlxsw_sp_rx_listener_func,
1516                 .local_port = MLXSW_PORT_DONT_CARE,
1517                 .trap_id = MLXSW_TRAP_ID_FDB_MC,
1518         },
1519         /* Traps for specific L2 packet types, not trapped as FDB MC */
1520         {
1521                 .func = mlxsw_sp_rx_listener_func,
1522                 .local_port = MLXSW_PORT_DONT_CARE,
1523                 .trap_id = MLXSW_TRAP_ID_STP,
1524         },
1525         {
1526                 .func = mlxsw_sp_rx_listener_func,
1527                 .local_port = MLXSW_PORT_DONT_CARE,
1528                 .trap_id = MLXSW_TRAP_ID_LACP,
1529         },
1530         {
1531                 .func = mlxsw_sp_rx_listener_func,
1532                 .local_port = MLXSW_PORT_DONT_CARE,
1533                 .trap_id = MLXSW_TRAP_ID_EAPOL,
1534         },
1535         {
1536                 .func = mlxsw_sp_rx_listener_func,
1537                 .local_port = MLXSW_PORT_DONT_CARE,
1538                 .trap_id = MLXSW_TRAP_ID_LLDP,
1539         },
1540         {
1541                 .func = mlxsw_sp_rx_listener_func,
1542                 .local_port = MLXSW_PORT_DONT_CARE,
1543                 .trap_id = MLXSW_TRAP_ID_MMRP,
1544         },
1545         {
1546                 .func = mlxsw_sp_rx_listener_func,
1547                 .local_port = MLXSW_PORT_DONT_CARE,
1548                 .trap_id = MLXSW_TRAP_ID_MVRP,
1549         },
1550         {
1551                 .func = mlxsw_sp_rx_listener_func,
1552                 .local_port = MLXSW_PORT_DONT_CARE,
1553                 .trap_id = MLXSW_TRAP_ID_RPVST,
1554         },
1555         {
1556                 .func = mlxsw_sp_rx_listener_func,
1557                 .local_port = MLXSW_PORT_DONT_CARE,
1558                 .trap_id = MLXSW_TRAP_ID_DHCP,
1559         },
1560         {
1561                 .func = mlxsw_sp_rx_listener_func,
1562                 .local_port = MLXSW_PORT_DONT_CARE,
1563                 .trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
1564         },
1565         {
1566                 .func = mlxsw_sp_rx_listener_func,
1567                 .local_port = MLXSW_PORT_DONT_CARE,
1568                 .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
1569         },
1570         {
1571                 .func = mlxsw_sp_rx_listener_func,
1572                 .local_port = MLXSW_PORT_DONT_CARE,
1573                 .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
1574         },
1575         {
1576                 .func = mlxsw_sp_rx_listener_func,
1577                 .local_port = MLXSW_PORT_DONT_CARE,
1578                 .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
1579         },
1580         {
1581                 .func = mlxsw_sp_rx_listener_func,
1582                 .local_port = MLXSW_PORT_DONT_CARE,
1583                 .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
1584         },
1585 };
1586
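/* Set up the RX and control trap groups, register an RX listener for
 * each trapped packet type and configure its action as trap-to-CPU,
 * rolling back already configured entries on error.
 */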
1587 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
1588 {
1589         char htgt_pl[MLXSW_REG_HTGT_LEN];
1590         char hpkt_pl[MLXSW_REG_HPKT_LEN];
1591         int i;
1592         int err;
1593
1594         mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
1595         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
1596         if (err)
1597                 return err;
1598
1599         mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
1600         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
1601         if (err)
1602                 return err;
1603
1604         for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
1605                 err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
1606                                                       &mlxsw_sp_rx_listener[i],
1607                                                       mlxsw_sp);
1608                 if (err)
1609                         goto err_rx_listener_register;
1610
1611                 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
1612                                     mlxsw_sp_rx_listener[i].trap_id);
1613                 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
1614                 if (err)
1615                         goto err_rx_trap_set;
1616         }
1617         return 0;
1618
1619 err_rx_trap_set:
1620         mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
1621                                           &mlxsw_sp_rx_listener[i],
1622                                           mlxsw_sp);
1623 err_rx_listener_register:
1624         for (i--; i >= 0; i--) {
1625                 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
1626                                     mlxsw_sp_rx_listener[i].trap_id);
1627                 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
1628
1629                 mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
1630                                                   &mlxsw_sp_rx_listener[i],
1631                                                   mlxsw_sp);
1632         }
1633         return err;
1634 }
1635
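/* Set the action for each trapped packet type to forward and unregister
 * its RX listener.
 */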
1636 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
1637 {
1638         char hpkt_pl[MLXSW_REG_HPKT_LEN];
1639         int i;
1640
1641         for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
1642                 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
1643                                     mlxsw_sp_rx_listener[i].trap_id);
1644                 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
1645
1646                 mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
1647                                                   &mlxsw_sp_rx_listener[i],
1648                                                   mlxsw_sp);
1649         }
1650 }
1651
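/* Configure the flooding table used for a given packet type and bridge
 * type: vFID bridges use the FID table, while 802.1Q bridges use
 * FID-offset tables, split between unknown unicast and
 * broadcast/multicast traffic.
 */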
1652 static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
1653                                  enum mlxsw_reg_sfgc_type type,
1654                                  enum mlxsw_reg_sfgc_bridge_type bridge_type)
1655 {
1656         enum mlxsw_flood_table_type table_type;
1657         enum mlxsw_sp_flood_table flood_table;
1658         char sfgc_pl[MLXSW_REG_SFGC_LEN];
1659
1660         if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID) {
1661                 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
1662                 flood_table = 0;
1663         } else {
1664                 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
1665                 if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
1666                         flood_table = MLXSW_SP_FLOOD_TABLE_UC;
1667                 else
1668                         flood_table = MLXSW_SP_FLOOD_TABLE_BM;
1669         }
1670
1671         mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
1672                             flood_table);
1673         return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
1674 }
1675
1676 static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
1677 {
1678         int type, err;
1679
1680         /* For non-offloaded netdevs, flood all traffic types to the
1681          * CPU port.
1682          */
1683         for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
1684                 if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
1685                         continue;
1686
1687                 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
1688                                             MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
1689                 if (err)
1690                         return err;
1691         }
1692
1693         /* For bridged ports, use one flooding table for unknown unicast
1694          * traffic and a second table for unregistered multicast and
1695          * broadcast.
1696          */
1697         for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
1698                 if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
1699                         continue;
1700
1701                 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
1702                                             MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
1703                 if (err)
1704                         return err;
1705         }
1706
1707         return 0;
1708 }
1709
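/* Main initialization routine, called by the mlxsw core: set up ports,
 * PUDE events, traps, flooding, buffers and switchdev support.
 */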
1710 static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
1711                          const struct mlxsw_bus_info *mlxsw_bus_info)
1712 {
1713         struct mlxsw_sp *mlxsw_sp = priv;
1714         int err;
1715
1716         mlxsw_sp->core = mlxsw_core;
1717         mlxsw_sp->bus_info = mlxsw_bus_info;
1718
1719         err = mlxsw_sp_base_mac_get(mlxsw_sp);
1720         if (err) {
1721                 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
1722                 return err;
1723         }
1724
1725         err = mlxsw_sp_ports_create(mlxsw_sp);
1726         if (err) {
1727                 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
1728                 goto err_ports_create;
1729         }
1730
1731         err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
1732         if (err) {
1733                 dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
1734                 goto err_event_register;
1735         }
1736
1737         err = mlxsw_sp_traps_init(mlxsw_sp);
1738         if (err) {
1739                 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
1740                 goto err_rx_listener_register;
1741         }
1742
1743         err = mlxsw_sp_flood_init(mlxsw_sp);
1744         if (err) {
1745                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
1746                 goto err_flood_init;
1747         }
1748
1749         err = mlxsw_sp_buffers_init(mlxsw_sp);
1750         if (err) {
1751                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
1752                 goto err_buffers_init;
1753         }
1754
1755         err = mlxsw_sp_switchdev_init(mlxsw_sp);
1756         if (err) {
1757                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
1758                 goto err_switchdev_init;
1759         }
1760
1761         return 0;
1762
1763 err_switchdev_init:
1764 err_buffers_init:
1765 err_flood_init:
1766         mlxsw_sp_traps_fini(mlxsw_sp);
1767 err_rx_listener_register:
1768         mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
1769 err_event_register:
1770         mlxsw_sp_ports_remove(mlxsw_sp);
1771 err_ports_create:
1772         mlxsw_sp_vfids_fini(mlxsw_sp);
1773         return err;
1774 }
1775
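/* Tear down the resources set up by mlxsw_sp_init(). */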
1776 static void mlxsw_sp_fini(void *priv)
1777 {
1778         struct mlxsw_sp *mlxsw_sp = priv;
1779
1780         mlxsw_sp_switchdev_fini(mlxsw_sp);
1781         mlxsw_sp_traps_fini(mlxsw_sp);
1782         mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
1783         mlxsw_sp_ports_remove(mlxsw_sp);
1784         mlxsw_sp_vfids_fini(mlxsw_sp);
1785 }
1786
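/* Configuration profile applied to the device during initialization. */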
1787 static struct mlxsw_config_profile mlxsw_sp_config_profile = {
1788         .used_max_vepa_channels         = 1,
1789         .max_vepa_channels              = 0,
1790         .used_max_lag                   = 1,
1791         .max_lag                        = 64,
1792         .used_max_port_per_lag          = 1,
1793         .max_port_per_lag               = 16,
1794         .used_max_mid                   = 1,
1795         .max_mid                        = 7000,
1796         .used_max_pgt                   = 1,
1797         .max_pgt                        = 0,
1798         .used_max_system_port           = 1,
1799         .max_system_port                = 64,
1800         .used_max_vlan_groups           = 1,
1801         .max_vlan_groups                = 127,
1802         .used_max_regions               = 1,
1803         .max_regions                    = 400,
1804         .used_flood_tables              = 1,
1805         .used_flood_mode                = 1,
1806         .flood_mode                     = 3,
1807         .max_fid_offset_flood_tables    = 2,
1808         .fid_offset_flood_table_size    = VLAN_N_VID - 1,
1809         .max_fid_flood_tables           = 1,
1810         .fid_flood_table_size           = VLAN_N_VID,
1811         .used_max_ib_mc                 = 1,
1812         .max_ib_mc                      = 0,
1813         .used_max_pkey                  = 1,
1814         .max_pkey                       = 0,
1815         .swid_config                    = {
1816                 {
1817                         .used_type      = 1,
1818                         .type           = MLXSW_PORT_SWID_TYPE_ETH,
1819                 }
1820         },
1821 };
1822
1823 static struct mlxsw_driver mlxsw_sp_driver = {
1824         .kind                   = MLXSW_DEVICE_KIND_SPECTRUM,
1825         .owner                  = THIS_MODULE,
1826         .priv_size              = sizeof(struct mlxsw_sp),
1827         .init                   = mlxsw_sp_init,
1828         .fini                   = mlxsw_sp_fini,
1829         .txhdr_construct        = mlxsw_sp_txhdr_construct,
1830         .txhdr_len              = MLXSW_TXHDR_LEN,
1831         .profile                = &mlxsw_sp_config_profile,
1832 };
1833
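/* Identify our own port netdevs by their netdev_ops. */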
1834 static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
1835 {
1836         return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
1837 }
1838
1839 static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
1840 {
1841         struct net_device *dev = mlxsw_sp_port->dev;
1842         int err;
1843
1844         /* When the port is not bridged, untagged packets are tagged with
1845          * PVID=VID=1, thereby creating an implicit VLAN interface in
1846          * the device. Remove it and let the bridge code take care of its
1847          * own VLANs.
1848          */
1849         err = mlxsw_sp_port_kill_vid(dev, 0, 1);
1850         if (err)
1851                 netdev_err(dev, "Failed to remove VID 1\n");
1852
1853         return err;
1854 }
1855
1856 static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
1857 {
1858         struct net_device *dev = mlxsw_sp_port->dev;
1859         int err;
1860
1861         /* Add an implicit VLAN interface to the device, so that untagged
1862          * packets are classified to the default vFID.
1863          */
1864         err = mlxsw_sp_port_add_vid(dev, 0, 1);
1865         if (err)
1866                 netdev_err(dev, "Failed to add VID 1\n");
1867
1868         return err;
1869 }
1870
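/* Only a single bridge can be offloaded at a time; check whether the
 * given bridge is, or can become, that bridge.
 */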
1871 static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
1872                                          struct net_device *br_dev)
1873 {
1874         return !mlxsw_sp->master_bridge.dev ||
1875                mlxsw_sp->master_bridge.dev == br_dev;
1876 }
1877
1878 static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
1879                                        struct net_device *br_dev)
1880 {
1881         mlxsw_sp->master_bridge.dev = br_dev;
1882         mlxsw_sp->master_bridge.ref_count++;
1883 }
1884
1885 static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp,
1886                                        struct net_device *br_dev)
1887 {
1888         if (--mlxsw_sp->master_bridge.ref_count == 0)
1889                 mlxsw_sp->master_bridge.dev = NULL;
1890 }
1891
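/* netdev notifier: veto enslavement of a port to a second bridge and
 * update the port configuration when it joins or leaves the offloaded
 * bridge.
 */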
1892 static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
1893                                     unsigned long event, void *ptr)
1894 {
1895         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1896         struct netdev_notifier_changeupper_info *info;
1897         struct mlxsw_sp_port *mlxsw_sp_port;
1898         struct net_device *upper_dev;
1899         struct mlxsw_sp *mlxsw_sp;
1900         int err;
1901
1902         if (!mlxsw_sp_port_dev_check(dev))
1903                 return NOTIFY_DONE;
1904
1905         mlxsw_sp_port = netdev_priv(dev);
1906         mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1907         info = ptr;
1908
1909         switch (event) {
1910         case NETDEV_PRECHANGEUPPER:
1911                 upper_dev = info->upper_dev;
1912                 /* HW limitation: a port cannot be enslaved to more than one bridge. */
1913                 if (info->master && info->linking &&
1914                     netif_is_bridge_master(upper_dev) &&
1915                     !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
1916                         return NOTIFY_BAD;
1917                 break;
1918         case NETDEV_CHANGEUPPER:
1919                 upper_dev = info->upper_dev;
1920                 if (info->master &&
1921                     netif_is_bridge_master(upper_dev)) {
1922                         if (info->linking) {
1923                                 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port);
1924                                 if (err)
1925                                         netdev_err(dev, "Failed to join bridge\n");
1926                                 mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
1927                                 mlxsw_sp_port->bridged = 1;
1928                         } else {
1929                                 err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
1930                                 if (err)
1931                                         netdev_err(dev, "Failed to leave bridge\n");
1932                                 mlxsw_sp_port->bridged = 0;
1933                                 mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
1934                         }
1935                 }
1936                 break;
1937         }
1938
1939         return NOTIFY_DONE;
1940 }
1941
1942 static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
1943         .notifier_call = mlxsw_sp_netdevice_event,
1944 };
1945
1946 static int __init mlxsw_sp_module_init(void)
1947 {
1948         int err;
1949
1950         register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
1951         err = mlxsw_core_driver_register(&mlxsw_sp_driver);
1952         if (err)
1953                 goto err_core_driver_register;
1954         return 0;
1955
1956 err_core_driver_register:
1957         unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
1958         return err;
1959 }
1960
1961 static void __exit mlxsw_sp_module_exit(void)
1962 {
1963         mlxsw_core_driver_unregister(&mlxsw_sp_driver);
1964         unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
1965 }
1966
1967 module_init(mlxsw_sp_module_init);
1968 module_exit(mlxsw_sp_module_exit);
1969
1970 MODULE_LICENSE("Dual BSD/GPL");
1971 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
1972 MODULE_DESCRIPTION("Mellanox Spectrum driver");
1973 MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);