/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <net/devlink.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"

static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";
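
/* Tx header layout.
 * Each packet handed to the device is prepended with a Tx header of
 * MLXSW_TXHDR_LEN bytes. The tx_hdr_* items below describe its fields
 * as (offset, LSB, size in bits) within 32-bit words; see
 * mlxsw_sp_txhdr_construct() for how the header is filled in.
 */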

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
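
/* The device exposes a base MAC address through the SPAD register. It
 * is queried once at init time and later used to derive the per-port
 * MAC addresses (see mlxsw_sp_port_dev_addr_init()).
 */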

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN];
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
					 bool *p_is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];
	u8 oper_status;
	int err;

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
	if (err)
		return err;
	oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
	*p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP;
	return 0;
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}

static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u16 vid, enum mlxsw_reg_spms_state state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}
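
/* The hardware MTU also covers the Tx header and the Ethernet header,
 * so the netdev MTU is adjusted by MLXSW_TXHDR_LEN + ETH_HLEN before
 * being checked against the maximum the port reports and written to
 * the PMTU register.
 */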

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
			    fid, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}

static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  u16 vid, bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int __mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					   u8 local_port, u8 *p_module,
					   u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width)
{
	u8 lane;

	return __mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, p_module,
					       p_width, &lane);
}

static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 module, u8 width, u8 lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}
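
/* Transmit path: ensure there is enough headroom for the Tx header
 * (reallocating the skb if necessary), pad the frame to the minimum
 * Ethernet size, construct the Tx header and hand the skb to the core.
 * Stats are updated under the per-CPU syncp; drops are counted without
 * it since tx_dropped is a plain u32.
 */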

static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
	}

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	len = skb->len;
	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int err;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;
	mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, 0, pg_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu);
	if (err)
		return err;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu);
	return err;
}

static struct rtnl_link_stats64 *
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return stats;
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}
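
/* In Virtual mode the port stops using the global VID to FID mapping
 * and instead each {Port, VID} pair is mapped explicitly to a FID. The
 * two helpers below transition a port into and out of Virtual mode,
 * converting all of its active VLANs in the process.
 */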

static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}

static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	if (err)
		return err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
						   vid, vid);
		if (err)
			return err;
	}

	return 0;
}

static struct mlxsw_sp_vfid *
mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid)
{
	struct mlxsw_sp_vfid *vfid;

	list_for_each_entry(vfid, &mlxsw_sp->port_vfids.list, list) {
		if (vfid->vid == vid)
			return vfid;
	}

	return NULL;
}

static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->port_vfids.mapped,
				   MLXSW_SP_VFID_PORT_MAX);
}

static int __mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid)
{
	u16 fid = mlxsw_sp_vfid_to_fid(vfid);
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static void __mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid)
{
	u16 fid = mlxsw_sp_vfid_to_fid(vfid);
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, fid, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static struct mlxsw_sp_vfid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
						  u16 vid)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_vfid *vfid;
	u16 n_vfid;
	int err;

	n_vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
	if (n_vfid == MLXSW_SP_VFID_PORT_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
	if (err) {
		dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
		return ERR_PTR(err);
	}

	vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
	if (!vfid)
		goto err_allocate_vfid;

	vfid->vfid = n_vfid;
	vfid->vid = vid;

	list_add(&vfid->list, &mlxsw_sp->port_vfids.list);
	set_bit(n_vfid, mlxsw_sp->port_vfids.mapped);

	return vfid;

err_allocate_vfid:
	__mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
	return ERR_PTR(-ENOMEM);
}

static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_vfid *vfid)
{
	clear_bit(vfid->vfid, mlxsw_sp->port_vfids.mapped);
	list_del(&vfid->list);

	__mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);

	kfree(vfid);
}
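
/* A vPort is a {Port, VID} pair backed by a vFID. It reuses struct
 * mlxsw_sp_port so that the various port helpers above can operate on
 * it transparently.
 */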

static struct mlxsw_sp_port *
mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_vfid *vfid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
	if (!mlxsw_sp_vport)
		return NULL;

	/* dev will be set correctly after the VLAN device is linked
	 * with the real device. In case of bridge SELF invocation, dev
	 * will remain as is.
	 */
	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
	mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
	mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
	mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
	mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
	mlxsw_sp_vport->vport.vfid = vfid;
	mlxsw_sp_vport->vport.vid = vfid->vid;

	list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);

	return mlxsw_sp_vport;
}

static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	list_del(&mlxsw_sp_vport->vport.list);
	kfree(mlxsw_sp_vport);
}

int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
			  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_vfid *vfid;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
		netdev_warn(dev, "VID=%d already configured\n", vid);
		return 0;
	}

	vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
	if (!vfid) {
		vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
		if (IS_ERR(vfid)) {
			netdev_err(dev, "Failed to create vFID for VID=%d\n",
				   vid);
			return PTR_ERR(vfid);
		}
	}

	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vfid);
	if (!mlxsw_sp_vport) {
		netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
		err = -ENOMEM;
		goto err_port_vport_create;
	}

	if (!vfid->nr_vports) {
		err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid,
					       true, false);
		if (err) {
			netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
				   vfid->vfid);
			goto err_vport_flood_set;
		}
	}

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to Virtual mode\n");
			goto err_port_vp_mode_trans;
		}
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   true,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n",
			   vid, vfid->vfid);
		goto err_port_vid_to_fid_set;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
	if (err) {
		netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
		goto err_port_vid_learning_set;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		goto err_port_add_vid;
	}

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		goto err_port_stp_state_set;
	}

	vfid->nr_vports++;

	return 0;

err_port_stp_state_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
err_port_add_vid:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
err_port_vid_learning_set:
	mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
				     MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
				     mlxsw_sp_vfid_to_fid(vfid->vfid), vid);
err_port_vid_to_fid_set:
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
	if (!vfid->nr_vports)
		mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
					 false);
err_vport_flood_set:
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
err_port_vport_create:
	if (!vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
	return err;
}

int mlxsw_sp_port_kill_vid(struct net_device *dev,
			   __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_vfid *vfid;
	int err;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		netdev_warn(dev, "VID=%d does not exist\n", vid);
		return 0;
	}

	vfid = mlxsw_sp_vport->vport.vfid;

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_DISCARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		return err;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
		return err;
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   false,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
			   vid, vfid->vfid);
		return err;
	}

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set port's mode to VLAN mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to VLAN mode\n");
			return err;
		}
	}

	vfid->nr_vports--;
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);

	/* Destroy the vFID if no vPorts are assigned to it anymore. */
	if (!vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp_port->mlxsw_sp, vfid);

	return 0;
}

static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	u8 module, width, lane;
	int err;

	err = __mlxsw_sp_port_module_info_get(mlxsw_sp_port->mlxsw_sp,
					      mlxsw_sp_port->local_port,
					      &module, &width, &lane);
	if (err) {
		netdev_err(dev, "Failed to retrieve module information\n");
		return err;
	}

	if (!mlxsw_sp_port->split)
		err = snprintf(name, len, "p%d", module + 1);
	else
		err = snprintf(name, len, "p%ds%d", module + 1,
			       lane / width);

	if (err >= len)
		return -EINVAL;

	return 0;
}
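
/* For example, assuming module 1 (zero-based), a non-split port is
 * named "p2", while the second subport of a 2-way split of the same
 * module (lanes 2-3, so lane / width == 1) would be named "p2s1".
 */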

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open = mlxsw_sp_port_open,
	.ndo_stop = mlxsw_sp_port_stop,
	.ndo_start_xmit = mlxsw_sp_port_xmit,
	.ndo_set_rx_mode = mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address = mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu = mlxsw_sp_port_change_mtu,
	.ndo_get_stats64 = mlxsw_sp_port_get_stats64,
	.ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
	.ndo_fdb_add = switchdev_port_fdb_add,
	.ndo_fdb_del = switchdev_port_fdb_del,
	.ndo_fdb_dump = switchdev_port_fdb_dump,
	.ndo_bridge_setlink = switchdev_port_bridge_setlink,
	.ndo_bridge_getlink = switchdev_port_bridge_getlink,
	.ndo_bridge_dellink = switchdev_port_bridge_dellink,
	.ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name,
};

static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(char *payload);
};

static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)

static void mlxsw_sp_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
				     enum ethtool_phys_id_state state)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mlcr_pl[MLXSW_REG_MLCR_LEN];
	bool active;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		active = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
}

static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i;
	int err;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
	for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
		data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
}

static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return MLXSW_SP_PORT_HW_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

struct mlxsw_sp_port_link_mode {
	u32 mask;
	u32 supported;
	u32 advertised;
	u32 speed;
};

static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported = SUPPORTED_100baseT_Full,
		.advertised = ADVERTISED_100baseT_Full,
		.speed = 100,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
		.speed = 100,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
			MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported = SUPPORTED_1000baseKX_Full,
		.advertised = ADVERTISED_1000baseKX_Full,
		.speed = 1000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported = SUPPORTED_10000baseT_Full,
		.advertised = ADVERTISED_10000baseT_Full,
		.speed = 10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported = SUPPORTED_10000baseKX4_Full,
		.advertised = ADVERTISED_10000baseKX4_Full,
		.speed = 10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported = SUPPORTED_10000baseKR_Full,
		.advertised = ADVERTISED_10000baseKR_Full,
		.speed = 10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported = SUPPORTED_20000baseKR2_Full,
		.advertised = ADVERTISED_20000baseKR2_Full,
		.speed = 20000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported = SUPPORTED_40000baseCR4_Full,
		.advertised = ADVERTISED_40000baseCR4_Full,
		.speed = 40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported = SUPPORTED_40000baseKR4_Full,
		.advertised = ADVERTISED_40000baseKR4_Full,
		.speed = 40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported = SUPPORTED_40000baseSR4_Full,
		.advertised = ADVERTISED_40000baseSR4_Full,
		.speed = 40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported = SUPPORTED_40000baseLR4_Full,
		.advertised = ADVERTISED_40000baseLR4_Full,
		.speed = 40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
			MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
			MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed = 25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
			MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
			MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed = 50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.supported = SUPPORTED_56000baseKR4_Full,
		.advertised = ADVERTISED_56000baseKR4_Full,
		.speed = 56000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
			MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed = 100000,
	},
};

#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
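
/* The helpers below translate between the PTYS register's Ethernet
 * protocol bitmask and the ethtool SUPPORTED_* / ADVERTISED_* masks
 * and speed values, using the table above in both directions.
 */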

static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return SUPPORTED_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		return SUPPORTED_Backplane;
	return 0;
}

static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			modes |= mlxsw_sp_port_link_mode[i].supported;
	}
	return modes;
}

static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			modes |= mlxsw_sp_port_link_mode[i].advertised;
	}
	return modes;
}

static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
					    struct ethtool_cmd *cmd)
{
	u32 speed = SPEED_UNKNOWN;
	u8 duplex = DUPLEX_UNKNOWN;
	int i;

	if (!carrier_ok)
		goto out;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
			speed = mlxsw_sp_port_link_mode[i].speed;
			duplex = DUPLEX_FULL;
			break;
		}
	}
out:
	ethtool_cmd_speed_set(cmd, speed);
	cmd->duplex = duplex;
}

static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return PORT_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
		return PORT_DA;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
		return PORT_NONE;

	return PORT_OTHER;
}

static int mlxsw_sp_port_get_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 eth_proto_oper;
	int err;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
			      &eth_proto_admin, &eth_proto_oper);

	cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
			 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
			 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
					eth_proto_oper, cmd);

	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
	cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
	cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);

	cmd->transceiver = XCVR_INTERNAL;
	return 0;
}

static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (advertising & mlxsw_sp_port_link_mode[i].advertised)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sp_to_ptys_speed(u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sp_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static int mlxsw_sp_port_set_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 speed;
	u32 eth_proto_new;
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	bool is_up;
	int err;

	speed = ethtool_cmd_speed(cmd);

	eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
		mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
		mlxsw_sp_to_ptys_speed(speed);

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);

	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "Not supported proto admin requested");
		return -EINVAL;
	}
	if (eth_proto_new == eth_proto_admin)
		return 0;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to set proto admin");
		return err;
	}

	err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
	if (err) {
		netdev_err(dev, "Failed to get oper status");
		return err;
	}
	if (!is_up)
		return 0;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	return 0;
}

static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo = mlxsw_sp_port_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_strings = mlxsw_sp_port_get_strings,
	.set_phys_id = mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats = mlxsw_sp_port_get_stats,
	.get_sset_count = mlxsw_sp_port_get_sset_count,
	.get_settings = mlxsw_sp_port_get_settings,
	.set_settings = mlxsw_sp_port_set_settings,
};
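
/* The maximum speed of a port scales with the number of lanes assigned
 * to it; presumably MLXSW_SP_PORT_BASE_SPEED is the per-lane speed, so
 * a port of the given width advertises every link mode up to the base
 * speed times the width.
 */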

static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_admin;

	eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port,
			    eth_proto_admin);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}

static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				  bool split, u8 module, u8 width)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct devlink_port *devlink_port;
	struct net_device *dev;
	size_t bytes;
	int err;

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev)
		return -ENOMEM;
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->split = split;
	bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
	mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->active_vlans) {
		err = -ENOMEM;
		goto err_port_active_vlans_alloc;
	}
	mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->untagged_vlans) {
		err = -ENOMEM;
		goto err_port_untagged_vlans_alloc;
	}
	INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER;

	/* Each packet needs to have a Tx header (metadata) on top of all
	 * other headers.
	 */
	dev->hard_header_len += MLXSW_TXHDR_LEN;

	devlink_port = &mlxsw_sp_port->devlink_port;
	if (mlxsw_sp_port->split)
		devlink_port_split_set(devlink_port, module);
	err = devlink_port_register(devlink, devlink_port, local_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register devlink port\n",
			mlxsw_sp_port->local_port);
		goto err_devlink_port_register;
	}

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	devlink_port_type_eth_set(devlink_port, dev);

	err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
	if (err)
		goto err_port_vlan_init;

	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	return 0;

err_port_vlan_init:
	unregister_netdev(dev);
err_register_netdev:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
err_port_swid_set:
err_port_system_port_mapping_set:
	devlink_port_unregister(&mlxsw_sp_port->devlink_port);
err_devlink_port_register:
err_dev_addr_init:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	kfree(mlxsw_sp_port->untagged_vlans);
err_port_untagged_vlans_alloc:
	kfree(mlxsw_sp_port->active_vlans);
err_port_active_vlans_alloc:
	free_netdev(dev);
	return err;
}

static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				bool split, u8 module, u8 width, u8 lane)
{
	int err;

	err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
				       lane);
	if (err)
		return err;

	err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split, module,
				     width);
	if (err)
		goto err_port_create;

	return 0;

err_port_create:
	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port);
	return err;
}

static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_port *mlxsw_sp_vport, *tmp;

	list_for_each_entry_safe(mlxsw_sp_vport, tmp,
				 &mlxsw_sp_port->vports_list, vport.list) {
		u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

		/* vPorts created for VLAN devices should already be gone
		 * by now, since we unregistered the port netdev.
		 */
		WARN_ON(is_vlan_dev(mlxsw_sp_vport->dev));
		mlxsw_sp_port_kill_vid(dev, 0, vid);
	}
}

static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct devlink_port *devlink_port;

	if (!mlxsw_sp_port)
		return;
	mlxsw_sp->ports[local_port] = NULL;
	devlink_port = &mlxsw_sp_port->devlink_port;
	devlink_port_type_clear(devlink_port);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	devlink_port_unregister(devlink_port);
	mlxsw_sp_port_vports_fini(mlxsw_sp_port);
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	kfree(mlxsw_sp_port->untagged_vlans);
	kfree(mlxsw_sp_port->active_vlans);
	free_netdev(mlxsw_sp_port->dev);
}

static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
		mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->ports);
}

static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	size_t alloc_size;
	u8 module, width;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
						    &width);
		if (err)
			goto err_port_module_info_get;
		if (!width)
			continue;
		mlxsw_sp->port_to_module[i] = module;
		err = __mlxsw_sp_port_create(mlxsw_sp, i, false, module, width);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
err_port_module_info_get:
	for (i--; i >= 1; i--)
		mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->ports);
	return err;
}

static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
{
	u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;

	return local_port - offset;
}
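
/* Splitting: a full-width port may be divided into 2 or 4 ports, each
 * taking an equal share of the module's lanes. The neighboring ports
 * in the cluster are removed first and then re-created with the
 * reduced width; unsplit reverses the operation.
 */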

static int mlxsw_sp_port_split(void *priv, u8 local_port, unsigned int count)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
	u8 module, cur_width, base_port;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	if (count != 2 && count != 4) {
		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
					    &cur_width);
	if (err) {
		netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
		return err;
	}

	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		return -EINVAL;
	}

	/* Make sure we have enough slave (even) ports for the split. */
	if (count == 2) {
		base_port = local_port;
		if (mlxsw_sp->ports[base_port + 1]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	} else {
		base_port = mlxsw_sp_cluster_base_port_get(local_port);
		if (mlxsw_sp->ports[base_port + 1] ||
		    mlxsw_sp->ports[base_port + 3]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	}

	for (i = 0; i < count; i++)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
					   module, width, i * width);
		if (err) {
			dev_err(mlxsw_sp->bus_info->dev, "Failed to create split port\n");
			goto err_port_create;
		}
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
	for (i = 0; i < count / 2; i++) {
		module = mlxsw_sp->port_to_module[base_port + i * 2];
		mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
				     module, MLXSW_PORT_MODULE_MAX_WIDTH, 0);
	}
	return err;
}

static int mlxsw_sp_port_unsplit(void *priv, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 module, cur_width, base_port;
	unsigned int count;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
					    &cur_width);
	if (err) {
		netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
		return err;
	}
	count = cur_width == 1 ? 4 : 2;

	base_port = mlxsw_sp_cluster_base_port_get(local_port);

	/* Determine which ports to remove. */
	if (count == 2 && local_port >= base_port + 2)
		base_port = base_port + 2;

	for (i = 0; i < count; i++)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	for (i = 0; i < count / 2; i++) {
		module = mlxsw_sp->port_to_module[base_port + i * 2];
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
					   module, MLXSW_PORT_MODULE_MAX_WIDTH,
					   0);
		if (err)
			dev_err(mlxsw_sp->bus_info->dev, "Failed to reinstantiate port\n");
	}

	return 0;
}

static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n",
			 local_port);
		return;
	}

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
	}
}

static struct mlxsw_event_listener mlxsw_sp_pude_event = {
	.func = mlxsw_sp_pude_event_func,
	.trap_id = MLXSW_TRAP_ID_PUDE,
};

static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
				   enum mlxsw_event_trap_id trap_id)
{
	struct mlxsw_event_listener *el;
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	switch (trap_id) {
	case MLXSW_TRAP_ID_PUDE:
		el = &mlxsw_sp_pude_event;
		break;
	}
	err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
	if (err)
		return err;

	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
	if (err)
		goto err_event_trap_set;

	return 0;

err_event_trap_set:
	mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
	return err;
}

static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
				      enum mlxsw_event_trap_id trap_id)
{
	struct mlxsw_event_listener *el;

	switch (trap_id) {
	case MLXSW_TRAP_ID_PUDE:
		el = &mlxsw_sp_pude_event;
		break;
	}
	mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
}

static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
				      void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}

static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_FDB_MC,
	},
	/* Traps for specific L2 packet types, not trapped as FDB MC */
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_STP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LACP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_EAPOL,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LLDP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MMRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MVRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_RPVST,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_DHCP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
	},
};
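
/* For each listener above, registration is a two step process: first
 * register the callback with the core, then configure the device via
 * the HPKT register to trap the relevant packets to the CPU. Teardown
 * restores the default forward action.
 */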

static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
						      &mlxsw_sp_rx_listener[i],
						      mlxsw_sp);
		if (err)
			goto err_rx_listener_register;

		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
				    mlxsw_sp_rx_listener[i].trap_id);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
		if (err)
			goto err_rx_trap_set;
	}
	return 0;

err_rx_trap_set:
	mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
					  &mlxsw_sp_rx_listener[i],
					  mlxsw_sp);
err_rx_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
	return err;
}

static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
}

static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
				 enum mlxsw_reg_sfgc_type type,
				 enum mlxsw_reg_sfgc_bridge_type bridge_type)
{
	enum mlxsw_flood_table_type table_type;
	enum mlxsw_sp_flood_table flood_table;
	char sfgc_pl[MLXSW_REG_SFGC_LEN];

	if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
	else
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;

	if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
		flood_table = MLXSW_SP_FLOOD_TABLE_UC;
	else
		flood_table = MLXSW_SP_FLOOD_TABLE_BM;

	mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
			    flood_table);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
}

static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
{
	int type, err;

	for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
		if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
			continue;

		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
					    MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
		if (err)
			return err;

		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
					    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];

	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
}

static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;
	INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		return err;
	}

	err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
		goto err_event_register;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
		goto err_rx_listener_register;
	}

	err = mlxsw_sp_flood_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	return 0;

err_switchdev_init:
err_lag_init:
err_buffers_init:
err_flood_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_rx_listener_register:
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
err_event_register:
	mlxsw_sp_ports_remove(mlxsw_sp);
	return err;
}
static void mlxsw_sp_fini(void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	mlxsw_sp_ports_remove(mlxsw_sp);
}

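/* Resource profile handed to mlxsw_core at init time; the firmware sizes
 * its tables accordingly. The flood table sizes follow the scheme above:
 * one FID-offset table entry per possible VLAN and one FID table entry
 * per vFID.
 */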
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels		= 1,
	.max_vepa_channels		= 0,
	.used_max_lag			= 1,
	.max_lag			= MLXSW_SP_LAG_MAX,
	.used_max_port_per_lag		= 1,
	.max_port_per_lag		= MLXSW_SP_PORT_PER_LAG_MAX,
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_max_pgt			= 1,
	.max_pgt			= 0,
	.used_max_system_port		= 1,
	.max_system_port		= 64,
	.used_max_vlan_groups		= 1,
	.max_vlan_groups		= 127,
	.used_max_regions		= 1,
	.max_regions			= 400,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_offset_flood_tables	= 2,
	.fid_offset_flood_table_size	= VLAN_N_VID - 1,
	.max_fid_flood_tables		= 2,
	.fid_flood_table_size		= MLXSW_SP_VFID_MAX,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

static struct mlxsw_driver mlxsw_sp_driver = {
	.kind			= MLXSW_DEVICE_KIND_SPECTRUM,
	.owner			= THIS_MODULE,
	.priv_size		= sizeof(struct mlxsw_sp),
	.init			= mlxsw_sp_init,
	.fini			= mlxsw_sp_fini,
	.port_split		= mlxsw_sp_port_split,
	.port_unsplit		= mlxsw_sp_port_unsplit,
	.txhdr_construct	= mlxsw_sp_txhdr_construct,
	.txhdr_len		= MLXSW_TXHDR_LEN,
	.profile		= &mlxsw_sp_config_profile,
};

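/* FDB flushing: the SFDF register deletes dynamically learned entries by
 * increasingly narrow keys - per port, per {port, FID}, per LAG and per
 * {LAG, FID}. The helpers below pack one flush request each.
 */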
static int
mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT);
	mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

static int
mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
						mlxsw_sp_port->local_port);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

static int
mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG);
	mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

static int
mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

static int
__mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, last_err = 0;
	u16 vid;

	for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid);
		if (err)
			last_err = err;
	}

	return last_err;
}

static int
__mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, last_err = 0;
	u16 vid;

	for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid);
		if (err)
			last_err = err;
	}

	return last_err;
}

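/* Pick the narrowest flush that still covers the port: if VLAN uppers
 * (vPorts) exist, flush per {port/LAG, FID} for each VLAN FID so that
 * entries learned for the vPorts' vFIDs are left alone; otherwise the
 * whole port or LAG can be flushed in one request.
 */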
static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!list_empty(&mlxsw_sp_port->vports_list)) {
		if (mlxsw_sp_port->lagged)
			return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port);
		else
			return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port);
	}

	if (mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port);
	else
		return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port);
}

static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport);
	u16 fid = mlxsw_sp_vfid_to_fid(vfid);

	if (mlxsw_sp_vport->lagged)
		return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport,
							     fid);

	return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid);
}

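/* mlxsw netdevs are recognized by their ops pointer; this is the usual
 * cheap identity test done before casting netdev_priv() in the notifier
 * handlers below.
 */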
static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* When port is not bridged untagged packets are tagged with
	 * PVID=VID=1, thereby creating an implicit VLAN interface in
	 * the device. Remove it and let bridge code take care of its
	 * own VLANs.
	 */
	err = mlxsw_sp_port_kill_vid(dev, 0, 1);
	if (err)
		return err;

	mlxsw_sp_port->learning = 1;
	mlxsw_sp_port->learning_sync = 1;
	mlxsw_sp_port->uc_flood = 1;
	mlxsw_sp_port->bridged = 1;

	return 0;
}

static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool flush_fdb)
{
	struct net_device *dev = mlxsw_sp_port->dev;

	if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port))
		netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");

	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);

	mlxsw_sp_port->learning = 0;
	mlxsw_sp_port->learning_sync = 0;
	mlxsw_sp_port->uc_flood = 0;
	mlxsw_sp_port->bridged = 0;

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	return mlxsw_sp_port_add_vid(dev, 0, 1);
}

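/* The device supports a single VLAN-aware bridge. A reference count on
 * the one master bridge device lets multiple ports join it, while any
 * second bridge is vetoed at PRECHANGEUPPER time.
 */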
static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
					 struct net_device *br_dev)
{
	return !mlxsw_sp->master_bridge.dev ||
	       mlxsw_sp->master_bridge.dev == br_dev;
}

static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	mlxsw_sp->master_bridge.dev = br_dev;
	mlxsw_sp->master_bridge.ref_count++;
}

static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	if (--mlxsw_sp->master_bridge.ref_count == 0)
		mlxsw_sp->master_bridge.dev = NULL;
}

static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

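/* SLDR manages the LAG descriptor itself (and, further below, its
 * distribution set), while SLCOR adds or removes a port from the LAG's
 * collector and toggles whether traffic received on the member port is
 * accepted.
 */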
static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	int i;

	for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
		return false;
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
		return false;
	return true;
}

static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;
	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
	if (err)
		goto err_col_port_enable;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;
	return 0;

err_col_port_enable:
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
				       struct net_device *br_dev,
				       bool flush_fdb);

static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_upper *lag;
	u16 lag_id = mlxsw_sp_port->lag_id;
	int err;

	if (!mlxsw_sp_port->lagged)
		return 0;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
	if (err)
		return err;

	/* In case we leave a LAG device that has bridges built on top,
	 * then their teardown sequence is never issued and we need to
	 * invoke the necessary cleanup routines ourselves.
	 */
	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct net_device *br_dev;

		if (!mlxsw_sp_vport->bridged)
			continue;

		br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
		mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false);
	}

	if (mlxsw_sp_port->bridged) {
		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false);
		mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL);
	}

	if (lag->ref_count == 1) {
		if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port))
			netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
		err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
		if (err)
			return err;
	}

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;
	return 0;
}

static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       bool lag_tx_enabled)
{
	if (lag_tx_enabled)
		return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
						  mlxsw_sp_port->lag_id);
	else
		return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
						     mlxsw_sp_port->lag_id);
}

static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}

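/* VLAN uppers of a port are modelled as vPorts, created earlier when the
 * VID was added to the port. Linking only needs to point the vPort at the
 * VLAN netdev (and back at the real device on unlink) so that later
 * bridge operations on the VLAN device resolve to the right vPort.
 */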
static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		WARN_ON(!mlxsw_sp_vport);
		return -EINVAL;
	}

	mlxsw_sp_vport->dev = vlan_dev;

	return 0;
}

static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		WARN_ON(!mlxsw_sp_vport);
		return -EINVAL;
	}

	/* When removing a VLAN device while still bridged we should first
	 * remove it from the bridge, as we receive the bridge's notification
	 * when the vPort is already gone.
	 */
	if (mlxsw_sp_vport->bridged) {
		struct net_device *br_dev;

		br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
		mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true);
	}

	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;

	return 0;
}

static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!info->master || !info->linking)
			break;
		/* HW limitation forbids to put ports to multiple bridges. */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return NOTIFY_BAD;
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return NOTIFY_BAD;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
							      upper_dev);
				if (err) {
					netdev_err(dev, "Failed to link VLAN device\n");
					return NOTIFY_BAD;
				}
			} else {
				err = mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
								upper_dev);
				if (err) {
					netdev_err(dev, "Failed to unlink VLAN device\n");
					return NOTIFY_BAD;
				}
			}
		} else if (netif_is_bridge_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port);
				if (err) {
					netdev_err(dev, "Failed to join bridge\n");
					return NOTIFY_BAD;
				}
				mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
			} else {
				err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
								 true);
				mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
				if (err) {
					netdev_err(dev, "Failed to leave bridge\n");
					return NOTIFY_BAD;
				}
			}
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
				if (err) {
					netdev_err(dev, "Failed to join link aggregation\n");
					return NOTIFY_BAD;
				}
			} else {
				err = mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							      upper_dev);
				if (err) {
					netdev_err(dev, "Failed to leave link aggregation\n");
					return NOTIFY_BAD;
				}
			}
		}
		break;
	}

	return NOTIFY_DONE;
}

static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return NOTIFY_DONE;
}

static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
	}

	return NOTIFY_DONE;
}

static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
			if (ret == NOTIFY_BAD)
				return ret;
		}
	}

	return NOTIFY_DONE;
}

static struct mlxsw_sp_vfid *
mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp,
		      const struct net_device *br_dev)
{
	struct mlxsw_sp_vfid *vfid;

	list_for_each_entry(vfid, &mlxsw_sp->br_vfids.list, list) {
		if (vfid->br_dev == br_dev)
			return vfid;
	}

	return NULL;
}

static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid)
{
	return vfid - MLXSW_SP_VFID_PORT_MAX;
}

static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid)
{
	return MLXSW_SP_VFID_PORT_MAX + br_vfid;
}

static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->br_vfids.mapped,
				   MLXSW_SP_VFID_BR_MAX);
}

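/* Bridge vFIDs live above the per-port vFID range: indices into the
 * br_vfids bitmap are offset by MLXSW_SP_VFID_PORT_MAX when converted to
 * device vFIDs. Each active bridge vFID is also kept on a list keyed by
 * its bridge netdev.
 */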
static struct mlxsw_sp_vfid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp,
						     struct net_device *br_dev)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_vfid *vfid;
	u16 n_vfid;
	int err;

	n_vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp));
	if (n_vfid == MLXSW_SP_VFID_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
	if (err) {
		dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
		return ERR_PTR(err);
	}

	vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
	if (!vfid)
		goto err_allocate_vfid;

	vfid->vfid = n_vfid;
	vfid->br_dev = br_dev;

	list_add(&vfid->list, &mlxsw_sp->br_vfids.list);
	set_bit(mlxsw_sp_vfid_to_br_vfid(n_vfid), mlxsw_sp->br_vfids.mapped);

	return vfid;

err_allocate_vfid:
	__mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
	return ERR_PTR(-ENOMEM);
}

static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_vfid *vfid)
{
	u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid->vfid);

	clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped);
	list_del(&vfid->list);

	__mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);

	kfree(vfid);
}

static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
				       struct net_device *br_dev,
				       bool flush_fdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	struct mlxsw_sp_vfid *vfid, *new_vfid;
	int err;

	vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
	if (!vfid) {
		WARN_ON(!vfid);
		return -EINVAL;
	}

	/* We need a vFID to go back to after leaving the bridge's vFID. */
	new_vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
	if (!new_vfid) {
		new_vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
		if (IS_ERR(new_vfid)) {
			netdev_err(dev, "Failed to create vFID for VID=%d\n",
				   vid);
			return PTR_ERR(new_vfid);
		}
	}

	/* Invalidate existing {Port, VID} to vFID mapping and create a new
	 * one for the new vFID.
	 */
	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   false,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
			   vfid->vfid);
		goto err_port_vid_to_fid_invalidate;
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   true,
					   mlxsw_sp_vfid_to_fid(new_vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
			   new_vfid->vfid);
		goto err_port_vid_to_fid_validate;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
	if (err) {
		netdev_err(dev, "Failed to disable learning\n");
		goto err_port_vid_learning_set;
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
				       false);
	if (err) {
		netdev_err(dev, "Failed to clear flooding\n");
		goto err_vport_flood_set;
	}

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state\n");
		goto err_port_stp_state_set;
	}

	if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport))
		netdev_err(dev, "Failed to flush FDB\n");

	/* Switch between the vFIDs and destroy the old one if needed. */
	new_vfid->nr_vports++;
	mlxsw_sp_vport->vport.vfid = new_vfid;
	vfid->nr_vports--;
	if (!vfid->nr_vports)
		mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);

	mlxsw_sp_vport->learning = 0;
	mlxsw_sp_vport->learning_sync = 0;
	mlxsw_sp_vport->uc_flood = 0;
	mlxsw_sp_vport->bridged = 0;

	return 0;

err_port_stp_state_set:
err_vport_flood_set:
err_port_vid_learning_set:
err_port_vid_to_fid_validate:
err_port_vid_to_fid_invalidate:
	/* Rollback vFID only if new. */
	if (!new_vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp, new_vfid);
	return err;
}

static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *br_dev)
{
	struct mlxsw_sp_vfid *old_vfid = mlxsw_sp_vport->vport.vfid;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	struct mlxsw_sp_vfid *vfid;
	int err;

	vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
	if (!vfid) {
		vfid = mlxsw_sp_br_vfid_create(mlxsw_sp, br_dev);
		if (IS_ERR(vfid)) {
			netdev_err(dev, "Failed to create bridge vFID\n");
			return PTR_ERR(vfid);
		}
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, true, false);
	if (err) {
		netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
			   vfid->vfid);
		goto err_port_flood_set;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning\n");
		goto err_port_vid_learning_set;
	}

	/* We need to invalidate existing {Port, VID} to vFID mapping and
	 * create a new one for the bridge's vFID.
	 */
	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   false,
					   mlxsw_sp_vfid_to_fid(old_vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
			   old_vfid->vfid);
		goto err_port_vid_to_fid_invalidate;
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   true,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
			   vfid->vfid);
		goto err_port_vid_to_fid_validate;
	}

	/* Switch between the vFIDs and destroy the old one if needed. */
	vfid->nr_vports++;
	mlxsw_sp_vport->vport.vfid = vfid;
	old_vfid->nr_vports--;
	if (!old_vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp, old_vfid);

	mlxsw_sp_vport->learning = 1;
	mlxsw_sp_vport->learning_sync = 1;
	mlxsw_sp_vport->uc_flood = 1;
	mlxsw_sp_vport->bridged = 1;

	return 0;

err_port_vid_to_fid_validate:
	mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
				     MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
				     mlxsw_sp_vfid_to_fid(old_vfid->vfid), vid);
err_port_vid_to_fid_invalidate:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
err_port_vid_learning_set:
	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, false);
err_port_flood_set:
	if (!vfid->nr_vports)
		mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
	return err;
}

static bool
mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
				  const struct net_device *br_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		if (mlxsw_sp_vport_br_get(mlxsw_sp_vport) == br_dev)
			return false;
	}

	return true;
}

static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
					  unsigned long event, void *ptr,
					  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct net_device *upper_dev;
	int err;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!info->master || !info->linking)
			break;
		if (!netif_is_bridge_master(upper_dev))
			return NOTIFY_BAD;
		/* We can't have multiple VLAN interfaces configured on
		 * the same port and being members in the same bridge.
		 */
		if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
						       upper_dev))
			return NOTIFY_BAD;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!info->master)
			break;
		if (info->linking) {
			if (!mlxsw_sp_vport) {
				WARN_ON(!mlxsw_sp_vport);
				return NOTIFY_BAD;
			}
			err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
							 upper_dev);
			if (err) {
				netdev_err(dev, "Failed to join bridge\n");
				return NOTIFY_BAD;
			}
		} else {
			/* We ignore bridge's unlinking notifications if vPort
			 * is gone, since we already left the bridge when the
			 * VLAN device was unlinked from the real device.
			 */
			if (!mlxsw_sp_vport)
				return NOTIFY_DONE;
			err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport,
							  upper_dev, true);
			if (err) {
				netdev_err(dev, "Failed to leave bridge\n");
				return NOTIFY_BAD;
			}
		}
	}

	return NOTIFY_DONE;
}

static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
							     vid);
			if (ret == NOTIFY_BAD)
				return ret;
		}
	}

	return NOTIFY_DONE;
}

static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
						      vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
							  vid);

	return NOTIFY_DONE;
}

static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (mlxsw_sp_port_dev_check(dev))
		return mlxsw_sp_netdevice_port_event(dev, event, ptr);

	if (netif_is_lag_master(dev))
		return mlxsw_sp_netdevice_lag_event(dev, event, ptr);

	if (is_vlan_dev(dev))
		return mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return NOTIFY_DONE;
}

static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};

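/* The netdevice notifier is registered before the driver so that no
 * PRECHANGEUPPER/CHANGEUPPER events are missed while ports are being
 * instantiated; module exit tears the two down in reverse order.
 */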
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;
	return 0;

err_core_driver_register:
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);