2 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
3 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/netdevice.h>
41 #include <linux/etherdevice.h>
42 #include <linux/ethtool.h>
43 #include <linux/slab.h>
44 #include <linux/device.h>
45 #include <linux/skbuff.h>
46 #include <linux/if_vlan.h>
47 #include <linux/if_bridge.h>
48 #include <linux/workqueue.h>
49 #include <linux/jiffies.h>
50 #include <linux/bitops.h>
51 #include <linux/list.h>
52 #include <linux/notifier.h>
53 #include <linux/dcbnl.h>
54 #include <linux/inetdevice.h>
55 #include <net/switchdev.h>
56 #include <generated/utsrelease.h>
65 static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
66 static const char mlxsw_sp_driver_version[] = "1.0";
72 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
75 * Packet control type.
76 * 0 - Ethernet control (e.g. EMADs, LACP)
79 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
82 * Packet protocol type. Must be set to 1 (Ethernet).
84 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
86 /* tx_hdr_rx_is_router
87 * Packet is sent from the router. Valid for data packets only.
89 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
92 * Indicates if the 'fid' field is valid and should be used for
93 * forwarding lookup. Valid for data packets only.
95 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
98 * Switch partition ID. Must be set to 0.
100 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
102 /* tx_hdr_control_tclass
103 * Indicates if the packet should use the control TClass and not one
104 * of the data TClasses.
106 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
109 * Egress TClass to be used on the egress device on the egress port.
111 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
114 * Destination local port for unicast packets.
115 * Destination multicast ID for multicast packets.
117 * Control packets are directed to a specific egress port, while data
118 * packets are transmitted through the CPU port (0) into the switch partition,
119 * where forwarding rules are applied.
121 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
124 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
125 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
126 * Valid for data packets only.
128 MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
132 * 6 - Control packets
134 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
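/* Prepend the Tx header to the skb and fill it in. All packets are sent
 * as Ethernet control packets on the control TClass, directed at the
 * egress port given in tx_info.
 */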
136 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
137 const struct mlxsw_tx_info *tx_info)
139 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
141 memset(txhdr, 0, MLXSW_TXHDR_LEN);
143 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
144 mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
145 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
146 mlxsw_tx_hdr_swid_set(txhdr, 0);
147 mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
148 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
149 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
152 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
154 char spad_pl[MLXSW_REG_SPAD_LEN];
157 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
160 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
164 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
167 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
168 char paos_pl[MLXSW_REG_PAOS_LEN];
170 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
171 is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
172 MLXSW_PORT_ADMIN_STATUS_DOWN);
173 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
176 static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
179 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
180 char paos_pl[MLXSW_REG_PAOS_LEN];
184 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
185 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
188 oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
189 *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
193 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
196 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
197 char ppad_pl[MLXSW_REG_PPAD_LEN];
199 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
200 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
201 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
204 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
206 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
207 unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
209 ether_addr_copy(addr, mlxsw_sp->base_mac);
210 addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
211 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
214 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
216 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
217 char pmtu_pl[MLXSW_REG_PMTU_LEN];
221 mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
222 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
223 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
226 max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
231 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
232 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
235 static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
238 char pspa_pl[MLXSW_REG_PSPA_LEN];
240 mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
241 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
244 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
246 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
248 return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
252 static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
255 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
256 char svpe_pl[MLXSW_REG_SVPE_LEN];
258 mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
259 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
262 int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
263 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
266 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
267 char svfa_pl[MLXSW_REG_SVFA_LEN];
269 mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
271 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
274 static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
275 u16 vid, bool learn_enable)
277 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
281 spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
284 mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
286 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
292 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
294 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
295 char sspr_pl[MLXSW_REG_SSPR_LEN];
297 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
298 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
301 static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
302 u8 local_port, u8 *p_module,
303 u8 *p_width, u8 *p_lane)
305 char pmlp_pl[MLXSW_REG_PMLP_LEN];
308 mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
309 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
312 *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
313 *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
314 *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
318 static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
319 u8 module, u8 width, u8 lane)
321 char pmlp_pl[MLXSW_REG_PMLP_LEN];
324 mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
325 mlxsw_reg_pmlp_width_set(pmlp_pl, width);
326 for (i = 0; i < width; i++) {
327 mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
328 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */
331 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
334 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
336 char pmlp_pl[MLXSW_REG_PMLP_LEN];
338 mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
339 mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
340 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
343 static int mlxsw_sp_port_open(struct net_device *dev)
345 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
348 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
351 netif_start_queue(dev);
355 static int mlxsw_sp_port_stop(struct net_device *dev)
357 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
359 netif_stop_queue(dev);
360 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
363 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
364 struct net_device *dev)
366 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
367 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
368 struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
369 const struct mlxsw_tx_info tx_info = {
370 .local_port = mlxsw_sp_port->local_port,
376 if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
377 return NETDEV_TX_BUSY;
379 if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
380 struct sk_buff *skb_orig = skb;
382 skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
384 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
385 dev_kfree_skb_any(skb_orig);
390 if (eth_skb_pad(skb)) {
391 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
395 mlxsw_sp_txhdr_construct(skb, &tx_info);
396 /* TX header is consumed by HW on the way so we shouldn't count its
397 * bytes as being sent.
399 len = skb->len - MLXSW_TXHDR_LEN;
401 /* Due to a race we might fail here because of a full queue. In that
402 * unlikely case we simply drop the packet.
404 err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);
407 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
408 u64_stats_update_begin(&pcpu_stats->syncp);
409 pcpu_stats->tx_packets++;
410 pcpu_stats->tx_bytes += len;
411 u64_stats_update_end(&pcpu_stats->syncp);
413 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
414 dev_kfree_skb_any(skb);
419 static void mlxsw_sp_set_rx_mode(struct net_device *dev)
423 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
425 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
426 struct sockaddr *addr = p;
429 if (!is_valid_ether_addr(addr->sa_data))
430 return -EADDRNOTAVAIL;
432 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
435 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
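/* Size a single priority group (PG) buffer. Lossless PGs (PAUSE or PFC
 * enabled) get an additional delay allowance on top of the two-MTU quota
 * that lossy PGs receive.
 */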
439 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
440 bool pause_en, bool pfc_en, u16 delay)
442 u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);
444 delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
445 MLXSW_SP_PAUSE_DELAY;
447 if (pause_en || pfc_en)
448 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
449 pg_size + delay, pg_size);
451 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
454 int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
455 u8 *prio_tc, bool pause_en,
456 struct ieee_pfc *my_pfc)
458 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
459 u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
460 u16 delay = !!my_pfc ? my_pfc->delay : 0;
461 char pbmc_pl[MLXSW_REG_PBMC_LEN];
464 mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
465 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
469 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
470 bool configure = false;
473 for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
474 if (prio_tc[j] == i) {
475 pfc = pfc_en & BIT(j);
483 mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
486 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
489 static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
490 int mtu, bool pause_en)
492 u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
493 bool dcb_en = !!mlxsw_sp_port->dcb.ets;
494 struct ieee_pfc *my_pfc;
497 prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
498 my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
500 return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
504 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
506 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
507 bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
510 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
513 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
515 goto err_port_mtu_set;
520 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
524 static struct rtnl_link_stats64 *
525 mlxsw_sp_port_get_stats64(struct net_device *dev,
526 struct rtnl_link_stats64 *stats)
528 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
529 struct mlxsw_sp_port_pcpu_stats *p;
530 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
535 for_each_possible_cpu(i) {
536 p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
538 start = u64_stats_fetch_begin_irq(&p->syncp);
539 rx_packets = p->rx_packets;
540 rx_bytes = p->rx_bytes;
541 tx_packets = p->tx_packets;
542 tx_bytes = p->tx_bytes;
543 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
545 stats->rx_packets += rx_packets;
546 stats->rx_bytes += rx_bytes;
547 stats->tx_packets += tx_packets;
548 stats->tx_bytes += tx_bytes;
549 /* tx_dropped is u32, updated without syncp protection. */
550 tx_dropped += p->tx_dropped;
552 stats->tx_dropped = tx_dropped;
556 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
557 u16 vid_end, bool is_member, bool untagged)
559 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
563 spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
567 mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
568 vid_end, is_member, untagged);
569 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
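/* Transition the port to Virtual mode: install an explicit {Port, VID} to
 * FID mapping for every active VLAN and then enable virtual port mode.
 */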
574 static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
576 enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
577 u16 vid, last_visited_vid;
580 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
581 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
584 last_visited_vid = vid;
585 goto err_port_vid_to_fid_set;
589 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
591 last_visited_vid = VLAN_N_VID;
592 goto err_port_vid_to_fid_set;
597 err_port_vid_to_fid_set:
598 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
599 mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
604 static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
606 enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
610 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
614 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
615 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
624 static struct mlxsw_sp_port *
625 mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
627 struct mlxsw_sp_port *mlxsw_sp_vport;
629 mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
633 /* dev will be set correctly after the VLAN device is linked
634 * with the real device. In case of bridge SELF invocation, dev
635 * will stay as is.
636 */
637 mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
638 mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
639 mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
640 mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
641 mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
642 mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
643 mlxsw_sp_vport->vport.vid = vid;
645 list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);
647 return mlxsw_sp_vport;
650 static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
652 list_del(&mlxsw_sp_vport->vport.list);
653 kfree(mlxsw_sp_vport);
656 int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
659 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
660 struct mlxsw_sp_port *mlxsw_sp_vport;
661 bool untagged = vid == 1;
664 /* VLAN 0 is added to HW filter when device goes up, but it is
665 * reserved in our case, so simply return.
670 if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
671 netdev_warn(dev, "VID=%d already configured\n", vid);
675 mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
676 if (!mlxsw_sp_vport) {
677 netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
681 /* When adding the first VLAN interface on a bridged port we need to
682 * transition all the active 802.1Q bridge VLANs to use explicit
683 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
685 if (list_is_singular(&mlxsw_sp_port->vports_list)) {
686 err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
688 netdev_err(dev, "Failed to set to Virtual mode\n");
689 goto err_port_vp_mode_trans;
693 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
695 netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
696 goto err_port_vid_learning_set;
699 err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
701 netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
703 goto err_port_add_vid;
709 mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
710 err_port_vid_learning_set:
711 if (list_is_singular(&mlxsw_sp_port->vports_list))
712 mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
713 err_port_vp_mode_trans:
714 mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
718 static int mlxsw_sp_port_kill_vid(struct net_device *dev,
719 __be16 __always_unused proto, u16 vid)
721 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
722 struct mlxsw_sp_port *mlxsw_sp_vport;
723 struct mlxsw_sp_fid *f;
726 /* VLAN 0 is removed from HW filter when device goes down, but
727 * it is reserved in our case, so simply return.
732 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
733 if (!mlxsw_sp_vport) {
734 netdev_warn(dev, "VID=%d does not exist\n", vid);
738 err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
740 netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
745 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
747 netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
751 /* Drop FID reference. If this was the last reference the
752 * resources will be freed.
754 f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
755 if (f && !WARN_ON(!f->leave))
756 f->leave(mlxsw_sp_vport);
758 /* When removing the last VLAN interface on a bridged port we need to
759 * transition all active 802.1Q bridge VLANs to use VID to FID
760 * mappings and set port's mode to VLAN mode.
762 if (list_is_singular(&mlxsw_sp_port->vports_list)) {
763 err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
765 netdev_err(dev, "Failed to set to VLAN mode\n");
770 mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
775 static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
778 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
779 u8 module = mlxsw_sp_port->mapping.module;
780 u8 width = mlxsw_sp_port->mapping.width;
781 u8 lane = mlxsw_sp_port->mapping.lane;
784 if (!mlxsw_sp_port->split)
785 err = snprintf(name, len, "p%d", module + 1);
787 err = snprintf(name, len, "p%ds%d", module + 1,
796 static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
797 .ndo_open = mlxsw_sp_port_open,
798 .ndo_stop = mlxsw_sp_port_stop,
799 .ndo_start_xmit = mlxsw_sp_port_xmit,
800 .ndo_set_rx_mode = mlxsw_sp_set_rx_mode,
801 .ndo_set_mac_address = mlxsw_sp_port_set_mac_address,
802 .ndo_change_mtu = mlxsw_sp_port_change_mtu,
803 .ndo_get_stats64 = mlxsw_sp_port_get_stats64,
804 .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
805 .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
806 .ndo_neigh_construct = mlxsw_sp_router_neigh_construct,
807 .ndo_neigh_destroy = mlxsw_sp_router_neigh_destroy,
808 .ndo_fdb_add = switchdev_port_fdb_add,
809 .ndo_fdb_del = switchdev_port_fdb_del,
810 .ndo_fdb_dump = switchdev_port_fdb_dump,
811 .ndo_bridge_setlink = switchdev_port_bridge_setlink,
812 .ndo_bridge_getlink = switchdev_port_bridge_getlink,
813 .ndo_bridge_dellink = switchdev_port_bridge_dellink,
814 .ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name,
817 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
818 struct ethtool_drvinfo *drvinfo)
820 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
821 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
823 strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
824 strlcpy(drvinfo->version, mlxsw_sp_driver_version,
825 sizeof(drvinfo->version));
826 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
828 mlxsw_sp->bus_info->fw_rev.major,
829 mlxsw_sp->bus_info->fw_rev.minor,
830 mlxsw_sp->bus_info->fw_rev.subminor);
831 strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
832 sizeof(drvinfo->bus_info));
835 static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
836 struct ethtool_pauseparam *pause)
838 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
840 pause->rx_pause = mlxsw_sp_port->link.rx_pause;
841 pause->tx_pause = mlxsw_sp_port->link.tx_pause;
844 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
845 struct ethtool_pauseparam *pause)
847 char pfcc_pl[MLXSW_REG_PFCC_LEN];
849 mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
850 mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
851 mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
853 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
857 static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
858 struct ethtool_pauseparam *pause)
860 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
861 bool pause_en = pause->tx_pause || pause->rx_pause;
864 if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
865 netdev_err(dev, "PFC already enabled on port\n");
869 if (pause->autoneg) {
870 netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
874 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
876 netdev_err(dev, "Failed to configure port's headroom\n");
880 err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
882 netdev_err(dev, "Failed to set PAUSE parameters\n");
883 goto err_port_pause_configure;
886 mlxsw_sp_port->link.rx_pause = pause->rx_pause;
887 mlxsw_sp_port->link.tx_pause = pause->tx_pause;
891 err_port_pause_configure:
892 pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
893 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
897 struct mlxsw_sp_port_hw_stats {
898 char str[ETH_GSTRING_LEN];
899 u64 (*getter)(char *payload);
902 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
904 .str = "a_frames_transmitted_ok",
905 .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
908 .str = "a_frames_received_ok",
909 .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
912 .str = "a_frame_check_sequence_errors",
913 .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
916 .str = "a_alignment_errors",
917 .getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
920 .str = "a_octets_transmitted_ok",
921 .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
924 .str = "a_octets_received_ok",
925 .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
928 .str = "a_multicast_frames_xmitted_ok",
929 .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
932 .str = "a_broadcast_frames_xmitted_ok",
933 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
936 .str = "a_multicast_frames_received_ok",
937 .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
940 .str = "a_broadcast_frames_received_ok",
941 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
944 .str = "a_in_range_length_errors",
945 .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
948 .str = "a_out_of_range_length_field",
949 .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
952 .str = "a_frame_too_long_errors",
953 .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
956 .str = "a_symbol_error_during_carrier",
957 .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
960 .str = "a_mac_control_frames_transmitted",
961 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
964 .str = "a_mac_control_frames_received",
965 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
968 .str = "a_unsupported_opcodes_received",
969 .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
972 .str = "a_pause_mac_ctrl_frames_received",
973 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
976 .str = "a_pause_mac_ctrl_frames_xmitted",
977 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
981 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
983 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
985 .str = "rx_octets_prio",
986 .getter = mlxsw_reg_ppcnt_rx_octets_get,
989 .str = "rx_frames_prio",
990 .getter = mlxsw_reg_ppcnt_rx_frames_get,
993 .str = "tx_octets_prio",
994 .getter = mlxsw_reg_ppcnt_tx_octets_get,
997 .str = "tx_frames_prio",
998 .getter = mlxsw_reg_ppcnt_tx_frames_get,
1001 .str = "rx_pause_prio",
1002 .getter = mlxsw_reg_ppcnt_rx_pause_get,
1005 .str = "rx_pause_duration_prio",
1006 .getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
1009 .str = "tx_pause_prio",
1010 .getter = mlxsw_reg_ppcnt_tx_pause_get,
1013 .str = "tx_pause_duration_prio",
1014 .getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
1018 #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
1020 static u64 mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get(char *ppcnt_pl)
1022 u64 transmit_queue = mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
1024 return MLXSW_SP_CELLS_TO_BYTES(transmit_queue);
1027 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
1029 .str = "tc_transmit_queue_tc",
1030 .getter = mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get,
1033 .str = "tc_no_buffer_discard_uc_tc",
1034 .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
1038 #define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
1040 #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
1041 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
1042 MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
1043 IEEE_8021QAZ_MAX_TCS)
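/* ethtool stats are laid out as the IEEE 802.3 counters, followed by the
 * per-priority counters and then the per-TC counters for each of the
 * IEEE_8021QAZ_MAX_TCS priorities / traffic classes.
 */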
1045 static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
1049 for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
1050 snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
1051 mlxsw_sp_port_hw_prio_stats[i].str, prio);
1052 *p += ETH_GSTRING_LEN;
1056 static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
1060 for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
1061 snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
1062 mlxsw_sp_port_hw_tc_stats[i].str, tc);
1063 *p += ETH_GSTRING_LEN;
1067 static void mlxsw_sp_port_get_strings(struct net_device *dev,
1068 u32 stringset, u8 *data)
1073 switch (stringset) {
1075 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
1076 memcpy(p, mlxsw_sp_port_hw_stats[i].str,
1078 p += ETH_GSTRING_LEN;
1081 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
1082 mlxsw_sp_port_get_prio_strings(&p, i);
1084 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
1085 mlxsw_sp_port_get_tc_strings(&p, i);
1091 static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
1092 enum ethtool_phys_id_state state)
1094 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1095 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1096 char mlcr_pl[MLXSW_REG_MLCR_LEN];
1100 case ETHTOOL_ID_ACTIVE:
1103 case ETHTOOL_ID_INACTIVE:
1110 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
1111 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
1115 mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
1116 int *p_len, enum mlxsw_reg_ppcnt_grp grp)
1119 case MLXSW_REG_PPCNT_IEEE_8023_CNT:
1120 *p_hw_stats = mlxsw_sp_port_hw_stats;
1121 *p_len = MLXSW_SP_PORT_HW_STATS_LEN;
1123 case MLXSW_REG_PPCNT_PRIO_CNT:
1124 *p_hw_stats = mlxsw_sp_port_hw_prio_stats;
1125 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
1127 case MLXSW_REG_PPCNT_TC_CNT:
1128 *p_hw_stats = mlxsw_sp_port_hw_tc_stats;
1129 *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
1138 static void __mlxsw_sp_port_get_stats(struct net_device *dev,
1139 enum mlxsw_reg_ppcnt_grp grp, int prio,
1140 u64 *data, int data_index)
1142 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1143 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1144 struct mlxsw_sp_port_hw_stats *hw_stats;
1145 char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
1149 err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
1152 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
1153 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
1154 for (i = 0; i < len; i++)
1155 data[data_index + i] = !err ? hw_stats[i].getter(ppcnt_pl) : 0;
1158 static void mlxsw_sp_port_get_stats(struct net_device *dev,
1159 struct ethtool_stats *stats, u64 *data)
1161 int i, data_index = 0;
1163 /* IEEE 802.3 Counters */
1164 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
1166 data_index = MLXSW_SP_PORT_HW_STATS_LEN;
1168 /* Per-Priority Counters */
1169 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1170 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
1172 data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
1175 /* Per-TC Counters */
1176 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1177 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
1179 data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
1183 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
1187 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
1193 struct mlxsw_sp_port_link_mode {
1200 static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
1202 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
1203 .supported = SUPPORTED_100baseT_Full,
1204 .advertised = ADVERTISED_100baseT_Full,
1208 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
1212 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
1213 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
1214 .supported = SUPPORTED_1000baseKX_Full,
1215 .advertised = ADVERTISED_1000baseKX_Full,
1219 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
1220 .supported = SUPPORTED_10000baseT_Full,
1221 .advertised = ADVERTISED_10000baseT_Full,
1225 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
1226 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
1227 .supported = SUPPORTED_10000baseKX4_Full,
1228 .advertised = ADVERTISED_10000baseKX4_Full,
1232 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1233 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1234 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1235 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
1236 .supported = SUPPORTED_10000baseKR_Full,
1237 .advertised = ADVERTISED_10000baseKR_Full,
1241 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
1242 .supported = SUPPORTED_20000baseKR2_Full,
1243 .advertised = ADVERTISED_20000baseKR2_Full,
1247 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
1248 .supported = SUPPORTED_40000baseCR4_Full,
1249 .advertised = ADVERTISED_40000baseCR4_Full,
1253 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
1254 .supported = SUPPORTED_40000baseKR4_Full,
1255 .advertised = ADVERTISED_40000baseKR4_Full,
1259 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
1260 .supported = SUPPORTED_40000baseSR4_Full,
1261 .advertised = ADVERTISED_40000baseSR4_Full,
1265 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
1266 .supported = SUPPORTED_40000baseLR4_Full,
1267 .advertised = ADVERTISED_40000baseLR4_Full,
1271 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
1272 MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
1273 MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
1277 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
1278 MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
1279 MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
1283 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
1284 .supported = SUPPORTED_56000baseKR4_Full,
1285 .advertised = ADVERTISED_56000baseKR4_Full,
1289 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
1290 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1291 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
1292 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
1297 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
1299 static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
1301 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1302 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1303 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1304 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1305 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1306 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1307 return SUPPORTED_FIBRE;
1309 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1310 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1311 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1312 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
1313 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
1314 return SUPPORTED_Backplane;
1318 static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
1323 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1324 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1325 modes |= mlxsw_sp_port_link_mode[i].supported;
1330 static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
1335 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1336 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1337 modes |= mlxsw_sp_port_link_mode[i].advertised;
1342 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
1343 struct ethtool_cmd *cmd)
1345 u32 speed = SPEED_UNKNOWN;
1346 u8 duplex = DUPLEX_UNKNOWN;
1352 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1353 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
1354 speed = mlxsw_sp_port_link_mode[i].speed;
1355 duplex = DUPLEX_FULL;
1360 ethtool_cmd_speed_set(cmd, speed);
1361 cmd->duplex = duplex;
1364 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
1366 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1367 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1368 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1369 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1372 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1373 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1374 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
1377 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1378 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1379 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1380 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
1386 static int mlxsw_sp_port_get_settings(struct net_device *dev,
1387 struct ethtool_cmd *cmd)
1389 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1390 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1391 char ptys_pl[MLXSW_REG_PTYS_LEN];
1393 u32 eth_proto_admin;
1397 mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
1398 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1400 netdev_err(dev, "Failed to get proto");
1403 mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
1404 &eth_proto_admin, &eth_proto_oper);
1406 cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
1407 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
1408 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
1409 cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
1410 mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
1411 eth_proto_oper, cmd);
1413 eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
1414 cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
1415 cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);
1417 cmd->transceiver = XCVR_INTERNAL;
1421 static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
1426 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1427 if (advertising & mlxsw_sp_port_link_mode[i].advertised)
1428 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1433 static u32 mlxsw_sp_to_ptys_speed(u32 speed)
1438 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1439 if (speed == mlxsw_sp_port_link_mode[i].speed)
1440 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
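/* Build a PTYS protocol mask of every link mode whose speed does not
 * exceed upper_speed; used to enable all speeds a port's width allows.
 */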
1445 static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
1450 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1451 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
1452 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1457 static int mlxsw_sp_port_set_settings(struct net_device *dev,
1458 struct ethtool_cmd *cmd)
1460 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1461 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1462 char ptys_pl[MLXSW_REG_PTYS_LEN];
1466 u32 eth_proto_admin;
1470 speed = ethtool_cmd_speed(cmd);
1472 eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
1473 mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
1474 mlxsw_sp_to_ptys_speed(speed);
1476 mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
1477 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1479 netdev_err(dev, "Failed to get proto");
1482 mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);
1484 eth_proto_new = eth_proto_new & eth_proto_cap;
1485 if (!eth_proto_new) {
1486 netdev_err(dev, "Not supported proto admin requested");
1489 if (eth_proto_new == eth_proto_admin)
1492 mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
1493 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1495 netdev_err(dev, "Failed to set proto admin");
1499 err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
1501 netdev_err(dev, "Failed to get oper status");
1507 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1509 netdev_err(dev, "Failed to set admin status");
1513 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
1515 netdev_err(dev, "Failed to set admin status");
1522 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
1523 .get_drvinfo = mlxsw_sp_port_get_drvinfo,
1524 .get_link = ethtool_op_get_link,
1525 .get_pauseparam = mlxsw_sp_port_get_pauseparam,
1526 .set_pauseparam = mlxsw_sp_port_set_pauseparam,
1527 .get_strings = mlxsw_sp_port_get_strings,
1528 .set_phys_id = mlxsw_sp_port_set_phys_id,
1529 .get_ethtool_stats = mlxsw_sp_port_get_stats,
1530 .get_sset_count = mlxsw_sp_port_get_sset_count,
1531 .get_settings = mlxsw_sp_port_get_settings,
1532 .set_settings = mlxsw_sp_port_set_settings,
1536 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
1538 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1539 u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
1540 char ptys_pl[MLXSW_REG_PTYS_LEN];
1541 u32 eth_proto_admin;
1543 eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
1544 mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port,
1546 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1549 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
1550 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
1551 bool dwrr, u8 dwrr_weight)
1553 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1554 char qeec_pl[MLXSW_REG_QEEC_LEN];
1556 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1558 mlxsw_reg_qeec_de_set(qeec_pl, true);
1559 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
1560 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
1561 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1564 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
1565 enum mlxsw_reg_qeec_hr hr, u8 index,
1566 u8 next_index, u32 maxrate)
1568 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1569 char qeec_pl[MLXSW_REG_QEEC_LEN];
1571 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1573 mlxsw_reg_qeec_mase_set(qeec_pl, true);
1574 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
1575 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1578 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
1579 u8 switch_prio, u8 tclass)
1581 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1582 char qtct_pl[MLXSW_REG_QTCT_LEN];
1584 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
1586 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
1589 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
1593 /* Set up the elements hierarchy, so that each TC is linked to
1594 * one subgroup, and the subgroups are all members of the same group.
1595 */
1596 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
1597 MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
1601 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1602 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
1603 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
1608 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1609 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
1610 MLXSW_REG_QEEC_HIERARCY_TC, i, i,
1616 /* Make sure the max shaper is disabled in all hierarchies that
1617 * support it.
1618 */
1619 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
1620 MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
1621 MLXSW_REG_QEEC_MAS_DIS);
1624 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1625 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
1626 MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
1628 MLXSW_REG_QEEC_MAS_DIS);
1632 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1633 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
1634 MLXSW_REG_QEEC_HIERARCY_TC,
1636 MLXSW_REG_QEEC_MAS_DIS);
1641 /* Map all priorities to traffic class 0. */
1642 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1643 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
1651 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1652 bool split, u8 module, u8 width, u8 lane)
1654 struct mlxsw_sp_port *mlxsw_sp_port;
1655 struct net_device *dev;
1659 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
1662 mlxsw_sp_port = netdev_priv(dev);
1663 mlxsw_sp_port->dev = dev;
1664 mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1665 mlxsw_sp_port->local_port = local_port;
1666 mlxsw_sp_port->split = split;
1667 mlxsw_sp_port->mapping.module = module;
1668 mlxsw_sp_port->mapping.width = width;
1669 mlxsw_sp_port->mapping.lane = lane;
1670 bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
1671 mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
1672 if (!mlxsw_sp_port->active_vlans) {
1674 goto err_port_active_vlans_alloc;
1676 mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
1677 if (!mlxsw_sp_port->untagged_vlans) {
1679 goto err_port_untagged_vlans_alloc;
1681 INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
1683 mlxsw_sp_port->pcpu_stats =
1684 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
1685 if (!mlxsw_sp_port->pcpu_stats) {
1687 goto err_alloc_stats;
1690 dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
1691 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
1693 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
1695 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
1696 mlxsw_sp_port->local_port);
1697 goto err_dev_addr_init;
1700 netif_carrier_off(dev);
1702 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
1703 NETIF_F_HW_VLAN_CTAG_FILTER;
1705 /* Each packet needs to have a Tx header (metadata) on top of all
1706 * other headers.
1707 */
1708 dev->hard_header_len += MLXSW_TXHDR_LEN;
1710 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
1712 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
1713 mlxsw_sp_port->local_port);
1714 goto err_port_system_port_mapping_set;
1717 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
1719 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
1720 mlxsw_sp_port->local_port);
1721 goto err_port_swid_set;
1724 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
1726 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
1727 mlxsw_sp_port->local_port);
1728 goto err_port_speed_by_width_set;
1731 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
1733 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
1734 mlxsw_sp_port->local_port);
1735 goto err_port_mtu_set;
1738 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1740 goto err_port_admin_status_set;
1742 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
1744 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
1745 mlxsw_sp_port->local_port);
1746 goto err_port_buffers_init;
1749 err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
1751 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
1752 mlxsw_sp_port->local_port);
1753 goto err_port_ets_init;
1756 /* ETS and buffers must be initialized before DCB. */
1757 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
1759 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
1760 mlxsw_sp_port->local_port);
1761 goto err_port_dcb_init;
1764 mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
1765 err = register_netdev(dev);
1767 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
1768 mlxsw_sp_port->local_port);
1769 goto err_register_netdev;
1772 err = mlxsw_core_port_init(mlxsw_sp->core, &mlxsw_sp_port->core_port,
1773 mlxsw_sp_port->local_port, dev,
1774 mlxsw_sp_port->split, module);
1776 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
1777 mlxsw_sp_port->local_port);
1778 goto err_core_port_init;
1781 err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
1783 goto err_port_vlan_init;
1785 mlxsw_sp->ports[local_port] = mlxsw_sp_port;
1789 mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
1791 unregister_netdev(dev);
1792 err_register_netdev:
1795 err_port_buffers_init:
1796 err_port_admin_status_set:
1798 err_port_speed_by_width_set:
1800 err_port_system_port_mapping_set:
1802 free_percpu(mlxsw_sp_port->pcpu_stats);
1804 kfree(mlxsw_sp_port->untagged_vlans);
1805 err_port_untagged_vlans_alloc:
1806 kfree(mlxsw_sp_port->active_vlans);
1807 err_port_active_vlans_alloc:
1812 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
1814 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
1818 mlxsw_sp->ports[local_port] = NULL;
1819 mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
1820 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
1821 mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
1822 mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
1823 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
1824 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
1825 mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
1826 free_percpu(mlxsw_sp_port->pcpu_stats);
1827 kfree(mlxsw_sp_port->untagged_vlans);
1828 kfree(mlxsw_sp_port->active_vlans);
1829 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
1830 free_netdev(mlxsw_sp_port->dev);
1833 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
1837 for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
1838 mlxsw_sp_port_remove(mlxsw_sp, i);
1839 kfree(mlxsw_sp->ports);
1842 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
1844 u8 module, width, lane;
1849 alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
1850 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
1851 if (!mlxsw_sp->ports)
1854 for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
1855 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
1858 goto err_port_module_info_get;
1861 mlxsw_sp->port_to_module[i] = module;
1862 err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width,
1865 goto err_port_create;
1870 err_port_module_info_get:
1871 for (i--; i >= 1; i--)
1872 mlxsw_sp_port_remove(mlxsw_sp, i);
1873 kfree(mlxsw_sp->ports);
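/* Local ports are grouped in clusters of MLXSW_SP_PORTS_PER_CLUSTER_MAX
 * ports; return the first local port of the cluster local_port belongs to.
 */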
1877 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
1879 u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
1881 return local_port - offset;
1884 static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
1885 u8 module, unsigned int count)
1887 u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
1890 for (i = 0; i < count; i++) {
1891 err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
1894 goto err_port_module_map;
1897 for (i = 0; i < count; i++) {
1898 err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
1900 goto err_port_swid_set;
1903 for (i = 0; i < count; i++) {
1904 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
1905 module, width, i * width);
1907 goto err_port_create;
1913 for (i--; i >= 0; i--)
1914 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
1917 for (i--; i >= 0; i--)
1918 __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
1919 MLXSW_PORT_SWID_DISABLED_PORT);
1921 err_port_module_map:
1922 for (i--; i >= 0; i--)
1923 mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
1927 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
1928 u8 base_port, unsigned int count)
1930 u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
1933 /* Split by four means we need to re-create two ports, otherwise
1934 * only one.
1935 */
1936 count = count / 2;
1938 for (i = 0; i < count; i++) {
1939 local_port = base_port + i * 2;
1940 module = mlxsw_sp->port_to_module[local_port];
1942 mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
1946 for (i = 0; i < count; i++)
1947 __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);
1949 for (i = 0; i < count; i++) {
1950 local_port = base_port + i * 2;
1951 module = mlxsw_sp->port_to_module[local_port];
1953 mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
1958 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
1961 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
1962 struct mlxsw_sp_port *mlxsw_sp_port;
1963 u8 module, cur_width, base_port;
1967 mlxsw_sp_port = mlxsw_sp->ports[local_port];
1968 if (!mlxsw_sp_port) {
1969 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
1974 module = mlxsw_sp_port->mapping.module;
1975 cur_width = mlxsw_sp_port->mapping.width;
1977 if (count != 2 && count != 4) {
1978 netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
1982 if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
1983 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
1987 /* Make sure we have enough slave (even) ports for the split. */
1989 base_port = local_port;
1990 if (mlxsw_sp->ports[base_port + 1]) {
1991 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
1995 base_port = mlxsw_sp_cluster_base_port_get(local_port);
1996 if (mlxsw_sp->ports[base_port + 1] ||
1997 mlxsw_sp->ports[base_port + 3]) {
1998 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
2003 for (i = 0; i < count; i++)
2004 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
2006 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
2008 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
2009 goto err_port_split_create;
2014 err_port_split_create:
2015 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
2019 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
2021 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2022 struct mlxsw_sp_port *mlxsw_sp_port;
2023 u8 cur_width, base_port;
2027 mlxsw_sp_port = mlxsw_sp->ports[local_port];
2028 if (!mlxsw_sp_port) {
2029 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
2034 if (!mlxsw_sp_port->split) {
2035 netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
2039 cur_width = mlxsw_sp_port->mapping.width;
2040 count = cur_width == 1 ? 4 : 2;
2042 base_port = mlxsw_sp_cluster_base_port_get(local_port);
2044 /* Determine which ports to remove. */
2045 if (count == 2 && local_port >= base_port + 2)
2046 base_port = base_port + 2;
2048 for (i = 0; i < count; i++)
2049 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
2051 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
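/* PUDE (port up/down event) handler: reflect the operational status
 * reported by the device into the netdev carrier state.
 */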
2056 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2057 char *pude_pl, void *priv)
2059 struct mlxsw_sp *mlxsw_sp = priv;
2060 struct mlxsw_sp_port *mlxsw_sp_port;
2061 enum mlxsw_reg_pude_oper_status status;
2064 local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2065 mlxsw_sp_port = mlxsw_sp->ports[local_port];
2069 status = mlxsw_reg_pude_oper_status_get(pude_pl);
2070 if (status == MLXSW_PORT_OPER_STATUS_UP) {
2071 netdev_info(mlxsw_sp_port->dev, "link up\n");
2072 netif_carrier_on(mlxsw_sp_port->dev);
2074 netdev_info(mlxsw_sp_port->dev, "link down\n");
2075 netif_carrier_off(mlxsw_sp_port->dev);
2079 static struct mlxsw_event_listener mlxsw_sp_pude_event = {
2080 .func = mlxsw_sp_pude_event_func,
2081 .trap_id = MLXSW_TRAP_ID_PUDE,
2084 static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
2085 enum mlxsw_event_trap_id trap_id)
2087 struct mlxsw_event_listener *el;
2088 char hpkt_pl[MLXSW_REG_HPKT_LEN];
2092 case MLXSW_TRAP_ID_PUDE:
2093 el = &mlxsw_sp_pude_event;
2096 err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
2100 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
2101 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2103 goto err_event_trap_set;
2108 mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
2112 static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
2113 enum mlxsw_event_trap_id trap_id)
2115 struct mlxsw_event_listener *el;
2118 case MLXSW_TRAP_ID_PUDE:
2119 el = &mlxsw_sp_pude_event;
2122 mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
2125 static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
2128 struct mlxsw_sp *mlxsw_sp = priv;
2129 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2130 struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
2132 if (unlikely(!mlxsw_sp_port)) {
2133 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
2138 skb->dev = mlxsw_sp_port->dev;
2140 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
2141 u64_stats_update_begin(&pcpu_stats->syncp);
2142 pcpu_stats->rx_packets++;
2143 pcpu_stats->rx_bytes += skb->len;
2144 u64_stats_update_end(&pcpu_stats->syncp);
2146 skb->protocol = eth_type_trans(skb, skb->dev);
2147 netif_receive_skb(skb);
static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_FDB_MC,
	},
	/* Traps for specific L2 packet types, not trapped as FDB MC */
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_STP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LACP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_EAPOL,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LLDP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MMRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MVRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_RPVST,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_DHCP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_ARPBC,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_ARPUC,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IP2ME,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_RTR_INGRESS0,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_HOST_MISS_IPV4,
	},
};

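/* Open the RX and control trap groups and register an Rx listener for each
 * entry in mlxsw_sp_rx_listener[], so that trapped control packets are
 * handed to the kernel as ordinary received skbs.
 */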
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
						      &mlxsw_sp_rx_listener[i],
						      mlxsw_sp);
		if (err)
			goto err_rx_listener_register;

		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
				    mlxsw_sp_rx_listener[i].trap_id);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
		if (err)
			goto err_rx_trap_set;
	}
	return 0;

err_rx_trap_set:
	mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
					  &mlxsw_sp_rx_listener[i],
					  mlxsw_sp);
err_rx_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
	return err;
}

static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
}

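/* Flooding configuration: unknown unicast goes to the UC flood table and
 * all other traffic types to the BM table. VLAN-unaware (vFID) bridges use
 * the FID table type, VLAN-aware bridges the FID-offset type.
 */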
static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
				 enum mlxsw_reg_sfgc_type type,
				 enum mlxsw_reg_sfgc_bridge_type bridge_type)
{
	enum mlxsw_flood_table_type table_type;
	enum mlxsw_sp_flood_table flood_table;
	char sfgc_pl[MLXSW_REG_SFGC_LEN];

	if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
	else
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;

	if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
		flood_table = MLXSW_SP_FLOOD_TABLE_UC;
	else
		flood_table = MLXSW_SP_FLOOD_TABLE_BM;

	mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
			    flood_table);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
}

static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
{
	int type, err;

	for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
		if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
			continue;

		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
					    MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
		if (err)
			return err;

		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
					    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
		if (err)
			return err;
	}

	return 0;
}

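/* Hash LAG traffic over L2, VLAN, L3 and L4 header fields. */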
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];

	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
}

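/* Bring up the ASIC: base MAC, PUDE link events, packet traps, flood
 * tables, shared buffers, LAG, switchdev and the router, and finally the
 * ports. Teardown on error is performed in reverse order.
 */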
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;
	INIT_LIST_HEAD(&mlxsw_sp->fids);
	INIT_LIST_HEAD(&mlxsw_sp->vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
		return err;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
		goto err_rx_listener_register;
	}

	err = mlxsw_sp_flood_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
err_flood_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_rx_listener_register:
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	return err;
}

static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int i;

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
	WARN_ON(!list_empty(&mlxsw_sp->fids));
	for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
		WARN_ON_ONCE(mlxsw_sp->rifs[i]);
}

static struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels = 1,
	.max_vepa_channels = 0,
	.used_max_lag = 1,
	.max_lag = MLXSW_SP_LAG_MAX,
	.used_max_port_per_lag = 1,
	.max_port_per_lag = MLXSW_SP_PORT_PER_LAG_MAX,
	.used_max_mid = 1,
	.max_mid = MLXSW_SP_MID_MAX,
	.used_max_system_port = 1,
	.max_system_port = 64,
	.used_max_vlan_groups = 1,
	.max_vlan_groups = 127,
	.used_max_regions = 1,
	.used_flood_tables = 1,
	.used_flood_mode = 1,
	.max_fid_offset_flood_tables = 2,
	.fid_offset_flood_table_size = VLAN_N_VID - 1,
	.max_fid_flood_tables = 2,
	.fid_flood_table_size = MLXSW_SP_VFID_MAX,
	.used_max_ib_mc = 1,
	.used_kvd_sizes = 1,
	.kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
	.kvd_hash_single_size = MLXSW_SP_KVD_HASH_SINGLE_SIZE,
	.kvd_hash_double_size = MLXSW_SP_KVD_HASH_DOUBLE_SIZE,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

static struct mlxsw_driver mlxsw_sp_driver = {
	.kind = MLXSW_DEVICE_KIND_SPECTRUM,
	.owner = THIS_MODULE,
	.priv_size = sizeof(struct mlxsw_sp),
	.init = mlxsw_sp_init,
	.fini = mlxsw_sp_fini,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp_config_profile,
};

static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	netdev_for_each_all_lower_dev(dev, lower_dev, iter) {
		if (mlxsw_sp_port_dev_check(lower_dev))
			return netdev_priv(lower_dev);
	}

	return NULL;
}

static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}

static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	netdev_for_each_all_lower_dev_rcu(dev, lower_dev, iter) {
		if (mlxsw_sp_port_dev_check(lower_dev))
			return netdev_priv(lower_dev);
	}

	return NULL;
}

struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();
	return mlxsw_sp_port;
}

void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}

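/* Decide whether an inetaddr event should change RIF configuration:
 * NETDEV_UP only when the netdev does not have a RIF yet, NETDEV_DOWN only
 * when it still does.
 */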
static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r,
				       unsigned long event)
{
	switch (event) {
	case NETDEV_UP:
		if (!r)
			return true;
		break;
	case NETDEV_DOWN:
		if (r && !WARN_ON(r->ref_count == 0))
			return true;

		/* It is possible we already removed the RIF ourselves
		 * if it was assigned to a netdev that is now a bridge
		 * or LAG slave.
		 */
		break;
	}

	return false;
}

static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
		if (!mlxsw_sp->rifs[i])
			return i;

	return MLXSW_SP_RIF_MAX;
}

static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
					   bool *p_lagged, u16 *p_system_port)
{
	u8 local_port = mlxsw_sp_vport->local_port;

	*p_lagged = mlxsw_sp_vport->lagged;
	*p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
}

static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
				    struct net_device *l3_dev, u16 rif,
				    bool create)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	bool lagged = mlxsw_sp_vport->lagged;
	char ritr_pl[MLXSW_REG_RITR_LEN];
	u16 system_port;

	mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif,
			    l3_dev->mtu, l3_dev->dev_addr);

	mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
	mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
				  mlxsw_sp_vport_vid_get(mlxsw_sp_vport));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);

static struct mlxsw_sp_fid *
mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
{
	struct mlxsw_sp_fid *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->leave = mlxsw_sp_vport_rif_sp_leave;
	f->dev = l3_dev;
	f->fid = fid;

	return f;
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_alloc(u16 rif, struct net_device *l3_dev, struct mlxsw_sp_fid *f)
{
	struct mlxsw_sp_rif *r;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	ether_addr_copy(r->addr, l3_dev->dev_addr);
	r->mtu = l3_dev->mtu;
	r->dev = l3_dev;
	r->rif = rif;
	r->f = f;

	return r;
}

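/* Create a Sub-port RIF for a vPort: reserve a free RIF index, program the
 * RITR register, install an FDB entry for the netdev's MAC and bind the
 * new rFID and RIF structures together.
 */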
static struct mlxsw_sp_rif *
mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
			     struct net_device *l3_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct mlxsw_sp_fid *f;
	struct mlxsw_sp_rif *r;
	u16 fid, rif;
	int err;

	rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
	if (rif == MLXSW_SP_RIF_MAX)
		return ERR_PTR(-ERANGE);

	err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true);
	if (err)
		return ERR_PTR(err);

	fid = mlxsw_sp_rif_sp_to_fid(rif);
	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
	if (err)
		goto err_rif_fdb_op;

	f = mlxsw_sp_rfid_alloc(fid, l3_dev);
	if (!f) {
		err = -ENOMEM;
		goto err_rfid_alloc;
	}

	r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
	if (!r) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}

	f->r = r;
	mlxsw_sp->rifs[rif] = r;

	return r;

err_rif_alloc:
	kfree(f);
err_rfid_alloc:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
err_rif_fdb_op:
	mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
	return ERR_PTR(err);
}

static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
					  struct mlxsw_sp_rif *r)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct net_device *l3_dev = r->dev;
	struct mlxsw_sp_fid *f = r->f;
	u16 fid = f->fid;
	u16 rif = r->rif;

	mlxsw_sp->rifs[rif] = NULL;
	f->r = NULL;

	kfree(r);

	kfree(f);

	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);

	mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
}

static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *l3_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct mlxsw_sp_rif *r;

	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (!r) {
		r = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
		if (IS_ERR(r))
			return PTR_ERR(r);
	}

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, r->f);
	r->f->ref_count++;

	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", r->f->fid);

	return 0;
}

static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
	if (--f->ref_count == 0)
		mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->r);
}

static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
					 struct net_device *port_dev,
					 unsigned long event, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return -EINVAL;

	switch (event) {
	case NETDEV_UP:
		return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
	case NETDEV_DOWN:
		mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
		break;
	}

	return 0;
}

static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
					unsigned long event)
{
	if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev))
		return 0;

	return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
}

static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
					 struct net_device *lag_dev,
					 unsigned long event, u16 vid)
{
	struct net_device *port_dev;
	struct list_head *iter;
	int err;

	netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
		if (mlxsw_sp_port_dev_check(port_dev)) {
			err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
							    event, vid);
			if (err)
				return err;
		}
	}

	return 0;
}

static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
				       unsigned long event)
{
	if (netif_is_bridge_port(lag_dev))
		return 0;

	return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
}

static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
						    struct net_device *l3_dev)
{
	u16 fid;

	if (is_vlan_dev(l3_dev))
		fid = vlan_dev_vlan_id(l3_dev);
	else if (mlxsw_sp->master_bridge.dev == l3_dev)
		fid = 1;
	else
		return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);

	return mlxsw_sp_fid_find(mlxsw_sp, fid);
}

static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
{
	if (mlxsw_sp_fid_is_vfid(fid))
		return MLXSW_REG_RITR_FID_IF;
	else
		return MLXSW_REG_RITR_VLAN_IF;
}

static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *l3_dev,
				  u16 fid, u16 rif,
				  bool create)
{
	enum mlxsw_reg_ritr_if_type rif_type;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	rif_type = mlxsw_sp_rif_type_get(fid);
	mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, l3_dev->mtu,
			    l3_dev->dev_addr);
	mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
				      struct net_device *l3_dev,
				      struct mlxsw_sp_fid *f)
{
	struct mlxsw_sp_rif *r;
	u16 rif;
	int err;

	rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
	if (rif == MLXSW_SP_RIF_MAX)
		return -ERANGE;

	err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true);
	if (err)
		return err;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
	if (err)
		goto err_rif_fdb_op;

	r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
	if (!r) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}

	f->r = r;
	mlxsw_sp->rifs[rif] = r;

	netdev_dbg(l3_dev, "RIF=%d created\n", rif);

	return 0;

err_rif_alloc:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
err_rif_fdb_op:
	mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
	return err;
}

void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_rif *r)
{
	struct net_device *l3_dev = r->dev;
	struct mlxsw_sp_fid *f = r->f;
	u16 rif = r->rif;

	mlxsw_sp->rifs[rif] = NULL;
	f->r = NULL;

	kfree(r);

	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);

	mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);

	netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif);
}

static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
					  struct net_device *br_dev,
					  unsigned long event)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
	struct mlxsw_sp_fid *f;

	/* FID can either be an actual FID if the L3 device is the
	 * VLAN-aware bridge or a VLAN device on top. Otherwise, the
	 * L3 device is a VLAN-unaware bridge and we get a vFID.
	 */
	f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
	if (WARN_ON(!f))
		return -EINVAL;

	switch (event) {
	case NETDEV_UP:
		return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
	case NETDEV_DOWN:
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
		break;
	}

	return 0;
}

static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
					unsigned long event)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
						     vid);
	else if (netif_is_lag_master(real_dev))
		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
						     vid);
	else if (netif_is_bridge_master(real_dev) &&
		 mlxsw_sp->master_bridge.dev == real_dev)
		return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
						      event);

	return 0;
}

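/* inetaddr notifier: dispatch IP address additions and removals to the
 * handler matching the netdev type (port, LAG, bridge or VLAN upper).
 */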
static int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
				   unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *r;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		goto out;

	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(r, event))
		goto out;

	if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_inetaddr_port_event(dev, event);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_inetaddr_lag_event(dev, event);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_inetaddr_vlan_event(dev, event);

out:
	return notifier_from_errno(err);
}

static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif,
			     const char *mac, int mtu)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
{
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *r;
	int err;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return 0;

	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!r)
		return 0;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, false);
	if (err)
		return err;

	err = mlxsw_sp_rif_edit(mlxsw_sp, r->rif, dev->dev_addr, dev->mtu);
	if (err)
		goto err_rif_edit;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, r->f->fid, true);
	if (err)
		goto err_rif_fdb_op;

	ether_addr_copy(r->addr, dev->dev_addr);
	r->mtu = dev->mtu;

	netdev_dbg(dev, "Updated RIF=%d\n", r->rif);

	return 0;

err_rif_fdb_op:
	mlxsw_sp_rif_edit(mlxsw_sp, r->rif, r->addr, r->mtu);
err_rif_edit:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, true);
	return err;
}

static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
					 u16 fid)
{
	if (mlxsw_sp_fid_is_vfid(fid))
		return mlxsw_sp_port_vport_find_by_fid(lag_port, fid);
	else
		return test_bit(fid, lag_port->active_vlans);
}

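/* FDB entries pointing at a LAG may only be flushed if no other LAG member
 * port still uses the FID; a non-LAG port can always be flushed.
 */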
static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
					   u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u16 lag_id = mlxsw_sp_port->lag_id;
	int i, count = 0;

	if (!mlxsw_sp_port->lagged)
		return true;

	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
		struct mlxsw_sp_port *lag_port;

		lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
		if (!lag_port || lag_port->local_port == local_port)
			continue;
		if (mlxsw_sp_lag_port_fid_member(lag_port, fid))
			count++;
	}

	return !count;
}

static int
mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
						mlxsw_sp_port->local_port);

	netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n",
		   mlxsw_sp_port->local_port, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

static int
mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);

	netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n",
		   mlxsw_sp_port->lag_id, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid))
		return 0;

	if (mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port,
							     fid);
	else
		return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid);
}

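/* Called when the last port leaves the VLAN-aware bridge: drop the FID
 * references still held on behalf of upper VLAN devices and free the FIDs
 * whose reference count reaches zero.
 */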
static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_fid *f, *tmp;

	list_for_each_entry_safe(f, tmp, &mlxsw_sp->fids, list)
		if (--f->ref_count == 0)
			mlxsw_sp_fid_destroy(mlxsw_sp, f);
		else
			WARN_ON_ONCE(1);
}

static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
					 struct net_device *br_dev)
{
	return !mlxsw_sp->master_bridge.dev ||
	       mlxsw_sp->master_bridge.dev == br_dev;
}

static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	mlxsw_sp->master_bridge.dev = br_dev;
	mlxsw_sp->master_bridge.ref_count++;
}

static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
{
	if (--mlxsw_sp->master_bridge.ref_count == 0) {
		mlxsw_sp->master_bridge.dev = NULL;
		/* It's possible upper VLAN devices are still holding
		 * references to underlying FIDs. Drop the reference
		 * and release the resources if it was the last one.
		 * If it wasn't, then something bad happened.
		 */
		mlxsw_sp_master_bridge_gone_sync(mlxsw_sp);
	}
}

static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct net_device *br_dev)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* When port is not bridged untagged packets are tagged with
	 * PVID=VID=1, thereby creating an implicit VLAN interface in
	 * the device. Remove it and let bridge code take care of its
	 * own VLANs.
	 */
	err = mlxsw_sp_port_kill_vid(dev, 0, 1);
	if (err)
		return err;

	mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev);

	mlxsw_sp_port->learning = 1;
	mlxsw_sp_port->learning_sync = 1;
	mlxsw_sp_port->uc_flood = 1;
	mlxsw_sp_port->bridged = 1;

	return 0;
}

static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;

	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);

	mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp);

	mlxsw_sp_port->learning = 0;
	mlxsw_sp_port->learning_sync = 0;
	mlxsw_sp_port->uc_flood = 0;
	mlxsw_sp_port->bridged = 0;

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	mlxsw_sp_port_add_vid(dev, 0, 1);
}

static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	int i;

	for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
		return false;
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
		return false;
	return true;
}

static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

static void
mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  u16 lag_id)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	/* If vPort is assigned a RIF, then leave it since it's no
	 * longer valid.
	 */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f)
		f->leave(mlxsw_sp_vport);

	mlxsw_sp_vport->lag_id = lag_id;
	mlxsw_sp_vport->lagged = 1;
}

static void
mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f)
		f->leave(mlxsw_sp_vport);

	mlxsw_sp_vport->lagged = 0;
}

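/* Add a port to a LAG: allocate or reuse a LAG index, add the port to the
 * collector and enable it, and only then mark the port as lagged so that
 * its PVID vPort can be migrated to the LAG.
 */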
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;
	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
	if (err)
		goto err_col_port_enable;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_id);

	return 0;

err_col_port_enable:
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	if (mlxsw_sp_port->bridged) {
		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
	}

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port);
}

static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       bool lag_tx_enabled)
{
	if (lag_tx_enabled)
		return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
						  mlxsw_sp_port->lag_id);
	else
		return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
						     mlxsw_sp_port->lag_id);
}

static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}

static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return -EINVAL;

	mlxsw_sp_vport->dev = vlan_dev;

	return 0;
}

static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
}

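/* Upper device notifications for physical ports: validate the requested
 * topology on PRECHANGEUPPER and reflect VLAN, bridge and LAG linking on
 * CHANGEUPPER.
 */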
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* HW limitation forbids to put ports to multiple bridges. */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
							      upper_dev);
			else
				mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
							  upper_dev);
		} else if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								upper_dev);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			else
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
		}
		break;
	}

	return err;
}

static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
	}

	return 0;
}

static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp *mlxsw_sp,
					    struct net_device *vlan_dev)
{
	u16 fid = vlan_dev_vlan_id(vlan_dev);
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp, fid);
	if (!f) {
		f = mlxsw_sp_fid_create(mlxsw_sp, fid);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	f->ref_count++;

	return 0;
}

static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
					       struct net_device *vlan_dev)
{
	u16 fid = vlan_dev_vlan_id(vlan_dev);
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp, fid);
	if (f && f->r)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
	if (f && --f->ref_count == 0)
		mlxsw_sp_fid_destroy(mlxsw_sp, f);
}

static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err;

	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		return 0;
	if (br_dev != mlxsw_sp->master_bridge.dev)
		return 0;

	info = ptr;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev))
			break;
		if (info->linking) {
			err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
							       upper_dev);
			if (err)
				return err;
		} else {
			mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp, upper_dev);
		}
		break;
	}

	return 0;
}

static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->vfids.mapped,
				   MLXSW_SP_VFID_MAX);
}

static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);

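/* vFIDs back VLAN-unaware bridges. Allocate a free vFID, create the
 * corresponding FID in the device via SFMR and track it on the vfids list.
 */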
static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
						 struct net_device *br_dev)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_fid *f;
	u16 vfid, fid;
	int err;

	vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
	if (vfid == MLXSW_SP_VFID_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	fid = mlxsw_sp_vfid_to_fid(vfid);
	err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
	if (err) {
		dev_err(dev, "Failed to create FID=%d\n", fid);
		return ERR_PTR(err);
	}

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		goto err_allocate_vfid;

	f->leave = mlxsw_sp_vport_vfid_leave;
	f->fid = fid;
	f->dev = br_dev;

	list_add(&f->list, &mlxsw_sp->vfids.list);
	set_bit(vfid, mlxsw_sp->vfids.mapped);

	return f;

err_allocate_vfid:
	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
	return ERR_PTR(-ENOMEM);
}

static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fid *f)
{
	u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
	u16 fid = f->fid;

	clear_bit(vfid, mlxsw_sp->vfids.mapped);
	list_del(&f->list);

	if (f->r)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);

	kfree(f);

	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
}

static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
				  bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
					    vid);
}

static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				    struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f;
	int err;

	f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
	if (!f) {
		f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_flood_set;

	err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_fid_map;

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
	f->ref_count++;

	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);

	return 0;

err_vport_fid_map:
	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
err_vport_flood_set:
	if (!f->ref_count)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
	return err;
}

static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);

	mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
	if (--f->ref_count == 0)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
}

static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	int err;

	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev);
	if (err) {
		netdev_err(dev, "Failed to join vFID\n");
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning\n");
		goto err_port_vid_learning_set;
	}

	mlxsw_sp_vport->learning = 1;
	mlxsw_sp_vport->learning_sync = 1;
	mlxsw_sp_vport->uc_flood = 1;
	mlxsw_sp_vport->bridged = 1;

	return 0;

err_port_vid_learning_set:
	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
	return err;
}

static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);

	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);

	mlxsw_sp_vport->learning = 0;
	mlxsw_sp_vport->learning_sync = 0;
	mlxsw_sp_vport->uc_flood = 0;
	mlxsw_sp_vport->bridged = 0;
}

static bool
mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
				  const struct net_device *br_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport);

		if (dev && dev == br_dev)
			return false;
	}

	return true;
}

static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
					  unsigned long event, void *ptr,
					  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct net_device *upper_dev;
	int err = 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* We can't have multiple VLAN interfaces configured on
		 * the same port and being members in the same bridge.
		 */
		if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
						       upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking) {
			if (WARN_ON(!mlxsw_sp_vport))
				return -EINVAL;
			err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
							 upper_dev);
		} else {
			if (!mlxsw_sp_vport)
				return 0;
			mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
		}
	}

	return err;
}

static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
							     vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
						      vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
							  vid);

	return 0;
}

static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err = 0;

	if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};

static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_event,
	.priority = 10,	/* Must be called before FIB notifier block */
};

static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;
	return 0;

err_core_driver_register:
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);