/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"

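/* This file implements the switchdev ops for Spectrum ports: bridge port
 * attributes (STP state, bridge flags, ageing time, VLAN filtering),
 * port objects (VLANs, static FDB entries, MDB entries) as well as the
 * processing of learned/aged-out FDB notifications from the device.
 */
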
static u16 mlxsw_sp_port_vid_to_fid_get(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 vid)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
	u16 fid = vid;

	fid = f ? f->fid : fid;

	if (!fid)
		fid = mlxsw_sp_port->pvid;

	return fid;
}

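/* Switchdev calls may arrive on a VLAN device on top of a port. In that
 * case the call is redirected to the vPort representing that {port, VID}
 * pair; otherwise the physical port is used as-is.
 */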
static struct mlxsw_sp_port *
mlxsw_sp_port_orig_get(struct net_device *dev,
		       struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid;

	if (!is_vlan_dev(dev))
		return mlxsw_sp_port;

	vid = vlan_dev_vlan_id(dev);
	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	WARN_ON(!mlxsw_sp_vport);

	return mlxsw_sp_vport;
}

static int mlxsw_sp_port_attr_get(struct net_device *dev,
				  struct switchdev_attr *attr)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
		       attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		attr->u.brport_flags =
			(mlxsw_sp_port->learning ? BR_LEARNING : 0) |
			(mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
			(mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u8 state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	switch (state) {
	case BR_STATE_FORWARDING:
		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
		break;
	case BR_STATE_LEARNING:
		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
		break;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_BLOCKING:
		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
		break;
	default:
		BUG();
	}

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	} else {
		for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
			mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	}

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    u8 state)
{
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mlxsw_sp_port->stp_state = state;
	return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
}

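/* Flood table indexing differs between the two port modes: for vPorts the
 * index is the vFID itself (table type FID), while for VLAN-aware ports
 * the VID is used as an offset into the table (table type FID_OFFEST).
 */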
static bool mlxsw_sp_vfid_is_vport_br(u16 vfid)
{
	return vfid >= MLXSW_SP_VFID_PORT_MAX;
}

static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 idx_begin, u16 idx_end, bool set,
				     bool only_uc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 local_port = mlxsw_sp_port->local_port;
	enum mlxsw_flood_table_type table_type;
	u16 range = idx_end - idx_begin + 1;
	char *sftr_pl;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
		if (mlxsw_sp_vfid_is_vport_br(idx_begin))
			local_port = mlxsw_sp_port->local_port;
		else
			local_port = MLXSW_PORT_CPU_PORT;
	} else {
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
	}

	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
	if (!sftr_pl)
		return -ENOMEM;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
			    table_type, range, local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
	if (err)
		goto buffer_out;

	/* Flooding control allows one to decide whether a given port will
	 * flood unicast traffic for which there is no FDB entry.
	 */
	if (only_uc)
		goto buffer_out;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
			    table_type, range, local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
	if (err)
		goto err_flood_bm_set;

	goto buffer_out;

err_flood_bm_set:
	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
			    table_type, range, local_port, !set);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
buffer_out:
	kfree(sftr_pl);
	return err;
}

static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool set)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, last_visited_vid;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 fid = mlxsw_sp_vport_fid_get(mlxsw_sp_port)->fid;
		u16 vfid = mlxsw_sp_fid_to_vfid(fid);

		return __mlxsw_sp_port_flood_set(mlxsw_sp_port, vfid, vfid,
						 set, true);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set,
						true);
		if (err) {
			last_visited_vid = vid;
			goto err_port_flood_set;
		}
	}

	return 0;

err_port_flood_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		__mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, !set, true);
	netdev_err(dev, "Failed to configure unicast flooding\n");
	return err;
}

int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
			     bool set)
{
	u16 vfid;

	/* In case of vFIDs, index into the flooding table is relative to
	 * the start of the vFIDs range.
	 */
	vfid = mlxsw_sp_fid_to_vfid(fid);
	return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set,
					 false);
}

static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct switchdev_trans *trans,
					   unsigned long brport_flags)
{
	unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
	bool set;
	int err;

	if (!mlxsw_sp_port->bridged)
		return -EINVAL;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if ((uc_flood ^ brport_flags) & BR_FLOOD) {
		set = mlxsw_sp_port->uc_flood ? false : true;
		err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, set);
		if (err)
			return err;
	}

	mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
	mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
	mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;

	return 0;
}

static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
{
	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
	int err;

	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
	if (err)
		return err;
	mlxsw_sp->ageing_time = ageing_time;
	return 0;
}

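/* The bridge passes the ageing time as a clock_t value. As an example,
 * the bridge default of 300 seconds arrives as 300 * USER_HZ ticks and
 * is converted back to whole seconds below before being range-checked
 * and programmed via the SFDAT register.
 */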
static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    unsigned long ageing_clock_t)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;

	if (switchdev_trans_ph_prepare(trans)) {
		if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
		    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
			return -ERANGE;
		else
			return 0;
	}

	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
}

static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct switchdev_trans *trans,
					  struct net_device *orig_dev,
					  bool vlan_enabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	/* SWITCHDEV_TRANS_PREPARE phase */
	if ((!vlan_enabled) && (mlxsw_sp->master_bridge.dev == orig_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Bridge must be vlan-aware\n");
		return -EINVAL;
	}

	return 0;
}

static int mlxsw_sp_port_attr_set(struct net_device *dev,
				  const struct switchdev_attr *attr,
				  struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
						       attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.vlan_filtering);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static struct mlxsw_sp_fid *mlxsw_sp_fid_find(struct mlxsw_sp *mlxsw_sp,
					      u16 fid)
{
	struct mlxsw_sp_fid *f;

	list_for_each_entry(f, &mlxsw_sp->fids, list)
		if (f->fid == fid)
			return f;

	return NULL;
}

static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, fid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static int mlxsw_sp_fid_map(struct mlxsw_sp *mlxsw_sp, u16 fid, bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, 0, mt, valid, fid, fid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}

static struct mlxsw_sp_fid *mlxsw_sp_fid_alloc(u16 fid)
{
	struct mlxsw_sp_fid *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->fid = fid;

	return f;
}

static struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp,
						u16 fid)
{
	struct mlxsw_sp_fid *f;
	int err;

	err = mlxsw_sp_fid_op(mlxsw_sp, fid, true);
	if (err)
		return ERR_PTR(err);

	/* Although all the ports member in the FID might be using a
	 * {Port, VID} to FID mapping, we create a global VID-to-FID
	 * mapping. This allows a port to transition to VLAN mode,
	 * knowing the global mapping exists.
	 */
	err = mlxsw_sp_fid_map(mlxsw_sp, fid, true);
	if (err)
		goto err_fid_map;

	f = mlxsw_sp_fid_alloc(fid);
	if (!f) {
		err = -ENOMEM;
		goto err_allocate_fid;
	}

	list_add(&f->list, &mlxsw_sp->fids);

	return f;

err_allocate_fid:
	mlxsw_sp_fid_map(mlxsw_sp, fid, false);
err_fid_map:
	mlxsw_sp_fid_op(mlxsw_sp, fid, false);
	return ERR_PTR(err);
}

static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fid *f)
{
	u16 fid = f->fid;

	list_del(&f->list);

	kfree(f);

	mlxsw_sp_fid_op(mlxsw_sp, fid, false);
}

static int __mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid)
{
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
	if (!f) {
		f = mlxsw_sp_fid_create(mlxsw_sp_port->mlxsw_sp, fid);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	f->ref_count++;

	netdev_dbg(mlxsw_sp_port->dev, "Joined FID=%d\n", fid);

	return 0;
}

static void __mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 fid)
{
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
	if (WARN_ON(!f))
		return;

	netdev_dbg(mlxsw_sp_port->dev, "Left FID=%d\n", fid);

	mlxsw_sp_port_fdb_flush(mlxsw_sp_port, fid);

	if (--f->ref_count == 0)
		mlxsw_sp_fid_destroy(mlxsw_sp_port->mlxsw_sp, f);
}

static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid,
				 bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;

	/* If port doesn't have vPorts, then it can use the global
	 * VID-to-FID mapping.
	 */
	if (list_empty(&mlxsw_sp_port->vports_list))
		return 0;

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, valid, fid, fid);
}

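/* Joining a range of FIDs is a three step sequence: take a reference on
 * each FID (creating it if needed), enable flooding for the range and
 * finally set up the {Port, VID} to FID mappings. Errors unwind the
 * steps in reverse order.
 */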
static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  u16 fid_begin, u16 fid_end)
{
	int fid, err;

	for (fid = fid_begin; fid <= fid_end; fid++) {
		err = __mlxsw_sp_port_fid_join(mlxsw_sp_port, fid);
		if (err)
			goto err_port_fid_join;
	}

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end,
					true, false);
	if (err)
		goto err_port_flood_set;

	for (fid = fid_begin; fid <= fid_end; fid++) {
		err = mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, true);
		if (err)
			goto err_port_fid_map;
	}

	return 0;

err_port_fid_map:
	for (fid--; fid >= fid_begin; fid--)
		mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);
	__mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
				  false);
err_port_flood_set:
	fid = fid_end + 1;
err_port_fid_join:
	for (fid--; fid >= fid_begin; fid--)
		__mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
	return err;
}

static void mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid_begin, u16 fid_end)
{
	int fid;

	for (fid = fid_begin; fid <= fid_end; fid++)
		mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);

	__mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
				  false);

	for (fid = fid_begin; fid <= fid_end; fid++)
		__mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
}

static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

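/* A PVID of 0 is used to disallow untagged traffic altogether, while a
 * non-zero PVID selects the VLAN that untagged ingress traffic is
 * internally tagged with.
 */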
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err) {
			netdev_err(dev, "Failed to disallow untagged traffic\n");
			return err;
		}
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to set PVID\n");
			return err;
		}

		/* Only allow if not already allowed. */
		if (!mlxsw_sp_port->pvid) {
			err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port,
							       true);
			if (err) {
				netdev_err(dev, "Failed to allow untagged traffic\n");
				goto err_port_allow_untagged_set;
			}
		}
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
	return err;
}

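/* Port VLAN membership is written through the SPVM register, which takes
 * at most MLXSW_REG_SPVM_REC_MAX_COUNT records per transaction, so large
 * VID ranges are programmed in chunks.
 */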
static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end, bool is_member,
				     bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					     is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool flag_untagged, bool flag_pvid)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, old_pvid;
	int err;

	if (!mlxsw_sp_port->bridged)
		return -EINVAL;

	err = mlxsw_sp_port_fid_join(mlxsw_sp_port, vid_begin, vid_end);
	if (err) {
		netdev_err(dev, "Failed to join FIDs\n");
		return err;
	}

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
					true, flag_untagged);
	if (err) {
		netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin,
			   vid_end);
		goto err_port_vlans_set;
	}

	old_pvid = mlxsw_sp_port->pvid;
	if (flag_pvid && old_pvid != vid_begin) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid_begin);
		if (err) {
			netdev_err(dev, "Unable to add PVID %d\n", vid_begin);
			goto err_port_pvid_set;
		}
	} else if (!flag_pvid && old_pvid >= vid_begin && old_pvid <= vid_end) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
		if (err) {
			netdev_err(dev, "Unable to del PVID\n");
			goto err_port_pvid_set;
		}
	}

	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++) {
		set_bit(vid, mlxsw_sp_port->active_vlans);
		if (flag_untagged)
			set_bit(vid, mlxsw_sp_port->untagged_vlans);
		else
			clear_bit(vid, mlxsw_sp_port->untagged_vlans);
	}

	/* STP state change must be done after we set active VLANs */
	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
					  mlxsw_sp_port->stp_state);
	if (err) {
		netdev_err(dev, "Failed to set STP state\n");
		goto err_port_stp_state_set;
	}

	return 0;

err_port_stp_state_set:
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);
	if (old_pvid != mlxsw_sp_port->pvid)
		mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
	__mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
				  false);
err_port_vlans_set:
	mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
	return err;
}

static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans)
{
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end,
					 flag_untagged, flag_pvid);
}

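/* switchdev objects are added in two phases: during the prepare phase we
 * only validate and allocate, while device state is actually changed in
 * the commit phase. VLAN additions do all their work in the commit phase.
 */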
static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
{
	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
			 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
}

static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
{
	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
			MLXSW_REG_SFD_OP_WRITE_REMOVE;
}

static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				     const char *mac, u16 fid, bool adding,
				     enum mlxsw_reg_sfd_rec_action action,
				     bool dynamic)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
			      mac, fid, action, local_port);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}

static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				   const char *mac, u16 fid, bool adding,
				   bool dynamic)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
}

int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
			bool adding)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
					 false);
}

static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
				       const char *mac, u16 fid, u16 lag_vid,
				       bool adding, bool dynamic)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
				  lag_vid, lag_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}

static int
mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb,
			     struct switchdev_trans *trans)
{
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
	u16 lag_vid = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
	}

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
					       mlxsw_sp_port->local_port,
					       fdb->addr, fid, true, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,
						   true, false);
}

static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
				u16 fid, u16 mid, bool adding)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
			      MLXSW_REG_SFD_REC_ACTION_NOP, mid);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);
	return err;
}

static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid,
				  bool add, bool clear_all_ports)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *smid_pl;
	int err, i;

	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	mlxsw_reg_smid_pack(smid_pl, mid, mlxsw_sp_port->local_port, add);
	if (clear_all_ports) {
		for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
			if (mlxsw_sp->ports[i])
				mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
	}
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}

static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp,
					      const unsigned char *addr,
					      u16 vid)
{
	struct mlxsw_sp_mid *mid;

	list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) {
		if (ether_addr_equal(mid->addr, addr) && mid->vid == vid)
			return mid;
	}
	return NULL;
}

static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
						const unsigned char *addr,
						u16 vid)
{
	struct mlxsw_sp_mid *mid;
	u16 mid_idx;

	mid_idx = find_first_zero_bit(mlxsw_sp->br_mids.mapped,
				      MLXSW_SP_MID_MAX);
	if (mid_idx == MLXSW_SP_MID_MAX)
		return NULL;

	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
	if (!mid)
		return NULL;

	set_bit(mid_idx, mlxsw_sp->br_mids.mapped);
	ether_addr_copy(mid->addr, addr);
	mid->vid = vid;
	mid->mid = mid_idx;
	mid->ref_count = 0;
	list_add_tail(&mid->list, &mlxsw_sp->br_mids.list);

	return mid;
}

static int __mlxsw_sp_mc_dec_ref(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_mid *mid)
{
	if (--mid->ref_count == 0) {
		list_del(&mid->list);
		clear_bit(mid->mid, mlxsw_sp->br_mids.mapped);
		kfree(mid);
		return 1;
	}
	return 0;
}

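/* MC groups are reference counted: the first port joining a group
 * allocates a MID index and writes the MC record, and the last port
 * leaving removes the record and frees the index again.
 */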
static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_mid *mid;
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
	int err = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
	if (!mid) {
		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, mdb->vid);
		if (!mid) {
			netdev_err(dev, "Unable to allocate MC group\n");
			return -ENOMEM;
		}
	}
	mid->ref_count++;

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true,
				     mid->ref_count == 1);
	if (err) {
		netdev_err(dev, "Unable to set SMID\n");
		goto err_out;
	}

	if (mid->ref_count == 1) {
		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid->mid,
					   true);
		if (err) {
			netdev_err(dev, "Unable to set MC SFD\n");
			goto err_out;
		}
	}

	return 0;

err_out:
	__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid);
	return err;
}

static int mlxsw_sp_port_obj_add(struct net_device *dev,
				 const struct switchdev_obj *obj,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
			return 0;

		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj),
					      trans);
		break;
	case SWITCHDEV_OBJ_ID_IPV4_FIB:
		err = mlxsw_sp_router_fib4_add(mlxsw_sp_port,
					       SWITCHDEV_OBJ_IPV4_FIB(obj),
					       trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj),
						   trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj),
					    trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end, bool init)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, pvid;
	int err;

	if (!init && !mlxsw_sp_port->bridged)
		return -EINVAL;

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
					false, false);
	if (err) {
		netdev_err(dev, "Unable to del VIDs %d-%d\n", vid_begin,
			   vid_end);
		return err;
	}

	if (init)
		goto out;

	pvid = mlxsw_sp_port->pvid;
	if (pvid >= vid_begin && pvid <= vid_end) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
		if (err) {
			netdev_err(dev, "Unable to del PVID %d\n", pvid);
			return err;
		}
	}

	mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);

out:
	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);

	return 0;
}

static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	return __mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end, false);
}

void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
		__mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid, false);
}

static int
mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb)
{
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
	u16 lag_vid = 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
	}

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
					       mlxsw_sp_port->local_port,
					       fdb->addr, fid,
					       false, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,
						   false, false);
}

static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_mid *mid;
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
	u16 mid_idx;
	int err = 0;

	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
	if (!mid) {
		netdev_err(dev, "Unable to remove port from MC DB\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false, false);
	if (err)
		netdev_err(dev, "Unable to remove port from SMID\n");

	mid_idx = mid->mid;
	if (__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid)) {
		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid_idx,
					   false);
		if (err)
			netdev_err(dev, "Unable to remove MC SFD\n");
	}

	return err;
}

static int mlxsw_sp_port_obj_del(struct net_device *dev,
				 const struct switchdev_obj *obj)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
			return 0;

		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_IPV4_FIB:
		err = mlxsw_sp_router_fib4_del(mlxsw_sp_port,
					       SWITCHDEV_OBJ_IPV4_FIB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
						   u16 lag_id)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
		if (mlxsw_sp_port)
			return mlxsw_sp_port;
	}
	return NULL;
}

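/* FDB dump is performed via an SFD query session in firmware. Once
 * started, the session must be run to completion, so records keep being
 * fetched even after a callback error and the error is only returned at
 * the end.
 */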
static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct switchdev_obj_port_fdb *fdb,
				  switchdev_obj_dump_cb_t *cb,
				  struct net_device *orig_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *tmp;
	struct mlxsw_sp_fid *f;
	u16 vport_fid;
	char *sfd_pl;
	char mac[ETH_ALEN];
	u16 fid;
	u8 local_port;
	u16 lag_id;
	u8 num_rec;
	int stored_err = 0;
	int i;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
	vport_fid = f ? f->fid : 0;

	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
	do {
		mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
		if (err)
			goto out;

		num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);

		/* Even in case of error, we have to run the dump to the end
		 * so the session in firmware is finished.
		 */
		if (stored_err)
			continue;

		for (i = 0; i < num_rec; i++) {
			switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
			case MLXSW_REG_SFD_REC_TYPE_UNICAST:
				mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid,
							&local_port);
				if (local_port == mlxsw_sp_port->local_port) {
					if (vport_fid && vport_fid == fid)
						fdb->vid = 0;
					else if (!vport_fid &&
						 !mlxsw_sp_fid_is_vfid(fid))
						fdb->vid = fid;
					else
						continue;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
				mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
							    mac, &fid, &lag_id);
				tmp = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
				if (tmp && tmp->local_port ==
				    mlxsw_sp_port->local_port) {
					/* LAG records can only point to LAG
					 * devices or VLAN devices on top.
					 */
					if (!netif_is_lag_master(orig_dev) &&
					    !is_vlan_dev(orig_dev))
						continue;
					if (vport_fid && vport_fid == fid)
						fdb->vid = 0;
					else if (!vport_fid &&
						 !mlxsw_sp_fid_is_vfid(fid))
						fdb->vid = fid;
					else
						continue;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			}
		}
	} while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);

out:
	kfree(sfd_pl);
	return stored_err ? stored_err : err;
}

static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct switchdev_obj_port_vlan *vlan,
				   switchdev_obj_dump_cb_t *cb)
{
	u16 vid;
	int err = 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vlan->flags = 0;
		vlan->vid_begin = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		vlan->vid_end = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		return cb(&vlan->obj);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		vlan->flags = 0;
		if (vid == mlxsw_sp_port->pvid)
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
		if (test_bit(vid, mlxsw_sp_port->untagged_vlans))
			vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
		vlan->vid_begin = vid;
		vlan->vid_end = vid;
		err = cb(&vlan->obj);
		if (err)
			break;
	}
	return err;
}

static int mlxsw_sp_port_obj_dump(struct net_device *dev,
				  struct switchdev_obj *obj,
				  switchdev_obj_dump_cb_t *cb)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
					     SWITCHDEV_OBJ_PORT_FDB(obj), cb,
					     obj->orig_dev);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
	.switchdev_port_obj_add		= mlxsw_sp_port_obj_add,
	.switchdev_port_obj_del		= mlxsw_sp_port_obj_del,
	.switchdev_port_obj_dump	= mlxsw_sp_port_obj_dump,
};

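/* These ops are invoked by the switchdev core in response to bridge
 * configuration. For example (assuming a port named swp1 enslaved to a
 * VLAN-aware bridge), a command such as:
 *
 *   bridge vlan add dev swp1 vid 10 pvid untagged
 *
 * ends up in mlxsw_sp_port_obj_add() with a SWITCHDEV_OBJ_ID_PORT_VLAN
 * object carrying the PVID and untagged flags.
 */
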
static void mlxsw_sp_fdb_call_notifiers(bool learning_sync, bool adding,
					char *mac, u16 vid,
					struct net_device *dev)
{
	struct switchdev_notifier_fdb_info info;
	unsigned long notifier_type;

	if (learning_sync) {
		info.addr = mac;
		info.vid = vid;
		notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
		call_switchdev_notifiers(notifier_type, dev, &info.info);
	}
}

static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
								 fid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			goto just_remove;
		}
		vid = 0;
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		vid = fid;
	}

	adding = adding && mlxsw_sp_port->learning;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
				      adding, true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync,
				    adding, mac, vid, mlxsw_sp_port->dev);
	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}

static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
								 fid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			goto just_remove;
		}

		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
		dev = mlxsw_sp_vport->dev;
		vid = 0;
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		dev = mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev;
		vid = fid;
	}

	adding = adding && mlxsw_sp_port->learning;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding, true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, adding, mac,
				    vid, dev);
	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}

static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index)
{
	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, false);
		break;
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, false);
		break;
	}
}

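/* Learned and aged-out MAC notifications are not interrupt driven;
 * instead, the SFN register is polled from a delayed work item every
 * fdb_notify.interval milliseconds.
 */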
static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_core_schedule_dw(&mlxsw_sp->fdb_notify.dw,
			       msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
}

static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp *mlxsw_sp;
	char *sfn_pl;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);

	rtnl_lock();
	mlxsw_reg_sfn_pack(sfn_pl);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
		goto out;
	}
	num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
	for (i = 0; i < num_rec; i++)
		mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);

out:
	rtnl_unlock();
	kfree(sfn_pl);
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}

static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
		return err;
	}
	INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
	mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
	return 0;
}

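/* Tear down only needs to stop the FDB notification work scheduled by
 * mlxsw_sp_fdb_init() above.
 */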
static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
}

int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp_fdb_init(mlxsw_sp);
}

void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
}

int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* Allow only untagged packets to ingress and tag them internally
	 * with VID 1.
	 */
	mlxsw_sp_port->pvid = 1;
	err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID - 1,
					true);
	if (err) {
		netdev_err(dev, "Unable to init VLANs\n");
		return err;
	}

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	err = mlxsw_sp_port_add_vid(dev, 0, 1);
	if (err)
		netdev_err(dev, "Failed to configure default vFID\n");

	return err;
}

void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
}

void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}