/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"

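/* switchdev may invoke these operations with attr->orig_dev / obj->orig_dev
 * pointing at a VLAN upper of the port. In that case the operation should be
 * applied to the matching vPort rather than to the physical port, which is
 * what the helper below resolves.
 */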
static struct mlxsw_sp_port *
mlxsw_sp_port_orig_get(struct net_device *dev,
		       struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid;

	if (!is_vlan_dev(dev))
		return mlxsw_sp_port;

	vid = vlan_dev_vlan_id(dev);
	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	WARN_ON(!mlxsw_sp_vport);

	return mlxsw_sp_vport;
}

static int mlxsw_sp_port_attr_get(struct net_device *dev,
				  struct switchdev_attr *attr)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
		       attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		attr->u.brport_flags =
			(mlxsw_sp_port->learning ? BR_LEARNING : 0) |
			(mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
			(mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

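/* The device expresses port STP state per VID through the SPMS register and
 * supports three states: forwarding, learning and discarding. Bridge STP
 * states are collapsed onto these and programmed per VID: a vPort updates
 * only its own VID, a bridged port updates every VLAN in its active_vlans
 * bitmap.
 */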
static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u8 state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	switch (state) {
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_FORWARDING:
		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
		break;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_LEARNING:
		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
		break;
	case BR_STATE_BLOCKING:
		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
		break;
	default:
		BUG();
	}

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	} else {
		for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
			mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	}

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    u8 state)
{
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mlxsw_sp_port->stp_state = state;
	return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
}

static bool mlxsw_sp_vfid_is_vport_br(u16 vfid)
{
	return vfid >= MLXSW_SP_VFID_PORT_MAX;
}

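/* Program the flooding tables (SFTR register) for a range of FIDs or vFIDs.
 * The unicast flood table is always updated; unless only_uc is set, the
 * MLXSW_SP_FLOOD_TABLE_BM table is updated as well. For vPorts the table is
 * indexed by FID and the entry is bound either to the port itself (vPort
 * bridge) or to the CPU port.
 */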
static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 idx_begin, u16 idx_end, bool set,
				     bool only_uc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 local_port = mlxsw_sp_port->local_port;
	enum mlxsw_flood_table_type table_type;
	u16 range = idx_end - idx_begin + 1;
	char *sftr_pl;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
		if (mlxsw_sp_vfid_is_vport_br(idx_begin))
			local_port = mlxsw_sp_port->local_port;
		else
			local_port = MLXSW_PORT_CPU_PORT;
	} else {
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
	}

	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
	if (!sftr_pl)
		return -ENOMEM;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
			    table_type, range, local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
	if (err)
		goto buffer_out;

	/* Flooding control allows one to decide whether a given port will
	 * flood unicast traffic for which there is no FDB entry.
	 */
	if (only_uc)
		goto buffer_out;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
			    table_type, range, local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);

buffer_out:
	kfree(sftr_pl);
	return err;
}

static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool set)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, last_visited_vid;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);

		return __mlxsw_sp_port_flood_set(mlxsw_sp_port, vfid, vfid,
						 set, true);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set,
						true);
		if (err) {
			last_visited_vid = vid;
			goto err_port_flood_set;
		}
	}

	return 0;

err_port_flood_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		__mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, !set, true);
	netdev_err(dev, "Failed to configure unicast flooding\n");
	return err;
}

int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid,
			     bool set, bool only_uc)
{
	/* In case of vFIDs, index into the flooding table is relative to
	 * the start of the vFIDs range.
	 */
	return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set,
					 only_uc);
}

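/* Of the bridge port flags, only BR_FLOOD requires touching the hardware
 * (through the unicast flood tables). BR_LEARNING and BR_LEARNING_SYNC are
 * only cached in the port structure and consulted when processing FDB
 * notifications.
 */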
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct switchdev_trans *trans,
					   unsigned long brport_flags)
{
	unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
	bool set;
	int err;

	if (!mlxsw_sp_port->bridged)
		return -EINVAL;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if ((uc_flood ^ brport_flags) & BR_FLOOD) {
		set = mlxsw_sp_port->uc_flood ? false : true;
		err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, set);
		if (err)
			return err;
	}

	mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
	mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
	mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;

	return 0;
}

static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
{
	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
	int err;

	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
	if (err)
		return err;
	mlxsw_sp->ageing_time = ageing_time;
	return 0;
}

static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    unsigned long ageing_clock_t)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
}

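/* The driver only supports a VLAN-aware master bridge, so an attempt to
 * clear VLAN filtering on the bridge the ports are enslaved to is rejected
 * during the prepare phase.
 */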
static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct switchdev_trans *trans,
					  struct net_device *orig_dev,
					  bool vlan_enabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	/* SWITCHDEV_TRANS_PREPARE phase */
	if ((!vlan_enabled) && (mlxsw_sp->master_bridge.dev == orig_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Bridge must be vlan-aware\n");
		return -EINVAL;
	}

	return 0;
}

static int mlxsw_sp_port_attr_set(struct net_device *dev,
				  const struct switchdev_attr *attr,
				  struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
						       attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.vlan_filtering);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];
	int err;

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, fid);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
	if (err)
		return err;

	set_bit(fid, mlxsw_sp->active_fids);
	return 0;
}

static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	clear_bit(fid, mlxsw_sp->active_fids);

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
			    fid, fid);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	enum mlxsw_reg_svfa_mt mt;

	if (!list_empty(&mlxsw_sp_port->vports_list))
		mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	else
		mt = MLXSW_REG_SVFA_MT_VID_TO_FID;

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, fid, fid);
}

static int mlxsw_sp_port_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	enum mlxsw_reg_svfa_mt mt;

	if (list_empty(&mlxsw_sp_port->vports_list))
		return 0;

	mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid, fid);
}

static int mlxsw_sp_port_add_vids(struct net_device *dev, u16 vid_begin,
				  u16 vid_end)
{
	u16 vid;
	int err;

	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_add_vid(dev, 0, vid);
		if (err)
			goto err_port_add_vid;
	}
	return 0;

err_port_add_vid:
	for (vid--; vid >= vid_begin; vid--)
		mlxsw_sp_port_kill_vid(dev, 0, vid);
	return err;
}

static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end, bool is_member,
				     bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					     is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

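/* Core of VLAN addition on a bridged port. The order of operations matters:
 * FIDs are created and VID-to-FID mappings set first, then flooding and VLAN
 * membership are configured, and only afterwards the PVID and STP state are
 * updated. The error path unwinds these steps in reverse.
 */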
static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool flag_untagged, bool flag_pvid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, last_visited_vid, old_pvid;
	enum mlxsw_reg_svfa_mt mt;
	int err;

	/* In case this is invoked with BRIDGE_FLAGS_SELF and port is
	 * not bridged, then packets ingressing through the port with
	 * the specified VIDs will be directed to CPU.
	 */
	if (!mlxsw_sp_port->bridged)
		return mlxsw_sp_port_add_vids(dev, vid_begin, vid_end);

	for (vid = vid_begin; vid <= vid_end; vid++) {
		if (!test_bit(vid, mlxsw_sp->active_fids)) {
			err = mlxsw_sp_fid_create(mlxsw_sp, vid);
			if (err) {
				netdev_err(dev, "Failed to create FID=%d\n",
					   vid);
				return err;
			}

			/* When creating a FID, we set a VID to FID mapping
			 * regardless of the port's mode.
			 */
			mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
			err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt,
							   true, vid, vid);
			if (err) {
				netdev_err(dev, "Failed to create FID=VID=%d mapping\n",
					   vid);
				goto err_port_vid_to_fid_set;
			}
		}
	}

	/* Set FID mapping according to port's mode */
	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_fid_map(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to map FID=%d", vid);
			last_visited_vid = --vid;
			goto err_port_fid_map;
		}
	}

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
					true, false);
	if (err) {
		netdev_err(dev, "Failed to configure flooding\n");
		goto err_port_flood_set;
	}

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
					true, flag_untagged);
	if (err) {
		netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin,
			   vid_end);
		goto err_port_vlans_set;
	}

	old_pvid = mlxsw_sp_port->pvid;
	if (flag_pvid && old_pvid != vid_begin) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid_begin);
		if (err) {
			netdev_err(dev, "Unable to add PVID %d\n", vid_begin);
			goto err_port_pvid_set;
		}
		mlxsw_sp_port->pvid = vid_begin;
	}

	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++)
		set_bit(vid, mlxsw_sp_port->active_vlans);

	/* STP state change must be done after we set active VLANs */
	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
					  mlxsw_sp_port->stp_state);
	if (err) {
		netdev_err(dev, "Failed to set STP state\n");
		goto err_port_stp_state_set;
	}

	return 0;

err_port_vid_to_fid_set:
	mlxsw_sp_fid_destroy(mlxsw_sp, vid);
	return err;

err_port_stp_state_set:
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);
	if (old_pvid != mlxsw_sp_port->pvid)
		mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
	__mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
				  false);
err_port_vlans_set:
	__mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end, false,
				  false);
err_port_flood_set:
	last_visited_vid = vid_end;
err_port_fid_map:
	for (vid = last_visited_vid; vid >= vid_begin; vid--)
		mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
	return err;
}

static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans)
{
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end,
					 flag_untagged, flag_pvid);
}

static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
{
	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
			 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
}

static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
{
	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
			MLXSW_REG_SFD_OP_WRITE_REMOVE;
}

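/* Unicast FDB records are added and removed through the SFD register using
 * an edit or remove operation. Dynamic records use the ingress dynamic-entry
 * policy, while static records are written with the static-entry policy.
 */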
static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp_port *mlxsw_sp_port,
				   const char *mac, u16 fid, bool adding,
				   bool dynamic)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
			      mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
			      mlxsw_sp_port->local_port);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}

static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
				       const char *mac, u16 fid, u16 lag_vid,
				       bool adding, bool dynamic)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
				  lag_vid, lag_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}

static int
mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb,
			     struct switchdev_trans *trans)
{
	u16 fid = fdb->vid;
	u16 lag_vid = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);

		fid = mlxsw_sp_vfid_to_fid(vfid);
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
	}

	if (!fid)
		fid = mlxsw_sp_port->pvid;

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port,
					       fdb->addr, fid, true, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,
						   true, false);
}

static int mlxsw_sp_port_obj_add(struct net_device *dev,
				 const struct switchdev_obj *obj,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
			return 0;

		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj),
					      trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj),
						   trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int mlxsw_sp_port_kill_vids(struct net_device *dev, u16 vid_begin,
				   u16 vid_end)
{
	u16 vid;
	int err;

	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_kill_vid(dev, 0, vid);
		if (err)
			return err;
	}

	return 0;
}

static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end, bool init)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, pvid;
	int err;

	/* In case this is invoked with BRIDGE_FLAGS_SELF and port is
	 * not bridged, then prevent packets ingressing through the
	 * port with the specified VIDs from being trapped to CPU.
	 */
	if (!init && !mlxsw_sp_port->bridged)
		return mlxsw_sp_port_kill_vids(dev, vid_begin, vid_end);

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
					false, false);
	if (err) {
		netdev_err(dev, "Unable to del VIDs %d-%d\n", vid_begin,
			   vid_end);
		return err;
	}

	pvid = mlxsw_sp_port->pvid;
	if (pvid >= vid_begin && pvid <= vid_end && pvid != 1) {
		/* Default VLAN is always 1 */
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
		if (err) {
			netdev_err(dev, "Unable to del PVID %d\n", pvid);
			return err;
		}
		mlxsw_sp_port->pvid = 1;
	}

	if (init)
		goto out;

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
					false, false);
	if (err) {
		netdev_err(dev, "Failed to clear flooding\n");
		return err;
	}

	for (vid = vid_begin; vid <= vid_end; vid++) {
		/* Remove FID mapping in case of Virtual mode */
		err = mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to unmap FID=%d", vid);
			return err;
		}
	}

out:
	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);

	return 0;
}

static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	return __mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end, false);
}

static int
mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb)
{
	u16 fid = fdb->vid;
	u16 lag_vid = 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);

		fid = mlxsw_sp_vfid_to_fid(vfid);
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
	}

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port,
					       fdb->addr, fid,
					       false, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,
						   false, false);
}

static int mlxsw_sp_port_obj_del(struct net_device *dev,
				 const struct switchdev_obj *obj)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
			return 0;

		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
						   u16 lag_id)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
		if (mlxsw_sp_port)
			return mlxsw_sp_port;
	}
	return NULL;
}

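/* Dump the hardware FDB via SFD query sessions of up to
 * MLXSW_REG_SFD_REC_MAX_COUNT records each. When dumping on behalf of a
 * vPort, only records in the vPort's FID are reported and they are
 * translated back to the vPort's VID.
 */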
static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct switchdev_obj_port_fdb *fdb,
				  switchdev_obj_dump_cb_t *cb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 vport_vid = 0, vport_fid = 0;
	char *sfd_pl;
	char mac[ETH_ALEN];
	u16 fid;
	u8 local_port;
	u16 lag_id;
	u8 num_rec;
	int stored_err = 0;
	int i;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 tmp;

		tmp = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);
		vport_fid = mlxsw_sp_vfid_to_fid(tmp);
		vport_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
	}

	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
	do {
		mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
		if (err)
			goto out;

		num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);

		/* Even in case of error, we have to run the dump to the end
		 * so the session in firmware is finished.
		 */
		if (stored_err)
			continue;

		for (i = 0; i < num_rec; i++) {
			switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
			case MLXSW_REG_SFD_REC_TYPE_UNICAST:
				mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid,
							&local_port);
				if (local_port == mlxsw_sp_port->local_port) {
					if (vport_fid && vport_fid != fid)
						continue;
					else if (vport_fid)
						fdb->vid = vport_vid;
					else
						fdb->vid = fid;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
				mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
							    mac, &fid, &lag_id);
				if (mlxsw_sp_port ==
				    mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id)) {
					if (vport_fid && vport_fid != fid)
						continue;
					else if (vport_fid)
						fdb->vid = vport_vid;
					else
						fdb->vid = fid;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			}
		}
	} while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);

out:
	kfree(sfd_pl);
	return stored_err ? stored_err : err;
}

static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct switchdev_obj_port_vlan *vlan,
				   switchdev_obj_dump_cb_t *cb)
{
	u16 vid;
	int err = 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vlan->flags = 0;
		vlan->vid_begin = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		vlan->vid_end = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		return cb(&vlan->obj);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		vlan->flags = 0;
		if (vid == mlxsw_sp_port->pvid)
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
		vlan->vid_begin = vid;
		vlan->vid_end = vid;
		err = cb(&vlan->obj);
		if (err)
			break;
	}
	return err;
}

static int mlxsw_sp_port_obj_dump(struct net_device *dev,
				  struct switchdev_obj *obj,
				  switchdev_obj_dump_cb_t *cb)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
					     SWITCHDEV_OBJ_PORT_FDB(obj), cb);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
	.switchdev_port_obj_add		= mlxsw_sp_port_obj_add,
	.switchdev_port_obj_del		= mlxsw_sp_port_obj_del,
	.switchdev_port_obj_dump	= mlxsw_sp_port_obj_dump,
};

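/* These operations are attached to each port netdev in
 * mlxsw_sp_port_switchdev_init() below.
 */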
static void mlxsw_sp_fdb_call_notifiers(bool learning, bool learning_sync,
					bool adding, char *mac, u16 vid,
					struct net_device *dev)
{
	struct switchdev_notifier_fdb_info info;
	unsigned long notifier_type;

	if (learning && learning_sync) {
		info.addr = mac;
		info.vid = vid;
		notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
		call_switchdev_notifiers(notifier_type, dev, &info.info);
	}
}

static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid, fid;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		return;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		u16 vfid = mlxsw_sp_fid_to_vfid(fid);
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port,
								  vfid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			return;
		}

		vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		vid = fid;
	}

	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port, mac, fid,
				      adding && mlxsw_sp_port->learning, true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning,
				    mlxsw_sp_port->learning_sync,
				    adding, mac, vid, mlxsw_sp_port->dev);
}

static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		return;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		u16 vfid = mlxsw_sp_fid_to_vfid(fid);
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port,
								  vfid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			return;
		}

		vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
		lag_vid = vid;
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		vid = fid;
	}

	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding && mlxsw_sp_port->learning,
					  true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning,
				    mlxsw_sp_port->learning_sync,
				    adding, mac, vid,
				    mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev);
}

static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index)
{
	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, false);
		break;
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, false);
		break;
	}
}

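/* Learned and aged-out MAC events are not interrupt driven; a delayed work
 * periodically queries the SFN register, mirrors each event back to the
 * device FDB (honouring the port's learning flag) and notifies the bridge
 * through the switchdev FDB notifier chain.
 */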
static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	schedule_delayed_work(&mlxsw_sp->fdb_notify.dw,
			      msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
}

static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp *mlxsw_sp;
	char *sfn_pl;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);

	do {
		mlxsw_reg_sfn_pack(sfn_pl);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
			break;
		}
		num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);

	} while (num_rec);

	kfree(sfn_pl);
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}

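/* Set the default ageing time and kick off the periodic FDB notification
 * work. The work re-arms itself every fdb_notify.interval milliseconds.
 */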
static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
		return err;
	}
	INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
	mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
	return 0;
}

static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
}

static void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
{
	u16 fid;

	for_each_set_bit(fid, mlxsw_sp->active_fids, VLAN_N_VID)
		mlxsw_sp_fid_destroy(mlxsw_sp, fid);
}

int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp_fdb_init(mlxsw_sp);
}

void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
}

int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* Allow only untagged packets to ingress and tag them internally
	 * with VID 1.
	 */
	mlxsw_sp_port->pvid = 1;
	err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID - 1,
					true);
	if (err) {
		netdev_err(dev, "Unable to init VLANs\n");
		return err;
	}

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	err = mlxsw_sp_port_add_vid(dev, 0, 1);
	if (err)
		netdev_err(dev, "Failed to configure default vFID\n");

	return err;
}

void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
}

void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}