mlxsw: spectrum: Use per-FID struct for the VLAN-aware bridge
[cascardo/linux.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum_switchdev.c
1 /*
2  * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
3  * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4  * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5  * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6  * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36
37 #include <linux/kernel.h>
38 #include <linux/types.h>
39 #include <linux/netdevice.h>
40 #include <linux/etherdevice.h>
41 #include <linux/slab.h>
42 #include <linux/device.h>
43 #include <linux/skbuff.h>
44 #include <linux/if_vlan.h>
45 #include <linux/if_bridge.h>
46 #include <linux/workqueue.h>
47 #include <linux/jiffies.h>
48 #include <linux/rtnetlink.h>
49 #include <net/switchdev.h>
50
51 #include "spectrum.h"
52 #include "core.h"
53 #include "reg.h"
54
55 static u16 mlxsw_sp_port_vid_to_fid_get(struct mlxsw_sp_port *mlxsw_sp_port,
56                                         u16 vid)
57 {
58         u16 fid = vid;
59
60         if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
61                 fid = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
62
63         if (!fid)
64                 fid = mlxsw_sp_port->pvid;
65
66         return fid;
67 }
68
/* Map a switchdev orig_dev to the mlxsw port object to operate on: a
 * VLAN upper device resolves to the matching vPort, anything else to
 * the port itself. May return NULL (after WARN) if no vPort exists.
 */
static struct mlxsw_sp_port *
mlxsw_sp_port_orig_get(struct net_device *dev,
                       struct mlxsw_sp_port *mlxsw_sp_port)
{
        struct mlxsw_sp_port *vport;

        if (!is_vlan_dev(dev))
                return mlxsw_sp_port;

        vport = mlxsw_sp_port_vport_find(mlxsw_sp_port,
                                         vlan_dev_vlan_id(dev));
        WARN_ON(!vport);

        return vport;
}
85
/* switchdev attribute getter. Reports the switch ID (the ASIC base
 * MAC) and the bridge port flags currently offloaded for this (v)port.
 */
static int mlxsw_sp_port_attr_get(struct net_device *dev,
                                  struct switchdev_attr *attr)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

        /* For a VLAN upper device, operate on the matching vPort */
        mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
        if (!mlxsw_sp_port)
                return -EINVAL;

        switch (attr->id) {
        case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
                attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
                memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
                       attr->u.ppid.id_len);
                break;
        case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
                attr->u.brport_flags =
                        (mlxsw_sp_port->learning ? BR_LEARNING : 0) |
                        (mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
                        (mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}
114
/* Program the bridge STP @state into the SPMS register: for a vPort
 * only its own VID is touched, otherwise every active VLAN of the
 * port. Unknown states trigger BUG().
 */
static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                       u8 state)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        enum mlxsw_reg_spms_state spms_state;
        char *spms_pl;
        u16 vid;
        int err;

        switch (state) {
        case BR_STATE_FORWARDING:
                spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
                break;
        case BR_STATE_LEARNING:
                spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
                break;
        case BR_STATE_LISTENING: /* fall-through */
        case BR_STATE_DISABLED: /* fall-through */
        case BR_STATE_BLOCKING:
                spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
                break;
        default:
                BUG();
        }

        /* SPMS is too large for the stack; allocate the payload */
        spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
        if (!spms_pl)
                return -ENOMEM;
        mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

        if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
                vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
                mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
        } else {
                for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
                        mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
        }

        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
        kfree(spms_pl);
        return err;
}
157
158 static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
159                                             struct switchdev_trans *trans,
160                                             u8 state)
161 {
162         if (switchdev_trans_ph_prepare(trans))
163                 return 0;
164
165         mlxsw_sp_port->stp_state = state;
166         return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
167 }
168
169 static bool mlxsw_sp_vfid_is_vport_br(u16 vfid)
170 {
171         return vfid >= MLXSW_SP_VFID_PORT_MAX;
172 }
173
/* Program the SFTR flooding tables for indexes [idx_begin, idx_end].
 * For a vPort the indexes are vFIDs (FID table type); otherwise they
 * are VIDs used as offsets into the table (FID-offset table type).
 * @set adds the port to the flooding records, @only_uc updates only
 * the unknown-unicast table and skips the broadcast/multicast one.
 * On BM failure the UC change is rolled back.
 */
static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                     u16 idx_begin, u16 idx_end, bool set,
                                     bool only_uc)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u16 local_port = mlxsw_sp_port->local_port;
        enum mlxsw_flood_table_type table_type;
        u16 range = idx_end - idx_begin + 1;
        char *sftr_pl;
        int err;

        if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
                table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
                /* vFIDs outside the vPort-bridge range are flooded via
                 * the CPU port instead of the local port.
                 */
                if (mlxsw_sp_vfid_is_vport_br(idx_begin))
                        local_port = mlxsw_sp_port->local_port;
                else
                        local_port = MLXSW_PORT_CPU_PORT;
        } else {
                table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
        }

        /* SFTR is too large for the stack; allocate the payload */
        sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
        if (!sftr_pl)
                return -ENOMEM;

        mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
                            table_type, range, local_port, set);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
        if (err)
                goto buffer_out;

        /* Flooding control allows one to decide whether a given port will
         * flood unicast traffic for which there is no FDB entry.
         */
        if (only_uc)
                goto buffer_out;

        mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
                            table_type, range, local_port, set);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
        if (err)
                goto err_flood_bm_set;
        else
                goto buffer_out;

err_flood_bm_set:
        /* Undo the UC update so hardware stays consistent */
        mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
                            table_type, range, local_port, !set);
        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
buffer_out:
        kfree(sftr_pl);
        return err;
}
227
/* Enable/disable unknown-unicast flooding for a bridge (v)port: a
 * vPort updates its single vFID record, a VLAN-aware port updates one
 * record per active VLAN. On failure, already-processed VIDs are
 * reverted.
 */
static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                      bool set)
{
        struct net_device *dev = mlxsw_sp_port->dev;
        u16 vid, last_visited_vid;
        int err;

        if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
                u16 vfid, fid = mlxsw_sp_vport_fid_get(mlxsw_sp_port);

                /* Flooding table index is the vFID, relative to the
                 * start of the vFID range.
                 */
                vfid = mlxsw_sp_fid_to_vfid(fid);
                return  __mlxsw_sp_port_flood_set(mlxsw_sp_port, vfid, vfid,
                                                  set, true);
        }

        for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
                err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set,
                                                true);
                if (err) {
                        last_visited_vid = vid;
                        goto err_port_flood_set;
                }
        }

        return 0;

err_port_flood_set:
        /* Revert only the VIDs below the one that failed */
        for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
                __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, !set, true);
        netdev_err(dev, "Failed to configure unicast flooding\n");
        return err;
}
260
261 int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
262                              bool set)
263 {
264         u16 vfid;
265
266         /* In case of vFIDs, index into the flooding table is relative to
267          * the start of the vFIDs range.
268          */
269         vfid = mlxsw_sp_fid_to_vfid(fid);
270         return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set,
271                                          false);
272 }
273
274 static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
275                                            struct switchdev_trans *trans,
276                                            unsigned long brport_flags)
277 {
278         unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
279         bool set;
280         int err;
281
282         if (!mlxsw_sp_port->bridged)
283                 return -EINVAL;
284
285         if (switchdev_trans_ph_prepare(trans))
286                 return 0;
287
288         if ((uc_flood ^ brport_flags) & BR_FLOOD) {
289                 set = mlxsw_sp_port->uc_flood ? false : true;
290                 err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, set);
291                 if (err)
292                         return err;
293         }
294
295         mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
296         mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
297         mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;
298
299         return 0;
300 }
301
302 static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
303 {
304         char sfdat_pl[MLXSW_REG_SFDAT_LEN];
305         int err;
306
307         mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
308         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
309         if (err)
310                 return err;
311         mlxsw_sp->ageing_time = ageing_time;
312         return 0;
313 }
314
315 static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
316                                             struct switchdev_trans *trans,
317                                             unsigned long ageing_clock_t)
318 {
319         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
320         unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
321         u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
322
323         if (switchdev_trans_ph_prepare(trans)) {
324                 if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
325                     ageing_time > MLXSW_SP_MAX_AGEING_TIME)
326                         return -ERANGE;
327                 else
328                         return 0;
329         }
330
331         return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
332 }
333
334 static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
335                                           struct switchdev_trans *trans,
336                                           struct net_device *orig_dev,
337                                           bool vlan_enabled)
338 {
339         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
340
341         /* SWITCHDEV_TRANS_PREPARE phase */
342         if ((!vlan_enabled) && (mlxsw_sp->master_bridge.dev == orig_dev)) {
343                 netdev_err(mlxsw_sp_port->dev, "Bridge must be vlan-aware\n");
344                 return -EINVAL;
345         }
346
347         return 0;
348 }
349
/* switchdev attribute setter: resolve the (v)port from orig_dev and
 * dispatch to the per-attribute handlers.
 */
static int mlxsw_sp_port_attr_set(struct net_device *dev,
                                  const struct switchdev_attr *attr,
                                  struct switchdev_trans *trans)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        int err = 0;

        /* For a VLAN upper device, operate on the matching vPort */
        mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
        if (!mlxsw_sp_port)
                return -EINVAL;

        switch (attr->id) {
        case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
                err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
                                                       attr->u.stp_state);
                break;
        case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
                err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
                                                      attr->u.brport_flags);
                break;
        case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
                err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
                                                       attr->u.ageing_time);
                break;
        case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
                err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
                                                     attr->orig_dev,
                                                     attr->u.vlan_filtering);
                break;
        default:
                err = -EOPNOTSUPP;
                break;
        }

        return err;
}
386
387 static struct mlxsw_sp_fid *mlxsw_sp_fid_find(struct mlxsw_sp *mlxsw_sp,
388                                               u16 fid)
389 {
390         struct mlxsw_sp_fid *f;
391
392         list_for_each_entry(f, &mlxsw_sp->fids, list)
393                 if (f->fid == fid)
394                         return f;
395
396         return NULL;
397 }
398
/* Create (@create true) or destroy a FID in hardware via SFMR */
static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
{
        char sfmr_pl[MLXSW_REG_SFMR_LEN];

        mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, fid);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
406
/* Install or remove the global (non-per-port) VID-to-FID mapping for
 * @fid via SVFA. VID and FID are equal in the VLAN-aware bridge.
 */
static int mlxsw_sp_fid_map(struct mlxsw_sp *mlxsw_sp, u16 fid, bool valid)
{
        enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
        char svfa_pl[MLXSW_REG_SVFA_LEN];

        mlxsw_reg_svfa_pack(svfa_pl, 0, mt, valid, fid, fid);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}
415
416 static struct mlxsw_sp_fid *mlxsw_sp_fid_alloc(u16 fid)
417 {
418         struct mlxsw_sp_fid *f;
419
420         f = kzalloc(sizeof(*f), GFP_KERNEL);
421         if (!f)
422                 return NULL;
423
424         f->fid = fid;
425
426         return f;
427 }
428
/* Create @fid in hardware, install its global VID-to-FID mapping,
 * allocate the tracking object and link it into mlxsw_sp->fids.
 * Returns the new object or ERR_PTR(); all steps unwound on failure.
 */
static struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp,
                                                u16 fid)
{
        struct mlxsw_sp_fid *f;
        int err;

        err = mlxsw_sp_fid_op(mlxsw_sp, fid, true);
        if (err)
                return ERR_PTR(err);

        /* Although all the ports member in the FID might be using a
         * {Port, VID} to FID mapping, we create a global VID-to-FID
         * mapping. This allows a port to transition to VLAN mode,
         * knowing the global mapping exists.
         */
        err = mlxsw_sp_fid_map(mlxsw_sp, fid, true);
        if (err)
                goto err_fid_map;

        f = mlxsw_sp_fid_alloc(fid);
        if (!f) {
                err = -ENOMEM;
                goto err_allocate_fid;
        }

        list_add(&f->list, &mlxsw_sp->fids);

        return f;

err_allocate_fid:
        mlxsw_sp_fid_map(mlxsw_sp, fid, false);
err_fid_map:
        mlxsw_sp_fid_op(mlxsw_sp, fid, false);
        return ERR_PTR(err);
}
464
465 static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp,
466                                  struct mlxsw_sp_fid *f)
467 {
468         u16 fid = f->fid;
469
470         list_del(&f->list);
471
472         kfree(f);
473
474         mlxsw_sp_fid_op(mlxsw_sp, fid, false);
475 }
476
477 static int __mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
478                                     u16 fid)
479 {
480         struct mlxsw_sp_fid *f;
481
482         f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
483         if (!f) {
484                 f = mlxsw_sp_fid_create(mlxsw_sp_port->mlxsw_sp, fid);
485                 if (IS_ERR(f))
486                         return PTR_ERR(f);
487         }
488
489         f->ref_count++;
490
491         return 0;
492 }
493
494 static void __mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
495                                       u16 fid)
496 {
497         struct mlxsw_sp_fid *f;
498
499         f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
500         if (WARN_ON(!f))
501                 return;
502
503         if (--f->ref_count == 0)
504                 mlxsw_sp_fid_destroy(mlxsw_sp_port->mlxsw_sp, f);
505 }
506
/* Install or remove a {Port, VID} to FID mapping for this port. Ports
 * without vPorts rely on the global VID-to-FID mapping instead, so
 * nothing needs to be done for them.
 */
static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid,
                                 bool valid)
{
        enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;

        /* If port doesn't have vPorts, then it can use the global
         * VID-to-FID mapping.
         */
        if (list_empty(&mlxsw_sp_port->vports_list))
                return 0;

        return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, valid, fid, fid);
}
520
521 static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
522                                   u16 fid_begin, u16 fid_end)
523 {
524         int fid, err;
525
526         for (fid = fid_begin; fid <= fid_end; fid++) {
527                 err = __mlxsw_sp_port_fid_join(mlxsw_sp_port, fid);
528                 if (err)
529                         goto err_port_fid_join;
530         }
531
532         err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end,
533                                         true, false);
534         if (err)
535                 goto err_port_flood_set;
536
537         for (fid = fid_begin; fid <= fid_end; fid++) {
538                 err = mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, true);
539                 if (err)
540                         goto err_port_fid_map;
541         }
542
543         return 0;
544
545 err_port_fid_map:
546         for (fid--; fid >= fid_begin; fid--)
547                 mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);
548         __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
549                                   false);
550 err_port_flood_set:
551         fid = fid_end;
552 err_port_fid_join:
553         for (fid--; fid >= fid_begin; fid--)
554                 __mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
555         return err;
556 }
557
/* Undo mlxsw_sp_port_fid_join() for [fid_begin, fid_end], in reverse
 * order of setup: unmap, disable flooding, drop the FID references.
 */
static void mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
                                    u16 fid_begin, u16 fid_end)
{
        int fid;

        for (fid = fid_begin; fid <= fid_end; fid++)
                mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);

        __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
                                  false);

        for (fid = fid_begin; fid <= fid_end; fid++)
                __mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
}
572
/* Program the port's PVID into the SPVID register */
static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                    u16 vid)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char spvid_pl[MLXSW_REG_SPVID_LEN];

        mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}
582
/* Allow or disallow untagged frames on the port via SPAFT */
static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                            bool allow)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char spaft_pl[MLXSW_REG_SPAFT_LEN];

        mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}
592
/* Set the port's PVID. @vid == 0 means "no PVID": untagged traffic is
 * disallowed instead. When moving from no-PVID to a PVID, untagged
 * traffic is re-allowed; a failure there reverts the SPVID change.
 */
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
        struct net_device *dev = mlxsw_sp_port->dev;
        int err;

        if (!vid) {
                err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
                if (err) {
                        netdev_err(dev, "Failed to disallow untagged traffic\n");
                        return err;
                }
        } else {
                err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
                if (err) {
                        netdev_err(dev, "Failed to set PVID\n");
                        return err;
                }

                /* Only allow if not already allowed. */
                if (!mlxsw_sp_port->pvid) {
                        err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port,
                                                               true);
                        if (err) {
                                netdev_err(dev, "Failed to allow untagged traffic\n");
                                goto err_port_allow_untagged_set;
                        }
                }
        }

        mlxsw_sp_port->pvid = vid;
        return 0;

err_port_allow_untagged_set:
        /* Restore the previous PVID in hardware */
        __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
        return err;
}
629
/* Add VIDs [vid_begin, vid_end] to an unbridged port (8021q path so
 * the traffic is directed to the CPU); kills the already-added VIDs
 * on failure.
 */
static int mlxsw_sp_port_add_vids(struct net_device *dev, u16 vid_begin,
                                  u16 vid_end)
{
        u16 vid;
        int err;

        for (vid = vid_begin; vid <= vid_end; vid++) {
                err = mlxsw_sp_port_add_vid(dev, 0, vid);
                if (err)
                        goto err_port_add_vid;
        }
        return 0;

err_port_add_vid:
        /* NOTE(review): vid is u16 — if vid_begin were 0, "vid--"
         * would wrap and this loop would not terminate; confirm
         * callers never pass vid_begin == 0.
         */
        for (vid--; vid >= vid_begin; vid--)
                mlxsw_sp_port_kill_vid(dev, 0, vid);
        return err;
}
648
649 static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port,
650                                      u16 vid_begin, u16 vid_end, bool is_member,
651                                      bool untagged)
652 {
653         u16 vid, vid_e;
654         int err;
655
656         for (vid = vid_begin; vid <= vid_end;
657              vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
658                 vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
659                             vid_end);
660
661                 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
662                                              is_member, untagged);
663                 if (err)
664                         return err;
665         }
666
667         return 0;
668 }
669
/* Add VLANs [vid_begin, vid_end] to a bridged port: join the matching
 * FIDs, program VLAN membership, adjust the PVID, update the software
 * VLAN bitmaps and finally re-apply the STP state. Each step is rolled
 * back on failure of a later one.
 */
static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
                                     u16 vid_begin, u16 vid_end,
                                     bool flag_untagged, bool flag_pvid)
{
        struct net_device *dev = mlxsw_sp_port->dev;
        u16 vid, old_pvid;
        int err;

        /* In case this is invoked with BRIDGE_FLAGS_SELF and port is
         * not bridged, then packets ingressing through the port with
         * the specified VIDs will be directed to CPU.
         */
        if (!mlxsw_sp_port->bridged)
                return mlxsw_sp_port_add_vids(dev, vid_begin, vid_end);

        err = mlxsw_sp_port_fid_join(mlxsw_sp_port, vid_begin, vid_end);
        if (err) {
                netdev_err(dev, "Failed to join FIDs\n");
                return err;
        }

        err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
                                        true, flag_untagged);
        if (err) {
                netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin,
                           vid_end);
                goto err_port_vlans_set;
        }

        /* Install the new PVID, or drop it when the current PVID falls
         * inside the range but the PVID flag is gone.
         */
        old_pvid = mlxsw_sp_port->pvid;
        if (flag_pvid && old_pvid != vid_begin) {
                err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid_begin);
                if (err) {
                        netdev_err(dev, "Unable to add PVID %d\n", vid_begin);
                        goto err_port_pvid_set;
                }
        } else if (!flag_pvid && old_pvid >= vid_begin && old_pvid <= vid_end) {
                err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
                if (err) {
                        netdev_err(dev, "Unable to del PVID\n");
                        goto err_port_pvid_set;
                }
        }

        /* Changing activity bits only if HW operation succeded */
        for (vid = vid_begin; vid <= vid_end; vid++) {
                set_bit(vid, mlxsw_sp_port->active_vlans);
                if (flag_untagged)
                        set_bit(vid, mlxsw_sp_port->untagged_vlans);
                else
                        clear_bit(vid, mlxsw_sp_port->untagged_vlans);
        }

        /* STP state change must be done after we set active VLANs */
        err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
                                          mlxsw_sp_port->stp_state);
        if (err) {
                netdev_err(dev, "Failed to set STP state\n");
                goto err_port_stp_state_set;
        }

        return 0;

err_port_stp_state_set:
        for (vid = vid_begin; vid <= vid_end; vid++)
                clear_bit(vid, mlxsw_sp_port->active_vlans);
        if (old_pvid != mlxsw_sp_port->pvid)
                mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
        __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
                                  false);
err_port_vlans_set:
        mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
        return err;
}
745
746 static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
747                                    const struct switchdev_obj_port_vlan *vlan,
748                                    struct switchdev_trans *trans)
749 {
750         bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
751         bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
752
753         if (switchdev_trans_ph_prepare(trans))
754                 return 0;
755
756         return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
757                                          vlan->vid_begin, vlan->vid_end,
758                                          flag_untagged, flag_pvid);
759 }
760
761 static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
762 {
763         return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
764                          MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
765 }
766
767 static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
768 {
769         return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
770                         MLXSW_REG_SFD_OP_WRITE_REMOVE;
771 }
772
/* Add or remove a unicast FDB record {@mac, @fid} -> @local_port via
 * the SFD register. @dynamic selects the record ageing policy.
 */
static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
                                   const char *mac, u16 fid, bool adding,
                                   bool dynamic)
{
        char *sfd_pl;
        int err;

        /* SFD is too large for the stack; allocate the payload */
        sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
        if (!sfd_pl)
                return -ENOMEM;

        mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
        mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
                              mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
                              local_port);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
        kfree(sfd_pl);

        return err;
}
793
/* Add or remove a unicast FDB record {@mac, @fid} -> LAG @lag_id via
 * the SFD register. @lag_vid carries the vPort VID for LAG records;
 * @dynamic selects the record ageing policy.
 */
static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
                                       const char *mac, u16 fid, u16 lag_vid,
                                       bool adding, bool dynamic)
{
        char *sfd_pl;
        int err;

        /* SFD is too large for the stack; allocate the payload */
        sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
        if (!sfd_pl)
                return -ENOMEM;

        mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
        mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
                                  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
                                  lag_vid, lag_id);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
        kfree(sfd_pl);

        return err;
}
814
815 static int
816 mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
817                              const struct switchdev_obj_port_fdb *fdb,
818                              struct switchdev_trans *trans)
819 {
820         u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
821         u16 lag_vid = 0;
822
823         if (switchdev_trans_ph_prepare(trans))
824                 return 0;
825
826         if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
827                 lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
828         }
829
830         if (!mlxsw_sp_port->lagged)
831                 return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
832                                                mlxsw_sp_port->local_port,
833                                                fdb->addr, fid, true, false);
834         else
835                 return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
836                                                    mlxsw_sp_port->lag_id,
837                                                    fdb->addr, fid, lag_vid,
838                                                    true, false);
839 }
840
/* Add or remove a multicast FDB record via the SFD register. The record
 * maps {addr, fid} to the multicast group index 'mid'.
 */
static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
				u16 fid, u16 mid, bool adding)
{
	char *sfd_pl;
	int err;

	/* SFD payload is too large for the stack; allocate it. */
	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
			      MLXSW_REG_SFD_REC_ACTION_NOP, mid);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);
	return err;
}
858
/* Add (or remove) the port to/from the multicast group 'mid' via the
 * SMID register. When 'clear_all_ports' is set, the port mask bit is
 * additionally set for every existing port so the whole membership is
 * rewritten in one go (used when the group is first created).
 */
static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid,
				  bool add, bool clear_all_ports)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *smid_pl;
	int err, i;

	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	mlxsw_reg_smid_pack(smid_pl, mid, mlxsw_sp_port->local_port, add);
	if (clear_all_ports) {
		/* NOTE(review): loop starts at 1 — local port 0 appears
		 * reserved (CPU port?); confirm against port init code.
		 */
		for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
			if (mlxsw_sp->ports[i])
				mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
	}
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}
880
881 static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp,
882                                               const unsigned char *addr,
883                                               u16 vid)
884 {
885         struct mlxsw_sp_mid *mid;
886
887         list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) {
888                 if (ether_addr_equal(mid->addr, addr) && mid->vid == vid)
889                         return mid;
890         }
891         return NULL;
892 }
893
/* Allocate a new MC group: reserve a free MID index from the bitmap,
 * record the {addr, vid} key and link the group on the bridge's MID
 * list. Returns NULL when no free index or no memory is available.
 * The group starts with ref_count 0; the caller takes the first
 * reference.
 */
static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
						const unsigned char *addr,
						u16 vid)
{
	struct mlxsw_sp_mid *mid;
	u16 mid_idx;

	mid_idx = find_first_zero_bit(mlxsw_sp->br_mids.mapped,
				      MLXSW_SP_MID_MAX);
	if (mid_idx == MLXSW_SP_MID_MAX)
		return NULL;

	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
	if (!mid)
		return NULL;

	/* Only mark the index used once the allocation succeeded. */
	set_bit(mid_idx, mlxsw_sp->br_mids.mapped);
	ether_addr_copy(mid->addr, addr);
	mid->vid = vid;
	mid->mid = mid_idx;
	mid->ref_count = 0;
	list_add_tail(&mid->list, &mlxsw_sp->br_mids.list);

	return mid;
}
919
920 static int __mlxsw_sp_mc_dec_ref(struct mlxsw_sp *mlxsw_sp,
921                                  struct mlxsw_sp_mid *mid)
922 {
923         if (--mid->ref_count == 0) {
924                 list_del(&mid->list);
925                 clear_bit(mid->mid, mlxsw_sp->br_mids.mapped);
926                 kfree(mid);
927                 return 1;
928         }
929         return 0;
930 }
931
/* SWITCHDEV_OBJ_ID_PORT_MDB add handler: attach the port to the MC
 * group for {mdb->addr, mdb->vid}, creating the group (MID index +
 * MC SFD record) on first use.
 */
static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_mid *mid;
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
	int err = 0;

	/* HW is only touched during the commit phase. */
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
	if (!mid) {
		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, mdb->vid);
		if (!mid) {
			netdev_err(dev, "Unable to allocate MC group\n");
			return -ENOMEM;
		}
	}
	mid->ref_count++;

	/* For a brand new group (ref_count == 1) also clear the
	 * membership of all other ports with the same SMID write.
	 */
	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true,
				     mid->ref_count == 1);
	if (err) {
		netdev_err(dev, "Unable to set SMID\n");
		goto err_out;
	}

	if (mid->ref_count == 1) {
		/* First user: program the MC FDB record pointing at the MID. */
		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid->mid,
					   true);
		if (err) {
			netdev_err(dev, "Unable to set MC SFD\n");
			goto err_out;
		}
	}

	return 0;

err_out:
	/* Drop the reference taken above; frees the group if we were the
	 * only user.
	 */
	__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid);
	return err;
}
977
978 static int mlxsw_sp_port_obj_add(struct net_device *dev,
979                                  const struct switchdev_obj *obj,
980                                  struct switchdev_trans *trans)
981 {
982         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
983         int err = 0;
984
985         mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
986         if (!mlxsw_sp_port)
987                 return -EINVAL;
988
989         switch (obj->id) {
990         case SWITCHDEV_OBJ_ID_PORT_VLAN:
991                 if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
992                         return 0;
993
994                 err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
995                                               SWITCHDEV_OBJ_PORT_VLAN(obj),
996                                               trans);
997                 break;
998         case SWITCHDEV_OBJ_ID_PORT_FDB:
999                 err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
1000                                                    SWITCHDEV_OBJ_PORT_FDB(obj),
1001                                                    trans);
1002                 break;
1003         case SWITCHDEV_OBJ_ID_PORT_MDB:
1004                 err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
1005                                             SWITCHDEV_OBJ_PORT_MDB(obj),
1006                                             trans);
1007                 break;
1008         default:
1009                 err = -EOPNOTSUPP;
1010                 break;
1011         }
1012
1013         return err;
1014 }
1015
1016 static int mlxsw_sp_port_kill_vids(struct net_device *dev, u16 vid_begin,
1017                                    u16 vid_end)
1018 {
1019         u16 vid;
1020         int err;
1021
1022         for (vid = vid_begin; vid <= vid_end; vid++) {
1023                 err = mlxsw_sp_port_kill_vid(dev, 0, vid);
1024                 if (err)
1025                         return err;
1026         }
1027
1028         return 0;
1029 }
1030
/* Remove the VIDs [vid_begin, vid_end] from the port: clear their VLAN
 * membership, reset the PVID if it falls inside the range and leave the
 * corresponding FIDs. With 'init' set (initial port setup) only the
 * membership is flushed.
 */
static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end, bool init)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, pvid;
	int err;

	/* In case this is invoked with BRIDGE_FLAGS_SELF and port is
	 * not bridged, then prevent packets ingressing through the
	 * port with the specified VIDs from being trapped to CPU.
	 */
	if (!init && !mlxsw_sp_port->bridged)
		return mlxsw_sp_port_kill_vids(dev, vid_begin, vid_end);

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
					false, false);
	if (err) {
		netdev_err(dev, "Unable to del VIDs %d-%d\n", vid_begin,
			   vid_end);
		return err;
	}

	if (init)
		goto out;

	pvid = mlxsw_sp_port->pvid;
	if (pvid >= vid_begin && pvid <= vid_end) {
		/* Deleted range covers the PVID; reset it to none (0). */
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
		if (err) {
			netdev_err(dev, "Unable to del PVID %d\n", pvid);
			return err;
		}
	}

	mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);

out:
	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);

	return 0;
}
1074
1075 static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
1076                                    const struct switchdev_obj_port_vlan *vlan)
1077 {
1078         return __mlxsw_sp_port_vlans_del(mlxsw_sp_port,
1079                                          vlan->vid_begin, vlan->vid_end, false);
1080 }
1081
/* Remove every VLAN currently marked active on the port, one VID at a
 * time.
 */
void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	/* __mlxsw_sp_port_vlans_del() clears the current VID's bit in
	 * active_vlans, which is safe under for_each_set_bit().
	 */
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
		__mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid, false);
}
1089
1090 static int
1091 mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
1092                              const struct switchdev_obj_port_fdb *fdb)
1093 {
1094         u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
1095         u16 lag_vid = 0;
1096
1097         if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
1098                 lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
1099         }
1100
1101         if (!mlxsw_sp_port->lagged)
1102                 return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
1103                                                mlxsw_sp_port->local_port,
1104                                                fdb->addr, fid,
1105                                                false, false);
1106         else
1107                 return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
1108                                                    mlxsw_sp_port->lag_id,
1109                                                    fdb->addr, fid, lag_vid,
1110                                                    false, false);
1111 }
1112
/* SWITCHDEV_OBJ_ID_PORT_MDB del handler: detach the port from the MC
 * group, and remove the group's MC FDB record when the last port
 * leaves.
 */
static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_mid *mid;
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
	u16 mid_idx;
	int err = 0;

	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
	if (!mid) {
		netdev_err(dev, "Unable to remove port from MC DB\n");
		return -EINVAL;
	}

	/* Best-effort: keep tearing down even if the SMID update failed. */
	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false, false);
	if (err)
		netdev_err(dev, "Unable to remove port from SMID\n");

	/* Save the index first; 'mid' may be freed by the ref drop below. */
	mid_idx = mid->mid;
	if (__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid)) {
		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid_idx,
					   false);
		if (err)
			netdev_err(dev, "Unable to remove MC SFD\n");
	}

	return err;
}
1143
1144 static int mlxsw_sp_port_obj_del(struct net_device *dev,
1145                                  const struct switchdev_obj *obj)
1146 {
1147         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1148         int err = 0;
1149
1150         mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
1151         if (!mlxsw_sp_port)
1152                 return -EINVAL;
1153
1154         switch (obj->id) {
1155         case SWITCHDEV_OBJ_ID_PORT_VLAN:
1156                 if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
1157                         return 0;
1158
1159                 err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
1160                                               SWITCHDEV_OBJ_PORT_VLAN(obj));
1161                 break;
1162         case SWITCHDEV_OBJ_ID_PORT_FDB:
1163                 err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
1164                                                    SWITCHDEV_OBJ_PORT_FDB(obj));
1165                 break;
1166         case SWITCHDEV_OBJ_ID_PORT_MDB:
1167                 err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
1168                                             SWITCHDEV_OBJ_PORT_MDB(obj));
1169                 break;
1170         default:
1171                 err = -EOPNOTSUPP;
1172                 break;
1173         }
1174
1175         return err;
1176 }
1177
1178 static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
1179                                                    u16 lag_id)
1180 {
1181         struct mlxsw_sp_port *mlxsw_sp_port;
1182         int i;
1183
1184         for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
1185                 mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
1186                 if (mlxsw_sp_port)
1187                         return mlxsw_sp_port;
1188         }
1189         return NULL;
1190 }
1191
/* SWITCHDEV_OBJ_ID_PORT_FDB dump: walk the device FDB with an SFD
 * query-dump session and invoke 'cb' for every unicast record that
 * belongs to this port (or to the LAG it represents).
 *
 * FID-to-VID translation: a vPort reports only records in its own vFID
 * (with VID 0); a regular port reports only non-vFID records, where
 * the FID equals the VID.
 */
static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct switchdev_obj_port_fdb *fdb,
				  switchdev_obj_dump_cb_t *cb,
				  struct net_device *orig_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *tmp;
	u16 vport_fid = 0;
	char *sfd_pl;
	char mac[ETH_ALEN];
	u16 fid;
	u8 local_port;
	u16 lag_id;
	u8 num_rec;
	int stored_err = 0;
	int i;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
		vport_fid = mlxsw_sp_vport_fid_get(mlxsw_sp_port);

	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
	do {
		mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
		if (err)
			goto out;

		num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);

		/* Even in case of error, we have to run the dump to the end
		 * so the session in firmware is finished.
		 */
		if (stored_err)
			continue;

		for (i = 0; i < num_rec; i++) {
			switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
			case MLXSW_REG_SFD_REC_TYPE_UNICAST:
				mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid,
							&local_port);
				if (local_port == mlxsw_sp_port->local_port) {
					if (vport_fid && vport_fid == fid)
						fdb->vid = 0;
					else if (!vport_fid &&
						 !mlxsw_sp_fid_is_vfid(fid))
						fdb->vid = fid;
					else
						continue;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					/* Keep dumping on callback error; just
					 * remember the first failure.
					 */
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
				mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
							    mac, &fid, &lag_id);
				tmp = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
				if (tmp && tmp->local_port ==
				    mlxsw_sp_port->local_port) {
					/* LAG records can only point to LAG
					 * devices or VLAN devices on top.
					 */
					if (!netif_is_lag_master(orig_dev) &&
					    !is_vlan_dev(orig_dev))
						continue;
					if (vport_fid && vport_fid == fid)
						fdb->vid = 0;
					else if (!vport_fid &&
						 !mlxsw_sp_fid_is_vfid(fid))
						fdb->vid = fid;
					else
						continue;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			}
		}
	} while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);

out:
	kfree(sfd_pl);
	return stored_err ? stored_err : err;
}
1286
1287 static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
1288                                    struct switchdev_obj_port_vlan *vlan,
1289                                    switchdev_obj_dump_cb_t *cb)
1290 {
1291         u16 vid;
1292         int err = 0;
1293
1294         if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
1295                 vlan->flags = 0;
1296                 vlan->vid_begin = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
1297                 vlan->vid_end = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
1298                 return cb(&vlan->obj);
1299         }
1300
1301         for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
1302                 vlan->flags = 0;
1303                 if (vid == mlxsw_sp_port->pvid)
1304                         vlan->flags |= BRIDGE_VLAN_INFO_PVID;
1305                 if (test_bit(vid, mlxsw_sp_port->untagged_vlans))
1306                         vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
1307                 vlan->vid_begin = vid;
1308                 vlan->vid_end = vid;
1309                 err = cb(&vlan->obj);
1310                 if (err)
1311                         break;
1312         }
1313         return err;
1314 }
1315
1316 static int mlxsw_sp_port_obj_dump(struct net_device *dev,
1317                                   struct switchdev_obj *obj,
1318                                   switchdev_obj_dump_cb_t *cb)
1319 {
1320         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1321         int err = 0;
1322
1323         mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
1324         if (!mlxsw_sp_port)
1325                 return -EINVAL;
1326
1327         switch (obj->id) {
1328         case SWITCHDEV_OBJ_ID_PORT_VLAN:
1329                 err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
1330                                               SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
1331                 break;
1332         case SWITCHDEV_OBJ_ID_PORT_FDB:
1333                 err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
1334                                              SWITCHDEV_OBJ_PORT_FDB(obj), cb,
1335                                              obj->orig_dev);
1336                 break;
1337         default:
1338                 err = -EOPNOTSUPP;
1339                 break;
1340         }
1341
1342         return err;
1343 }
1344
/* switchdev ops installed on every mlxsw_sp port netdev. */
static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
	.switchdev_port_obj_add		= mlxsw_sp_port_obj_add,
	.switchdev_port_obj_del		= mlxsw_sp_port_obj_del,
	.switchdev_port_obj_dump	= mlxsw_sp_port_obj_dump,
};
1352
1353 static void mlxsw_sp_fdb_call_notifiers(bool learning_sync, bool adding,
1354                                         char *mac, u16 vid,
1355                                         struct net_device *dev)
1356 {
1357         struct switchdev_notifier_fdb_info info;
1358         unsigned long notifier_type;
1359
1360         if (learning_sync) {
1361                 info.addr = mac;
1362                 info.vid = vid;
1363                 notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
1364                 call_switchdev_notifiers(notifier_type, dev, &info.info);
1365         }
1366 }
1367
/* Handle one learned / aged-out MAC notification record: reflect the
 * event back into the FDB as an ageable record and, when learning sync
 * is enabled on the port, notify the bridge. Records that cannot be
 * matched to a port (or vPort) are removed from HW without
 * notification.
 */
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
								 fid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			goto just_remove;
		}
		vid = 0;
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		vid = fid;
	}

	/* Only keep the entry when the port is actually learning. */
	adding = adding && mlxsw_sp_port->learning;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
				      adding, true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync,
				    adding, mac, vid, mlxsw_sp_port->dev);
	return;

just_remove:
	/* Unmatched record: silently purge it from the FDB. */
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
1424
/* LAG variant of mlxsw_sp_fdb_notify_mac_process(): handle a learned /
 * aged-out MAC notification that points at a LAG, reflecting it back
 * into the FDB and notifying the bridge on behalf of the LAG (or the
 * VLAN device on top of it, for vPorts).
 */
static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	/* Any LAG member can represent the LAG for this event. */
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
								 fid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			goto just_remove;
		}

		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
		dev = mlxsw_sp_vport->dev;
		vid = 0;
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		dev = mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev;
		vid = fid;
	}

	/* Only keep the entry when the port is actually learning. */
	adding = adding && mlxsw_sp_port->learning;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding, true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, adding, mac,
				    vid, dev);
	return;

just_remove:
	/* Unmatched record: silently purge it from the FDB. */
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
1487
/* Dispatch one SFN record to the matching MAC / MAC-LAG handler;
 * unknown record types are silently ignored.
 */
static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index)
{
	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, false);
		break;
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, false);
		break;
	}
}
1510
1511 static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
1512 {
1513         mlxsw_core_schedule_dw(&mlxsw_sp->fdb_notify.dw,
1514                                msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
1515 }
1516
/* Delayed work: drain all pending FDB notification (SFN) records from
 * the device, process each one under RTNL, then re-arm itself.
 */
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp *mlxsw_sp;
	char *sfn_pl;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);

	/* Record processing touches netdev state; hold RTNL for the
	 * whole drain.
	 */
	rtnl_lock();
	do {
		mlxsw_reg_sfn_pack(sfn_pl);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
			break;
		}
		num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);

	} while (num_rec);
	rtnl_unlock();

	kfree(sfn_pl);
	/* Self re-arming periodic work. */
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}
1549
/* Set the default FDB ageing time and start the periodic FDB
 * notification polling work.
 */
static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
		return err;
	}
	INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
	mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
	return 0;
}
1564
/* Stop the FDB notification work and wait for a running instance to
 * finish (the work re-arms itself, hence the _sync cancel).
 */
static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
}
1569
/* Device-wide switchdev initialization; currently only FDB setup. */
int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp_fdb_init(mlxsw_sp);
}
1574
/* Device-wide switchdev teardown; undoes mlxsw_sp_switchdev_init(). */
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
}
1579
/* Initial per-port VLAN setup: flush membership of all VIDs, set PVID 1
 * and add the implicit VID 1 VLAN interface.
 */
int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* Allow only untagged packets to ingress and tag them internally
	 * with VID 1.
	 */
	mlxsw_sp_port->pvid = 1;
	err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID - 1,
					true);
	if (err) {
		netdev_err(dev, "Unable to init VLANs\n");
		return err;
	}

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	err = mlxsw_sp_port_add_vid(dev, 0, 1);
	if (err)
		netdev_err(dev, "Failed to configure default vFID\n");

	return err;
}
1605
/* Install the switchdev ops on the port's netdev. */
void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
}
1610
/* Intentionally empty: per-port switchdev init only assigns a static
 * ops pointer, so there is nothing to undo.
 */
void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}