/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <linux/etherdevice.h>
34 #include <linux/mlx5/driver.h>
35 #include <linux/mlx5/mlx5_ifc.h>
36 #include <linux/mlx5/vport.h>
37 #include <linux/mlx5/fs.h>
38 #include "mlx5_core.h"
41 #define UPLINK_VPORT 0xFFFF
43 #define MLX5_DEBUG_ESWITCH_MASK BIT(3)
45 #define esw_info(dev, format, ...) \
46 pr_info("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)
48 #define esw_warn(dev, format, ...) \
49 pr_warn("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)
51 #define esw_debug(dev, format, ...) \
52 mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)
enum {
	MLX5_ACTION_NONE = 0,
	MLX5_ACTION_ADD  = 1,
	MLX5_ACTION_DEL  = 2,
};

/* E-Switch UC L2 table hash node */
struct esw_uc_addr {
	struct l2addr_node node;
	u32                table_index;
	u32                vport;
};

/* E-Switch MC FDB table hash node */
struct esw_mc_addr { /* SRIOV only */
	struct l2addr_node     node;
	struct mlx5_flow_rule *uplink_rule; /* Forward to uplink rule */
	u32                    refcnt;
};

/* Vport UC/MC hash node */
struct vport_addr {
	struct l2addr_node     node;
	u8                     action;
	u32                    vport;
	struct mlx5_flow_rule *flow_rule; /* SRIOV only */
};

enum {
	UC_ADDR_CHANGE = BIT(0),
	MC_ADDR_CHANGE = BIT(1),
};
87 /* Vport context events */
#define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \
			    MC_ADDR_CHANGE)
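/* Arm UC/MC address-change events on the NIC vport context of @vport.
 * esw_vport_change_handler() re-arms these after handling each event.
 */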
91 static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
94 int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)];
95 int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
99 memset(out, 0, sizeof(out));
100 memset(in, 0, sizeof(in));
102 MLX5_SET(modify_nic_vport_context_in, in,
103 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
104 MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
105 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
107 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
108 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
109 in, nic_vport_context);
111 MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);
113 if (events_mask & UC_ADDR_CHANGE)
114 MLX5_SET(nic_vport_context, nic_vport_ctx,
115 event_on_uc_address_change, 1);
116 if (events_mask & MC_ADDR_CHANGE)
117 MLX5_SET(nic_vport_context, nic_vport_ctx,
118 event_on_mc_address_change, 1);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	return mlx5_cmd_status_to_err_v2(out);
}
131 /* E-Switch vport context HW commands */
132 static int query_esw_vport_context_cmd(struct mlx5_core_dev *mdev, u32 vport,
133 u32 *out, int outlen)
135 u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)];
137 memset(in, 0, sizeof(in));
	MLX5_SET(query_esw_vport_context_in, in, opcode,
140 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
142 MLX5_SET(query_esw_vport_context_in, in, vport_number, vport);
144 MLX5_SET(query_esw_vport_context_in, in, other_vport, 1);
146 return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
static int query_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
				 u16 *vlan, u8 *qos)
{
152 u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)];
157 memset(out, 0, sizeof(out));
162 if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
163 !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
166 err = query_esw_vport_context_cmd(dev, vport, out, sizeof(out));
170 cvlan_strip = MLX5_GET(query_esw_vport_context_out, out,
171 esw_vport_context.vport_cvlan_strip);
173 cvlan_insert = MLX5_GET(query_esw_vport_context_out, out,
174 esw_vport_context.vport_cvlan_insert);
176 if (cvlan_strip || cvlan_insert) {
177 *vlan = MLX5_GET(query_esw_vport_context_out, out,
178 esw_vport_context.cvlan_id);
179 *qos = MLX5_GET(query_esw_vport_context_out, out,
180 esw_vport_context.cvlan_pcp);
183 esw_debug(dev, "Query Vport[%d] cvlan: VLAN %d qos=%d\n",
189 static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
192 u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)];
194 memset(out, 0, sizeof(out));
196 MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
198 MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
200 MLX5_SET(modify_esw_vport_context_in, in, opcode,
201 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
	return mlx5_cmd_exec_check_status(dev, in, inlen,
					  out, sizeof(out));
}
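/* Program VST C-VLAN offload on the e-switch vport context: with @set, the
 * vport strips the C-tag and inserts the given vlan/qos when no VLAN is
 * already present; without @set the selected fields are left cleared.
 * Backs mlx5_eswitch_set_vport_vlan() below.
 */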
207 static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
208 u16 vlan, u8 qos, bool set)
210 u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)];
212 memset(in, 0, sizeof(in));
214 if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
215 !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
218 esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%d\n",
219 vport, vlan, qos, set);
222 MLX5_SET(modify_esw_vport_context_in, in,
223 esw_vport_context.vport_cvlan_strip, 1);
224 /* insert only if no vlan in packet */
225 MLX5_SET(modify_esw_vport_context_in, in,
226 esw_vport_context.vport_cvlan_insert, 1);
227 MLX5_SET(modify_esw_vport_context_in, in,
228 esw_vport_context.cvlan_pcp, qos);
229 MLX5_SET(modify_esw_vport_context_in, in,
230 esw_vport_context.cvlan_id, vlan);
233 MLX5_SET(modify_esw_vport_context_in, in,
234 field_select.vport_cvlan_strip, 1);
235 MLX5_SET(modify_esw_vport_context_in, in,
236 field_select.vport_cvlan_insert, 1);
238 return modify_esw_vport_context_cmd(dev, vport, in, sizeof(in));
241 /* HW L2 Table (MPFS) management */
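/* Program one HW L2 (MPFS) table entry: MAC plus optional VLAN at the given
 * index. The MAC goes into the low six bytes of the eight-byte mac_address
 * field, hence the &in_mac_addr[2] below.
 */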
242 static int set_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index,
243 u8 *mac, u8 vlan_valid, u16 vlan)
245 u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)];
246 u32 out[MLX5_ST_SZ_DW(set_l2_table_entry_out)];
249 memset(in, 0, sizeof(in));
250 memset(out, 0, sizeof(out));
252 MLX5_SET(set_l2_table_entry_in, in, opcode,
253 MLX5_CMD_OP_SET_L2_TABLE_ENTRY);
254 MLX5_SET(set_l2_table_entry_in, in, table_index, index);
255 MLX5_SET(set_l2_table_entry_in, in, vlan_valid, vlan_valid);
256 MLX5_SET(set_l2_table_entry_in, in, vlan, vlan);
258 in_mac_addr = MLX5_ADDR_OF(set_l2_table_entry_in, in, mac_address);
259 ether_addr_copy(&in_mac_addr[2], mac);
261 return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
265 static int del_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index)
267 u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)];
268 u32 out[MLX5_ST_SZ_DW(delete_l2_table_entry_out)];
270 memset(in, 0, sizeof(in));
271 memset(out, 0, sizeof(out));
273 MLX5_SET(delete_l2_table_entry_in, in, opcode,
274 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
275 MLX5_SET(delete_l2_table_entry_in, in, table_index, index);
276 return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
280 static int alloc_l2_table_index(struct mlx5_l2_table *l2_table, u32 *ix)
284 *ix = find_first_zero_bit(l2_table->bitmap, l2_table->size);
	if (*ix >= l2_table->size)
		err = -ENOSPC;
	else
		__set_bit(*ix, l2_table->bitmap);
293 static void free_l2_table_index(struct mlx5_l2_table *l2_table, u32 ix)
295 __clear_bit(ix, l2_table->bitmap);
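/* Allocate a free HW L2 (MPFS) table index and program @mac into it; the
 * index is released again if the command fails. del_l2_table_entry() undoes
 * both steps.
 */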
298 static int set_l2_table_entry(struct mlx5_core_dev *dev, u8 *mac,
299 u8 vlan_valid, u16 vlan,
302 struct mlx5_l2_table *l2_table = &dev->priv.eswitch->l2_table;
305 err = alloc_l2_table_index(l2_table, index);
309 err = set_l2_table_entry_cmd(dev, *index, mac, vlan_valid, vlan);
311 free_l2_table_index(l2_table, *index);
316 static void del_l2_table_entry(struct mlx5_core_dev *dev, u32 index)
318 struct mlx5_l2_table *l2_table = &dev->priv.eswitch->l2_table;
320 del_l2_table_entry_cmd(dev, index);
321 free_l2_table_index(l2_table, index);
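/* Install an FDB rule forwarding frames whose DMAC exactly matches @mac to
 * @vport; UPLINK_VPORT selects the physical uplink as destination.
 */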
325 static struct mlx5_flow_rule *
326 esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
328 int match_header = MLX5_MATCH_OUTER_HEADERS;
329 struct mlx5_flow_destination dest;
330 struct mlx5_flow_rule *flow_rule = NULL;
336 match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
337 match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
338 if (!match_v || !match_c) {
339 pr_warn("FDB: Failed to alloc match parameters\n");
342 dmac_v = MLX5_ADDR_OF(fte_match_param, match_v,
343 outer_headers.dmac_47_16);
344 dmac_c = MLX5_ADDR_OF(fte_match_param, match_c,
345 outer_headers.dmac_47_16);
347 ether_addr_copy(dmac_v, mac);
348 /* Match criteria mask */
349 memset(dmac_c, 0xff, 6);
351 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
352 dest.vport_num = vport;
355 "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
356 dmac_v, dmac_c, vport);
	flow_rule =
		mlx5_add_flow_rule(esw->fdb_table.fdb,
				   match_header,
				   match_c,
				   match_v,
				   MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
				   0, &dest);
364 if (IS_ERR_OR_NULL(flow_rule)) {
366 "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
367 dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
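/* Create the SRIOV FDB: one flow table in the FDB namespace, sized from the
 * FW-reported log_max_ft_size, with a single flow group spanning the whole
 * table that matches on destination MAC only.
 */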
376 static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
378 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
379 struct mlx5_core_dev *dev = esw->dev;
380 struct mlx5_flow_namespace *root_ns;
381 struct mlx5_flow_table *fdb;
382 struct mlx5_flow_group *g;
383 void *match_criteria;
389 esw_debug(dev, "Create FDB log_max_size(%d)\n",
390 MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
392 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
394 esw_warn(dev, "Failed to get FDB flow namespace\n");
398 flow_group_in = mlx5_vzalloc(inlen);
401 memset(flow_group_in, 0, inlen);
403 table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
404 fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0);
	if (IS_ERR_OR_NULL(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create FDB Table err %d\n", err);
		goto out;
	}
411 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
412 MLX5_MATCH_OUTER_HEADERS);
413 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
414 dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
415 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
416 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
417 eth_broadcast_addr(dmac);
419 g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR_OR_NULL(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create flow group err(%d)\n", err);
		goto out;
	}
426 esw->fdb_table.addr_grp = g;
427 esw->fdb_table.fdb = fdb;
out:
	kfree(flow_group_in);
430 if (err && !IS_ERR_OR_NULL(fdb))
		mlx5_destroy_flow_table(fdb);
	return err;
}
435 static void esw_destroy_fdb_table(struct mlx5_eswitch *esw)
	if (!esw->fdb_table.fdb)
		return;
440 esw_debug(esw->dev, "Destroy FDB Table\n");
441 mlx5_destroy_flow_group(esw->fdb_table.addr_grp);
442 mlx5_destroy_flow_table(esw->fdb_table.fdb);
443 esw->fdb_table.fdb = NULL;
444 esw->fdb_table.addr_grp = NULL;
447 /* E-Switch vport UC/MC lists management */
448 typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
449 struct vport_addr *vaddr);
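/* Add a unicast MAC on behalf of a vport: claim it in the global UC hash
 * (a MAC may belong to only one vport), program it into the HW L2 (MPFS)
 * table and, when the SRIOV FDB exists, add a forwarding rule to the vport.
 */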
451 static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
453 struct hlist_head *hash = esw->l2_table.l2_hash;
454 struct esw_uc_addr *esw_uc;
455 u8 *mac = vaddr->node.addr;
456 u32 vport = vaddr->vport;
459 esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr);
462 "Failed to set L2 mac(%pM) for vport(%d), mac is already in use by vport(%d)\n",
463 mac, vport, esw_uc->vport);
467 esw_uc = l2addr_hash_add(hash, mac, struct esw_uc_addr, GFP_KERNEL);
470 esw_uc->vport = vport;
472 err = set_l2_table_entry(esw->dev, mac, 0, 0, &esw_uc->table_index);
476 if (esw->fdb_table.fdb) /* SRIOV is enabled: Forward UC MAC to vport */
477 vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
479 esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM index:%d fr(%p)\n",
480 vport, mac, esw_uc->table_index, vaddr->flow_rule);
483 l2addr_hash_del(esw_uc);
487 static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
489 struct hlist_head *hash = esw->l2_table.l2_hash;
490 struct esw_uc_addr *esw_uc;
491 u8 *mac = vaddr->node.addr;
492 u32 vport = vaddr->vport;
494 esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr);
495 if (!esw_uc || esw_uc->vport != vport) {
497 "MAC(%pM) doesn't belong to vport (%d)\n",
501 esw_debug(esw->dev, "\tDELETE UC MAC: vport[%d] %pM index:%d fr(%p)\n",
502 vport, mac, esw_uc->table_index, vaddr->flow_rule);
504 del_l2_table_entry(esw->dev, esw_uc->table_index);
506 if (vaddr->flow_rule)
507 mlx5_del_flow_rule(vaddr->flow_rule);
508 vaddr->flow_rule = NULL;
	l2addr_hash_del(esw_uc);
	return 0;
}
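/* Add a multicast MAC: MC entries are reference counted across vports. The
 * first user also installs an uplink forwarding rule for the MAC; every
 * vport gets its own FDB rule forwarding the MAC to itself.
 */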
514 static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
516 struct hlist_head *hash = esw->mc_table;
517 struct esw_mc_addr *esw_mc;
518 u8 *mac = vaddr->node.addr;
519 u32 vport = vaddr->vport;
521 if (!esw->fdb_table.fdb)
524 esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
528 esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL);
532 esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
533 esw_fdb_set_vport_rule(esw, mac, UPLINK_VPORT);
536 /* Forward MC MAC to vport */
537 vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
539 "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
540 vport, mac, vaddr->flow_rule,
541 esw_mc->refcnt, esw_mc->uplink_rule);
545 static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
547 struct hlist_head *hash = esw->mc_table;
548 struct esw_mc_addr *esw_mc;
549 u8 *mac = vaddr->node.addr;
550 u32 vport = vaddr->vport;
552 if (!esw->fdb_table.fdb)
555 esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
558 "Failed to find eswitch MC addr for MAC(%pM) vport(%d)",
563 "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
564 vport, mac, vaddr->flow_rule, esw_mc->refcnt,
565 esw_mc->uplink_rule);
567 if (vaddr->flow_rule)
568 mlx5_del_flow_rule(vaddr->flow_rule);
569 vaddr->flow_rule = NULL;
	if (--esw_mc->refcnt)
		return 0;

	if (esw_mc->uplink_rule)
		mlx5_del_flow_rule(esw_mc->uplink_rule);
577 l2addr_hash_del(esw_mc);
581 /* Apply vport UC/MC list to HW l2 table and FDB table */
582 static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
583 u32 vport_num, int list_type)
585 struct mlx5_vport *vport = &esw->vports[vport_num];
586 bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
587 vport_addr_action vport_addr_add;
588 vport_addr_action vport_addr_del;
589 struct vport_addr *addr;
590 struct l2addr_node *node;
591 struct hlist_head *hash;
592 struct hlist_node *tmp;
595 vport_addr_add = is_uc ? esw_add_uc_addr :
597 vport_addr_del = is_uc ? esw_del_uc_addr :
600 hash = is_uc ? vport->uc_list : vport->mc_list;
601 for_each_l2hash_node(node, tmp, hash, hi) {
602 addr = container_of(node, struct vport_addr, node);
603 switch (addr->action) {
604 case MLX5_ACTION_ADD:
605 vport_addr_add(esw, addr);
606 addr->action = MLX5_ACTION_NONE;
608 case MLX5_ACTION_DEL:
609 vport_addr_del(esw, addr);
610 l2addr_hash_del(addr);
616 /* Sync vport UC/MC list from vport context */
617 static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
618 u32 vport_num, int list_type)
620 struct mlx5_vport *vport = &esw->vports[vport_num];
621 bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
622 u8 (*mac_list)[ETH_ALEN];
623 struct l2addr_node *node;
624 struct vport_addr *addr;
625 struct hlist_head *hash;
626 struct hlist_node *tmp;
632 size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) :
633 MLX5_MAX_MC_PER_VPORT(esw->dev);
	mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL);
	if (!mac_list)
		return;
639 hash = is_uc ? vport->uc_list : vport->mc_list;
641 for_each_l2hash_node(node, tmp, hash, hi) {
642 addr = container_of(node, struct vport_addr, node);
643 addr->action = MLX5_ACTION_DEL;
649 err = mlx5_query_nic_vport_mac_list(esw->dev, vport_num, list_type,
653 esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
654 vport_num, is_uc ? "UC" : "MC", size);
656 for (i = 0; i < size; i++) {
657 if (is_uc && !is_valid_ether_addr(mac_list[i]))
660 if (!is_uc && !is_multicast_ether_addr(mac_list[i]))
663 addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
665 addr->action = MLX5_ACTION_NONE;
669 addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr,
673 "Failed to add MAC(%pM) to vport[%d] DB\n",
674 mac_list[i], vport_num);
677 addr->vport = vport_num;
678 addr->action = MLX5_ACTION_ADD;
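/* Work handler for NIC vport context change events: re-read the vport's UC
 * and/or MC address list from FW, apply the resulting add/delete actions to
 * the L2 table and FDB, then re-arm the change events.
 */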
684 static void esw_vport_change_handler(struct work_struct *work)
686 struct mlx5_vport *vport =
687 container_of(work, struct mlx5_vport, vport_change_handler);
688 struct mlx5_core_dev *dev = vport->dev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	u8 mac[ETH_ALEN];

	mlx5_query_nic_vport_mac_address(dev, vport->vport, mac);
	esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
		  vport->vport, mac);
696 if (vport->enabled_events & UC_ADDR_CHANGE) {
697 esw_update_vport_addr_list(esw, vport->vport,
698 MLX5_NVPRT_LIST_TYPE_UC);
699 esw_apply_vport_addr_list(esw, vport->vport,
700 MLX5_NVPRT_LIST_TYPE_UC);
703 if (vport->enabled_events & MC_ADDR_CHANGE) {
704 esw_update_vport_addr_list(esw, vport->vport,
705 MLX5_NVPRT_LIST_TYPE_MC);
706 esw_apply_vport_addr_list(esw, vport->vport,
707 MLX5_NVPRT_LIST_TYPE_MC);
710 esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
712 arm_vport_context_events_cmd(dev, vport->vport,
713 vport->enabled_events);
716 static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
717 struct mlx5_vport *vport)
719 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
720 struct mlx5_flow_group *vlan_grp = NULL;
721 struct mlx5_flow_group *drop_grp = NULL;
722 struct mlx5_core_dev *dev = esw->dev;
723 struct mlx5_flow_namespace *root_ns;
724 struct mlx5_flow_table *acl;
	void *match_criteria;
	u32 *flow_group_in;
727 /* The egress acl table contains 2 rules:
728 * 1)Allow traffic with vlan_tag=vst_vlan_id
	 * 2)Drop all other traffic.
	 */
	int table_size = 2;
	int err = 0;
734 if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support) ||
735 !IS_ERR_OR_NULL(vport->egress.acl))
738 esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
739 vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
741 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
743 esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
747 flow_group_in = mlx5_vzalloc(inlen);
751 acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
752 if (IS_ERR_OR_NULL(acl)) {
754 esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
759 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
760 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
761 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
762 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
763 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
764 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
766 vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
767 if (IS_ERR_OR_NULL(vlan_grp)) {
768 err = PTR_ERR(vlan_grp);
769 esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
774 memset(flow_group_in, 0, inlen);
775 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
776 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
777 drop_grp = mlx5_create_flow_group(acl, flow_group_in);
778 if (IS_ERR_OR_NULL(drop_grp)) {
779 err = PTR_ERR(drop_grp);
780 esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
785 vport->egress.acl = acl;
786 vport->egress.drop_grp = drop_grp;
787 vport->egress.allowed_vlans_grp = vlan_grp;
789 kfree(flow_group_in);
790 if (err && !IS_ERR_OR_NULL(vlan_grp))
791 mlx5_destroy_flow_group(vlan_grp);
792 if (err && !IS_ERR_OR_NULL(acl))
793 mlx5_destroy_flow_table(acl);
796 static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
797 struct mlx5_vport *vport)
799 if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
800 mlx5_del_flow_rule(vport->egress.allowed_vlan);
802 if (!IS_ERR_OR_NULL(vport->egress.drop_rule))
803 mlx5_del_flow_rule(vport->egress.drop_rule);
805 vport->egress.allowed_vlan = NULL;
806 vport->egress.drop_rule = NULL;
809 static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
810 struct mlx5_vport *vport)
812 if (IS_ERR_OR_NULL(vport->egress.acl))
815 esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);
817 esw_vport_cleanup_egress_rules(esw, vport);
818 mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp);
819 mlx5_destroy_flow_group(vport->egress.drop_grp);
820 mlx5_destroy_flow_table(vport->egress.acl);
821 vport->egress.allowed_vlans_grp = NULL;
822 vport->egress.drop_grp = NULL;
823 vport->egress.acl = NULL;
826 static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
827 struct mlx5_vport *vport)
829 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
830 struct mlx5_core_dev *dev = esw->dev;
831 struct mlx5_flow_namespace *root_ns;
832 struct mlx5_flow_table *acl;
833 struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;
836 /* The ingress acl table contains 4 groups
837 * (2 active rules at the same time -
838 * 1 allow rule from one of the first 3 groups.
839 * 1 drop rule from the last group):
840 * 1)Allow untagged traffic with smac=original mac.
841 * 2)Allow untagged traffic.
842 * 3)Allow traffic with smac=original mac.
	 * 4)Drop all other traffic.
	 */
	int table_size = 4;
	int err = 0;
848 if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support) ||
849 !IS_ERR_OR_NULL(vport->ingress.acl))
852 esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
853 vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
855 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
857 esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
861 flow_group_in = mlx5_vzalloc(inlen);
865 acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
866 if (IS_ERR_OR_NULL(acl)) {
868 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
872 vport->ingress.acl = acl;
874 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
876 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
877 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
878 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
879 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
880 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
881 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
883 g = mlx5_create_flow_group(acl, flow_group_in);
884 if (IS_ERR_OR_NULL(g)) {
886 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
890 vport->ingress.allow_untagged_spoofchk_grp = g;
892 memset(flow_group_in, 0, inlen);
893 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
894 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
895 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
896 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
898 g = mlx5_create_flow_group(acl, flow_group_in);
899 if (IS_ERR_OR_NULL(g)) {
901 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
905 vport->ingress.allow_untagged_only_grp = g;
907 memset(flow_group_in, 0, inlen);
908 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
909 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
910 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
911 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
912 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
914 g = mlx5_create_flow_group(acl, flow_group_in);
915 if (IS_ERR_OR_NULL(g)) {
917 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
921 vport->ingress.allow_spoofchk_only_grp = g;
923 memset(flow_group_in, 0, inlen);
924 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
925 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
927 g = mlx5_create_flow_group(acl, flow_group_in);
928 if (IS_ERR_OR_NULL(g)) {
930 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
934 vport->ingress.drop_grp = g;
938 if (!IS_ERR_OR_NULL(vport->ingress.allow_spoofchk_only_grp))
939 mlx5_destroy_flow_group(
940 vport->ingress.allow_spoofchk_only_grp);
941 if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_only_grp))
942 mlx5_destroy_flow_group(
943 vport->ingress.allow_untagged_only_grp);
944 if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_spoofchk_grp))
945 mlx5_destroy_flow_group(
946 vport->ingress.allow_untagged_spoofchk_grp);
947 if (!IS_ERR_OR_NULL(vport->ingress.acl))
948 mlx5_destroy_flow_table(vport->ingress.acl);
951 kfree(flow_group_in);
954 static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
955 struct mlx5_vport *vport)
957 if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
958 mlx5_del_flow_rule(vport->ingress.drop_rule);
960 if (!IS_ERR_OR_NULL(vport->ingress.allow_rule))
961 mlx5_del_flow_rule(vport->ingress.allow_rule);
963 vport->ingress.drop_rule = NULL;
964 vport->ingress.allow_rule = NULL;
967 static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
968 struct mlx5_vport *vport)
970 if (IS_ERR_OR_NULL(vport->ingress.acl))
973 esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);
975 esw_vport_cleanup_ingress_rules(esw, vport);
976 mlx5_destroy_flow_group(vport->ingress.allow_spoofchk_only_grp);
977 mlx5_destroy_flow_group(vport->ingress.allow_untagged_only_grp);
978 mlx5_destroy_flow_group(vport->ingress.allow_untagged_spoofchk_grp);
979 mlx5_destroy_flow_group(vport->ingress.drop_grp);
980 mlx5_destroy_flow_table(vport->ingress.acl);
981 vport->ingress.acl = NULL;
982 vport->ingress.drop_grp = NULL;
983 vport->ingress.allow_spoofchk_only_grp = NULL;
984 vport->ingress.allow_untagged_only_grp = NULL;
985 vport->ingress.allow_untagged_spoofchk_grp = NULL;
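/* Rebuild the ingress ACL rules of a vport from its current VST vlan/qos
 * and spoofchk settings: an "allow" rule matching untagged traffic and/or
 * the permanent MAC as source, plus a catch-all drop rule. With nothing
 * configured the ingress ACL is removed entirely.
 */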
988 static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
989 struct mlx5_vport *vport)
997 if (vport->spoofchk) {
998 err = mlx5_query_nic_vport_mac_address(esw->dev, vport->vport, smac);
1001 "vport[%d] configure ingress rules failed, query smac failed, err(%d)\n",
1006 if (!is_valid_ether_addr(smac)) {
1007 mlx5_core_warn(esw->dev,
1008 "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
1014 esw_vport_cleanup_ingress_rules(esw, vport);
1016 if (!vport->vlan && !vport->qos && !vport->spoofchk) {
1017 esw_vport_disable_ingress_acl(esw, vport);
1021 esw_vport_enable_ingress_acl(esw, vport);
1024 "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
1025 vport->vport, vport->vlan, vport->qos);
1027 match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
1028 match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
1029 if (!match_v || !match_c) {
1031 esw_warn(esw->dev, "vport[%d] configure ingress rules failed, err(%d)\n",
1036 if (vport->vlan || vport->qos)
1037 MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.vlan_tag);
1039 if (vport->spoofchk) {
1040 MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.smac_47_16);
1041 MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.smac_15_0);
1042 smac_v = MLX5_ADDR_OF(fte_match_param,
1044 outer_headers.smac_47_16);
1045 ether_addr_copy(smac_v, smac);
1048 vport->ingress.allow_rule =
1049 mlx5_add_flow_rule(vport->ingress.acl,
1050 MLX5_MATCH_OUTER_HEADERS,
1053 MLX5_FLOW_CONTEXT_ACTION_ALLOW,
1055 if (IS_ERR_OR_NULL(vport->ingress.allow_rule)) {
1056 err = PTR_ERR(vport->ingress.allow_rule);
1057 pr_warn("vport[%d] configure ingress allow rule, err(%d)\n",
1059 vport->ingress.allow_rule = NULL;
1063 memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param));
1064 memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param));
1065 vport->ingress.drop_rule =
1066 mlx5_add_flow_rule(vport->ingress.acl,
1070 MLX5_FLOW_CONTEXT_ACTION_DROP,
1072 if (IS_ERR_OR_NULL(vport->ingress.drop_rule)) {
1073 err = PTR_ERR(vport->ingress.drop_rule);
1074 pr_warn("vport[%d] configure ingress drop rule, err(%d)\n",
1076 vport->ingress.drop_rule = NULL;
1082 esw_vport_cleanup_ingress_rules(esw, vport);
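/* Rebuild the egress ACL rules of a vport: allow only frames tagged with
 * the configured VST VLAN, drop everything else. With no vlan/qos set the
 * egress ACL is removed entirely.
 */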
1089 static int esw_vport_egress_config(struct mlx5_eswitch *esw,
1090 struct mlx5_vport *vport)
1096 esw_vport_cleanup_egress_rules(esw, vport);
1098 if (!vport->vlan && !vport->qos) {
1099 esw_vport_disable_egress_acl(esw, vport);
1103 esw_vport_enable_egress_acl(esw, vport);
1106 "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
1107 vport->vport, vport->vlan, vport->qos);
1109 match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
1110 match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
1111 if (!match_v || !match_c) {
1113 esw_warn(esw->dev, "vport[%d] configure egress rules failed, err(%d)\n",
1118 /* Allowed vlan rule */
1119 MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.vlan_tag);
1120 MLX5_SET_TO_ONES(fte_match_param, match_v, outer_headers.vlan_tag);
1121 MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.first_vid);
1122 MLX5_SET(fte_match_param, match_v, outer_headers.first_vid, vport->vlan);
1124 vport->egress.allowed_vlan =
1125 mlx5_add_flow_rule(vport->egress.acl,
1126 MLX5_MATCH_OUTER_HEADERS,
1129 MLX5_FLOW_CONTEXT_ACTION_ALLOW,
1131 if (IS_ERR_OR_NULL(vport->egress.allowed_vlan)) {
1132 err = PTR_ERR(vport->egress.allowed_vlan);
1133 pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
1135 vport->egress.allowed_vlan = NULL;
1139 /* Drop others rule (star rule) */
1140 memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param));
1141 memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param));
1142 vport->egress.drop_rule =
1143 mlx5_add_flow_rule(vport->egress.acl,
1147 MLX5_FLOW_CONTEXT_ACTION_DROP,
1149 if (IS_ERR_OR_NULL(vport->egress.drop_rule)) {
1150 err = PTR_ERR(vport->egress.drop_rule);
1151 pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n",
1153 vport->egress.drop_rule = NULL;
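/* Bring a vport under e-switch control: set up ingress/egress ACLs (VFs
 * only), move the vport admin state to AUTO, sync its address lists once via
 * the change handler and arm the events requested in @enable_events.
 */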
static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
			     int enable_events)
{
1164 struct mlx5_vport *vport = &esw->vports[vport_num];
1166 mutex_lock(&esw->state_lock);
1167 WARN_ON(vport->enabled);
1169 esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
1171 if (vport_num) { /* Only VFs need ACLs for VST and spoofchk filtering */
1172 esw_vport_ingress_config(esw, vport);
1173 esw_vport_egress_config(esw, vport);
1176 mlx5_modify_vport_admin_state(esw->dev,
1177 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
1179 MLX5_ESW_VPORT_ADMIN_STATE_AUTO);
1181 /* Sync with current vport context */
1182 vport->enabled_events = enable_events;
1183 esw_vport_change_handler(&vport->vport_change_handler);
1185 vport->enabled = true;
1187 arm_vport_context_events_cmd(esw->dev, vport_num, enable_events);
1189 esw->enabled_vports++;
1190 esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
1191 mutex_unlock(&esw->state_lock);
1194 static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
1196 struct mlx5_vport *vport = &esw->vports[vport_num];
1198 if (!vport->enabled)
1201 esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
1202 /* Mark this vport as disabled to discard new events */
1203 vport->enabled = false;
1205 synchronize_irq(mlx5_get_msix_vec(esw->dev, MLX5_EQ_VEC_ASYNC));
1207 mlx5_modify_vport_admin_state(esw->dev,
1208 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
1210 MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
1211 /* Wait for current already scheduled events to complete */
1212 flush_workqueue(esw->work_queue);
1213 /* Disable events from this vport */
1214 arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
1215 mutex_lock(&esw->state_lock);
1216 /* We don't assume VFs will cleanup after themselves.
1217 * Calling vport change handler while vport is disabled will cleanup
1218 * the vport resources.
1220 esw_vport_change_handler(&vport->vport_change_handler);
1221 vport->enabled_events = 0;
1223 esw_vport_disable_egress_acl(esw, vport);
1224 esw_vport_disable_ingress_acl(esw, vport);
1226 esw->enabled_vports--;
1227 mutex_unlock(&esw->state_lock);
1230 /* Public E-Switch API */
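/* Switch into SRIOV mode: vport 0 (PF) is re-enabled with the full SRIOV
 * event mask, the FDB is created and vports 0..nvfs are enabled. On failure
 * the abort path restores vport 0 to the non-SRIOV (UC events only) setup.
 */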
1231 int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs)
1236 if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
1237 MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1240 if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
1241 !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
1242 esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
1246 if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
1247 esw_warn(esw->dev, "E-Switch ingress ACL is not supported by FW\n");
1249 if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
1250 esw_warn(esw->dev, "E-Switch engress ACL is not supported by FW\n");
1252 esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d)\n", nvfs);
1254 esw_disable_vport(esw, 0);
	err = esw_create_fdb_table(esw, nvfs + 1);
	if (err)
		goto abort;
1260 for (i = 0; i <= nvfs; i++)
1261 esw_enable_vport(esw, i, SRIOV_VPORT_EVENTS);
1263 esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n",
1264 esw->enabled_vports);
1268 esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
1272 void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
1276 if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
1277 MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1280 esw_info(esw->dev, "disable SRIOV: active vports(%d)\n",
1281 esw->enabled_vports);
1283 for (i = 0; i < esw->total_vports; i++)
1284 esw_disable_vport(esw, i);
1286 esw_destroy_fdb_table(esw);
1288 /* VPORT 0 (PF) must be enabled back with non-sriov configuration */
	esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
}
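/* Allocate and initialize the e-switch: MPFS index bitmap, change-event
 * workqueue and per-vport state. Only vport 0 (PF) is enabled here; VF
 * vports are enabled later when SRIOV is turned on.
 */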
1292 int mlx5_eswitch_init(struct mlx5_core_dev *dev)
1294 int l2_table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
1295 int total_vports = MLX5_TOTAL_VPORTS(dev);
1296 struct mlx5_eswitch *esw;
1300 if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
1301 MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1305 "Total vports %d, l2 table size(%d), per vport: max uc(%d) max mc(%d)\n",
1306 total_vports, l2_table_size,
1307 MLX5_MAX_UC_PER_VPORT(dev),
1308 MLX5_MAX_MC_PER_VPORT(dev));
1310 esw = kzalloc(sizeof(*esw), GFP_KERNEL);
1316 esw->l2_table.bitmap = kcalloc(BITS_TO_LONGS(l2_table_size),
1317 sizeof(uintptr_t), GFP_KERNEL);
1318 if (!esw->l2_table.bitmap) {
1322 esw->l2_table.size = l2_table_size;
1324 esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
1325 if (!esw->work_queue) {
1330 esw->vports = kcalloc(total_vports, sizeof(struct mlx5_vport),
1337 mutex_init(&esw->state_lock);
1339 for (vport_num = 0; vport_num < total_vports; vport_num++) {
1340 struct mlx5_vport *vport = &esw->vports[vport_num];
1342 vport->vport = vport_num;
1344 INIT_WORK(&vport->vport_change_handler,
1345 esw_vport_change_handler);
1348 esw->total_vports = total_vports;
1349 esw->enabled_vports = 0;
1351 dev->priv.eswitch = esw;
1352 esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
1353 /* VF Vports will be enabled when SRIOV is enabled */
1356 if (esw->work_queue)
1357 destroy_workqueue(esw->work_queue);
1358 kfree(esw->l2_table.bitmap);
1364 void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
1366 if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
1367 MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1370 esw_info(esw->dev, "cleanup\n");
1371 esw_disable_vport(esw, 0);
1373 esw->dev->priv.eswitch = NULL;
1374 destroy_workqueue(esw->work_queue);
1375 kfree(esw->l2_table.bitmap);
1380 void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
1382 struct mlx5_eqe_vport_change *vc_eqe = &eqe->data.vport_change;
1383 u16 vport_num = be16_to_cpu(vc_eqe->vport_num);
1384 struct mlx5_vport *vport;
1387 pr_warn("MLX5 E-Switch: vport %d got an event while eswitch is not initialized\n",
1392 vport = &esw->vports[vport_num];
1394 queue_work(esw->work_queue, &vport->vport_change_handler);
1397 /* Vport Administration */
1398 #define ESW_ALLOWED(esw) \
1399 (esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev))
1400 #define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)
1402 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
1403 int vport, u8 mac[ETH_ALEN])
1406 struct mlx5_vport *evport;
1408 if (!ESW_ALLOWED(esw))
1410 if (!LEGAL_VPORT(esw, vport))
1413 evport = &esw->vports[vport];
1415 if (evport->spoofchk && !is_valid_ether_addr(mac)) {
1416 mlx5_core_warn(esw->dev,
1417 "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n",
1422 err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
1424 mlx5_core_warn(esw->dev,
1425 "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n",
1430 mutex_lock(&esw->state_lock);
1431 if (evport->enabled)
1432 err = esw_vport_ingress_config(esw, evport);
1433 mutex_unlock(&esw->state_lock);
1438 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
1439 int vport, int link_state)
1441 if (!ESW_ALLOWED(esw))
1443 if (!LEGAL_VPORT(esw, vport))
1446 return mlx5_modify_vport_admin_state(esw->dev,
1447 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
1451 int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
1452 int vport, struct ifla_vf_info *ivi)
	struct mlx5_vport *evport;
	u16 vlan;
	u8 qos;
1458 if (!ESW_ALLOWED(esw))
1460 if (!LEGAL_VPORT(esw, vport))
1463 evport = &esw->vports[vport];
1465 memset(ivi, 0, sizeof(*ivi));
1466 ivi->vf = vport - 1;
1468 mlx5_query_nic_vport_mac_address(esw->dev, vport, ivi->mac);
1469 ivi->linkstate = mlx5_query_vport_admin_state(esw->dev,
1470 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
	query_esw_vport_cvlan(esw->dev, vport, &vlan, &qos);
	ivi->vlan = vlan;
	ivi->qos = qos;
1475 ivi->spoofchk = evport->spoofchk;
1480 int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
1481 int vport, u16 vlan, u8 qos)
1483 struct mlx5_vport *evport;
1487 if (!ESW_ALLOWED(esw))
1489 if (!LEGAL_VPORT(esw, vport) || (vlan > 4095) || (qos > 7))
1495 evport = &esw->vports[vport];
1497 err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set);
1501 mutex_lock(&esw->state_lock);
	evport->vlan = vlan;
	evport->qos = qos;
1504 if (evport->enabled) {
1505 err = esw_vport_ingress_config(esw, evport);
1508 err = esw_vport_egress_config(esw, evport);
1512 mutex_unlock(&esw->state_lock);
1516 int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
1517 int vport, bool spoofchk)
1519 struct mlx5_vport *evport;
1523 if (!ESW_ALLOWED(esw))
1525 if (!LEGAL_VPORT(esw, vport))
1528 evport = &esw->vports[vport];
1530 mutex_lock(&esw->state_lock);
1531 pschk = evport->spoofchk;
1532 evport->spoofchk = spoofchk;
1533 if (evport->enabled)
1534 err = esw_vport_ingress_config(esw, evport);
1536 evport->spoofchk = pschk;
	mutex_unlock(&esw->state_lock);
	return err;
}
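/* Fill ifla_vf_stats for a vport from QUERY_VPORT_COUNTER, summing the
 * unicast, multicast and broadcast packet/octet counters per direction.
 */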
1542 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
1544 struct ifla_vf_stats *vf_stats)
1546 int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
1547 u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
1551 if (!ESW_ALLOWED(esw))
1553 if (!LEGAL_VPORT(esw, vport))
1556 out = mlx5_vzalloc(outlen);
1560 memset(in, 0, sizeof(in));
1562 MLX5_SET(query_vport_counter_in, in, opcode,
1563 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
1564 MLX5_SET(query_vport_counter_in, in, op_mod, 0);
1565 MLX5_SET(query_vport_counter_in, in, vport_number, vport);
1567 MLX5_SET(query_vport_counter_in, in, other_vport, 1);
1569 memset(out, 0, outlen);
1570 err = mlx5_cmd_exec(esw->dev, in, sizeof(in), out, outlen);
1574 #define MLX5_GET_CTR(p, x) \
1575 MLX5_GET64(query_vport_counter_out, p, x)
1577 memset(vf_stats, 0, sizeof(*vf_stats));
1578 vf_stats->rx_packets =
1579 MLX5_GET_CTR(out, received_eth_unicast.packets) +
1580 MLX5_GET_CTR(out, received_eth_multicast.packets) +
1581 MLX5_GET_CTR(out, received_eth_broadcast.packets);
1583 vf_stats->rx_bytes =
1584 MLX5_GET_CTR(out, received_eth_unicast.octets) +
1585 MLX5_GET_CTR(out, received_eth_multicast.octets) +
1586 MLX5_GET_CTR(out, received_eth_broadcast.octets);
1588 vf_stats->tx_packets =
1589 MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
1590 MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
1591 MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
1593 vf_stats->tx_bytes =
1594 MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
1595 MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
1596 MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
1598 vf_stats->multicast =
1599 MLX5_GET_CTR(out, received_eth_multicast.packets);
1601 vf_stats->broadcast =
1602 MLX5_GET_CTR(out, received_eth_broadcast.packets);