+ flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
+ MLX5_ST_SZ_BYTES(dest_format_struct) *
+ dest_list_size);
+ if (!flow_context)
+ return -ENOMEM;
+
+ MLX5_SET(flow_context, flow_context, flow_tag, fr->flow_tag);
+ MLX5_SET(flow_context, flow_context, action, fr->action);
+ MLX5_SET(flow_context, flow_context, destination_list_size,
+ dest_list_size);
+
+ i = 0;
+ list_for_each_entry(dest_n, &fr->dest_list, list) {
+ void *dest_addr = MLX5_ADDR_OF(flow_context, flow_context,
+ destination[i++]);
+
+ MLX5_SET(dest_format_struct, dest_addr, destination_type,
+ dest_n->dest.type);
+ MLX5_SET(dest_format_struct, dest_addr, destination_id,
+ dest_n->dest.vport_num);
+ }
+
+ in_match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
+ memcpy(in_match_value, fr->match_value, MLX5_ST_SZ_BYTES(fte_match_param));
+
+ err = mlx5_add_flow_table_entry(fr->ft, fr->match_criteria_enable,
+ fr->match_criteria, flow_context,
+ &flow_index);
+ if (!err) {
+ if (was_valid)
+ mlx5_del_flow_table_entry(fr->ft, fr->fi);
+ fr->fi = flow_index;
+ fr->valid = true;
+ }
+ kfree(flow_context);
+ return err;
+}
+
+/*
+ * Append @new_dest to @fr's destination list and re-apply the rule to
+ * hardware under fr->mutex.  On apply failure the new destination is
+ * unlinked and freed again, leaving the rule unchanged.
+ *
+ * Return: 0 on success or a negative errno (-ENOMEM, or the error from
+ * _mlx5_flow_rule_apply()).
+ */
+static int mlx5_flow_rule_add_dest(struct mlx5_flow_rule *fr,
+ struct mlx5_flow_destination *new_dest)
+{
+ struct dest_node *dest_n;
+ int err;
+
+ dest_n = kzalloc(sizeof(*dest_n), GFP_KERNEL);
+ if (!dest_n)
+ return -ENOMEM;
+
+ memcpy(&dest_n->dest, new_dest, sizeof(dest_n->dest));
+ mutex_lock(&fr->mutex);
+ list_add(&dest_n->list, &fr->dest_list);
+ err = _mlx5_flow_rule_apply(fr);
+ if (err) {
+ /* Roll back: drop the just-added node so the list matches HW. */
+ list_del(&dest_n->list);
+ kfree(dest_n);
+ }
+ mutex_unlock(&fr->mutex);
+ return err;
+}
+
+/*
+ * Remove the destination matching @dest from @fr's list and re-apply the
+ * rule to hardware.
+ *
+ * NOTE(review): the lookup compares only dest.vport_num, not dest.type —
+ * presumably all destinations here are vports; confirm against callers.
+ * The node is unlinked and freed even if _mlx5_flow_rule_apply() fails,
+ * so on error the in-memory list and the HW state may diverge.
+ *
+ * Return: 0 on success, -ENOENT if no matching destination, or the error
+ * from _mlx5_flow_rule_apply().
+ */
+static int mlx5_flow_rule_del_dest(struct mlx5_flow_rule *fr,
+ struct mlx5_flow_destination *dest)
+{
+ struct dest_node *dest_n;
+ struct dest_node *n;
+ int err;
+
+ mutex_lock(&fr->mutex);
+ list_for_each_entry_safe(dest_n, n, &fr->dest_list, list) {
+ if (dest->vport_num == dest_n->dest.vport_num)
+ goto found;
+ }
+ mutex_unlock(&fr->mutex);
+ return -ENOENT;
+
+found:
+ list_del(&dest_n->list);
+ err = _mlx5_flow_rule_apply(fr);
+ mutex_unlock(&fr->mutex);
+ /* Freed outside the lock; the node is already unreachable. */
+ kfree(dest_n);
+
+ return err;
+}
+
+/*
+ * Look up an existing flow rule that could be shared for @match_value.
+ * Unicast DMACs always get a fresh rule (returns NULL); multicast DMACs
+ * are looked up in the eswitch MC hash and, if present, the uplink rule
+ * attached to that MC address is reused.
+ *
+ * NOTE(review): @match_criteria_enable is currently unused here.
+ *
+ * Return: the shared rule, or NULL if a new rule must be allocated.
+ */
+static struct mlx5_flow_rule *find_fr(struct mlx5_eswitch *esw,
+ u8 match_criteria_enable,
+ u32 *match_value)
+{
+ struct hlist_head *hash = esw->mc_table;
+ struct esw_mc_addr *esw_mc;
+ u8 *dmac_v;
+
+ dmac_v = MLX5_ADDR_OF(fte_match_param, match_value,
+ outer_headers.dmac_47_16);
+
+ /* UNICAST FULL MATCH */
+ if (!is_multicast_ether_addr(dmac_v))
+ return NULL;
+
+ /* MULTICAST FULL MATCH */
+ esw_mc = l2addr_hash_find(hash, dmac_v, struct esw_mc_addr);
+
+ return esw_mc ? esw_mc->uplink_rule : NULL;
+}
+
+/*
+ * Allocate and initialize a flow rule object, copying the caller's match
+ * criteria/value buffers (each MLX5_ST_SZ_BYTES(fte_match_param) bytes).
+ * The refcount starts at 0 — the caller is expected to take the first
+ * reference (see mlx5_add_flow_rule()).
+ *
+ * Return: the new rule, or NULL on allocation failure.
+ */
+static struct mlx5_flow_rule *alloc_fr(void *ft,
+ u8 match_criteria_enable,
+ u32 *match_criteria,
+ u32 *match_value,
+ u32 action,
+ u32 flow_tag)
+{
+ struct mlx5_flow_rule *fr = kzalloc(sizeof(*fr), GFP_KERNEL);
+
+ if (!fr)
+ return NULL;
+
+ fr->match_criteria = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+ fr->match_value = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+ if (!fr->match_criteria || !fr->match_value) {
+ /* kfree(NULL) is a no-op, so partial allocation is handled. */
+ kfree(fr->match_criteria);
+ kfree(fr->match_value);
+ kfree(fr);
+ return NULL;
+ }
+
+ memcpy(fr->match_criteria, match_criteria, MLX5_ST_SZ_BYTES(fte_match_param));
+ memcpy(fr->match_value, match_value, MLX5_ST_SZ_BYTES(fte_match_param));
+ fr->match_criteria_enable = match_criteria_enable;
+ fr->flow_tag = flow_tag;
+ fr->action = action;
+
+ mutex_init(&fr->mutex);
+ INIT_LIST_HEAD(&fr->dest_list);
+ atomic_set(&fr->refcount, 0);
+ fr->ft = ft;
+ return fr;
+}
+
+/*
+ * Drop one reference on @fr and free it when the count hits zero.
+ *
+ * NOTE(review): this only releases memory — it does not remove a HW flow
+ * table entry or unlink remaining dest_list nodes; presumably the last
+ * caller has already torn those down via mlx5_flow_rule_del_dest().
+ */
+static void deref_fr(struct mlx5_flow_rule *fr)
+{
+ if (!atomic_dec_and_test(&fr->refcount))
+ return;
+
+ kfree(fr->match_criteria);
+ kfree(fr->match_value);
+ kfree(fr);
+}
+
+/*
+ * Get a flow rule for the given match and attach @dest to it.  An
+ * existing shareable rule (multicast, see find_fr()) is reused;
+ * otherwise a new one is allocated on the eswitch FDB.  A reference is
+ * taken either way and dropped again on failure.
+ *
+ * NOTE(review): failures return NULL, never ERR_PTR(), even though the
+ * caller (esw_fdb_set_vport_rule()) probes with IS_ERR_OR_NULL()/
+ * PTR_ERR() — the errno from mlx5_flow_rule_add_dest() is lost here.
+ *
+ * Return: the referenced rule, or NULL on failure.
+ */
+static struct mlx5_flow_rule *
+mlx5_add_flow_rule(struct mlx5_eswitch *esw,
+ u8 match_criteria_enable,
+ u32 *match_criteria,
+ u32 *match_value,
+ u32 action,
+ u32 flow_tag,
+ struct mlx5_flow_destination *dest)
+{
+ struct mlx5_flow_rule *fr;
+ int err;
+
+ fr = find_fr(esw, match_criteria_enable, match_value);
+ fr = fr ? fr : alloc_fr(esw->fdb_table.fdb, match_criteria_enable, match_criteria,
+ match_value, action, flow_tag);
+ if (!fr)
+ return NULL;
+
+ atomic_inc(&fr->refcount);
+
+ err = mlx5_flow_rule_add_dest(fr, dest);
+ if (err) {
+ /* Drops the reference taken above; frees fr if it was new. */
+ deref_fr(fr);
+ return NULL;
+ }
+
+ return fr;
+}
+
+/*
+ * Detach @vport from @fr and release one reference.  Only dest.vport_num
+ * is filled in; mlx5_flow_rule_del_dest() matches solely on vport_num,
+ * so dest.type is intentionally left uninitialized here.  The return
+ * value of the delete is ignored — best effort on teardown.
+ */
+static void mlx5_del_flow_rule(struct mlx5_flow_rule *fr, u32 vport)
+{
+ struct mlx5_flow_destination dest;
+
+ dest.vport_num = vport;
+ mlx5_flow_rule_del_dest(fr, &dest);
+ deref_fr(fr);
+}
+
+/* E-Switch FDB */
+/*
+ * Install an FDB rule forwarding traffic with destination MAC @mac to
+ * @vport: full outer-header DMAC match (mask ff:ff:ff:ff:ff:ff) with a
+ * forward-to-vport action.  The scratch match buffers are freed before
+ * returning; the rule keeps its own copies (see alloc_fr()).
+ *
+ * Return: the installed rule, or NULL on any failure (allocation or
+ * mlx5_add_flow_rule() error).
+ */
+static struct mlx5_flow_rule *
+esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
+{
+ int match_header = MLX5_MATCH_OUTER_HEADERS;
+ struct mlx5_flow_destination dest;
+ struct mlx5_flow_rule *flow_rule = NULL;
+ u32 *match_v;
+ u32 *match_c;
+ u8 *dmac_v;
+ u8 *dmac_c;
+
+ match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+ match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+ if (!match_v || !match_c) {
+ pr_warn("FDB: Failed to alloc match parameters\n");
+ goto out;
+ }
+ dmac_v = MLX5_ADDR_OF(fte_match_param, match_v,
+ outer_headers.dmac_47_16);
+ dmac_c = MLX5_ADDR_OF(fte_match_param, match_c,
+ outer_headers.dmac_47_16);
+
+ ether_addr_copy(dmac_v, mac);
+ /* Match criteria mask */
+ memset(dmac_c, 0xff, 6);
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest.vport_num = vport;
+
+ esw_debug(esw->dev,
+ "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
+ dmac_v, dmac_c, vport);
+ flow_rule =
+ mlx5_add_flow_rule(esw,
+ match_header,
+ match_c,
+ match_v,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ 0, &dest);
+ if (IS_ERR_OR_NULL(flow_rule)) {
+ /* NOTE(review): mlx5_add_flow_rule() only ever returns NULL on
+ * failure, so PTR_ERR() here always prints 0.
+ */
+ pr_warn(
+ "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
+ dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
+ flow_rule = NULL;
+ }
+out:
+ kfree(match_v);
+ kfree(match_c);
+ return flow_rule;
+}
+
+/*
+ * Create the eswitch FDB flow table with a single group matching the
+ * full outer-header DMAC (UC and MC full-match rules).  Table size is
+ * taken from the device's log_max_ft_size capability.  The table is
+ * stored in esw->fdb_table.fdb.
+ *
+ * NOTE(review): @nvports is currently unused.
+ *
+ * Return: 0 on success, -ENOMEM if table creation failed.
+ */
+static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_table_group g;
+ struct mlx5_flow_table *fdb;
+ u8 *dmac;
+
+ esw_debug(dev, "Create FDB log_max_size(%d)\n",
+ MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
+
+ memset(&g, 0, sizeof(g));
+ /* UC MC Full match rules*/
+ g.log_sz = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);
+ g.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac = MLX5_ADDR_OF(fte_match_param, g.match_criteria,
+ outer_headers.dmac_47_16);
+ /* Match criteria mask */
+ memset(dmac, 0xff, 6);
+
+ fdb = mlx5_create_flow_table(dev, 0,
+ MLX5_FLOW_TABLE_TYPE_ESWITCH,
+ 1, &g);
+ if (fdb)
+ esw_debug(dev, "ESW: FDB Table created fdb->id %d\n", mlx5_get_flow_table_id(fdb));
+ else
+ esw_warn(dev, "ESW: Failed to create FDB Table\n");
+
+ esw->fdb_table.fdb = fdb;
+ return fdb ? 0 : -ENOMEM;
+}
+
+/*
+ * Destroy the eswitch FDB table, if one was created, and clear the
+ * cached pointer so this is safe to call more than once.
+ */
+static void esw_destroy_fdb_table(struct mlx5_eswitch *esw)
+{
+ if (!esw->fdb_table.fdb)
+ return;
+
+ esw_debug(esw->dev, "Destroy FDB Table fdb(%d)\n",
+ mlx5_get_flow_table_id(esw->fdb_table.fdb));
+ mlx5_destroy_flow_table(esw->fdb_table.fdb);
+ esw->fdb_table.fdb = NULL;
+}
+
+/* E-Switch vport UC/MC lists management */
+/* Callback type applied per address when walking a vport's UC/MC list;
+ * returns 0 or a negative errno.
+ */
+typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
+ struct vport_addr *vaddr);
+
+static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
+{
+ struct hlist_head *hash = esw->l2_table.l2_hash;
+ struct esw_uc_addr *esw_uc;
+ u8 *mac = vaddr->node.addr;
+ u32 vport = vaddr->vport;
+ int err;
+
+ esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr);
+ if (esw_uc) {