/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/list.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
#include "en.h"

#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)

enum {
	MLX5E_FULLMATCH = 0,
	MLX5E_ALLMULTI  = 1,
	MLX5E_PROMISC   = 2,
};

enum {
	MLX5E_UC        = 0,
	MLX5E_MC_IPV4   = 1,
	MLX5E_MC_IPV6   = 2,
	MLX5E_MC_OTHER  = 3,
};

enum {
	MLX5E_ACTION_NONE = 0,
	MLX5E_ACTION_ADD  = 1,
	MLX5E_ACTION_DEL  = 2,
};
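/* One hash node per L2 address taken from the netdev UC/MC lists; 'action'
 * records whether a steering rule still has to be added or removed for it.
 */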
struct mlx5e_eth_addr_hash_node {
	struct hlist_node          hlist;
	u8                         action;
	struct mlx5e_eth_addr_info ai;
};
static inline int mlx5e_hash_eth_addr(u8 *addr)
{
	return addr[5];
}
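/* Add @addr to the hash if it is new, or mark an existing entry as
 * MLX5E_ACTION_NONE so the next apply pass leaves its rules untouched.
 */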
static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr)
{
	struct mlx5e_eth_addr_hash_node *hn;
	int ix = mlx5e_hash_eth_addr(addr);
	int found = 0;

	hlist_for_each_entry(hn, &hash[ix], hlist)
		if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
			found = 1;
			break;
		}

	if (found) {
		hn->action = MLX5E_ACTION_NONE;
		return;
	}

	hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
	if (!hn)
		return;

	ether_addr_copy(hn->ai.addr, addr);
	hn->action = MLX5E_ACTION_ADD;

	hlist_add_head(&hn->hlist, &hash[ix]);
}
static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
{
	hlist_del(&hn->hlist);
	kfree(hn);
}
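/* Remove only the rules that were actually installed for this address,
 * as recorded in ai->tt_vec, one per traffic type.
 */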
static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
					       struct mlx5e_eth_addr_info *ai)
{
	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_TCP]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_TCP]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_UDP]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_UDP]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV6))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV4))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4]);

	if (ai->tt_vec & BIT(MLX5E_TT_ANY))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_ANY]);
}
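/* Classify a destination MAC: unicast, IPv4 multicast (01:00:5e:xx with the
 * top bit of the fourth octet clear), IPv6 multicast (33:33:xx), or other.
 */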
static int mlx5e_get_eth_addr_type(u8 *addr)
{
	if (is_unicast_ether_addr(addr))
		return MLX5E_UC;

	if ((addr[0] == 0x01) &&
	    (addr[1] == 0x00) &&
	    (addr[2] == 0x5e) &&
	   !(addr[3] &  0x80))
		return MLX5E_MC_IPV4;

	if ((addr[0] == 0x33) &&
	    (addr[1] == 0x33))
		return MLX5E_MC_IPV6;

	return MLX5E_MC_OTHER;
}
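/* Map a rule type (and, for MLX5E_FULLMATCH, the address class) to the set
 * of traffic types (MLX5E_TT_*) that need a rule in the main flow table.
 */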
static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
{
	int eth_addr_type;
	u32 ret;

	switch (type) {
	case MLX5E_FULLMATCH:
		eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
		switch (eth_addr_type) {
		case MLX5E_UC:
			ret =
				BIT(MLX5E_TT_IPV4_TCP)       |
				BIT(MLX5E_TT_IPV6_TCP)       |
				BIT(MLX5E_TT_IPV4_UDP)       |
				BIT(MLX5E_TT_IPV6_UDP)       |
				BIT(MLX5E_TT_IPV4_IPSEC_AH)  |
				BIT(MLX5E_TT_IPV6_IPSEC_AH)  |
				BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
				BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
				BIT(MLX5E_TT_IPV4)           |
				BIT(MLX5E_TT_IPV6)           |
				BIT(MLX5E_TT_ANY)            |
				0;
			break;

		case MLX5E_MC_IPV4:
			ret =
				BIT(MLX5E_TT_IPV4_UDP)       |
				BIT(MLX5E_TT_IPV4)           |
				0;
			break;

		case MLX5E_MC_IPV6:
			ret =
				BIT(MLX5E_TT_IPV6_UDP)       |
				BIT(MLX5E_TT_IPV6)           |
				0;
			break;

		default: /* MLX5E_MC_OTHER */
			ret =
				BIT(MLX5E_TT_ANY)            |
				0;
			break;
		}
		break;

	case MLX5E_ALLMULTI:
		ret =
			BIT(MLX5E_TT_IPV4_UDP) |
			BIT(MLX5E_TT_IPV6_UDP) |
			BIT(MLX5E_TT_IPV4)     |
			BIT(MLX5E_TT_IPV6)     |
			BIT(MLX5E_TT_ANY)      |
			0;
		break;

	default: /* MLX5E_PROMISC */
		ret =
			BIT(MLX5E_TT_IPV4_TCP)       |
			BIT(MLX5E_TT_IPV6_TCP)       |
			BIT(MLX5E_TT_IPV4_UDP)       |
			BIT(MLX5E_TT_IPV6_UDP)       |
			BIT(MLX5E_TT_IPV4_IPSEC_AH)  |
			BIT(MLX5E_TT_IPV6_IPSEC_AH)  |
			BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
			BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
			BIT(MLX5E_TT_IPV4)           |
			BIT(MLX5E_TT_IPV6)           |
			BIT(MLX5E_TT_ANY)            |
			0;
		break;
	}

	return ret;
}
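/* Program one rule per traffic type selected by mlx5e_get_tt_vec(): all
 * rules share the same DMAC match (if any) and each forwards to its own TIR.
 */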
static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
				     struct mlx5e_eth_addr_info *ai,
				     int type, u32 *mc, u32 *mv)
{
	struct mlx5_flow_destination dest;
	u8 match_criteria_enable = 0;
	struct mlx5_flow_rule **rule_p;
	struct mlx5_flow_table *ft = priv->fts.main.t;
	u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
				   outer_headers.dmac_47_16);
	u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv,
				   outer_headers.dmac_47_16);
	u32 *tirn = priv->tirn;
	u32 tt_vec;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;

	switch (type) {
	case MLX5E_FULLMATCH:
		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		eth_broadcast_addr(mc_dmac);
		ether_addr_copy(mv_dmac, ai->addr);
		break;

	case MLX5E_ALLMULTI:
		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		mc_dmac[0] = 0x01;
		mv_dmac[0] = 0x01;
		break;

	default: /* MLX5E_PROMISC */
		break;
	}

	tt_vec = mlx5e_get_tt_vec(ai, type);

	if (tt_vec & BIT(MLX5E_TT_ANY)) {
		rule_p = &ai->ft_rule[MLX5E_TT_ANY];
		dest.tir_num = tirn[MLX5E_TT_ANY];
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_ANY);
	}

	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);

	if (tt_vec & BIT(MLX5E_TT_IPV4)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4];
		dest.tir_num = tirn[MLX5E_TT_IPV4];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IP);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6];
		dest.tir_num = tirn[MLX5E_TT_IPV6];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6);
	}

	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_UDP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IP);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_UDP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
	}

	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_TCP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_TCP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IP);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_TCP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
	}

	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_AH);

	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH];
		dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IP);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH];
		dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
	}

	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_ESP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IP);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
	}

	return 0;

err_del_ai:
	err = PTR_ERR(*rule_p);
	*rule_p = NULL;
	mlx5e_del_eth_addr_from_flow_table(priv, ai);

	return err;
}
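/* Wrapper that allocates the scratch match value/criteria buffers for
 * __mlx5e_add_eth_addr_rule() and frees them again on all paths.
 */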
static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
				   struct mlx5e_eth_addr_info *ai, int type)
{
	u32 *match_criteria;
	u32 *match_value;
	int err = 0;

	match_value    = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!match_value || !match_criteria) {
		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
		err = -ENOMEM;
		goto add_eth_addr_rule_out;
	}

	err = __mlx5e_add_eth_addr_rule(priv, ai, type, match_criteria,
					match_value);

add_eth_addr_rule_out:
	kvfree(match_criteria);
	kvfree(match_value);

	return err;
}
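/* Push the driver's active VLAN set into the NIC vport context; the list is
 * truncated (with a warning) if it exceeds the device's log_max_vlan_list.
 */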
static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
	struct net_device *ndev = priv->netdev;
	int max_list_size;
	int list_size = 0;
	u16 *vlans;
	int vlan;
	int err;
	int i = 0;

	for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID)
		list_size++;

	max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);

	if (list_size > max_list_size) {
		netdev_warn(ndev,
			    "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
			    list_size, max_list_size);
		list_size = max_list_size;
	}

	vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
	if (!vlans)
		return -ENOMEM;

	for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) {
		if (i >= list_size)
			break;
		vlans[i++] = vlan;
	}

	err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
	if (err)
		netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n",
			   err);

	kfree(vlans);
	return err;
}
enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
	MLX5E_VLAN_RULE_TYPE_ANY_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_VID,
};
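/* Install one rule in the VLAN table forwarding matching traffic to the main
 * flow table: untagged frames, any tagged frame, or one specific VID.
 */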
static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
				 enum mlx5e_vlan_rule_type rule_type,
				 u16 vid, u32 *mc, u32 *mv)
{
	struct mlx5_flow_table *ft = priv->fts.vlan.t;
	struct mlx5_flow_destination dest;
	u8 match_criteria_enable = 0;
	struct mlx5_flow_rule **rule_p;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fts.main.t;

	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		rule_p = &priv->vlan.untagged_rule;
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_VID:
		rule_p = &priv->vlan.any_vlan_rule;
		MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1);
		break;
	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
		rule_p = &priv->vlan.active_vlans_rule[vid];
		MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
		MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid);
		break;
	}

	*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
				     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
				     MLX5_FS_DEFAULT_FLOW_TAG,
				     &dest);

	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	return err;
}
static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
			       enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	u32 *match_criteria;
	u32 *match_value;
	int err = 0;

	match_value    = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!match_value || !match_criteria) {
		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
		err = -ENOMEM;
		goto add_vlan_rule_out;
	}

	if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID)
		mlx5e_vport_context_update_vlans(priv);

	err = __mlx5e_add_vlan_rule(priv, rule_type, vid, match_criteria,
				    match_value);

add_vlan_rule_out:
	kvfree(match_criteria);
	kvfree(match_value);

	return err;
}
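/* Remove the rule installed by mlx5e_add_vlan_rule() for the given type/VID
 * and, for MATCH_VID, resync the vport context VLAN list.
 */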
static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
				enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		if (priv->vlan.untagged_rule) {
			mlx5_del_flow_rule(priv->vlan.untagged_rule);
			priv->vlan.untagged_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_VID:
		if (priv->vlan.any_vlan_rule) {
			mlx5_del_flow_rule(priv->vlan.any_vlan_rule);
			priv->vlan.any_vlan_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
		mlx5e_vport_context_update_vlans(priv);
		if (priv->vlan.active_vlans_rule[vid]) {
			mlx5_del_flow_rule(priv->vlan.active_vlans_rule[vid]);
			priv->vlan.active_vlans_rule[vid] = NULL;
		}
		mlx5e_vport_context_update_vlans(priv);
		break;
	}
}
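/* "Disabling" the VLAN filter means steering any tagged frame to the main
 * table via the ANY_VID rule; when the netdev is promiscuous that rule is
 * managed by the promisc path, so it is only touched here otherwise.
 */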
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{
	if (!priv->vlan.filter_disabled)
		return;

	priv->vlan.filter_disabled = false;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
}

void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
{
	if (priv->vlan.filter_disabled)
		return;

	priv->vlan.filter_disabled = true;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
}
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
			  u16 vid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	set_bit(vid, priv->vlan.active_vlans);

	return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
}

int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
			   u16 vid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	clear_bit(vid, priv->vlan.active_vlans);

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);

	return 0;
}
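/* Iterate over every node of an address hash table; the _safe variant allows
 * mlx5e_execute_action() to free the current node on MLX5E_ACTION_DEL.
 */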
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
static void mlx5e_execute_action(struct mlx5e_priv *priv,
				 struct mlx5e_eth_addr_hash_node *hn)
{
	switch (hn->action) {
	case MLX5E_ACTION_ADD:
		mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
		hn->action = MLX5E_ACTION_NONE;
		break;

	case MLX5E_ACTION_DEL:
		mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
		mlx5e_del_eth_addr_from_hash(hn);
		break;
	}
}
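/* Snapshot the netdev's unicast/multicast lists (plus our own dev_addr) into
 * the driver hashes, under the netdev address lock.
 */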
static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(netdev);

	mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc,
				   priv->netdev->dev_addr);

	netdev_for_each_uc_addr(ha, netdev)
		mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, ha->addr);

	netdev_for_each_mc_addr(ha, netdev)
		mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_mc, ha->addr);

	netif_addr_unlock_bh(netdev);
}
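/* Flatten one address hash into the array handed to firmware; the local
 * dev_addr (UC) or broadcast address (MC) takes slot 0 and is not repeated.
 */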
static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
				  u8 addr_array[][ETH_ALEN], int size)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct net_device *ndev = priv->netdev;
	struct mlx5e_eth_addr_hash_node *hn;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int i = 0;
	int hi;

	addr_list = is_uc ? priv->eth_addr.netdev_uc : priv->eth_addr.netdev_mc;

	if (is_uc) /* Make sure our own address is pushed first */
		ether_addr_copy(addr_array[i++], ndev->dev_addr);
	else if (priv->eth_addr.broadcast_enabled)
		ether_addr_copy(addr_array[i++], ndev->broadcast);

	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
		if (ether_addr_equal(ndev->dev_addr, hn->ai.addr))
			continue;
		if (i >= size)
			break;
		ether_addr_copy(addr_array[i++], hn->ai.addr);
	}
}
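/* Push the current UC or MC address list into the NIC vport context,
 * truncating (with a warning) to the device's log_max_current_{uc,mc}_list.
 */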
static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
						 int list_type)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct mlx5e_eth_addr_hash_node *hn;
	u8 (*addr_array)[ETH_ALEN] = NULL;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int max_size;
	int size;
	int err;
	int hi;

	size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0);
	max_size = is_uc ?
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);

	addr_list = is_uc ? priv->eth_addr.netdev_uc : priv->eth_addr.netdev_mc;
	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
		size++;

	if (size > max_size) {
		netdev_warn(priv->netdev,
			    "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
			    is_uc ? "UC" : "MC", size, max_size);
		size = max_size;
	}

	if (size) {
		addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
		if (!addr_array) {
			err = -ENOMEM;
			goto out;
		}
		mlx5e_fill_addr_array(priv, list_type, addr_array, size);
	}

	err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
out:
	if (err)
		netdev_err(priv->netdev,
			   "Failed to modify vport %s list err(%d)\n",
			   is_uc ? "UC" : "MC", err);
	kfree(addr_array);
}
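/* Resync everything the vport context mirrors: the UC and MC address lists
 * and the allmulti/promiscuous flags.
 */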
static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;

	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
	mlx5_modify_nic_vport_promisc(priv->mdev, 0,
				      ea->allmulti_enabled,
				      ea->promisc_enabled);
}
static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
		mlx5e_execute_action(priv, hn);

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
		mlx5e_execute_action(priv, hn);
}
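/* Mark-and-sweep: mark every known address for deletion, re-add whatever is
 * still on the netdev lists, then execute the resulting add/del actions.
 */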
static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
		hn->action = MLX5E_ACTION_DEL;
	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
		hn->action = MLX5E_ACTION_DEL;

	if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
		mlx5e_sync_netdev_addr(priv);

	mlx5e_apply_netdev_addr(priv);
}
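/* ndo_set_rx_mode work: reconcile promisc/allmulti/broadcast state and the
 * UC/MC steering rules with the current netdev flags, then update the vport
 * context.
 */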
void mlx5e_set_rx_mode_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       set_rx_mode_work);

	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
	struct net_device *ndev = priv->netdev;

	bool rx_mode_enable   = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
	bool promisc_enabled   = rx_mode_enable && (ndev->flags & IFF_PROMISC);
	bool allmulti_enabled  = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	bool enable_promisc    = !ea->promisc_enabled   &&  promisc_enabled;
	bool disable_promisc   =  ea->promisc_enabled   && !promisc_enabled;
	bool enable_allmulti   = !ea->allmulti_enabled  &&  allmulti_enabled;
	bool disable_allmulti  =  ea->allmulti_enabled  && !allmulti_enabled;
	bool enable_broadcast  = !ea->broadcast_enabled &&  broadcast_enabled;
	bool disable_broadcast =  ea->broadcast_enabled && !broadcast_enabled;

	if (enable_promisc) {
		mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
		if (!priv->vlan.filter_disabled)
			mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
					    0);
	}
	if (enable_allmulti)
		mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

	mlx5e_handle_netdev_addr(priv);

	if (disable_broadcast)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
	if (disable_promisc) {
		if (!priv->vlan.filter_disabled)
			mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
					    0);
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
	}

	ea->promisc_enabled   = promisc_enabled;
	ea->allmulti_enabled  = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;

	mlx5e_vport_context_update(priv);
}
static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
{
	int i;

	for (i = ft->num_groups - 1; i >= 0; i--) {
		if (!IS_ERR_OR_NULL(ft->g[i]))
			mlx5_destroy_flow_group(ft->g[i]);
		ft->g[i] = NULL;
	}
	ft->num_groups = 0;
}
void mlx5e_init_eth_addr(struct mlx5e_priv *priv)
{
	ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast);
}
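/* The main table is split into flow groups by match criteria: groups 0-2
 * match on ethertype/ip_protocol only, groups 3-5 add a full DMAC match and
 * groups 6-8 match the multicast DMAC prefix; the sizes below bound how many
 * flow entries each group may hold.
 */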
#define MLX5E_MAIN_GROUP0_SIZE	BIT(3)
#define MLX5E_MAIN_GROUP1_SIZE	BIT(1)
#define MLX5E_MAIN_GROUP2_SIZE	BIT(0)
#define MLX5E_MAIN_GROUP3_SIZE	BIT(14)
#define MLX5E_MAIN_GROUP4_SIZE	BIT(13)
#define MLX5E_MAIN_GROUP5_SIZE	BIT(11)
#define MLX5E_MAIN_GROUP6_SIZE	BIT(2)
#define MLX5E_MAIN_GROUP7_SIZE	BIT(1)
#define MLX5E_MAIN_GROUP8_SIZE	BIT(0)
#define MLX5E_MAIN_TABLE_SIZE	(MLX5E_MAIN_GROUP0_SIZE +\
				 MLX5E_MAIN_GROUP1_SIZE +\
				 MLX5E_MAIN_GROUP2_SIZE +\
				 MLX5E_MAIN_GROUP3_SIZE +\
				 MLX5E_MAIN_GROUP4_SIZE +\
				 MLX5E_MAIN_GROUP5_SIZE +\
				 MLX5E_MAIN_GROUP6_SIZE +\
				 MLX5E_MAIN_GROUP7_SIZE +\
				 MLX5E_MAIN_GROUP8_SIZE)
static int __mlx5e_create_main_groups(struct mlx5e_flow_table *ft, u32 *in,
				      int inlen)
{
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	u8 *dmac = MLX5_ADDR_OF(create_flow_group_in, in,
				match_criteria.outer_headers.dmac_47_16);
	int err;
	int ix = 0;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	eth_broadcast_addr(dmac);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	eth_broadcast_addr(dmac);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP4_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	eth_broadcast_addr(dmac);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP5_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP6_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP7_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP8_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return err;
}
static int mlx5e_create_main_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	err = __mlx5e_create_main_groups(ft, in, inlen);

	kvfree(in);
	return err;
}
static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fts.main;
	int err;

	ft->num_groups = 0;
	ft->t = mlx5_create_flow_table(priv->fts.ns, 1, MLX5E_MAIN_TABLE_SIZE);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}
	ft->g = kcalloc(MLX5E_NUM_MAIN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_main_flow_table;
	}

	err = mlx5e_create_main_groups(ft);
	if (err)
		goto err_free_g;

	return 0;

err_free_g:
	kfree(ft->g);
err_destroy_main_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}
static void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
	mlx5e_destroy_groups(ft);
	kfree(ft->g);
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;
}

static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fts.main);
}
#define MLX5E_NUM_VLAN_GROUPS	2
#define MLX5E_VLAN_GROUP0_SIZE	BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE	BIT(1)
#define MLX5E_VLAN_TABLE_SIZE	(MLX5E_VLAN_GROUP0_SIZE +\
				 MLX5E_VLAN_GROUP1_SIZE)
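/* Group 0 matches vlan_tag + first_vid (one entry per active VLAN ID);
 * group 1 matches vlan_tag only and holds the untagged and any-VID rules.
 */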
static int __mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft, u32 *in,
				      int inlen)
{
	int err;
	int ix = 0;
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return err;
}
static int mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	err = __mlx5e_create_vlan_groups(ft, in, inlen);

	kvfree(in);
	return err;
}
static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fts.vlan;
	int err;

	ft->num_groups = 0;
	ft->t = mlx5_create_flow_table(priv->fts.ns, 1, MLX5E_VLAN_TABLE_SIZE);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}
	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_vlan_flow_table;
	}

	err = mlx5e_create_vlan_groups(ft);
	if (err)
		goto err_free_g;

	return 0;

err_free_g:
	kfree(ft->g);
err_destroy_vlan_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}
static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fts.vlan);
}
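/* Create the VLAN and main flow tables in the kernel RX namespace; the VLAN
 * table comes first since its rules forward into the main table, and the
 * default untagged rule is installed last.
 */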
int mlx5e_create_flow_tables(struct mlx5e_priv *priv)
{
	int err;

	priv->fts.ns = mlx5_get_flow_namespace(priv->mdev,
					       MLX5_FLOW_NAMESPACE_KERNEL);

	if (!priv->fts.ns)
		return -EINVAL;

	err = mlx5e_create_vlan_flow_table(priv);
	if (err)
		return err;

	err = mlx5e_create_main_flow_table(priv);
	if (err)
		goto err_destroy_vlan_flow_table;

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
	if (err)
		goto err_destroy_main_flow_table;

	return 0;

err_destroy_main_flow_table:
	mlx5e_destroy_main_flow_table(priv);
err_destroy_vlan_flow_table:
	mlx5e_destroy_vlan_flow_table(priv);

	return err;
}
void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
	mlx5e_destroy_main_flow_table(priv);
	mlx5e_destroy_vlan_flow_table(priv);
}