net/mlx5: E-Switch, Vport ingress/egress ACLs rules for spoofchk
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 04ad659..5d09113 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -231,9 +231,8 @@ static void mlx5e_update_stats_work(struct work_struct *work)
        mutex_lock(&priv->state_lock);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
                mlx5e_update_stats(priv);
-               schedule_delayed_work(dwork,
-                                     msecs_to_jiffies(
-                                             MLX5E_UPDATE_STATS_INTERVAL));
+               queue_delayed_work(priv->wq, dwork,
+                                  msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
        }
        mutex_unlock(&priv->state_lock);
 }
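
Throughout this diff, deferred work moves off the global system workqueue (schedule_work()/schedule_delayed_work()) onto a driver-private queue, priv->wq, so that teardown can flush and destroy exactly this driver's work. A minimal sketch of the pattern, with illustrative names rather than the driver's own:

#include <linux/errno.h>
#include <linux/workqueue.h>

struct demo_priv {
	struct workqueue_struct *wq;
	struct delayed_work stats_work;
};

static void demo_stats_work(struct work_struct *work)
{
	struct demo_priv *priv =
		container_of(work, struct demo_priv, stats_work.work);

	/* ...refresh counters..., then re-arm on the private queue */
	queue_delayed_work(priv->wq, &priv->stats_work, HZ);
}

static int demo_init(struct demo_priv *priv)
{
	priv->wq = create_singlethread_workqueue("demo");
	if (!priv->wq)
		return -ENOMEM;
	INIT_DELAYED_WORK(&priv->stats_work, demo_stats_work);
	queue_delayed_work(priv->wq, &priv->stats_work, 0);
	return 0;
}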
@@ -249,7 +248,7 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
        switch (event) {
        case MLX5_DEV_EVENT_PORT_UP:
        case MLX5_DEV_EVENT_PORT_DOWN:
-               schedule_work(&priv->update_carrier_work);
+               queue_work(priv->wq, &priv->update_carrier_work);
                break;
 
        default:
@@ -1691,8 +1690,11 @@ int mlx5e_open_locked(struct net_device *netdev)
        mlx5e_redirect_rqts(priv);
        mlx5e_update_carrier(priv);
        mlx5e_timestamp_init(priv);
+#ifdef CONFIG_RFS_ACCEL
+       priv->netdev->rx_cpu_rmap = priv->mdev->rmap;
+#endif
 
-       schedule_delayed_work(&priv->update_stats_work, 0);
+       queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
 
        return 0;
 
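The new CONFIG_RFS_ACCEL block publishes the device's IRQ-to-CPU reverse map so accelerated RFS can steer a flow to the RX queue whose interrupt is affine to the consuming CPU. A sketch of how such an rmap is typically built (an assumption: the patch only consumes a ready-made priv->mdev->rmap, presumably filled in at EQ/IRQ setup elsewhere in the series; the helper and its irqs[] parameter are invented for illustration):

#include <linux/cpu_rmap.h>
#include <linux/errno.h>
#include <linux/netdevice.h>

/* rx_cpu_rmap only exists on struct net_device under CONFIG_RFS_ACCEL */
static int demo_setup_rmap(struct net_device *dev, int nvec, int *irqs)
{
	int i, err;

	dev->rx_cpu_rmap = alloc_irq_cpu_rmap(nvec);
	if (!dev->rx_cpu_rmap)
		return -ENOMEM;

	for (i = 0; i < nvec; i++) {
		/* ties each completion IRQ to its CPU affinity hint so
		 * ndo_rx_flow_steer can pick a queue near the consumer */
		err = irq_cpu_rmap_add(dev->rx_cpu_rmap, irqs[i]);
		if (err)
			return err;
	}
	return 0;
}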
@@ -2199,7 +2201,7 @@ static void mlx5e_set_rx_mode(struct net_device *dev)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
 
-       schedule_work(&priv->set_rx_mode_work);
+       queue_work(priv->wq, &priv->set_rx_mode_work);
 }
 
 static int mlx5e_set_mac(struct net_device *netdev, void *addr)
@@ -2214,7 +2216,7 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr)
        ether_addr_copy(netdev->dev_addr, saddr->sa_data);
        netif_addr_unlock_bh(netdev);
 
-       schedule_work(&priv->set_rx_mode_work);
+       queue_work(priv->wq, &priv->set_rx_mode_work);
 
        return 0;
 }
@@ -2305,6 +2307,21 @@ static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
        return err;
 }
 
+#ifdef CONFIG_RFS_ACCEL
+static int set_feature_arfs(struct net_device *netdev, bool enable)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       int err;
+
+       if (enable)
+               err = mlx5e_arfs_enable(priv);
+       else
+               err = mlx5e_arfs_disable(priv);
+
+       return err;
+}
+#endif
+
 static int mlx5e_handle_feature(struct net_device *netdev,
                                netdev_features_t wanted_features,
                                netdev_features_t feature,
@@ -2344,6 +2361,10 @@ static int mlx5e_set_features(struct net_device *netdev,
                                    set_feature_rx_all);
        err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX,
                                    set_feature_rx_vlan);
+#ifdef CONFIG_RFS_ACCEL
+       err |= mlx5e_handle_feature(netdev, features, NETIF_F_NTUPLE,
+                                   set_feature_arfs);
+#endif
 
        return err ? -EINVAL : 0;
 }
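
set_feature_arfs slots into the driver's generic feature-toggle dispatcher. A paraphrased sketch of the contract mlx5e_handle_feature presumably implements, with the body written for illustration rather than copied from the driver: invoke the handler only when the requested bit actually differs from the current state, then fold the result back into netdev->features.

#include <linux/netdevice.h>

static int demo_handle_feature(struct net_device *netdev,
			       netdev_features_t wanted,
			       netdev_features_t feature,
			       int (*handler)(struct net_device *, bool))
{
	bool enable = !!(wanted & feature);
	int err;

	/* nothing to do if this bit is not changing */
	if (!((netdev->features ^ wanted) & feature))
		return 0;

	err = handler(netdev, enable);
	if (err)
		return err;

	if (enable)
		netdev->features |= feature;
	else
		netdev->features &= ~feature;
	return 0;
}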
@@ -2417,6 +2438,14 @@ static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
                                           vlan, qos);
 }
 
+static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       struct mlx5_core_dev *mdev = priv->mdev;
+
+       return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
+}
+
 static int mlx5_vport_link2ifla(u8 esw_link)
 {
        switch (esw_link) {
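The new callback is reached from rtnetlink (IFLA_VF_SPOOFCHK), i.e. `ip link set <dev> vf <n> spoofchk on|off`. The `vf + 1` translation reflects the e-switch vport numbering, in which vport 0 is the PF itself and VF n sits on vport n + 1; per the commit subject, the e-switch side enforces the setting with ingress/egress ACL rules on that vport.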
@@ -2481,7 +2510,7 @@ static void mlx5e_add_vxlan_port(struct net_device *netdev,
        if (!mlx5e_vxlan_allowed(priv->mdev))
                return;
 
-       mlx5e_vxlan_add_port(priv, be16_to_cpu(port));
+       mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 1);
 }
 
 static void mlx5e_del_vxlan_port(struct net_device *netdev,
@@ -2492,7 +2521,7 @@ static void mlx5e_del_vxlan_port(struct net_device *netdev,
        if (!mlx5e_vxlan_allowed(priv->mdev))
                return;
 
-       mlx5e_vxlan_del_port(priv, be16_to_cpu(port));
+       mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 0);
 }
 
 static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
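
Both VXLAN port ndos now defer to a work item instead of touching the device directly: the add/del callbacks can be invoked in atomic context, while programming a UDP port into the hardware takes a sleeping firmware command. A sketch of the deferral, with names assumed rather than taken verbatim from the mlx5e vxlan code:

#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct demo_vxlan_work {
	struct work_struct work;
	sa_family_t sa_family;
	u16 port;
	int add;
};

static void demo_vxlan_work_fn(struct work_struct *work)
{
	struct demo_vxlan_work *w =
		container_of(work, struct demo_vxlan_work, work);

	/* may sleep here: issue the firmware command that adds or
	 * removes the UDP port from the hardware's VXLAN port table */
	kfree(w);
}

static void demo_vxlan_queue_work(struct workqueue_struct *wq,
				  sa_family_t sa_family, u16 port, int add)
{
	struct demo_vxlan_work *w = kzalloc(sizeof(*w), GFP_ATOMIC);

	if (!w)
		return;		/* best effort; caller cannot sleep */
	INIT_WORK(&w->work, demo_vxlan_work_fn);
	w->sa_family = sa_family;
	w->port = port;
	w->add = add;
	queue_work(wq, &w->work);
}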
@@ -2559,6 +2588,9 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = {
        .ndo_set_features        = mlx5e_set_features,
        .ndo_change_mtu          = mlx5e_change_mtu,
        .ndo_do_ioctl            = mlx5e_ioctl,
+#ifdef CONFIG_RFS_ACCEL
+       .ndo_rx_flow_steer       = mlx5e_rx_flow_steer,
+#endif
 };
 
 static const struct net_device_ops mlx5e_netdev_ops_sriov = {
@@ -2578,8 +2610,12 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
        .ndo_add_vxlan_port      = mlx5e_add_vxlan_port,
        .ndo_del_vxlan_port      = mlx5e_del_vxlan_port,
        .ndo_features_check      = mlx5e_features_check,
+#ifdef CONFIG_RFS_ACCEL
+       .ndo_rx_flow_steer       = mlx5e_rx_flow_steer,
+#endif
        .ndo_set_vf_mac          = mlx5e_set_vf_mac,
        .ndo_set_vf_vlan         = mlx5e_set_vf_vlan,
+       .ndo_set_vf_spoofchk     = mlx5e_set_vf_spoofchk,
        .ndo_get_vf_config       = mlx5e_get_vf_config,
        .ndo_set_vf_link_state   = mlx5e_set_vf_link_state,
        .ndo_get_vf_stats        = mlx5e_get_vf_stats,
@@ -2775,13 +2811,17 @@ static void mlx5e_build_netdev(struct net_device *netdev)
        netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;
 
        if (mlx5e_vxlan_allowed(mdev)) {
-               netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL;
+               netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL |
+                                          NETIF_F_GSO_UDP_TUNNEL_CSUM |
+                                          NETIF_F_GSO_PARTIAL;
                netdev->hw_enc_features |= NETIF_F_IP_CSUM;
-               netdev->hw_enc_features |= NETIF_F_RXCSUM;
+               netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
                netdev->hw_enc_features |= NETIF_F_TSO;
                netdev->hw_enc_features |= NETIF_F_TSO6;
-               netdev->hw_enc_features |= NETIF_F_RXHASH;
                netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
+               netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
+                                          NETIF_F_GSO_PARTIAL;
+               netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
        }
 
        mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
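
The reworked encapsulation bits hinge on NETIF_F_GSO_PARTIAL: roughly, the stack may segment a super-packet itself and leave only uniform per-segment fixups to the device, and gso_partial_features names the GSO types the device supports only through that partial path. Advertising NETIF_F_GSO_UDP_TUNNEL_CSUM there means tunnels that need an outer UDP checksum still get hardware-assisted segmentation instead of falling back to software, while plain NETIF_F_GSO_UDP_TUNNEL remains fully offloaded.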
@@ -2800,8 +2840,12 @@ static void mlx5e_build_netdev(struct net_device *netdev)
        if (FT_CAP(flow_modify_en) &&
            FT_CAP(modify_root) &&
            FT_CAP(identified_miss_table_mode) &&
-           FT_CAP(flow_table_modify))
-               priv->netdev->hw_features      |= NETIF_F_HW_TC;
+           FT_CAP(flow_table_modify)) {
+               netdev->hw_features      |= NETIF_F_HW_TC;
+#ifdef CONFIG_RFS_ACCEL
+               netdev->hw_features      |= NETIF_F_NTUPLE;
+#endif
+       }
 
        netdev->features         |= NETIF_F_HIGHDMA;
 
@@ -2915,10 +2959,14 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
 
        priv = netdev_priv(netdev);
 
+       priv->wq = create_singlethread_workqueue("mlx5e");
+       if (!priv->wq)
+               goto err_free_netdev;
+
        err = mlx5_alloc_map_uar(mdev, &priv->cq_uar, false);
        if (err) {
                mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
-               goto err_free_netdev;
+               goto err_destroy_wq;
        }
 
        err = mlx5_core_alloc_pd(mdev, &priv->pdn);
@@ -2969,15 +3017,15 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
                goto err_destroy_rqts;
        }
 
-       err = mlx5e_create_flow_tables(priv);
+       err = mlx5e_create_flow_steering(priv);
        if (err) {
-               mlx5_core_warn(mdev, "create flow tables failed, %d\n", err);
+               mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
                goto err_destroy_tirs;
        }
 
        mlx5e_create_q_counter(priv);
 
-       mlx5e_init_eth_addr(priv);
+       mlx5e_init_l2_addr(priv);
 
        mlx5e_vxlan_init(priv);
 
@@ -3002,7 +3050,7 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
        }
 
        mlx5e_enable_async_events(priv);
-       schedule_work(&priv->set_rx_mode_work);
+       queue_work(priv->wq, &priv->set_rx_mode_work);
 
        return priv;
 
@@ -3011,7 +3059,7 @@ err_tc_cleanup:
 
 err_dealloc_q_counters:
        mlx5e_destroy_q_counter(priv);
-       mlx5e_destroy_flow_tables(priv);
+       mlx5e_destroy_flow_steering(priv);
 
 err_destroy_tirs:
        mlx5e_destroy_tirs(priv);
@@ -3040,6 +3088,9 @@ err_dealloc_pd:
 err_unmap_free_uar:
        mlx5_unmap_free_uar(mdev, &priv->cq_uar);
 
+err_destroy_wq:
+       destroy_workqueue(priv->wq);
+
 err_free_netdev:
        free_netdev(netdev);
 
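Note the unwind chain stays LIFO: the workqueue is now the first resource acquired in mlx5e_create_netdev, so err_destroy_wq becomes the last teardown label before err_free_netdev, and every later failure (from UAR mapping onwards) falls through to it.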
@@ -3053,9 +3104,9 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
 
        set_bit(MLX5E_STATE_DESTROYING, &priv->state);
 
-       schedule_work(&priv->set_rx_mode_work);
+       queue_work(priv->wq, &priv->set_rx_mode_work);
        mlx5e_disable_async_events(priv);
-       flush_scheduled_work();
+       flush_workqueue(priv->wq);
        if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
                netif_device_detach(netdev);
                mutex_lock(&priv->state_lock);
@@ -3069,7 +3120,7 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
        mlx5e_tc_cleanup(priv);
        mlx5e_vxlan_cleanup(priv);
        mlx5e_destroy_q_counter(priv);
-       mlx5e_destroy_flow_tables(priv);
+       mlx5e_destroy_flow_steering(priv);
        mlx5e_destroy_tirs(priv);
        mlx5e_destroy_rqts(priv);
        mlx5e_close_drop_rq(priv);
@@ -3079,6 +3130,8 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
        mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn);
        mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
        mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
+       cancel_delayed_work_sync(&priv->update_stats_work);
+       destroy_workqueue(priv->wq);
 
        if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state))
                free_netdev(netdev);
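
Ordering matters in the teardown too: update_stats_work re-arms itself while the interface is open, so a flush alone could race with a freshly re-queued instance; cancel_delayed_work_sync() both waits for a running instance and prevents re-queueing, making the subsequent destroy_workqueue() safe. Replacing flush_scheduled_work() with flush_workqueue(priv->wq) also drops a dependency on the shared system workqueue, where flushing everyone's work is discouraged and can deadlock.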