net/mlx5: E-Switch, Vport ingress/egress ACLs rules for spoofchk
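
Beyond the e-switch ACL plumbing named in the title, the en_main.c side of this diff does four things: it moves the driver's deferred work (stats refresh, carrier updates, rx-mode configuration) off the system workqueue onto a new per-device workqueue priv->wq, hooks up .ndo_set_vf_spoofchk, defers VXLAN port add/del to that workqueue, and widens the VXLAN offload set with tunnel-checksum and partial GSO features.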
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 4ccfc1a..5d09113 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -231,9 +231,8 @@ static void mlx5e_update_stats_work(struct work_struct *work)
        mutex_lock(&priv->state_lock);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
                mlx5e_update_stats(priv);
-               schedule_delayed_work(dwork,
-                                     msecs_to_jiffies(
-                                             MLX5E_UPDATE_STATS_INTERVAL));
+               queue_delayed_work(priv->wq, dwork,
+                                  msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
        }
        mutex_unlock(&priv->state_lock);
 }
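
This hunk, and the queue_work() conversions below, retarget the driver's deferred work from the kernel-global system workqueue to a private one, so that teardown can flush exactly this device's work and nothing else. A minimal sketch of the lifecycle pattern being adopted (illustrative names, not the driver's):

	#include <linux/workqueue.h>

	struct my_priv {
		struct workqueue_struct *wq;	/* one queue per device */
		struct delayed_work stats_work;
	};

	static void stats_work_fn(struct work_struct *work)
	{
		struct my_priv *priv = container_of(work, struct my_priv,
						    stats_work.work);
		/* ... refresh counters ... */
		/* re-arm on our own queue, never the system one */
		queue_delayed_work(priv->wq, &priv->stats_work, HZ);
	}

	static int my_init(struct my_priv *priv)
	{
		priv->wq = create_singlethread_workqueue("my_wq");
		if (!priv->wq)
			return -ENOMEM;
		INIT_DELAYED_WORK(&priv->stats_work, stats_work_fn);
		queue_delayed_work(priv->wq, &priv->stats_work, 0);
		return 0;
	}

	static void my_teardown(struct my_priv *priv)
	{
		/* the work re-arms itself, so cancel, don't just flush */
		cancel_delayed_work_sync(&priv->stats_work);
		destroy_workqueue(priv->wq);	/* drains remaining work */
	}

destroy_workqueue() completes all pending work before tearing the queue down, which is why the driver can drop flush_scheduled_work() in favour of flushing and destroying only its own queue.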
@@ -249,7 +248,7 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
        switch (event) {
        case MLX5_DEV_EVENT_PORT_UP:
        case MLX5_DEV_EVENT_PORT_DOWN:
-               schedule_work(&priv->update_carrier_work);
+               queue_work(priv->wq, &priv->update_carrier_work);
                break;
 
        default:
@@ -1695,7 +1694,7 @@ int mlx5e_open_locked(struct net_device *netdev)
        priv->netdev->rx_cpu_rmap = priv->mdev->rmap;
 #endif
 
-       schedule_delayed_work(&priv->update_stats_work, 0);
+       queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
 
        return 0;
 
@@ -2202,7 +2201,7 @@ static void mlx5e_set_rx_mode(struct net_device *dev)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
 
-       schedule_work(&priv->set_rx_mode_work);
+       queue_work(priv->wq, &priv->set_rx_mode_work);
 }
 
 static int mlx5e_set_mac(struct net_device *netdev, void *addr)
@@ -2217,7 +2216,7 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr)
        ether_addr_copy(netdev->dev_addr, saddr->sa_data);
        netif_addr_unlock_bh(netdev);
 
-       schedule_work(&priv->set_rx_mode_work);
+       queue_work(priv->wq, &priv->set_rx_mode_work);
 
        return 0;
 }
@@ -2439,6 +2438,14 @@ static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
                                           vlan, qos);
 }
 
+static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       struct mlx5_core_dev *mdev = priv->mdev;
+
+       return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
+}
+
 static int mlx5_vport_link2ifla(u8 esw_link)
 {
        switch (esw_link) {
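
Note the vf + 1: e-switch vport 0 is the PF itself, so VF n sits on vport n + 1. The new handler fills the standard ndo slot, whose prototype in include/linux/netdevice.h is

	int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);

and which userspace reaches through iproute2, e.g.:

	ip link set dev <pf-ifname> vf 0 spoofchk on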
@@ -2503,7 +2510,7 @@ static void mlx5e_add_vxlan_port(struct net_device *netdev,
        if (!mlx5e_vxlan_allowed(priv->mdev))
                return;
 
-       mlx5e_vxlan_add_port(priv, be16_to_cpu(port));
+       mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 1);
 }
 
 static void mlx5e_del_vxlan_port(struct net_device *netdev,
@@ -2514,7 +2521,7 @@ static void mlx5e_del_vxlan_port(struct net_device *netdev,
        if (!mlx5e_vxlan_allowed(priv->mdev))
                return;
 
-       mlx5e_vxlan_del_port(priv, be16_to_cpu(port));
+       mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 0);
 }
 
 static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
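
The VXLAN add/del ndos are invoked under RCU and must not sleep, while programming a UDP port into the device takes a sleeping firmware command; both callbacks therefore now defer the operation to the new workqueue. Judging from the call sites above, mlx5e_vxlan_queue_work() (defined elsewhere in this series) takes the address family, the UDP port in host byte order, and an add/remove flag:

	/* prototype inferred from the call sites, not shown in this diff */
	void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
				    u16 port, int add);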
@@ -2608,6 +2615,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
 #endif
        .ndo_set_vf_mac          = mlx5e_set_vf_mac,
        .ndo_set_vf_vlan         = mlx5e_set_vf_vlan,
+       .ndo_set_vf_spoofchk     = mlx5e_set_vf_spoofchk,
        .ndo_get_vf_config       = mlx5e_get_vf_config,
        .ndo_set_vf_link_state   = mlx5e_set_vf_link_state,
        .ndo_get_vf_stats        = mlx5e_get_vf_stats,
@@ -2803,13 +2811,17 @@ static void mlx5e_build_netdev(struct net_device *netdev)
        netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;
 
        if (mlx5e_vxlan_allowed(mdev)) {
-               netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL;
+               netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL |
+                                          NETIF_F_GSO_UDP_TUNNEL_CSUM |
+                                          NETIF_F_GSO_PARTIAL;
                netdev->hw_enc_features |= NETIF_F_IP_CSUM;
-               netdev->hw_enc_features |= NETIF_F_RXCSUM;
+               netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
                netdev->hw_enc_features |= NETIF_F_TSO;
                netdev->hw_enc_features |= NETIF_F_TSO6;
-               netdev->hw_enc_features |= NETIF_F_RXHASH;
                netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
+               netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
+                                          NETIF_F_GSO_PARTIAL;
+               netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
        }
 
        mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
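
Two corrections ride along in the features hunk: NETIF_F_RXCSUM and NETIF_F_RXHASH disappear from hw_enc_features, which describes transmit-side capabilities for encapsulated traffic and where receive flags have no effect, and NETIF_F_IPV6_CSUM is added alongside NETIF_F_IP_CSUM. The new NETIF_F_GSO_PARTIAL bit advertises that the device can finish segmentation once the stack has done the parts the hardware cannot, and gso_partial_features marks NETIF_F_GSO_UDP_TUNNEL_CSUM as the offload that is only available through that partial scheme.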
@@ -2947,10 +2959,14 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
 
        priv = netdev_priv(netdev);
 
+       priv->wq = create_singlethread_workqueue("mlx5e");
+       if (!priv->wq)
+               goto err_free_netdev;
+
        err = mlx5_alloc_map_uar(mdev, &priv->cq_uar, false);
        if (err) {
                mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
-               goto err_free_netdev;
+               goto err_destroy_wq;
        }
 
        err = mlx5_core_alloc_pd(mdev, &priv->pdn);
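
create_singlethread_workqueue() yields an ordered queue (one work item at a time), so the rx-mode, carrier and stats works queued on priv->wq execute serially and cannot race one another; the WQ_MEM_RECLAIM guarantee it carries also keeps the queue usable under memory pressure.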
@@ -3034,7 +3050,7 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
        }
 
        mlx5e_enable_async_events(priv);
-       schedule_work(&priv->set_rx_mode_work);
+       queue_work(priv->wq, &priv->set_rx_mode_work);
 
        return priv;
 
@@ -3072,6 +3088,9 @@ err_dealloc_pd:
 err_unmap_free_uar:
        mlx5_unmap_free_uar(mdev, &priv->cq_uar);
 
+err_destroy_wq:
+       destroy_workqueue(priv->wq);
+
 err_free_netdev:
        free_netdev(netdev);
 
@@ -3085,9 +3104,9 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
 
        set_bit(MLX5E_STATE_DESTROYING, &priv->state);
 
-       schedule_work(&priv->set_rx_mode_work);
+       queue_work(priv->wq, &priv->set_rx_mode_work);
        mlx5e_disable_async_events(priv);
-       flush_scheduled_work();
+       flush_workqueue(priv->wq);
        if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
                netif_device_detach(netdev);
                mutex_lock(&priv->state_lock);
@@ -3111,6 +3130,8 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
        mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn);
        mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
        mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
+       cancel_delayed_work_sync(&priv->update_stats_work);
+       destroy_workqueue(priv->wq);
 
        if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state))
                free_netdev(netdev);
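
The destroy path mirrors the setup: one last rx-mode work is queued after MLX5E_STATE_DESTROYING is set, so it runs with the destroying state visible; async events are disabled so nothing new gets queued; and flush_workqueue(priv->wq) replaces flush_scheduled_work(), which would have flushed the shared system workqueue and could stall on unrelated work. Because the stats work re-arms itself, it is cancelled with cancel_delayed_work_sync() before the queue itself is destroyed.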