net/mlx5: Implement SRIOV attach/detach flows
[cascardo/linux.git] / drivers / net / ethernet / mellanox / mlx5 / core / sriov.c
index b380a6b..f4f02b6 100644 (file)
 #include "eswitch.h"
 #endif
 
-static void enable_vfs(struct mlx5_core_dev *dev, int num_vfs)
+bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
+{
+       struct mlx5_core_sriov *sriov = &dev->priv.sriov;
+
+       return !!sriov->num_vfs;
+}
+
+static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
 {
        struct mlx5_core_sriov *sriov = &dev->priv.sriov;
        int err;
        int vf;
 
-       for (vf = 1; vf <= num_vfs; vf++) {
-               err = mlx5_core_enable_hca(dev, vf);
+       if (sriov->enabled_vfs) {
+               mlx5_core_warn(dev,
+                              "failed to enable SRIOV on device, already enabled with %d vfs\n",
+                              sriov->enabled_vfs);
+               return -EBUSY;
+       }
+
+#ifdef CONFIG_MLX5_CORE_EN
+       err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
+       if (err) {
+               mlx5_core_warn(dev,
+                              "failed to enable eswitch SRIOV (%d)\n", err);
+               return err;
+       }
+#endif
+
+       for (vf = 0; vf < num_vfs; vf++) {
+               err = mlx5_core_enable_hca(dev, vf + 1);
                if (err) {
-                       mlx5_core_warn(dev, "failed to enable VF %d\n", vf - 1);
-               } else {
-                       sriov->vfs_ctx[vf - 1].enabled = 1;
-                       mlx5_core_dbg(dev, "successfully enabled VF %d\n", vf - 1);
+                       mlx5_core_warn(dev, "failed to enable VF %d (%d)\n", vf, err);
+                       continue;
                }
+               sriov->vfs_ctx[vf].enabled = 1;
+               sriov->enabled_vfs++;
+               mlx5_core_dbg(dev, "successfully enabled VF* %d\n", vf);
+
        }
+
+       return 0;
 }
 
-static void disable_vfs(struct mlx5_core_dev *dev, int num_vfs)
+static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
 {
        struct mlx5_core_sriov *sriov = &dev->priv.sriov;
+       int err;
        int vf;
 
-       for (vf = 1; vf <= num_vfs; vf++) {
-               if (sriov->vfs_ctx[vf - 1].enabled) {
-                       if (mlx5_core_disable_hca(dev, vf))
-                               mlx5_core_warn(dev, "failed to disable VF %d\n", vf - 1);
-                       else
-                               sriov->vfs_ctx[vf - 1].enabled = 0;
+       if (!sriov->enabled_vfs)
+               return;
+
+       for (vf = 0; vf < sriov->num_vfs; vf++) {
+               if (!sriov->vfs_ctx[vf].enabled)
+                       continue;
+               err = mlx5_core_disable_hca(dev, vf + 1);
+               if (err) {
+                       mlx5_core_warn(dev, "failed to disable VF %d\n", vf);
+                       continue;
                }
+               sriov->vfs_ctx[vf].enabled = 0;
+               sriov->enabled_vfs--;
        }
+
+#ifdef CONFIG_MLX5_CORE_EN
+       mlx5_eswitch_disable_sriov(dev->priv.eswitch);
+#endif
+
+       if (mlx5_wait_for_vf_pages(dev))
+               mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
 }
 
-static int mlx5_core_create_vfs(struct pci_dev *pdev, int num_vfs)
+static int mlx5_pci_enable_sriov(struct pci_dev *pdev, int num_vfs)
 {
        struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
-       int err;
-
-       if (pci_num_vf(pdev))
-               pci_disable_sriov(pdev);
+       int err = 0;
 
-       enable_vfs(dev, num_vfs);
-
-       err = pci_enable_sriov(pdev, num_vfs);
-       if (err) {
-               dev_warn(&pdev->dev, "enable sriov failed %d\n", err);
-               goto ex;
+       if (pci_num_vf(pdev)) {
+               mlx5_core_warn(dev, "Unable to enable pci sriov, already enabled\n");
+               return -EBUSY;
        }
 
-       return 0;
+       err = pci_enable_sriov(pdev, num_vfs);
+       if (err)
+               mlx5_core_warn(dev, "pci_enable_sriov failed : %d\n", err);
 
-ex:
-       disable_vfs(dev, num_vfs);
        return err;
 }
 
/* PCI-level counterpart of mlx5_pci_enable_sriov(). */
static void mlx5_pci_disable_sriov(struct pci_dev *pdev)
{
        pci_disable_sriov(pdev);
}
+
+static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
 {
        struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
        struct mlx5_core_sriov *sriov = &dev->priv.sriov;
-       int err;
+       int err = 0;
 
-       kfree(sriov->vfs_ctx);
-       sriov->vfs_ctx = kcalloc(num_vfs, sizeof(*sriov->vfs_ctx), GFP_ATOMIC);
-       if (!sriov->vfs_ctx)
-               return -ENOMEM;
+       err = mlx5_device_enable_sriov(dev, num_vfs);
+       if (err) {
+               mlx5_core_warn(dev, "mlx5_device_enable_sriov failed : %d\n", err);
+               return err;
+       }
 
-       sriov->enabled_vfs = num_vfs;
-       err = mlx5_core_create_vfs(pdev, num_vfs);
+       err = mlx5_pci_enable_sriov(pdev, num_vfs);
        if (err) {
-               kfree(sriov->vfs_ctx);
-               sriov->vfs_ctx = NULL;
+               mlx5_core_warn(dev, "mlx5_pci_enable_sriov failed : %d\n", err);
+               mlx5_device_disable_sriov(dev);
                return err;
        }
 
+       sriov->num_vfs = num_vfs;
+
        return 0;
 }
 
-static void mlx5_core_init_vfs(struct mlx5_core_dev *dev, int num_vfs)
+static void mlx5_sriov_disable(struct pci_dev *pdev)
 {
+       struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
        struct mlx5_core_sriov *sriov = &dev->priv.sriov;
 
-       sriov->num_vfs = num_vfs;
-}
-
-static void mlx5_core_cleanup_vfs(struct mlx5_core_dev *dev)
-{
-       struct mlx5_core_sriov *sriov;
-
-       sriov = &dev->priv.sriov;
-       disable_vfs(dev, sriov->num_vfs);
-
-       if (mlx5_wait_for_vf_pages(dev))
-               mlx5_core_warn(dev, "timeout claiming VFs pages\n");
-
+       mlx5_pci_disable_sriov(pdev);
+       mlx5_device_disable_sriov(dev);
        sriov->num_vfs = 0;
 }
 
 int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
 {
        struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
-       struct mlx5_core_sriov *sriov = &dev->priv.sriov;
-       int err;
+       int err = 0;
 
        mlx5_core_dbg(dev, "requested num_vfs %d\n", num_vfs);
        if (!mlx5_core_is_pf(dev))
                return -EPERM;
 
-       mlx5_core_cleanup_vfs(dev);
-
-       if (!num_vfs) {
-#ifdef CONFIG_MLX5_CORE_EN
-               mlx5_eswitch_disable_sriov(dev->priv.eswitch);
-#endif
-               kfree(sriov->vfs_ctx);
-               sriov->vfs_ctx = NULL;
-               if (!pci_vfs_assigned(pdev))
-                       pci_disable_sriov(pdev);
-               else
-                       pr_info("unloading PF driver while leaving orphan VFs\n");
-               return 0;
+       if (num_vfs && mlx5_lag_is_active(dev)) {
+               mlx5_core_warn(dev, "can't turn sriov on while LAG is active");
+               return -EINVAL;
        }
 
-       err = mlx5_core_sriov_enable(pdev, num_vfs);
-       if (err) {
-               dev_warn(&pdev->dev, "mlx5_core_sriov_enable failed %d\n", err);
-               return err;
-       }
+       if (num_vfs)
+               err = mlx5_sriov_enable(pdev, num_vfs);
+       else
+               mlx5_sriov_disable(pdev);
 
-       mlx5_core_init_vfs(dev, num_vfs);
-#ifdef CONFIG_MLX5_CORE_EN
-       mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
-#endif
-
-       return num_vfs;
+       return err ? err : num_vfs;
 }
 
-static int sync_required(struct pci_dev *pdev)
+int mlx5_sriov_attach(struct mlx5_core_dev *dev)
 {
-       struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
        struct mlx5_core_sriov *sriov = &dev->priv.sriov;
-       int cur_vfs = pci_num_vf(pdev);
 
-       if (cur_vfs != sriov->num_vfs) {
-               pr_info("current VFs %d, registered %d - sync needed\n", cur_vfs, sriov->num_vfs);
-               return 1;
-       }
+       if (!mlx5_core_is_pf(dev) || !sriov->num_vfs)
+               return 0;
 
-       return 0;
+       /* If sriov VFs exist in PCI level, enable them in device level */
+       return mlx5_device_enable_sriov(dev, sriov->num_vfs);
+}
+
/* Device-level teardown counterpart of mlx5_sriov_attach(). */
void mlx5_sriov_detach(struct mlx5_core_dev *dev)
{
        if (mlx5_core_is_pf(dev))
                mlx5_device_disable_sriov(dev);
}
 
 int mlx5_sriov_init(struct mlx5_core_dev *dev)
 {
        struct mlx5_core_sriov *sriov = &dev->priv.sriov;
        struct pci_dev *pdev = dev->pdev;
-       int cur_vfs;
+       int total_vfs;
 
        if (!mlx5_core_is_pf(dev))
                return 0;
 
-       if (!sync_required(dev->pdev))
-               return 0;
-
-       cur_vfs = pci_num_vf(pdev);
-       sriov->vfs_ctx = kcalloc(cur_vfs, sizeof(*sriov->vfs_ctx), GFP_KERNEL);
+       total_vfs = pci_sriov_get_totalvfs(pdev);
+       sriov->num_vfs = pci_num_vf(pdev);
+       sriov->vfs_ctx = kcalloc(total_vfs, sizeof(*sriov->vfs_ctx), GFP_KERNEL);
        if (!sriov->vfs_ctx)
                return -ENOMEM;
 
-       sriov->enabled_vfs = cur_vfs;
-
-       mlx5_core_init_vfs(dev, cur_vfs);
-#ifdef CONFIG_MLX5_CORE_EN
-       if (cur_vfs)
-               mlx5_eswitch_enable_sriov(dev->priv.eswitch, cur_vfs,
-                                         SRIOV_LEGACY);
-#endif
-
-       enable_vfs(dev, cur_vfs);
-
-       return 0;
+       return mlx5_sriov_attach(dev);
 }
 
-int mlx5_sriov_cleanup(struct mlx5_core_dev *dev)
+void mlx5_sriov_cleanup(struct mlx5_core_dev *dev)
 {
-       struct pci_dev *pdev = dev->pdev;
-       int err;
+       struct mlx5_core_sriov *sriov = &dev->priv.sriov;
 
        if (!mlx5_core_is_pf(dev))
-               return 0;
-
-       err = mlx5_core_sriov_configure(pdev, 0);
-       if (err)
-               return err;
-
-       return 0;
+               return;
+       mlx5_sriov_detach(dev);
+       kfree(sriov->vfs_ctx);
 }