net/mlx5: Refactor mlx5_add_flow_rule
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index c72797c..dad63f0 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -524,6 +524,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
            MLX5_CAP_ETH(dev->mdev, scatter_fcs))
                props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
 
+       if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
+               props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
+
        props->vendor_part_id      = mdev->pdev->device;
        props->hw_ver              = mdev->pdev->revision;
 
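Note on the hunk above: the capability bit is only advertised when
mlx5_get_flow_namespace() actually finds the bypass namespace, so userspace
never sees IB_DEVICE_MANAGED_FLOW_STEERING on firmware without flow-steering
tables. From the application side the check looks roughly like this
(libibverbs sketch; ctx is an already-opened context, error handling elided):

	struct ibv_device_attr attr;

	if (!ibv_query_device(ctx, &attr) &&
	    (attr.device_cap_flags & IBV_DEVICE_MANAGED_FLOW_STEERING)) {
		/* ibv_create_flow() steering rules are usable on this device */
	}
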
@@ -915,7 +918,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
        num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
        gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
        resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
-       resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
+       if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
+               resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
        resp.cache_line_size = L1_CACHE_BYTES;
        resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
        resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
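Note on the hunk above: resp is declared with an empty initializer in this
function, so on a VF, or when the BlueFlame capability is absent, userspace
now receives bf_reg_size == 0 rather than a meaningless size. A provider
library would key off that value, roughly as follows (post_via_blueflame()
and ring_doorbell() are hypothetical helpers, shown only to illustrate the
contract):

	if (resp.bf_reg_size)
		post_via_blueflame(qp);	/* write-combining BlueFlame copy */
	else
		ring_doorbell(qp);	/* plain doorbell write */
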
@@ -988,7 +992,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
        if (field_avail(typeof(resp), cqe_version, udata->outlen))
                resp.response_length += sizeof(resp.cqe_version);
 
-       if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
+       /*
+        * We don't want to expose information from the PCI bar that is located
+        * after 4096 bytes, so if the arch only supports larger pages, let's
+        * pretend we don't support reading the HCA's core clock. This is also
+        * enforced by the mmap function.
+        */
+       if (PAGE_SIZE <= 4096 &&
+           field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
                resp.comp_mask |=
                        MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
                resp.hca_core_clock_offset =
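Note on the new PAGE_SIZE test: mmap() can only expose whole pages, and the
free-running clock register lives inside the first 4096 bytes of the bar, so
on an architecture with larger pages the smallest possible mapping would also
expose whatever follows the register. A quick worked example, assuming 64 KiB
pages (the numbers are the point, not the code):

	size_t page   = 65536;		/* PAGE_SIZE on e.g. a 64K ppc64 config */
	size_t window = 4096;		/* bytes the driver intends to expose   */
	size_t leak   = page - window;	/* 61440 bytes of unintended bar space  */

Hence the driver pretends the offset is unavailable there, and as the comment
says, the mmap handler enforces the same limit.
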
@@ -1517,21 +1528,18 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
 {
        struct mlx5_flow_table  *ft = ft_prio->flow_table;
        struct mlx5_ib_flow_handler *handler;
+       struct mlx5_flow_spec *spec;
        void *ib_flow = flow_attr + 1;
-       u8 match_criteria_enable = 0;
        unsigned int spec_index;
-       u32 *match_c;
-       u32 *match_v;
        u32 action;
        int err = 0;
 
        if (!is_valid_attr(flow_attr))
                return ERR_PTR(-EINVAL);
 
-       match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
-       match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+       spec = mlx5_vzalloc(sizeof(*spec));
        handler = kzalloc(sizeof(*handler), GFP_KERNEL);
-       if (!handler || !match_c || !match_v) {
+       if (!handler || !spec) {
                err = -ENOMEM;
                goto free;
        }
@@ -1539,7 +1547,8 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
        INIT_LIST_HEAD(&handler->list);
 
        for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
-               err = parse_flow_attr(match_c, match_v, ib_flow);
+               err = parse_flow_attr(spec->match_criteria,
+                                     spec->match_value, ib_flow);
                if (err < 0)
                        goto free;
 
@@ -1547,11 +1556,11 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
        }
 
        /* Outer header support only */
-       match_criteria_enable = (!outer_header_zero(match_c)) << 0;
+       spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria))
+               << 0;
        action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
                MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
-       handler->rule = mlx5_add_flow_rule(ft, match_criteria_enable,
-                                          match_c, match_v,
+       handler->rule = mlx5_add_flow_rule(ft, spec,
                                           action,
                                           MLX5_FS_DEFAULT_FLOW_TAG,
                                           dst);
@@ -1567,8 +1576,7 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
 free:
        if (err)
                kfree(handler);
-       kfree(match_c);
-       kfree(match_v);
+       kvfree(spec);
        return err ? ERR_PTR(err) : handler;
 }
 
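This hunk is the refactor named in the subject line: the three loose match
arguments (match_criteria_enable, match_c, match_v) collapse into a single
struct mlx5_flow_spec, and mlx5_add_flow_rule() takes the spec pointer
instead. The layout the accesses above imply (the authoritative definition
belongs in include/linux/mlx5/fs.h) is:

	struct mlx5_flow_spec {
		u8	match_criteria_enable;
		u32	match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
		u32	match_value[MLX5_ST_SZ_DW(fte_match_param)];
	};

Two byproducts of the conversion: the two kzalloc()ed buffers become one
allocation, and it is obtained with mlx5_vzalloc(), which can fall back to
vmalloc() when contiguous pages are scarce -- which is why the free path
switches from kfree() to kvfree().
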
@@ -1798,7 +1806,7 @@ static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
 {
        struct mlx5_ib_dev *dev =
                container_of(device, struct mlx5_ib_dev, ib_dev.dev);
-       return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(dev->mdev),
+       return sprintf(buf, "%d.%d.%04d\n", fw_rev_maj(dev->mdev),
                       fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
 }
 
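The format change zero-pads the sub-minor field to four digits, matching how
Mellanox publishes firmware versions (the sub-minor is effectively a
four-digit build number). A standalone before/after comparison, with invented
version values:

	#include <stdio.h>

	int main(void)
	{
		printf("%d.%d.%d\n", 2, 33, 512);	/* old: 2.33.512  */
		printf("%d.%d.%04d\n", 2, 33, 512);	/* new: 2.33.0512 */
		return 0;
	}
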
@@ -1866,14 +1874,11 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
                break;
 
        case MLX5_DEV_EVENT_PORT_DOWN:
+       case MLX5_DEV_EVENT_PORT_INITIALIZED:
                ibev.event = IB_EVENT_PORT_ERR;
                port = (u8)param;
                break;
 
-       case MLX5_DEV_EVENT_PORT_INITIALIZED:
-               /* not used by ULPs */
-               return;
-
        case MLX5_DEV_EVENT_LID_CHANGE:
                ibev.event = IB_EVENT_LID_CHANGE;
                port = (u8)param;
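
With PORT_INITIALIZED folded into the PORT_DOWN case, ULPs now receive
IB_EVENT_PORT_ERR for it instead of the event being swallowed. For the cases
visible in this hunk, the handler now maps core events as follows:

	/*
	 * MLX5_DEV_EVENT_PORT_DOWN        -> IB_EVENT_PORT_ERR
	 * MLX5_DEV_EVENT_PORT_INITIALIZED -> IB_EVENT_PORT_ERR  (was dropped)
	 * MLX5_DEV_EVENT_LID_CHANGE       -> IB_EVENT_LID_CHANGE
	 */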