IB/mlx4: Get upper dev addresses as RoCE GIDs when port comes up
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 0f7027e..d404a2e 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -59,6 +59,7 @@
 
 #define MLX4_IB_FLOW_MAX_PRIO 0xFFF
 #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
+#define MLX4_IB_CARD_REV_A0   0xA0
 
 MODULE_AUTHOR("Roland Dreier");
 MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
@@ -158,7 +159,9 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
                props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
                props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
-       if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)
+       if (dev->dev->caps.max_gso_sz &&
+           (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
+           (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
                props->device_cap_flags |= IB_DEVICE_UD_TSO;
        if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
                props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
@@ -910,8 +913,7 @@ static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
        const struct default_rules *pdefault_rules = default_table;
        u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);
 
-       for (i = 0; i < sizeof(default_table)/sizeof(default_table[0]); i++,
-            pdefault_rules++) {
+       for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) {
                __u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];
                memset(&field_types, 0, sizeof(field_types));
 
@@ -965,8 +967,7 @@ static int __mlx4_ib_create_default_rules(
        int size = 0;
        int i;
 
-       for (i = 0; i < sizeof(pdefault_rules->rules_create_list)/
-                       sizeof(pdefault_rules->rules_create_list[0]); i++) {
+       for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
                int ret;
                union ib_flow_spec ib_spec;
                switch (pdefault_rules->rules_create_list[i]) {
@@ -1678,6 +1679,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
        struct inet6_dev *in6_dev;
        union ib_gid  *pgid;
        struct inet6_ifaddr *ifp;
+       union ib_gid default_gid;
 #endif
        union ib_gid gid;
 
@@ -1698,12 +1700,15 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
                in_dev_put(in_dev);
        }
 #if IS_ENABLED(CONFIG_IPV6)
+       mlx4_make_default_gid(dev, &default_gid);
        /* IPv6 gids */
        in6_dev = in6_dev_get(dev);
        if (in6_dev) {
                read_lock_bh(&in6_dev->lock);
                list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
                        pgid = (union ib_gid *)&ifp->addr;
+                       if (!memcmp(pgid, &default_gid, sizeof(*pgid)))
+                               continue;
                        update_gid_table(ibdev, port, pgid, 0, 0);
                }
                read_unlock_bh(&in6_dev->lock);
@@ -1725,24 +1730,33 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
        struct  net_device *dev;
        struct mlx4_ib_iboe *iboe = &ibdev->iboe;
        int i;
+       int err = 0;
 
-       for (i = 1; i <= ibdev->num_ports; ++i)
-               if (reset_gid_table(ibdev, i))
-                       return -1;
+       for (i = 1; i <= ibdev->num_ports; ++i) {
+               if (rdma_port_get_link_layer(&ibdev->ib_dev, i) ==
+                   IB_LINK_LAYER_ETHERNET) {
+                       err = reset_gid_table(ibdev, i);
+                       if (err)
+                               goto out;
+               }
+       }
 
        read_lock(&dev_base_lock);
        spin_lock(&iboe->lock);
 
        for_each_netdev(&init_net, dev) {
                u8 port = mlx4_ib_get_dev_port(dev, ibdev);
-               if (port)
+               /* port will be non-zero only for ETH ports */
+               if (port) {
+                       mlx4_ib_set_default_gid(ibdev, dev, port);
                        mlx4_ib_get_dev_addr(dev, ibdev, port);
+               }
        }
 
        spin_unlock(&iboe->lock);
        read_unlock(&dev_base_lock);
-
-       return 0;
+out:
+       return err;
 }
 
 static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
@@ -1788,31 +1802,43 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
                        port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
                                                IB_PORT_ACTIVE : IB_PORT_DOWN;
                        mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-               } else {
-                       reset_gid_table(ibdev, port);
-               }
-               /* if using bonding/team and a slave port is down, we don't the bond IP
-                * based gids in the table since flows that select port by gid may get
-                * the down port.
-                */
-               if (curr_master && (port_state == IB_PORT_DOWN)) {
-                       reset_gid_table(ibdev, port);
-                       mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-               }
-               /* if bonding is used it is possible that we add it to masters
-                * only after IP address is assigned to the net bonding
-                * interface.
-               */
-               if (curr_master && (old_master != curr_master)) {
-                       reset_gid_table(ibdev, port);
-                       mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-                       mlx4_ib_get_dev_addr(curr_master, ibdev, port);
-               }
+                       if (curr_master) {
+                               /* if using bonding/team and a slave port is down, we
+                                * don't want the bond IP based gids in the table since
+                                * flows that select port by gid may get the down port.
+                               */
+                               if (port_state == IB_PORT_DOWN) {
+                                       reset_gid_table(ibdev, port);
+                                       mlx4_ib_set_default_gid(ibdev,
+                                                               curr_netdev,
+                                                               port);
+                               } else {
+                                       /* gids from the upper dev (bond/team)
+                                        * should appear in port's gid table
+                                       */
+                                       mlx4_ib_get_dev_addr(curr_master,
+                                                            ibdev, port);
+                               }
+                       }
+                       /* if bonding is used it is possible that we add it to
+                        * masters only after IP address is assigned to the
+                        * net bonding interface.
+                       */
+                       if (curr_master && (old_master != curr_master)) {
+                               reset_gid_table(ibdev, port);
+                               mlx4_ib_set_default_gid(ibdev,
+                                                       curr_netdev, port);
+                               mlx4_ib_get_dev_addr(curr_master, ibdev, port);
+                       }
 
-               if (!curr_master && (old_master != curr_master)) {
+                       if (!curr_master && (old_master != curr_master)) {
+                               reset_gid_table(ibdev, port);
+                               mlx4_ib_set_default_gid(ibdev,
+                                                       curr_netdev, port);
+                               mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
+                       }
+               } else {
                        reset_gid_table(ibdev, port);
-                       mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-                       mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
                }
        }
 
@@ -2007,6 +2033,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
                (1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
                (1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
                (1ull << IB_USER_VERBS_CMD_REG_MR)              |
+               (1ull << IB_USER_VERBS_CMD_REREG_MR)            |
                (1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
                (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
                (1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
@@ -2059,6 +2086,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
        ibdev->ib_dev.req_notify_cq     = mlx4_ib_arm_cq;
        ibdev->ib_dev.get_dma_mr        = mlx4_ib_get_dma_mr;
        ibdev->ib_dev.reg_user_mr       = mlx4_ib_reg_user_mr;
+       ibdev->ib_dev.rereg_user_mr     = mlx4_ib_rereg_user_mr;
        ibdev->ib_dev.dereg_mr          = mlx4_ib_dereg_mr;
        ibdev->ib_dev.alloc_fast_reg_mr = mlx4_ib_alloc_fast_reg_mr;
        ibdev->ib_dev.alloc_fast_reg_page_list = mlx4_ib_alloc_fast_reg_page_list;
@@ -2192,12 +2220,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
                        }
                }
 #endif
-               for (i = 1 ; i <= ibdev->num_ports ; ++i)
-                       reset_gid_table(ibdev, i);
-               rtnl_lock();
-               mlx4_ib_scan_netdevs(ibdev, NULL, 0);
-               rtnl_unlock();
-               mlx4_ib_init_gid_table(ibdev);
+               if (mlx4_ib_init_gid_table(ibdev))
+                       goto err_notif;
        }
 
        for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {