be2net: avoid vxlan offloading on multichannel configs
[cascardo/linux.git] drivers/net/ethernet/emulex/benet/be_main.c
index 7730f21..6ca693b 100644
@@ -2451,10 +2451,24 @@ static void be_eq_clean(struct be_eq_obj *eqo)
        be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
 }
 
-static void be_rx_cq_clean(struct be_rx_obj *rxo)
+/* Free posted rx buffers that were not used */
+static void be_rxq_clean(struct be_rx_obj *rxo)
 {
-       struct be_rx_page_info *page_info;
        struct be_queue_info *rxq = &rxo->q;
+       struct be_rx_page_info *page_info;
+
+       while (atomic_read(&rxq->used) > 0) {
+               page_info = get_rx_page_info(rxo);
+               put_page(page_info->page);
+               memset(page_info, 0, sizeof(*page_info));
+       }
+       BUG_ON(atomic_read(&rxq->used));
+       rxq->tail = 0;
+       rxq->head = 0;
+}
+
+static void be_rx_cq_clean(struct be_rx_obj *rxo)
+{
        struct be_queue_info *rx_cq = &rxo->cq;
        struct be_rx_compl_info *rxcp;
        struct be_adapter *adapter = rxo->adapter;
@@ -2491,16 +2505,6 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
 
        /* After cleanup, leave the CQ in unarmed state */
        be_cq_notify(adapter, rx_cq->id, false, 0);
-
-       /* Then free posted rx buffers that were not used */
-       while (atomic_read(&rxq->used) > 0) {
-               page_info = get_rx_page_info(rxo);
-               put_page(page_info->page);
-               memset(page_info, 0, sizeof(*page_info));
-       }
-       BUG_ON(atomic_read(&rxq->used));
-       rxq->tail = 0;
-       rxq->head = 0;
 }
 
 static void be_tx_compl_clean(struct be_adapter *adapter)
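
The two hunks above split the old be_rx_cq_clean() in two: draining the completion queue stays in be_rx_cq_clean(), while freeing buffers that were posted to the RX ring but never consumed moves into the new be_rxq_clean(), which be_rx_qs_destroy() (further below) now calls only after be_cmd_rxq_destroy() and a final be_rx_cq_clean(). A minimal userspace model of that drain-and-reset pattern, for illustration only (the ring/entry types and ring_pop() are made-up names, not driver code):

        #include <assert.h>
        #include <stdlib.h>

        struct entry { void *page; };
        struct ring  { struct entry slot[64]; int head, tail, used; };

        /* Pop the oldest posted entry, standing in for get_rx_page_info() */
        static struct entry *ring_pop(struct ring *r)
        {
                struct entry *e = &r->slot[r->tail];

                r->tail = (r->tail + 1) % 64;
                r->used--;
                return e;
        }

        /* Model of be_rxq_clean(): release every posted-but-unused buffer,
         * then reset the ring indices so the queue can be reused.
         */
        static void ring_clean(struct ring *r)
        {
                while (r->used > 0) {
                        struct entry *e = ring_pop(r);

                        free(e->page);          /* put_page() in the driver */
                        e->page = NULL;
                }
                assert(r->used == 0);           /* BUG_ON() in the driver */
                r->head = r->tail = 0;
        }
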
@@ -2580,8 +2584,8 @@ static void be_evt_queues_destroy(struct be_adapter *adapter)
                        be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
                        napi_hash_del(&eqo->napi);
                        netif_napi_del(&eqo->napi);
+                       free_cpumask_var(eqo->affinity_mask);
                }
-               free_cpumask_var(eqo->affinity_mask);
                be_queue_free(adapter, &eqo->q);
        }
 }
@@ -2598,13 +2602,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
 
        for_all_evt_queues(adapter, eqo, i) {
                int numa_node = dev_to_node(&adapter->pdev->dev);
-               if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
-                       return -ENOMEM;
-               cpumask_set_cpu(cpumask_local_spread(i, numa_node),
-                               eqo->affinity_mask);
-               netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
-                              BE_NAPI_WEIGHT);
-               napi_hash_add(&eqo->napi);
+
                aic = &adapter->aic_obj[i];
                eqo->adapter = adapter;
                eqo->idx = i;
@@ -2620,6 +2618,14 @@ static int be_evt_queues_create(struct be_adapter *adapter)
                rc = be_cmd_eq_create(adapter, eqo);
                if (rc)
                        return rc;
+
+               if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
+                       return -ENOMEM;
+               cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+                               eqo->affinity_mask);
+               netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
+                              BE_NAPI_WEIGHT);
+               napi_hash_add(&eqo->napi);
        }
        return 0;
 }
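
Together with the be_evt_queues_destroy() hunk above, this reordering means the per-EQ NAPI context and affinity mask are set up only after be_cmd_eq_create() has succeeded, and are released only inside the "queue was created" branch of the destroy path. A compressed userspace model of that acquire/release pairing (struct obj, obj_setup() and obj_teardown() are illustrative names, not driver code):

        #include <errno.h>
        #include <stdbool.h>
        #include <stdlib.h>

        struct obj { bool created; void *mask; };

        static int obj_setup(struct obj *o)
        {
                if (rand() & 1)                 /* stands in for be_cmd_eq_create() failing */
                        return -EIO;
                o->created = true;

                o->mask = calloc(1, 64);        /* zalloc_cpumask_var() + NAPI setup */
                if (!o->mask)
                        return -ENOMEM;
                return 0;
        }

        static void obj_teardown(struct obj *o)
        {
                if (o->created) {
                        /* undo the firmware-side create here ... */
                        free(o->mask);          /* freed only when setup got this far */
                        o->mask = NULL;
                }
        }

Keeping the allocation after the fallible firmware call, and the free inside the created branch, means an error partway through the create loop can no longer leave be_evt_queues_destroy() freeing masks or deleting NAPI contexts that were never set up.
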
@@ -3358,8 +3364,22 @@ static void be_rx_qs_destroy(struct be_adapter *adapter)
        for_all_rx_queues(adapter, rxo, i) {
                q = &rxo->q;
                if (q->created) {
+                       /* If RXQs are destroyed while in an "out of buffer"
+                        * state, there is a possibility of an HW stall on
+                        * Lancer. So, post 64 buffers to each queue to relieve
+                        * the "out of buffer" condition.
+                        * Make sure there's space in the RXQ before posting.
+                        */
+                       if (lancer_chip(adapter)) {
+                               be_rx_cq_clean(rxo);
+                               if (atomic_read(&q->used) == 0)
+                                       be_post_rx_frags(rxo, GFP_KERNEL,
+                                                        MAX_RX_POST);
+                       }
+
                        be_cmd_rxq_destroy(adapter, q);
                        be_rx_cq_clean(rxo);
+                       be_rxq_clean(rxo);
                }
                be_queue_free(adapter, q);
        }
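
Condensed, the per-RXQ teardown order this hunk establishes looks as follows (the calls are the driver's own; this is a sketch of the sequence, not a literal copy of be_rx_qs_destroy()):

        if (lancer_chip(adapter)) {
                /* Flush completions so the ring's fill level is accurate;
                 * if the ring is then completely empty ("out of buffer"),
                 * post MAX_RX_POST buffers so Lancer is not destroyed in
                 * that state, which could stall the hardware.
                 */
                be_rx_cq_clean(rxo);
                if (atomic_read(&rxo->q.used) == 0)
                        be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
        }

        be_cmd_rxq_destroy(adapter, &rxo->q);  /* firmware tears down the ring   */
        be_rx_cq_clean(rxo);                   /* drain completions once more    */
        be_rxq_clean(rxo);                     /* free posted-but-unused buffers */
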
@@ -5154,7 +5174,7 @@ static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
        struct device *dev = &adapter->pdev->dev;
        int status;
 
-       if (lancer_chip(adapter) || BEx_chip(adapter))
+       if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
                return;
 
        if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
@@ -5201,7 +5221,7 @@ static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
 {
        struct be_adapter *adapter = netdev_priv(netdev);
 
-       if (lancer_chip(adapter) || BEx_chip(adapter))
+       if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
                return;
 
        if (adapter->vxlan_port != port)
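
These two hunks apply the same early-return guard to both the add and the del handler, so VxLAN offload state is never programmed, or torn down, on Lancer, BE2/BE3 (BEx) or multichannel (be_is_mc()) configurations. If the check grows again, folding it into a single predicate keeps the two paths symmetric; a possible refactor sketch (be_vxlan_offload_supported() is a hypothetical helper, not in the driver):

        static bool be_vxlan_offload_supported(struct be_adapter *adapter)
        {
                /* VxLAN offload is not usable on Lancer or BE2/BE3 chips,
                 * nor when the port is in a multichannel configuration.
                 */
                return !lancer_chip(adapter) && !BEx_chip(adapter) &&
                       !be_is_mc(adapter);
        }

with both be_add_vxlan_port() and be_del_vxlan_port() then starting with:

        if (!be_vxlan_offload_supported(adapter))
                return;
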