enic: reconfigure resources for kdump crash kernel
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 868d0f6..9027fc1 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -45,6 +45,7 @@
 #ifdef CONFIG_NET_RX_BUSY_POLL
 #include <net/busy_poll.h>
 #endif
+#include <linux/crash_dump.h>
 
 #include "cq_enet_desc.h"
 #include "vnic_dev.h"
@@ -351,80 +352,94 @@ static irqreturn_t enic_isr_msix_notify(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-static inline void enic_queue_wq_skb_cont(struct enic *enic,
-       struct vnic_wq *wq, struct sk_buff *skb,
-       unsigned int len_left, int loopback)
+static int enic_queue_wq_skb_cont(struct enic *enic, struct vnic_wq *wq,
+                                 struct sk_buff *skb, unsigned int len_left,
+                                 int loopback)
 {
        const skb_frag_t *frag;
+       dma_addr_t dma_addr;
 
        /* Queue additional data fragments */
        for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
                len_left -= skb_frag_size(frag);
-               enic_queue_wq_desc_cont(wq, skb,
-                       skb_frag_dma_map(&enic->pdev->dev,
-                                        frag, 0, skb_frag_size(frag),
-                                        DMA_TO_DEVICE),
-                       skb_frag_size(frag),
-                       (len_left == 0),        /* EOP? */
-                       loopback);
+               dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag, 0,
+                                           skb_frag_size(frag),
+                                           DMA_TO_DEVICE);
+               if (unlikely(enic_dma_map_check(enic, dma_addr)))
+                       return -ENOMEM;
+               enic_queue_wq_desc_cont(wq, skb, dma_addr, skb_frag_size(frag),
+                                       (len_left == 0),        /* EOP? */
+                                       loopback);
        }
+
+       return 0;
 }
 
-static inline void enic_queue_wq_skb_vlan(struct enic *enic,
-       struct vnic_wq *wq, struct sk_buff *skb,
-       int vlan_tag_insert, unsigned int vlan_tag, int loopback)
+static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq,
+                                 struct sk_buff *skb, int vlan_tag_insert,
+                                 unsigned int vlan_tag, int loopback)
 {
        unsigned int head_len = skb_headlen(skb);
        unsigned int len_left = skb->len - head_len;
        int eop = (len_left == 0);
+       dma_addr_t dma_addr;
+       int err = 0;
+
+       dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
+                                 PCI_DMA_TODEVICE);
+       if (unlikely(enic_dma_map_check(enic, dma_addr)))
+               return -ENOMEM;
 
        /* Queue the main skb fragment. The fragments are no larger
         * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
         * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
         * per fragment is queued.
         */
-       enic_queue_wq_desc(wq, skb,
-               pci_map_single(enic->pdev, skb->data,
-                       head_len, PCI_DMA_TODEVICE),
-               head_len,
-               vlan_tag_insert, vlan_tag,
-               eop, loopback);
+       enic_queue_wq_desc(wq, skb, dma_addr, head_len, vlan_tag_insert,
+                          vlan_tag, eop, loopback);
 
        if (!eop)
-               enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+               err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+
+       return err;
 }
 
-static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
-       struct vnic_wq *wq, struct sk_buff *skb,
-       int vlan_tag_insert, unsigned int vlan_tag, int loopback)
+static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
+                                    struct sk_buff *skb, int vlan_tag_insert,
+                                    unsigned int vlan_tag, int loopback)
 {
        unsigned int head_len = skb_headlen(skb);
        unsigned int len_left = skb->len - head_len;
        unsigned int hdr_len = skb_checksum_start_offset(skb);
        unsigned int csum_offset = hdr_len + skb->csum_offset;
        int eop = (len_left == 0);
+       dma_addr_t dma_addr;
+       int err = 0;
+
+       dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
+                                 PCI_DMA_TODEVICE);
+       if (unlikely(enic_dma_map_check(enic, dma_addr)))
+               return -ENOMEM;
 
        /* Queue the main skb fragment. The fragments are no larger
         * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
         * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
         * per fragment is queued.
         */
-       enic_queue_wq_desc_csum_l4(wq, skb,
-               pci_map_single(enic->pdev, skb->data,
-                       head_len, PCI_DMA_TODEVICE),
-               head_len,
-               csum_offset,
-               hdr_len,
-               vlan_tag_insert, vlan_tag,
-               eop, loopback);
+       enic_queue_wq_desc_csum_l4(wq, skb, dma_addr, head_len, csum_offset,
+                                  hdr_len, vlan_tag_insert, vlan_tag, eop,
+                                  loopback);
 
        if (!eop)
-               enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+               err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+
+       return err;
 }
 
-static inline void enic_queue_wq_skb_tso(struct enic *enic,
-       struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
-       int vlan_tag_insert, unsigned int vlan_tag, int loopback)
+static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
+                                struct sk_buff *skb, unsigned int mss,
+                                int vlan_tag_insert, unsigned int vlan_tag,
+                                int loopback)
 {
        unsigned int frag_len_left = skb_headlen(skb);
        unsigned int len_left = skb->len - frag_len_left;
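
The enic_dma_map_check() helper used above is not defined in this hunk; it is presumably added to the driver header (enic.h) as part of this series. A minimal sketch of what such a helper looks like, assuming it simply wraps pci_dma_mapping_error() and counts failures; the counter field shown is illustrative:

	static inline int enic_dma_map_check(struct enic *enic, dma_addr_t dma_addr)
	{
		if (unlikely(pci_dma_mapping_error(enic->pdev, dma_addr))) {
			net_warn_ratelimited("%s: PCI dma mapping failed!\n",
					     enic->netdev->name);
			enic->gen_stats.dma_map_error++;	/* illustrative counter */
			return -ENOMEM;
		}

		return 0;
	}
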
@@ -454,20 +469,19 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic,
         */
        while (frag_len_left) {
                len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
-               dma_addr = pci_map_single(enic->pdev, skb->data + offset,
-                               len, PCI_DMA_TODEVICE);
-               enic_queue_wq_desc_tso(wq, skb,
-                       dma_addr,
-                       len,
-                       mss, hdr_len,
-                       vlan_tag_insert, vlan_tag,
-                       eop && (len == frag_len_left), loopback);
+               dma_addr = pci_map_single(enic->pdev, skb->data + offset, len,
+                                         PCI_DMA_TODEVICE);
+               if (unlikely(enic_dma_map_check(enic, dma_addr)))
+                       return -ENOMEM;
+               enic_queue_wq_desc_tso(wq, skb, dma_addr, len, mss, hdr_len,
+                                      vlan_tag_insert, vlan_tag,
+                                      eop && (len == frag_len_left), loopback);
                frag_len_left -= len;
                offset += len;
        }
 
        if (eop)
-               return;
+               return 0;
 
        /* Queue WQ_ENET_MAX_DESC_LEN length descriptors
         * for additional data fragments
@@ -483,16 +497,18 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic,
                        dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
                                                    offset, len,
                                                    DMA_TO_DEVICE);
-                       enic_queue_wq_desc_cont(wq, skb,
-                               dma_addr,
-                               len,
-                               (len_left == 0) &&
-                               (len == frag_len_left),         /* EOP? */
-                               loopback);
+                       if (unlikely(enic_dma_map_check(enic, dma_addr)))
+                               return -ENOMEM;
+                       enic_queue_wq_desc_cont(wq, skb, dma_addr, len,
+                                               (len_left == 0) &&
+                                                (len == frag_len_left),/*EOP*/
+                                               loopback);
                        frag_len_left -= len;
                        offset += len;
                }
        }
+
+       return 0;
 }
 
 static inline void enic_queue_wq_skb(struct enic *enic,
@@ -502,6 +518,7 @@ static inline void enic_queue_wq_skb(struct enic *enic,
        unsigned int vlan_tag = 0;
        int vlan_tag_insert = 0;
        int loopback = 0;
+       int err;
 
        if (vlan_tx_tag_present(skb)) {
                /* VLAN tag from trunking driver */
@@ -513,14 +530,30 @@ static inline void enic_queue_wq_skb(struct enic *enic,
        }
 
        if (mss)
-               enic_queue_wq_skb_tso(enic, wq, skb, mss,
-                       vlan_tag_insert, vlan_tag, loopback);
+               err = enic_queue_wq_skb_tso(enic, wq, skb, mss,
+                                           vlan_tag_insert, vlan_tag,
+                                           loopback);
        else if (skb->ip_summed == CHECKSUM_PARTIAL)
-               enic_queue_wq_skb_csum_l4(enic, wq, skb,
-                       vlan_tag_insert, vlan_tag, loopback);
+               err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert,
+                                               vlan_tag, loopback);
        else
-               enic_queue_wq_skb_vlan(enic, wq, skb,
-                       vlan_tag_insert, vlan_tag, loopback);
+               err = enic_queue_wq_skb_vlan(enic, wq, skb, vlan_tag_insert,
+                                            vlan_tag, loopback);
+       if (unlikely(err)) {
+               struct vnic_wq_buf *buf;
+
+               buf = wq->to_use->prev;
+               /* while not EOP of previous pkt && queue not empty.
+                * For all non EOP bufs, os_buf is NULL.
+                */
+               while (!buf->os_buf && (buf->next != wq->to_clean)) {
+                       enic_free_wq_buf(wq, buf);
+                       wq->ring.desc_avail++;
+                       buf = buf->prev;
+               }
+               wq->to_use = buf->next;
+               dev_kfree_skb(skb);
+       }
 }
 
 /* netif_tx_lock held, process context with BHs disabled, or BH */
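
When any of the queue helpers now fails, the error path above unwinds the descriptors already posted for this skb: it walks the WQ buffer ring backwards from wq->to_use until it reaches the EOP buffer of the previous packet or the cleanup pointer, releasing each mapping as it goes. A rough sketch of the ring entry being walked, as assumed here from vnic_wq.h; only the fields the unwind relies on are shown, and the real structure carries more state:

	struct vnic_wq_buf {
		struct vnic_wq_buf *next;	/* circular buffer ring */
		struct vnic_wq_buf *prev;	/* lets the error path walk backwards */
		void *os_buf;			/* skb pointer; NULL on non-EOP bufs */
		dma_addr_t dma_addr;		/* unmapped by enic_free_wq_buf() */
		unsigned int len;
		/* ... */
	};
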
@@ -950,8 +983,12 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
        if (!skb)
                return -ENOMEM;
 
-       dma_addr = pci_map_single(enic->pdev, skb->data,
-               len, PCI_DMA_FROMDEVICE);
+       dma_addr = pci_map_single(enic->pdev, skb->data, len,
+                                 PCI_DMA_FROMDEVICE);
+       if (unlikely(enic_dma_map_check(enic, dma_addr))) {
+               dev_kfree_skb(skb);
+               return -ENOMEM;
+       }
 
        enic_queue_rq_desc(rq, skb, os_buf_index,
                dma_addr, len);
@@ -1060,10 +1097,14 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
                                     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
                }
 
-               if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
-                       skb->csum = htons(checksum);
-                       skb->ip_summed = CHECKSUM_COMPLETE;
-               }
+               /* Hardware does not provide the whole packet checksum. It
+                * only provides a pseudo checksum. Since hw validates the
+                * packet checksum but does not provide us the checksum
+                * value, use CHECKSUM_UNNECESSARY.
+                */
+               if ((netdev->features & NETIF_F_RXCSUM) && tcp_udp_csum_ok &&
+                   ipv4_csum_ok)
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
 
                if (vlan_stripped)
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
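
For context, CHECKSUM_COMPLETE means the driver must hand the stack the raw ones'-complement sum of the packet in skb->csum, which this hardware cannot supply; CHECKSUM_UNNECESSARY only tells the stack that the device has already verified the checksums. A minimal sketch of the two RX reporting styles, using hypothetical hw_* variables in place of the device-reported state:

	if (hw_validated_l4_csum) {
		/* Device verified the checksum but reports no value. */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else if (hw_reports_full_csum) {
		/* Device reports the raw sum over the packet and the stack
		 * completes validation itself.
		 */
		skb->csum = csum_unfold(hw_csum);
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
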
@@ -2225,6 +2266,18 @@ static void enic_dev_deinit(struct enic *enic)
        enic_clear_intr_mode(enic);
 }
 
+static void enic_kdump_kernel_config(struct enic *enic)
+{
+       if (is_kdump_kernel()) {
+               dev_info(enic_get_dev(enic), "Running from within kdump kernel. Using minimal resources\n");
+               enic->rq_count = 1;
+               enic->wq_count = 1;
+               enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS;
+               enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS;
+               enic->config.mtu = min_t(u16, 1500, enic->config.mtu);
+       }
+}
+
 static int enic_dev_init(struct enic *enic)
 {
        struct device *dev = enic_get_dev(enic);
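
enic_kdump_kernel_config() is wired into enic_dev_init() right after the resource counts are read from firmware (next hunk). is_kdump_kernel() comes from the <linux/crash_dump.h> include added at the top of this patch; roughly, it reports whether an ELF core header was passed to the currently running kernel, which only happens for the crash/capture kernel. A simplified sketch of that check:

	/* Simplified from include/linux/crash_dump.h */
	static inline int is_kdump_kernel(void)
	{
		return elfcorehdr_addr != ELFCORE_ADDR_MAX;
	}
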
@@ -2254,6 +2307,10 @@ static int enic_dev_init(struct enic *enic)
 
        enic_get_res_counts(enic);
 
+       /* Modify resource counts if we are running in the kdump kernel
+        */
+       enic_kdump_kernel_config(enic);
+
        /* Set interrupt mode based on resource counts and system
         * capabilities
         */