drivers/net: fixup comments after "Future-proof tunnel offload handlers"
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 7df3fe2..918b94b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2015 Intel Corporation.
+  Copyright(c) 1999 - 2016 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
 #include <linux/if_bridge.h>
 #include <linux/prefetch.h>
 #include <scsi/fc/fc_fcoe.h>
-#include <net/vxlan.h>
+#include <net/udp_tunnel.h>
 #include <net/pkt_cls.h>
 #include <net/tc_act/tc_gact.h>
-
-#ifdef CONFIG_OF
-#include <linux/of_net.h>
-#endif
-
-#ifdef CONFIG_SPARC
-#include <asm/idprom.h>
-#include <asm/prom.h>
-#endif
+#include <net/tc_act/tc_mirred.h>
 
 #include "ixgbe.h"
 #include "ixgbe_common.h"
@@ -79,10 +71,10 @@ char ixgbe_default_device_descr[] =
 static char ixgbe_default_device_descr[] =
                              "Intel(R) 10 Gigabit Network Connection";
 #endif
-#define DRV_VERSION "4.2.1-k"
+#define DRV_VERSION "4.4.0-k"
 const char ixgbe_driver_version[] = DRV_VERSION;
 static const char ixgbe_copyright[] =
-                               "Copyright (c) 1999-2015 Intel Corporation.";
+                               "Copyright (c) 1999-2016 Intel Corporation.";
 
 static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has over heated. Restart the computer. If the problem persists, power off the system and replace the adapter";
 
@@ -92,6 +84,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = {
        [board_X540]            = &ixgbe_X540_info,
        [board_X550]            = &ixgbe_X550_info,
        [board_X550EM_x]        = &ixgbe_X550EM_x_info,
+       [board_x550em_a]        = &ixgbe_x550em_a_info,
 };
 
 /* ixgbe_pci_tbl - PCI Device ID Table
@@ -134,10 +127,17 @@ static const struct pci_device_id ixgbe_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550},
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x},
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
        /* required last entry */
        {0, }
 };
@@ -372,6 +372,27 @@ u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
 
        if (ixgbe_removed(reg_addr))
                return IXGBE_FAILED_READ_REG;
+       if (unlikely(hw->phy.nw_mng_if_sel &
+                    IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M)) {
+               struct ixgbe_adapter *adapter;
+               int i;
+
+               for (i = 0; i < 200; ++i) {
+                       value = readl(reg_addr + IXGBE_MAC_SGMII_BUSY);
+                       if (likely(!value))
+                               goto writes_completed;
+                       if (value == IXGBE_FAILED_READ_REG) {
+                               ixgbe_remove_adapter(hw);
+                               return IXGBE_FAILED_READ_REG;
+                       }
+                       udelay(5);
+               }
+
+               adapter = hw->back;
+               e_warn(hw, "register writes incomplete %08x\n", value);
+       }
+
+writes_completed:
        value = readl(reg_addr + reg);
        if (unlikely(value == IXGBE_FAILED_READ_REG))
                ixgbe_check_remove(hw, reg);
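
Note on the new wait loop: the poll of IXGBE_MAC_SGMII_BUSY above (200 iterations, 5 us apart, so roughly 1 ms) is the usual bounded register-poll pattern; it is open-coded so that an all-ones read (surprise-removed adapter) can be caught mid-poll. For comparison only, a minimal sketch of the same wait using the generic helper from <linux/iopoll.h>; the function name is made up for illustration:

    #include <linux/iopoll.h>

    static int example_wait_sgmii_idle(void __iomem *reg_addr)
    {
            u32 busy;

            /* poll every 5 us; give up after 200 * 5 us = 1 ms */
            return readl_poll_timeout_atomic(reg_addr + IXGBE_MAC_SGMII_BUSY,
                                             busy, !busy, 5, 1000);
    }
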
@@ -588,7 +609,7 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
                pr_info("%-15s %016lX %016lX %016lX\n",
                        netdev->name,
                        netdev->state,
-                       netdev->trans_start,
+                       dev_trans_start(netdev),
                        netdev->last_rx);
        }
 
@@ -869,6 +890,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                if (direction == -1) {
                        /* other causes */
                        msix_vector |= IXGBE_IVAR_ALLOC_VAL;
@@ -907,6 +929,7 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                mask = (qmask & 0xFFFFFFFF);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
                mask = (qmask >> 32);
@@ -1086,10 +1109,41 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
        }
 }
 
+/**
+ * ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate
+ * @netdev: network interface device structure
+ * @queue_index: Tx queue to set
+ * @maxrate: desired maximum transmit bitrate, in Mbps
+ **/
+static int ixgbe_tx_maxrate(struct net_device *netdev,
+                           int queue_index, u32 maxrate)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 bcnrc_val = ixgbe_link_mbps(adapter);
+
+       if (!maxrate)
+               return 0;
+
+       /* Calculate the rate factor values to set */
+       bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
+       bcnrc_val /= maxrate;
+
+       /* clear everything but the rate factor */
+       bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
+                    IXGBE_RTTBCNRC_RF_DEC_MASK;
+
+       /* enable the rate scheduler */
+       bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;
+
+       IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_index);
+       IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
+
+       return 0;
+}
+
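
For reference, the RTTBCNRC programming above stores the ratio of link speed to the requested cap as a fixed-point rate factor. A worked example, taking IXGBE_RTTBCNRC_RF_INT_SHIFT as 14 and assuming a 10G link:

    /*
     * link = 10000 Mbps (ixgbe_link_mbps()), maxrate = 2500 Mbps
     * bcnrc_val = (10000 << 14) / 2500 = 65536 = 0x10000
     * i.e. a rate factor of 4.0: the queue is held to 1/4 of line rate.
     */
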
 /**
  * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
  * @q_vector: structure containing interrupt and ring information
  * @tx_ring: tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
  **/
 static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                               struct ixgbe_ring *tx_ring, int napi_budget)
@@ -2192,7 +2246,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 
        /* Populate MSIX to EITR Select */
        if (adapter->num_vfs > 32) {
-               u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
+               u32 eitrsel = BIT(adapter->num_vfs - 32) - 1;
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
        }
 
@@ -2222,6 +2276,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                ixgbe_set_ivar(adapter, -1, 1, v_idx);
                break;
        default:
@@ -2333,6 +2388,7 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                /*
                 * set the WDIS bit to not clear the timer bits and cause an
                 * immediate assertion of the interrupt
@@ -2494,6 +2550,7 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
                return false;
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                switch (hw->mac.ops.get_media_type(hw)) {
                case ixgbe_media_type_fiber:
                case ixgbe_media_type_fiber_qsfp:
@@ -2568,6 +2625,7 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                mask = (qmask & 0xFFFFFFFF);
                if (mask)
                        IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
@@ -2596,6 +2654,7 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                mask = (qmask & 0xFFFFFFFF);
                if (mask)
                        IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
@@ -2631,6 +2690,7 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
                case ixgbe_mac_X540:
                case ixgbe_mac_X550:
                case ixgbe_mac_X550EM_x:
+               case ixgbe_mac_x550em_a:
                        mask |= IXGBE_EIMS_TS;
                        break;
                default:
@@ -2646,7 +2706,10 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
-               if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP)
+       case ixgbe_mac_x550em_a:
+               if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
+                   adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
+                   adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP_N)
                        mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw);
                if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t)
                        mask |= IXGBE_EICR_GPI_SDP0_X540;
@@ -2704,6 +2767,7 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
                    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
                        adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
@@ -2786,8 +2850,10 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
                ixgbe_update_dca(q_vector);
 #endif
 
-       ixgbe_for_each_ring(ring, q_vector->tx)
-               clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring, budget);
+       ixgbe_for_each_ring(ring, q_vector->tx) {
+               if (!ixgbe_clean_tx_irq(q_vector, ring, budget))
+                       clean_complete = false;
+       }
 
        /* Exit if we are called by netpoll or busy polling is active */
        if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector))
@@ -2805,7 +2871,8 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
                                                 per_ring_budget);
 
                work_done += cleaned;
-               clean_complete &= (cleaned < per_ring_budget);
+               if (cleaned >= per_ring_budget)
+                       clean_complete = false;
        }
 
        ixgbe_qv_unlock_napi(q_vector);
@@ -2818,7 +2885,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
        if (adapter->rx_itr_setting & 1)
                ixgbe_set_itr(q_vector);
        if (!test_bit(__IXGBE_DOWN, &adapter->state))
-               ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
+               ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));
 
        return 0;
 }
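
The clean_complete changes in this hunk keep the same logic but drop the bitwise `&=` on a bool: a Tx ring that did not finish, or an Rx ring that consumed its whole per-ring budget, now clears clean_complete through an explicit test, which reads more clearly and avoids bitwise arithmetic on boolean values.
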
@@ -2937,6 +3004,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                if (eicr & IXGBE_EICR_ECC) {
                        e_info(link, "Received ECC Err, initiating reset\n");
                        adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
@@ -3033,6 +3101,7 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
@@ -3109,15 +3178,15 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
         * currently 40.
         */
        if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
-               txdctl |= (1 << 16);    /* WTHRESH = 1 */
+               txdctl |= 1u << 16;     /* WTHRESH = 1 */
        else
-               txdctl |= (8 << 16);    /* WTHRESH = 8 */
+               txdctl |= 8u << 16;     /* WTHRESH = 8 */
 
        /*
         * Setting PTHRESH to 32 both improves performance
         * and avoids a TX hang with DFP enabled
         */
-       txdctl |= (1 << 8) |    /* HTHRESH = 1 */
+       txdctl |= (1u << 8) |   /* HTHRESH = 1 */
                   32;          /* PTHRESH = 32 */
 
        /* reinitialize flowdirector state */
@@ -3669,9 +3738,9 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
                return;
 
        if (rss_i > 3)
-               psrtype |= 2 << 29;
+               psrtype |= 2u << 29;
        else if (rss_i > 1)
-               psrtype |= 1 << 29;
+               psrtype |= 1u << 29;
 
        for_each_set_bit(pool, &adapter->fwd_bitmask, 32)
                IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
@@ -3698,9 +3767,9 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
        reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
 
        /* Enable only the PF's pool for Tx/Rx */
-       IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift);
+       IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), GENMASK(31, vf_shift));
        IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
-       IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift);
+       IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), GENMASK(31, vf_shift));
        IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
        if (adapter->bridge_mode == BRIDGE_MODE_VEB)
                IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
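
Throughout this patch, open-coded shifts are converted to the kernel bit helpers; the values written are unchanged. For reference (sketch of the equivalences, from <linux/bits.h> / <linux/bitops.h>):

    /*
     * BIT(n)        == 1UL  << n
     * BIT_ULL(n)    == 1ULL << n
     * GENMASK(h, l) == mask with bits h..l set; as a 32-bit register value
     *                  GENMASK(31, vf_shift) equals the old (~0) << vf_shift
     */
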
@@ -3729,34 +3798,10 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
 
        IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
 
-
-       /* Enable MAC Anti-Spoofing */
-       hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
-                                         adapter->num_vfs);
-
-       /* Ensure LLDP and FC is set for Ethertype Antispoofing if we will be
-        * calling set_ethertype_anti_spoofing for each VF in loop below
-        */
-       if (hw->mac.ops.set_ethertype_anti_spoofing) {
-               IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
-                               (IXGBE_ETQF_FILTER_EN    |
-                                IXGBE_ETQF_TX_ANTISPOOF |
-                                IXGBE_ETH_P_LLDP));
-
-               IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC),
-                               (IXGBE_ETQF_FILTER_EN |
-                                IXGBE_ETQF_TX_ANTISPOOF |
-                                ETH_P_PAUSE));
-       }
-
-       /* For VFs that have spoof checking turned off */
        for (i = 0; i < adapter->num_vfs; i++) {
-               if (!adapter->vfinfo[i].spoofchk_enabled)
-                       ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false);
-
-               /* enable ethertype anti spoofing if hw supports it */
-               if (hw->mac.ops.set_ethertype_anti_spoofing)
-                       hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i);
+               /* configure spoof checking */
+               ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i,
+                                         adapter->vfinfo[i].spoofchk_enabled);
 
                /* Enable/Disable RSS query feature  */
                ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i,
@@ -3832,6 +3877,7 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
                break;
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                if (adapter->num_vfs)
                        rdrxctl |= IXGBE_RDRXCTL_PSP;
                /* fall through for older HW */
@@ -3908,7 +3954,9 @@ static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
        struct ixgbe_hw *hw = &adapter->hw;
 
        /* add VID to filter table */
-       hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, true);
+       if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
+               hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, !!vid);
+
        set_bit(vid, adapter->active_vlans);
 
        return 0;
@@ -3947,7 +3995,7 @@ void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid)
         * entry other than the PF.
         */
        word = idx * 2 + (VMDQ_P(0) / 32);
-       bits = ~(1 << (VMDQ_P(0)) % 32);
+       bits = ~BIT(VMDQ_P(0) % 32);
        bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
 
        /* Disable the filter so this falls into the default pool. */
@@ -3965,9 +4013,7 @@ static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
        struct ixgbe_hw *hw = &adapter->hw;
 
        /* remove VID from filter table */
-       if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)
-               ixgbe_update_pf_promisc_vlvf(adapter, vid);
-       else
+       if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
                hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true);
 
        clear_bit(vid, adapter->active_vlans);
@@ -3995,6 +4041,7 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                for (i = 0; i < adapter->num_rx_queues; i++) {
                        struct ixgbe_ring *ring = adapter->rx_ring[i];
 
@@ -4031,6 +4078,7 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                for (i = 0; i < adapter->num_rx_queues; i++) {
                        struct ixgbe_ring *ring = adapter->rx_ring[i];
 
@@ -4057,6 +4105,7 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
        default:
                if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
                        break;
@@ -4081,7 +4130,7 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
                u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
                u32 vlvfb = IXGBE_READ_REG(hw, reg_offset);
 
-               vlvfb |= 1 << (VMDQ_P(0) % 32);
+               vlvfb |= BIT(VMDQ_P(0) % 32);
                IXGBE_WRITE_REG(hw, reg_offset, vlvfb);
        }
 
@@ -4111,7 +4160,7 @@ static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
 
                if (vlvf) {
                        /* record VLAN ID in VFTA */
-                       vfta[(vid - vid_start) / 32] |= 1 << (vid % 32);
+                       vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
 
                        /* if PF is part of this then continue */
                        if (test_bit(vid, adapter->active_vlans))
@@ -4120,7 +4169,7 @@ static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
 
                /* remove PF from the pool */
                word = i * 2 + VMDQ_P(0) / 32;
-               bits = ~(1 << (VMDQ_P(0) % 32));
+               bits = ~BIT(VMDQ_P(0) % 32);
                bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
                IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits);
        }
@@ -4147,6 +4196,7 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
        default:
                if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
                        break;
@@ -4172,11 +4222,11 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
 
 static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
 {
-       u16 vid;
+       u16 vid = 1;
 
        ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
 
-       for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+       for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
                ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
 }
 
@@ -4426,6 +4476,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
+       netdev_features_t features = netdev->features;
        int count;
 
        /* Check for Promiscuous and All Multicast modes */
@@ -4443,14 +4494,13 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
                hw->addr_ctrl.user_set_promisc = true;
                fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
                vmolr |= IXGBE_VMOLR_MPE;
-               ixgbe_vlan_promisc_enable(adapter);
+               features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
        } else {
                if (netdev->flags & IFF_ALLMULTI) {
                        fctrl |= IXGBE_FCTRL_MPE;
                        vmolr |= IXGBE_VMOLR_MPE;
                }
                hw->addr_ctrl.user_set_promisc = false;
-               ixgbe_vlan_promisc_disable(adapter);
        }
 
        /*
@@ -4483,7 +4533,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
        }
 
        /* This is useful for sniffing bad packets. */
-       if (adapter->netdev->features & NETIF_F_RXALL) {
+       if (features & NETIF_F_RXALL) {
                /* UPE and MPE will be handled by normal PROMISC logic
                 * in e1000e_set_rx_mode */
                fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */
@@ -4496,10 +4546,15 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
 
        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
 
-       if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+       if (features & NETIF_F_HW_VLAN_CTAG_RX)
                ixgbe_vlan_strip_enable(adapter);
        else
                ixgbe_vlan_strip_disable(adapter);
+
+       if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+               ixgbe_vlan_promisc_disable(adapter);
+       else
+               ixgbe_vlan_promisc_enable(adapter);
 }
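
With this hunk the VLAN-promiscuous decision is driven by a local snapshot of netdev->features instead of direct calls from the promiscuous branch: entering IFF_PROMISC clears NETIF_F_HW_VLAN_CTAG_FILTER in the snapshot, and the tail of the function turns VLAN promiscuous mode on or off from that single bit (sketch of the resulting behaviour, not driver code):

    /*
     * IFF_PROMISC set        -> features &= ~NETIF_F_HW_VLAN_CTAG_FILTER
     * CTAG_FILTER still set  -> ixgbe_vlan_promisc_disable()
     * CTAG_FILTER cleared    -> ixgbe_vlan_promisc_enable()
     */
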
 
 static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
@@ -4530,6 +4585,7 @@ static void ixgbe_clear_vxlan_port(struct ixgbe_adapter *adapter)
        switch (adapter->hw.mac.type) {
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0);
                adapter->vxlan_port = 0;
                break;
@@ -4630,6 +4686,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                dv_id = IXGBE_DV_X540(link, tc);
                break;
        default:
@@ -4690,6 +4747,7 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                dv_id = IXGBE_LOW_DV_X540(tc);
                break;
        default:
@@ -4805,9 +4863,9 @@ static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
                return;
 
        if (rss_i > 3)
-               psrtype |= 2 << 29;
+               psrtype |= 2u << 29;
        else if (rss_i > 1)
-               psrtype |= 1 << 29;
+               psrtype |= 1u << 29;
 
        IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
 }
@@ -4871,7 +4929,7 @@ static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
        /* shutdown specific queue receive and wait for dma to settle */
        ixgbe_disable_rx_queue(adapter, rx_ring);
        usleep_range(10000, 20000);
-       ixgbe_irq_disable_queues(adapter, ((u64)1 << index));
+       ixgbe_irq_disable_queues(adapter, BIT_ULL(index));
        ixgbe_clean_rx_ring(rx_ring);
        rx_ring->l2_accel_priv = NULL;
 }
@@ -5106,6 +5164,7 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
                case ixgbe_mac_X540:
                case ixgbe_mac_X550:
                case ixgbe_mac_X550EM_x:
+               case ixgbe_mac_x550em_a:
                default:
                        IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
                        IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
@@ -5156,6 +5215,7 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
                gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X;
                break;
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                gpie |= IXGBE_SDP0_GPIEN_X540;
                break;
        default:
@@ -5228,7 +5288,7 @@ void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
 {
        WARN_ON(in_interrupt());
        /* put off any impending NetWatchDogTimeout */
-       adapter->netdev->trans_start = jiffies;
+       netif_trans_update(adapter->netdev);
 
        while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
                usleep_range(1000, 2000);
@@ -5467,6 +5527,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
                                (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
                                 ~IXGBE_DMATXCTL_TE));
@@ -5498,6 +5559,58 @@ static void ixgbe_tx_timeout(struct net_device *netdev)
        ixgbe_tx_timeout_reset(adapter);
 }
 
+#ifdef CONFIG_IXGBE_DCB
+static void ixgbe_init_dcb(struct ixgbe_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       struct tc_configuration *tc;
+       int j;
+
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+       case ixgbe_mac_82599EB:
+               adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
+               adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
+               break;
+       case ixgbe_mac_X540:
+       case ixgbe_mac_X550:
+               adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
+               adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
+               break;
+       case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
+       default:
+               adapter->dcb_cfg.num_tcs.pg_tcs = DEF_TRAFFIC_CLASS;
+               adapter->dcb_cfg.num_tcs.pfc_tcs = DEF_TRAFFIC_CLASS;
+               break;
+       }
+
+       /* Configure DCB traffic classes */
+       for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
+               tc = &adapter->dcb_cfg.tc_config[j];
+               tc->path[DCB_TX_CONFIG].bwg_id = 0;
+               tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
+               tc->path[DCB_RX_CONFIG].bwg_id = 0;
+               tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
+               tc->dcb_pfc = pfc_disabled;
+       }
+
+       /* Initialize default user to priority mapping, UPx->TC0 */
+       tc = &adapter->dcb_cfg.tc_config[0];
+       tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
+       tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
+
+       adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
+       adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
+       adapter->dcb_cfg.pfc_mode_enable = false;
+       adapter->dcb_set_bitmap = 0x00;
+       if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
+               adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
+       memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
+              sizeof(adapter->temp_dcb_cfg));
+}
+#endif
+
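
ixgbe_init_dcb() is an extraction of the DCB defaults that previously sat inline in ixgbe_sw_init(); the behavioural differences are the DEF_TRAFFIC_CLASS case for the X550EM_x/x550em_a MACs and gating dcbx_cap on IXGBE_FLAG_DCB_CAPABLE. The bandwidth split itself is unchanged: bwg_percent = 12 + (j & 1) gives 12% to even and 13% to odd traffic classes, so the eight classes sum to 4*12 + 4*13 = 100%.
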
 /**
  * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
  * @adapter: board private structure to initialize
@@ -5512,10 +5625,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
        struct pci_dev *pdev = adapter->pdev;
        unsigned int rss, fdir;
        u32 fwsm;
-#ifdef CONFIG_IXGBE_DCB
-       int j;
-       struct tc_configuration *tc;
-#endif
+       u16 device_caps;
+       int i;
 
        /* PCI config space info */
 
@@ -5537,6 +5648,10 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
 #ifdef CONFIG_IXGBE_DCA
        adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
 #endif
+#ifdef CONFIG_IXGBE_DCB
+       adapter->flags |= IXGBE_FLAG_DCB_CAPABLE;
+       adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
+#endif
 #ifdef IXGBE_FCOE
        adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
        adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
@@ -5547,7 +5662,14 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
 #endif /* IXGBE_FCOE */
 
        /* initialize static ixgbe jump table entries */
-       adapter->jump_tables[0] = ixgbe_ipv4_fields;
+       adapter->jump_tables[0] = kzalloc(sizeof(*adapter->jump_tables[0]),
+                                         GFP_KERNEL);
+       if (!adapter->jump_tables[0])
+               return -ENOMEM;
+       adapter->jump_tables[0]->mat = ixgbe_ipv4_fields;
+
+       for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++)
+               adapter->jump_tables[i] = NULL;
 
        adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
                                     hw->mac.num_rar_entries,
@@ -5585,13 +5707,22 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
                        adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
                break;
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
+#ifdef CONFIG_IXGBE_DCB
+               adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
+#endif
+#ifdef IXGBE_FCOE
+               adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
+#ifdef CONFIG_IXGBE_DCB
+               adapter->fcoe.up = 0;
+#endif /* IXGBE_DCB */
+#endif /* IXGBE_FCOE */
+       /* Fall Through */
        case ixgbe_mac_X550:
 #ifdef CONFIG_IXGBE_DCA
                adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
 #endif
-#ifdef CONFIG_IXGBE_VXLAN
                adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE;
-#endif
                break;
        default:
                break;
@@ -5606,42 +5737,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
        spin_lock_init(&adapter->fdir_perfect_lock);
 
 #ifdef CONFIG_IXGBE_DCB
-       switch (hw->mac.type) {
-       case ixgbe_mac_X540:
-       case ixgbe_mac_X550:
-       case ixgbe_mac_X550EM_x:
-               adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
-               adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
-               break;
-       default:
-               adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
-               adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
-               break;
-       }
-
-       /* Configure DCB traffic classes */
-       for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
-               tc = &adapter->dcb_cfg.tc_config[j];
-               tc->path[DCB_TX_CONFIG].bwg_id = 0;
-               tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
-               tc->path[DCB_RX_CONFIG].bwg_id = 0;
-               tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
-               tc->dcb_pfc = pfc_disabled;
-       }
-
-       /* Initialize default user to priority mapping, UPx->TC0 */
-       tc = &adapter->dcb_cfg.tc_config[0];
-       tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
-       tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
-
-       adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
-       adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
-       adapter->dcb_cfg.pfc_mode_enable = false;
-       adapter->dcb_set_bitmap = 0x00;
-       adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
-       memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
-              sizeof(adapter->temp_dcb_cfg));
-
+       ixgbe_init_dcb(adapter);
 #endif
 
        /* default flow control settings */
@@ -5675,6 +5771,22 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
        adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
        adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
 
+       /* Cache bit indicating need for crosstalk fix */
+       switch (hw->mac.type) {
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
+               hw->mac.ops.get_device_caps(hw, &device_caps);
+               if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
+                       adapter->need_crosstalk_fix = false;
+               else
+                       adapter->need_crosstalk_fix = true;
+               break;
+       default:
+               adapter->need_crosstalk_fix = false;
+               break;
+       }
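
need_crosstalk_fix is cached here from the NVM device-capabilities word and consumed later in this patch by the watchdog link check and the SFP detection subtask, both of which read ESDP bit SDP2 to decide whether the SFP+ cage is occupied. Within the MAC types listed, the switch body is equivalent to the one-liner below (sketch only):

    adapter->need_crosstalk_fix =
            !(device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR);
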
+
        /* set default work limits */
        adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
 
@@ -6044,9 +6156,7 @@ int ixgbe_open(struct net_device *netdev)
        ixgbe_up_complete(adapter);
 
        ixgbe_clear_vxlan_port(adapter);
-#ifdef CONFIG_IXGBE_VXLAN
-       vxlan_get_rx_port(netdev);
-#endif
+       udp_tunnel_get_rx_info(netdev);
 
        return 0;
 
@@ -6217,6 +6327,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                pci_wake_from_d3(pdev, !!wufc);
                break;
        default:
@@ -6352,6 +6463,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
                case ixgbe_mac_X540:
                case ixgbe_mac_X550:
                case ixgbe_mac_X550EM_x:
+               case ixgbe_mac_x550em_a:
                        hwstats->pxonrxc[i] +=
                                IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
                        break;
@@ -6367,7 +6479,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
                if ((hw->mac.type == ixgbe_mac_82599EB) ||
                    (hw->mac.type == ixgbe_mac_X540) ||
                    (hw->mac.type == ixgbe_mac_X550) ||
-                   (hw->mac.type == ixgbe_mac_X550EM_x)) {
+                   (hw->mac.type == ixgbe_mac_X550EM_x) ||
+                   (hw->mac.type == ixgbe_mac_x550em_a)) {
                        hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
                        IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */
                        hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
@@ -6392,6 +6505,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                /* OS2BMC stats are X540 and later */
                hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
                hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
@@ -6562,7 +6676,7 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
                for (i = 0; i < adapter->num_q_vectors; i++) {
                        struct ixgbe_q_vector *qv = adapter->q_vector[i];
                        if (qv->rx.ring || qv->tx.ring)
-                               eics |= ((u64)1 << i);
+                               eics |= BIT_ULL(i);
                }
        }
 
@@ -6593,6 +6707,18 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
                link_up = true;
        }
 
+       /* If the crosstalk fix is enabled, sanity-check the reported link
+        * state: link can be indicated as up while the SFP+ cage is
+        * actually empty, so treat an empty cage as link down.
+        */
+       if (adapter->need_crosstalk_fix) {
+               u32 sfp_cage_full;
+
+               sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
+                               IXGBE_ESDP_SDP2;
+               if (ixgbe_is_sfp(hw) && link_up && !sfp_cage_full)
+                       link_up = false;
+       }
+
        if (adapter->ixgbe_ieee_pfc)
                pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
 
@@ -6662,6 +6788,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
        case ixgbe_mac_82599EB: {
                u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
                u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
@@ -6938,6 +7065,16 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
        struct ixgbe_hw *hw = &adapter->hw;
        s32 err;
 
+       /* If the crosstalk fix is enabled, skip SFP detection while the
+        * SFP+ cage is empty.
+        */
+       if (adapter->need_crosstalk_fix) {
+               u32 sfp_cage_full;
+
+               sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
+                               IXGBE_ESDP_SDP2;
+               if (!sfp_cage_full)
+                       return;
+       }
+
        /* not searching for SFP so there is nothing to do here */
        if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
            !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
@@ -7121,12 +7258,12 @@ static void ixgbe_service_task(struct work_struct *work)
                ixgbe_service_event_complete(adapter);
                return;
        }
-#ifdef CONFIG_IXGBE_VXLAN
        if (adapter->flags2 & IXGBE_FLAG2_VXLAN_REREG_NEEDED) {
+               rtnl_lock();
                adapter->flags2 &= ~IXGBE_FLAG2_VXLAN_REREG_NEEDED;
-               vxlan_get_rx_port(adapter->netdev);
+               udp_tunnel_get_rx_info(adapter->netdev);
+               rtnl_unlock();
        }
-#endif /* CONFIG_IXGBE_VXLAN */
        ixgbe_reset_subtask(adapter);
        ixgbe_phy_interrupt_subtask(adapter);
        ixgbe_sfp_detection_subtask(adapter);
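
The VXLAN re-registration above now goes through udp_tunnel_get_rx_info() and is wrapped in rtnl_lock()/rtnl_unlock(): the udp_tunnel push runs through netdevice notifiers, which must be called under RTNL (the other call site, ixgbe_open(), already runs with RTNL held).
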
@@ -7148,9 +7285,18 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
                     struct ixgbe_tx_buffer *first,
                     u8 *hdr_len)
 {
+       u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
        struct sk_buff *skb = first->skb;
-       u32 vlan_macip_lens, type_tucmd;
-       u32 mss_l4len_idx, l4len;
+       union {
+               struct iphdr *v4;
+               struct ipv6hdr *v6;
+               unsigned char *hdr;
+       } ip;
+       union {
+               struct tcphdr *tcp;
+               unsigned char *hdr;
+       } l4;
+       u32 paylen, l4_offset;
        int err;
 
        if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -7163,46 +7309,52 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
        if (err < 0)
                return err;
 
+       ip.hdr = skb_network_header(skb);
+       l4.hdr = skb_checksum_start(skb);
+
        /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
        type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
 
-       if (first->protocol == htons(ETH_P_IP)) {
-               struct iphdr *iph = ip_hdr(skb);
-               iph->tot_len = 0;
-               iph->check = 0;
-               tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
-                                                        iph->daddr, 0,
-                                                        IPPROTO_TCP,
-                                                        0);
+       /* initialize outer IP header fields */
+       if (ip.v4->version == 4) {
+               /* IP header will have to cancel out any data that
+                * is not a part of the outer IP header
+                */
+               ip.v4->check = csum_fold(csum_add(lco_csum(skb),
+                                                 csum_unfold(l4.tcp->check)));
                type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+
+               ip.v4->tot_len = 0;
                first->tx_flags |= IXGBE_TX_FLAGS_TSO |
                                   IXGBE_TX_FLAGS_CSUM |
                                   IXGBE_TX_FLAGS_IPV4;
-       } else if (skb_is_gso_v6(skb)) {
-               ipv6_hdr(skb)->payload_len = 0;
-               tcp_hdr(skb)->check =
-                   ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-                                    &ipv6_hdr(skb)->daddr,
-                                    0, IPPROTO_TCP, 0);
+       } else {
+               ip.v6->payload_len = 0;
                first->tx_flags |= IXGBE_TX_FLAGS_TSO |
                                   IXGBE_TX_FLAGS_CSUM;
        }
 
-       /* compute header lengths */
-       l4len = tcp_hdrlen(skb);
-       *hdr_len = skb_transport_offset(skb) + l4len;
+       /* determine offset of inner transport header */
+       l4_offset = l4.hdr - skb->data;
+
+       /* compute length of segmentation header */
+       *hdr_len = (l4.tcp->doff * 4) + l4_offset;
+
+       /* remove payload length from inner checksum */
+       paylen = skb->len - l4_offset;
+       csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
 
        /* update gso size and bytecount with header size */
        first->gso_segs = skb_shinfo(skb)->gso_segs;
        first->bytecount += (first->gso_segs - 1) * *hdr_len;
 
        /* mss_l4len_id: use 0 as index for TSO */
-       mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
+       mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
        mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
 
        /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
-       vlan_macip_lens = skb_network_header_len(skb);
-       vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+       vlan_macip_lens = l4.hdr - ip.hdr;
+       vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
        vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
        ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
@@ -7211,103 +7363,61 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
        return 1;
 }
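
The rewritten TSO path above no longer rebuilds the TCP pseudo-header checksum from the IP addresses: it keeps the checksum the stack already seeded, folds the payload length back out of it (the csum_replace_by_diff() line), and seeds the IPv4 header check so that it cancels the data that is not part of the outer header (the lco_csum() line, per the in-code comment). The sketch below is a plain userspace illustration of the ones'-complement incremental update (RFC 1624) this style of adjustment is built on; it is not the kernel helper, and the numbers are arbitrary.

    #include <stdint.h>
    #include <stdio.h>

    /* 16-bit ones'-complement add with end-around carry */
    static uint16_t csum16_add(uint16_t a, uint16_t b)
    {
            uint32_t s = (uint32_t)a + b;

            return (uint16_t)((s & 0xffff) + (s >> 16));
    }

    /* RFC 1624 eq. 3 with m' = 0: drop word m from the data behind hc */
    static uint16_t csum16_remove(uint16_t hc, uint16_t m)
    {
            return (uint16_t)~csum16_add((uint16_t)~hc, (uint16_t)~m);
    }

    int main(void)
    {
            uint16_t hc = 0xb861;           /* arbitrary folded checksum */
            uint16_t stripped = csum16_remove(hc, 1448);
            uint16_t restored = (uint16_t)~csum16_add((uint16_t)~stripped, 1448);

            /* removing and re-adding the same word round-trips back to hc */
            printf("%04x -> %04x -> %04x\n", hc, stripped, restored);
            return 0;
    }
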
 
+static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb)
+{
+       unsigned int offset = 0;
+
+       ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
+
+       return offset == skb_checksum_start_offset(skb);
+}
+
 static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
                          struct ixgbe_tx_buffer *first)
 {
        struct sk_buff *skb = first->skb;
        u32 vlan_macip_lens = 0;
-       u32 mss_l4len_idx = 0;
        u32 type_tucmd = 0;
 
        if (skb->ip_summed != CHECKSUM_PARTIAL) {
-               if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
-                   !(first->tx_flags & IXGBE_TX_FLAGS_CC))
+csum_failed:
+               if (!(first->tx_flags & (IXGBE_TX_FLAGS_HW_VLAN |
+                                        IXGBE_TX_FLAGS_CC)))
                        return;
-               vlan_macip_lens = skb_network_offset(skb) <<
-                                 IXGBE_ADVTXD_MACLEN_SHIFT;
-       } else {
-               u8 l4_hdr = 0;
-               union {
-                       struct iphdr *ipv4;
-                       struct ipv6hdr *ipv6;
-                       u8 *raw;
-               } network_hdr;
-               union {
-                       struct tcphdr *tcphdr;
-                       u8 *raw;
-               } transport_hdr;
-               __be16 frag_off;
-
-               if (skb->encapsulation) {
-                       network_hdr.raw = skb_inner_network_header(skb);
-                       transport_hdr.raw = skb_inner_transport_header(skb);
-                       vlan_macip_lens = skb_inner_network_offset(skb) <<
-                                         IXGBE_ADVTXD_MACLEN_SHIFT;
-               } else {
-                       network_hdr.raw = skb_network_header(skb);
-                       transport_hdr.raw = skb_transport_header(skb);
-                       vlan_macip_lens = skb_network_offset(skb) <<
-                                         IXGBE_ADVTXD_MACLEN_SHIFT;
-               }
-
-               /* use first 4 bits to determine IP version */
-               switch (network_hdr.ipv4->version) {
-               case IPVERSION:
-                       vlan_macip_lens |= transport_hdr.raw - network_hdr.raw;
-                       type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
-                       l4_hdr = network_hdr.ipv4->protocol;
-                       break;
-               case 6:
-                       vlan_macip_lens |= transport_hdr.raw - network_hdr.raw;
-                       l4_hdr = network_hdr.ipv6->nexthdr;
-                       if (likely((transport_hdr.raw - network_hdr.raw) ==
-                                  sizeof(struct ipv6hdr)))
-                               break;
-                       ipv6_skip_exthdr(skb, network_hdr.raw - skb->data +
-                                             sizeof(struct ipv6hdr),
-                                        &l4_hdr, &frag_off);
-                       if (unlikely(frag_off))
-                               l4_hdr = NEXTHDR_FRAGMENT;
-                       break;
-               default:
-                       break;
-               }
+               goto no_csum;
+       }
 
-               switch (l4_hdr) {
-               case IPPROTO_TCP:
-                       type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
-                       mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) <<
-                                       IXGBE_ADVTXD_L4LEN_SHIFT;
-                       break;
-               case IPPROTO_SCTP:
-                       type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
-                       mss_l4len_idx = sizeof(struct sctphdr) <<
-                                       IXGBE_ADVTXD_L4LEN_SHIFT;
-                       break;
-               case IPPROTO_UDP:
-                       mss_l4len_idx = sizeof(struct udphdr) <<
-                                       IXGBE_ADVTXD_L4LEN_SHIFT;
+       switch (skb->csum_offset) {
+       case offsetof(struct tcphdr, check):
+               type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
+               /* fall through */
+       case offsetof(struct udphdr, check):
+               break;
+       case offsetof(struct sctphdr, checksum):
+               /* validate that this is actually an SCTP request */
+               if (((first->protocol == htons(ETH_P_IP)) &&
+                    (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
+                   ((first->protocol == htons(ETH_P_IPV6)) &&
+                    ixgbe_ipv6_csum_is_sctp(skb))) {
+                       type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
                        break;
-               default:
-                       if (unlikely(net_ratelimit())) {
-                               dev_warn(tx_ring->dev,
-                                        "partial checksum, version=%d, l4 proto=%x\n",
-                                        network_hdr.ipv4->version, l4_hdr);
-                       }
-                       skb_checksum_help(skb);
-                       goto no_csum;
                }
-
-               /* update TX checksum flag */
-               first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
+               /* fall through */
+       default:
+               skb_checksum_help(skb);
+               goto csum_failed;
        }
 
+       /* update TX checksum flag */
+       first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
+       vlan_macip_lens = skb_checksum_start_offset(skb) -
+                         skb_network_offset(skb);
 no_csum:
        /* vlan_macip_lens: MACLEN, VLAN tag */
+       vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
        vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
-       ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
-                         type_tucmd, mss_l4len_idx);
+       ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, 0);
 }
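
The rewritten ixgbe_tx_csum() no longer walks the IP/IPv6 headers to discover the L4 protocol; for CHECKSUM_PARTIAL packets it switches directly on skb->csum_offset, the offset of the checksum field within the transport header. For reference, a standalone sketch of those offsets using local wire-format mirrors (not the kernel structs):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct tcp_hdr  { uint16_t source, dest; uint32_t seq, ack_seq;
                      uint16_t flags, window, check, urg_ptr; };
    struct udp_hdr  { uint16_t source, dest, len, check; };
    struct sctp_hdr { uint16_t source, dest; uint32_t vtag, checksum; };

    int main(void)
    {
            printf("TCP  check    at byte %zu\n", offsetof(struct tcp_hdr, check));     /* 16 */
            printf("UDP  check    at byte %zu\n", offsetof(struct udp_hdr, check));     /*  6 */
            printf("SCTP checksum at byte %zu\n", offsetof(struct sctp_hdr, checksum)); /*  8 */
            return 0;
    }
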
 
 #define IXGBE_SET_FLAG(_input, _flag, _result) \
@@ -7581,7 +7691,6 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
        /* snag network header to get L4 type and address */
        skb = first->skb;
        hdr.network = skb_network_header(skb);
-#ifdef CONFIG_IXGBE_VXLAN
        if (skb->encapsulation &&
            first->protocol == htons(ETH_P_IP) &&
            hdr.ipv4->protocol != IPPROTO_UDP) {
@@ -7592,7 +7701,6 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
                    udp_hdr(skb)->dest == adapter->vxlan_port)
                        hdr.network = skb_inner_network_header(skb);
        }
-#endif /* CONFIG_IXGBE_VXLAN */
 
        /* Currently only IPv4/IPv6 with TCP is supported */
        switch (hdr.ipv4->version) {
@@ -8192,14 +8300,53 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
 static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter,
                               struct tc_cls_u32_offload *cls)
 {
+       u32 hdl = cls->knode.handle;
        u32 uhtid = TC_U32_USERHTID(cls->knode.handle);
-       u32 loc;
-       int err;
+       u32 loc = cls->knode.handle & 0xfffff;
+       int err = 0, i, j;
+       struct ixgbe_jump_table *jump = NULL;
+
+       if (loc > IXGBE_MAX_HW_ENTRIES)
+               return -EINVAL;
 
        if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE))
                return -EINVAL;
 
-       loc = cls->knode.handle & 0xfffff;
+       /* Clear this filter in the link data it is associated with */
+       if (uhtid != 0x800) {
+               jump = adapter->jump_tables[uhtid];
+               if (!jump)
+                       return -EINVAL;
+               if (!test_bit(loc - 1, jump->child_loc_map))
+                       return -EINVAL;
+               clear_bit(loc - 1, jump->child_loc_map);
+       }
+
+       /* Check if the filter being deleted is a link */
+       for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
+               jump = adapter->jump_tables[i];
+               if (jump && jump->link_hdl == hdl) {
+                       /* Delete filters in the hardware in the child hash
+                        * table associated with this link
+                        */
+                       for (j = 0; j < IXGBE_MAX_HW_ENTRIES; j++) {
+                               if (!test_bit(j, jump->child_loc_map))
+                                       continue;
+                               spin_lock(&adapter->fdir_perfect_lock);
+                               err = ixgbe_update_ethtool_fdir_entry(adapter,
+                                                                     NULL,
+                                                                     j + 1);
+                               spin_unlock(&adapter->fdir_perfect_lock);
+                               clear_bit(j, jump->child_loc_map);
+                       }
+                       /* Remove resources for this link */
+                       kfree(jump->input);
+                       kfree(jump->mask);
+                       kfree(jump);
+                       adapter->jump_tables[i] = NULL;
+                       return err;
+               }
+       }
 
        spin_lock(&adapter->fdir_perfect_lock);
        err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc);
@@ -8238,6 +8385,134 @@ static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter,
        return 0;
 }
 
+#ifdef CONFIG_NET_CLS_ACT
+static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
+                                 u8 *queue, u64 *action)
+{
+       unsigned int num_vfs = adapter->num_vfs, vf;
+       struct net_device *upper;
+       struct list_head *iter;
+
+       /* redirect to a SRIOV VF */
+       for (vf = 0; vf < num_vfs; ++vf) {
+               upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev);
+               if (upper->ifindex == ifindex) {
+                       if (adapter->num_rx_pools > 1)
+                               *queue = vf * 2;
+                       else
+                               *queue = vf * adapter->num_rx_queues_per_pool;
+
+                       *action = vf + 1;
+                       *action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+                       return 0;
+               }
+       }
+
+       /* redirect to an offloaded macvlan netdev */
+       netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
+               if (netif_is_macvlan(upper)) {
+                       struct macvlan_dev *dfwd = netdev_priv(upper);
+                       struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
+
+                       if (vadapter && vadapter->netdev->ifindex == ifindex) {
+                               *queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx;
+                               *action = *queue;
+                               return 0;
+                       }
+               }
+       }
+
+       return -EINVAL;
+}
+
+static int parse_tc_actions(struct ixgbe_adapter *adapter,
+                           struct tcf_exts *exts, u64 *action, u8 *queue)
+{
+       const struct tc_action *a;
+       int err;
+
+       if (tc_no_actions(exts))
+               return -EINVAL;
+
+       tc_for_each_action(a, exts) {
+
+               /* Drop action */
+               if (is_tcf_gact_shot(a)) {
+                       *action = IXGBE_FDIR_DROP_QUEUE;
+                       *queue = IXGBE_FDIR_DROP_QUEUE;
+                       return 0;
+               }
+
+               /* Redirect to a VF or an offloaded macvlan */
+               if (is_tcf_mirred_redirect(a)) {
+                       int ifindex = tcf_mirred_ifindex(a);
+
+                       err = handle_redirect_action(adapter, ifindex, queue,
+                                                    action);
+                       if (err == 0)
+                               return err;
+               }
+       }
+
+       return -EINVAL;
+}
+#else
+static int parse_tc_actions(struct ixgbe_adapter *adapter,
+                           struct tcf_exts *exts, u64 *action, u8 *queue)
+{
+       return -EINVAL;
+}
+#endif /* CONFIG_NET_CLS_ACT */
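
handle_redirect_action() reuses the ethtool ring-cookie convention for the flow-director action: the target VF is stored as (vf + 1) shifted by ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF (bit 32 of the cookie), so a zero VF field means "no VF" and the low bits carry the queue.
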
+
+static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input,
+                                   union ixgbe_atr_input *mask,
+                                   struct tc_cls_u32_offload *cls,
+                                   struct ixgbe_mat_field *field_ptr,
+                                   struct ixgbe_nexthdr *nexthdr)
+{
+       int i, j, off;
+       __be32 val, m;
+       bool found_entry = false, found_jump_field = false;
+
+       for (i = 0; i < cls->knode.sel->nkeys; i++) {
+               off = cls->knode.sel->keys[i].off;
+               val = cls->knode.sel->keys[i].val;
+               m = cls->knode.sel->keys[i].mask;
+
+               for (j = 0; field_ptr[j].val; j++) {
+                       if (field_ptr[j].off == off) {
+                               field_ptr[j].val(input, mask, val, m);
+                               input->filter.formatted.flow_type |=
+                                       field_ptr[j].type;
+                               found_entry = true;
+                               break;
+                       }
+               }
+               if (nexthdr) {
+                       if (nexthdr->off == cls->knode.sel->keys[i].off &&
+                           nexthdr->val == cls->knode.sel->keys[i].val &&
+                           nexthdr->mask == cls->knode.sel->keys[i].mask)
+                               found_jump_field = true;
+                       else
+                               continue;
+               }
+       }
+
+       if (nexthdr && !found_jump_field)
+               return -EINVAL;
+
+       if (!found_entry)
+               return 0;
+
+       mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
+                                   IXGBE_ATR_L4TYPE_MASK;
+
+       if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
+               mask->formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
+
+       return 0;
+}
+
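
ixgbe_clsu32_build_input() walks the u32 key list (off/val/mask triples): each key whose offset appears in the driver's ixgbe_mat_field table is handed to that entry's val() callback, which fills the flow-director input and mask and records the flow type; when a nexthdr descriptor is passed in (the link case), at least one key must also match the expected jump field or the link is rejected with -EINVAL.
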
 static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
                                  __be16 protocol,
                                  struct tc_cls_u32_offload *cls)
@@ -8245,16 +8520,13 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
        u32 loc = cls->knode.handle & 0xfffff;
        struct ixgbe_hw *hw = &adapter->hw;
        struct ixgbe_mat_field *field_ptr;
-       struct ixgbe_fdir_filter *input;
-       union ixgbe_atr_input mask;
-#ifdef CONFIG_NET_CLS_ACT
-       const struct tc_action *a;
-#endif
-       int i, err = 0;
+       struct ixgbe_fdir_filter *input = NULL;
+       union ixgbe_atr_input *mask = NULL;
+       struct ixgbe_jump_table *jump = NULL;
+       int i, err = -EINVAL;
        u8 queue;
        u32 uhtid, link_uhtid;
 
-       memset(&mask, 0, sizeof(union ixgbe_atr_input));
        uhtid = TC_U32_USERHTID(cls->knode.handle);
        link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
 
@@ -8266,38 +8538,11 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
         * headers when needed.
         */
        if (protocol != htons(ETH_P_IP))
-               return -EINVAL;
-
-       if (link_uhtid) {
-               struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;
-
-               if (link_uhtid >= IXGBE_MAX_LINK_HANDLE)
-                       return -EINVAL;
-
-               if (!test_bit(link_uhtid - 1, &adapter->tables))
-                       return -EINVAL;
-
-               for (i = 0; nexthdr[i].jump; i++) {
-                       if (nexthdr->o != cls->knode.sel->offoff ||
-                           nexthdr->s != cls->knode.sel->offshift ||
-                           nexthdr->m != cls->knode.sel->offmask ||
-                           /* do not support multiple key jumps its just mad */
-                           cls->knode.sel->nkeys > 1)
-                               return -EINVAL;
-
-                       if (nexthdr->off != cls->knode.sel->keys[0].off ||
-                           nexthdr->val != cls->knode.sel->keys[0].val ||
-                           nexthdr->mask != cls->knode.sel->keys[0].mask)
-                               return -EINVAL;
-
-                       adapter->jump_tables[link_uhtid] = nexthdr->jump;
-               }
-               return 0;
-       }
+               return err;
 
        if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) {
                e_err(drv, "Location out of range\n");
-               return -EINVAL;
+               return err;
        }
 
        /* cls u32 is a graph starting at root node 0x800. The driver tracks
@@ -8308,87 +8553,154 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
         * this function _should_ be generic try not to hardcode values here.
         */
        if (uhtid == 0x800) {
-               field_ptr = adapter->jump_tables[0];
+               field_ptr = (adapter->jump_tables[0])->mat;
        } else {
                if (uhtid >= IXGBE_MAX_LINK_HANDLE)
-                       return -EINVAL;
-
-               field_ptr = adapter->jump_tables[uhtid];
+                       return err;
+               if (!adapter->jump_tables[uhtid])
+                       return err;
+               field_ptr = (adapter->jump_tables[uhtid])->mat;
        }
 
        if (!field_ptr)
-               return -EINVAL;
+               return err;
 
-       input = kzalloc(sizeof(*input), GFP_KERNEL);
-       if (!input)
-               return -ENOMEM;
+       /* At this point we know the field_ptr is valid and need to either
+        * build a cls_u32 link or attach a filter. Adding a link to a
+        * handle that does not exist is invalid, as is adding rules to
+        * handles that don't exist.
+        */
 
-       for (i = 0; i < cls->knode.sel->nkeys; i++) {
-               int off = cls->knode.sel->keys[i].off;
-               __be32 val = cls->knode.sel->keys[i].val;
-               __be32 m = cls->knode.sel->keys[i].mask;
-               bool found_entry = false;
-               int j;
+       if (link_uhtid) {
+               struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;
 
-               for (j = 0; field_ptr[j].val; j++) {
-                       if (field_ptr[j].off == off) {
-                               field_ptr[j].val(input, &mask, val, m);
-                               input->filter.formatted.flow_type |=
-                                       field_ptr[j].type;
-                               found_entry = true;
+               if (link_uhtid >= IXGBE_MAX_LINK_HANDLE)
+                       return err;
+
+               if (!test_bit(link_uhtid - 1, &adapter->tables))
+                       return err;
+
+               /* Multiple filters as links to the same hash table are not
+                * supported. To add a new filter with the same next header
+                * but different match/jump conditions, create a new hash table
+                * and link to it.
+                */
+               if (adapter->jump_tables[link_uhtid] &&
+                   (adapter->jump_tables[link_uhtid])->link_hdl) {
+                       e_err(drv, "Link filter exists for link: %x\n",
+                             link_uhtid);
+                       return err;
+               }
+
+               for (i = 0; nexthdr[i].jump; i++) {
+                       if (nexthdr[i].o != cls->knode.sel->offoff ||
+                           nexthdr[i].s != cls->knode.sel->offshift ||
+                           nexthdr[i].m != cls->knode.sel->offmask)
+                               return err;
+
+                       jump = kzalloc(sizeof(*jump), GFP_KERNEL);
+                       if (!jump)
+                               return -ENOMEM;
+                       input = kzalloc(sizeof(*input), GFP_KERNEL);
+                       if (!input) {
+                               err = -ENOMEM;
+                               goto free_jump;
+                       }
+                       mask = kzalloc(sizeof(*mask), GFP_KERNEL);
+                       if (!mask) {
+                               err = -ENOMEM;
+                               goto free_input;
+                       }
+                       jump->input = input;
+                       jump->mask = mask;
+                       jump->link_hdl = cls->knode.handle;
+
+                       err = ixgbe_clsu32_build_input(input, mask, cls,
+                                                      field_ptr, &nexthdr[i]);
+                       if (!err) {
+                               jump->mat = nexthdr[i].jump;
+                               adapter->jump_tables[link_uhtid] = jump;
                                break;
                        }
                }
-
-               if (!found_entry)
-                       goto err_out;
+               return 0;
        }
 
-       mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
-                                  IXGBE_ATR_L4TYPE_MASK;
+       input = kzalloc(sizeof(*input), GFP_KERNEL);
+       if (!input)
+               return -ENOMEM;
+       mask = kzalloc(sizeof(*mask), GFP_KERNEL);
+       if (!mask) {
+               err = -ENOMEM;
+               goto free_input;
+       }
 
-       if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
-               mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
+       if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) {
+               if ((adapter->jump_tables[uhtid])->input)
+                       memcpy(input, (adapter->jump_tables[uhtid])->input,
+                              sizeof(*input));
+               if ((adapter->jump_tables[uhtid])->mask)
+                       memcpy(mask, (adapter->jump_tables[uhtid])->mask,
+                              sizeof(*mask));
 
-#ifdef CONFIG_NET_CLS_ACT
-       if (list_empty(&cls->knode.exts->actions))
+               /* Look up all child hash tables to check whether this
+                * location is already filled with a filter.
+                */
+               for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
+                       struct ixgbe_jump_table *link = adapter->jump_tables[i];
+
+                       if (link && (test_bit(loc - 1, link->child_loc_map))) {
+                               e_err(drv, "Filter exists in location: %x\n",
+                                     loc);
+                               err = -EINVAL;
+                               goto err_out;
+                       }
+               }
+       }
+       err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL);
+       if (err)
                goto err_out;
 
-       list_for_each_entry(a, &cls->knode.exts->actions, list) {
-               if (!is_tcf_gact_shot(a))
-                       goto err_out;
-       }
-#endif
+       err = parse_tc_actions(adapter, cls->knode.exts, &input->action,
+                              &queue);
+       if (err < 0)
+               goto err_out;
 
-       input->action = IXGBE_FDIR_DROP_QUEUE;
-       queue = IXGBE_FDIR_DROP_QUEUE;
        input->sw_idx = loc;
 
        spin_lock(&adapter->fdir_perfect_lock);
 
        if (hlist_empty(&adapter->fdir_filter_list)) {
-               memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
-               err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
+               memcpy(&adapter->fdir_mask, mask, sizeof(*mask));
+               err = ixgbe_fdir_set_input_mask_82599(hw, mask);
                if (err)
                        goto err_out_w_lock;
-       } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
+       } else if (memcmp(&adapter->fdir_mask, mask, sizeof(*mask))) {
                err = -EINVAL;
                goto err_out_w_lock;
        }
 
-       ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);
+       ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask);
        err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter,
                                                    input->sw_idx, queue);
        if (!err)
                ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
        spin_unlock(&adapter->fdir_perfect_lock);
 
+       if ((uhtid != 0x800) && (adapter->jump_tables[uhtid]))
+               set_bit(loc - 1, (adapter->jump_tables[uhtid])->child_loc_map);
+
+       kfree(mask);
        return err;
 err_out_w_lock:
        spin_unlock(&adapter->fdir_perfect_lock);
 err_out:
+       kfree(mask);
+free_input:
        kfree(input);
-       return -EINVAL;
+free_jump:
+       kfree(jump);
+       return err;
 }
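
In the rewritten ixgbe_configure_clsu32() above, each link table remembers which filter locations its child rules occupy (child_loc_map), so a second rule aimed at an already used location is rejected before any hardware programming. A minimal, self-contained sketch of that bookkeeping with a plain bit array follows; the names (example_link, loc_is_taken, EXAMPLE_MAX_LOC) are hypothetical.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_MAX_LOC	1024	/* analogous to the fdir_pballoc-derived limit */
#define BITS_PER_WORD	(sizeof(unsigned long) * CHAR_BIT)

/* Hypothetical per-link bookkeeping, mirroring child_loc_map. */
struct example_link {
	unsigned long child_loc_map[EXAMPLE_MAX_LOC / BITS_PER_WORD];
};

static bool loc_is_taken(const struct example_link *link, unsigned int loc)
{
	return link->child_loc_map[(loc - 1) / BITS_PER_WORD] &
	       (1UL << ((loc - 1) % BITS_PER_WORD));
}

static void loc_mark_taken(struct example_link *link, unsigned int loc)
{
	link->child_loc_map[(loc - 1) / BITS_PER_WORD] |=
		1UL << ((loc - 1) % BITS_PER_WORD);
}

int main(void)
{
	struct example_link link = { { 0 } };

	/* the first rule at location 5 is accepted and recorded ... */
	if (!loc_is_taken(&link, 5))
		loc_mark_taken(&link, 5);

	/* ... a second rule at the same location is rejected up front */
	printf("second insert at loc 5: %s\n",
	       loc_is_taken(&link, 5) ? "rejected" : "accepted");
	return 0;
}
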
 
 static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
@@ -8515,48 +8827,46 @@ static int ixgbe_set_features(struct net_device *netdev,
                        adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
        }
 
-       if (features & NETIF_F_HW_VLAN_CTAG_RX)
-               ixgbe_vlan_strip_enable(adapter);
-       else
-               ixgbe_vlan_strip_disable(adapter);
-
        if (changed & NETIF_F_RXALL)
                need_reset = true;
 
        netdev->features = features;
 
-#ifdef CONFIG_IXGBE_VXLAN
        if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) {
                if (features & NETIF_F_RXCSUM)
                        adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED;
                else
                        ixgbe_clear_vxlan_port(adapter);
        }
-#endif /* CONFIG_IXGBE_VXLAN */
 
        if (need_reset)
                ixgbe_do_reset(netdev);
+       else if (changed & (NETIF_F_HW_VLAN_CTAG_RX |
+                           NETIF_F_HW_VLAN_CTAG_FILTER))
+               ixgbe_set_rx_mode(netdev);
 
        return 0;
 }
 
-#ifdef CONFIG_IXGBE_VXLAN
 /**
  * ixgbe_add_vxlan_port - Get notifications about VXLAN ports that come up
  * @dev: The port's netdev
- * @sa_family: Socket Family that VXLAN is notifiying us about
- * @port: New UDP port number that VXLAN started listening to
+ * @ti: Tunnel endpoint information
  **/
-static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
-                                __be16 port)
+static void ixgbe_add_vxlan_port(struct net_device *dev,
+                                struct udp_tunnel_info *ti)
 {
        struct ixgbe_adapter *adapter = netdev_priv(dev);
        struct ixgbe_hw *hw = &adapter->hw;
+       __be16 port = ti->port;
 
-       if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
+       if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+               return;
+
+       if (ti->sa_family != AF_INET)
                return;
 
-       if (sa_family == AF_INET6)
+       if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
                return;
 
        if (adapter->vxlan_port == port)
@@ -8576,30 +8886,31 @@ static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
 /**
  * ixgbe_del_vxlan_port - Get notifications about VXLAN ports that go away
  * @dev: The port's netdev
- * @sa_family: Socket Family that VXLAN is notifying us about
- * @port: UDP port number that VXLAN stopped listening to
+ * @ti: Tunnel endpoint information
  **/
-static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
-                                __be16 port)
+static void ixgbe_del_vxlan_port(struct net_device *dev,
+                                struct udp_tunnel_info *ti)
 {
        struct ixgbe_adapter *adapter = netdev_priv(dev);
 
-       if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
+       if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
                return;
 
-       if (sa_family == AF_INET6)
+       if (ti->sa_family != AF_INET)
                return;
 
-       if (adapter->vxlan_port != port) {
+       if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
+               return;
+
+       if (adapter->vxlan_port != ti->port) {
                netdev_info(dev, "Port %d was not found, not deleting\n",
-                           ntohs(port));
+                           ntohs(ti->port));
                return;
        }
 
        ixgbe_clear_vxlan_port(adapter);
        adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED;
 }
-#endif /* CONFIG_IXGBE_VXLAN */
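
With the move from .ndo_add_vxlan_port/.ndo_del_vxlan_port to the udp_tunnel_info-based callbacks above, the handlers receive a struct udp_tunnel_info and must filter on ti->type and ti->sa_family themselves, as the new code does. Drivers converted to this API usually also ask the stack to re-announce the currently tracked ports when the interface comes up; the kernel-context sketch below (not compilable on its own) assumes the udp_tunnel_get_rx_info() helper from <net/udp_tunnel.h>, and the corresponding ixgbe_open() change, if any, lies outside this hunk.

#include <net/udp_tunnel.h>

/* Sketch: replaying UDP tunnel port notifications at open time so that
 * hardware offloads survive an ifdown/ifup cycle.
 */
static int example_open(struct net_device *netdev)
{
	/* ... bring the hardware up first ... */

	/* triggers .ndo_udp_tunnel_add for each port the stack tracks */
	udp_tunnel_get_rx_info(netdev);

	return 0;
}
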
 
 static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                             struct net_device *dev,
@@ -8833,17 +9144,36 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
        kfree(fwd_adapter);
 }
 
-#define IXGBE_MAX_TUNNEL_HDR_LEN 80
+#define IXGBE_MAX_MAC_HDR_LEN          127
+#define IXGBE_MAX_NETWORK_HDR_LEN      511
+
 static netdev_features_t
 ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
                     netdev_features_t features)
 {
-       if (!skb->encapsulation)
-               return features;
-
-       if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) >
-                    IXGBE_MAX_TUNNEL_HDR_LEN))
-               return features & ~NETIF_F_CSUM_MASK;
+       unsigned int network_hdr_len, mac_hdr_len;
+
+       /* Make certain the headers can be described by a context descriptor */
+       mac_hdr_len = skb_network_header(skb) - skb->data;
+       if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN))
+               return features & ~(NETIF_F_HW_CSUM |
+                                   NETIF_F_SCTP_CRC |
+                                   NETIF_F_HW_VLAN_CTAG_TX |
+                                   NETIF_F_TSO |
+                                   NETIF_F_TSO6);
+
+       network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
+       if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN))
+               return features & ~(NETIF_F_HW_CSUM |
+                                   NETIF_F_SCTP_CRC |
+                                   NETIF_F_TSO |
+                                   NETIF_F_TSO6);
+
+       /* We can only support IPV4 TSO in tunnels if we can mangle the
+        * inner IP ID field, so strip TSO if MANGLEID is not supported.
+        */
+       if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
+               features &= ~NETIF_F_TSO;
 
        return features;
 }
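
The two limits introduced above (a 127-byte MAC header and a 511-byte network header) match the 7-bit MACLEN and 9-bit IPLEN fields of the advanced Tx context descriptor, which is what the "described by a context descriptor" comment refers to. As a concrete check of the arithmetic: for a VLAN-tagged IPv6 TCP packet, skb_network_header(skb) - skb->data is 18 bytes and skb_checksum_start(skb) - skb_network_header(skb) is 40 bytes, so neither test strips any features. The stand-alone snippet below replays the same comparisons with hypothetical lengths and names.

#include <stdio.h>

#define EXAMPLE_MAX_MAC_HDR_LEN		127	/* 7-bit MACLEN field */
#define EXAMPLE_MAX_NETWORK_HDR_LEN	511	/* 9-bit IPLEN field  */

int main(void)
{
	/* hypothetical VLAN-tagged IPv6 TCP packet */
	unsigned int mac_hdr_len = 14 + 4;	/* Ethernet + 802.1Q tag */
	unsigned int network_hdr_len = 40;	/* fixed IPv6 header     */

	if (mac_hdr_len > EXAMPLE_MAX_MAC_HDR_LEN ||
	    network_hdr_len > EXAMPLE_MAX_NETWORK_HDR_LEN)
		printf("headers too long: checksum/TSO offloads stripped\n");
	else
		printf("headers fit in a context descriptor: offloads kept\n");
	return 0;
}
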
@@ -8858,6 +9188,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
        .ndo_set_mac_address    = ixgbe_set_mac,
        .ndo_change_mtu         = ixgbe_change_mtu,
        .ndo_tx_timeout         = ixgbe_tx_timeout,
+       .ndo_set_tx_maxrate     = ixgbe_tx_maxrate,
        .ndo_vlan_rx_add_vid    = ixgbe_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = ixgbe_vlan_rx_kill_vid,
        .ndo_do_ioctl           = ixgbe_ioctl,
@@ -8892,10 +9223,8 @@ static const struct net_device_ops ixgbe_netdev_ops = {
        .ndo_bridge_getlink     = ixgbe_ndo_bridge_getlink,
        .ndo_dfwd_add_station   = ixgbe_fwd_add,
        .ndo_dfwd_del_station   = ixgbe_fwd_del,
-#ifdef CONFIG_IXGBE_VXLAN
-       .ndo_add_vxlan_port     = ixgbe_add_vxlan_port,
-       .ndo_del_vxlan_port     = ixgbe_del_vxlan_port,
-#endif /* CONFIG_IXGBE_VXLAN */
+       .ndo_udp_tunnel_add     = ixgbe_add_vxlan_port,
+       .ndo_udp_tunnel_del     = ixgbe_del_vxlan_port,
        .ndo_features_check     = ixgbe_features_check,
 };
 
@@ -8943,7 +9272,7 @@ static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
 
 /**
  * ixgbe_wol_supported - Check whether device supports WoL
- * @hw: hw specific details
+ * @adapter: the adapter private structure
  * @device_id: the device ID
  * @subdev_id: the subsystem device ID
  *
@@ -8951,19 +9280,33 @@ static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
  * which devices have WoL support
  *
  **/
-int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
-                       u16 subdevice_id)
+bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
+                        u16 subdevice_id)
 {
        struct ixgbe_hw *hw = &adapter->hw;
        u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
-       int is_wol_supported = 0;
 
+       /* WOL not supported on 82598 */
+       if (hw->mac.type == ixgbe_mac_82598EB)
+               return false;
+
+       /* check eeprom to see if WOL is enabled for X540 and newer */
+       if (hw->mac.type >= ixgbe_mac_X540) {
+               if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
+                   ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
+                    (hw->bus.func == 0)))
+                       return true;
+       }
+
+       /* WOL is determined based on device IDs for 82599 MACs */
        switch (device_id) {
        case IXGBE_DEV_ID_82599_SFP:
                /* Only these subdevices could supports WOL */
                switch (subdevice_id) {
-               case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
                case IXGBE_SUBDEV_ID_82599_560FLR:
+               case IXGBE_SUBDEV_ID_82599_LOM_SNAP6:
+               case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
+               case IXGBE_SUBDEV_ID_82599_SFP_2OCP:
                        /* only support first port */
                        if (hw->bus.func != 0)
                                break;
@@ -8971,66 +9314,31 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
                case IXGBE_SUBDEV_ID_82599_SFP:
                case IXGBE_SUBDEV_ID_82599_RNDC:
                case IXGBE_SUBDEV_ID_82599_ECNA_DP:
-               case IXGBE_SUBDEV_ID_82599_LOM_SFP:
-                       is_wol_supported = 1;
-                       break;
+               case IXGBE_SUBDEV_ID_82599_SFP_1OCP:
+               case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1:
+               case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2:
+                       return true;
                }
                break;
        case IXGBE_DEV_ID_82599EN_SFP:
-               /* Only this subdevice supports WOL */
+               /* Only these subdevices support WOL */
                switch (subdevice_id) {
                case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1:
-                       is_wol_supported = 1;
-                       break;
+                       return true;
                }
                break;
        case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
                /* All except this subdevice support WOL */
                if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
-                       is_wol_supported = 1;
+                       return true;
                break;
        case IXGBE_DEV_ID_82599_KX4:
-               is_wol_supported = 1;
-               break;
-       case IXGBE_DEV_ID_X540T:
-       case IXGBE_DEV_ID_X540T1:
-       case IXGBE_DEV_ID_X550T:
-       case IXGBE_DEV_ID_X550EM_X_KX4:
-       case IXGBE_DEV_ID_X550EM_X_KR:
-       case IXGBE_DEV_ID_X550EM_X_10G_T:
-               /* check eeprom to see if enabled wol */
-               if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
-                   ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
-                    (hw->bus.func == 0))) {
-                       is_wol_supported = 1;
-               }
+               return true;
+       default:
                break;
        }
 
-       return is_wol_supported;
-}
-
-/**
- * ixgbe_get_platform_mac_addr - Look up MAC address in Open Firmware / IDPROM
- * @adapter: Pointer to adapter struct
- */
-static void ixgbe_get_platform_mac_addr(struct ixgbe_adapter *adapter)
-{
-#ifdef CONFIG_OF
-       struct device_node *dp = pci_device_to_OF_node(adapter->pdev);
-       struct ixgbe_hw *hw = &adapter->hw;
-       const unsigned char *addr;
-
-       addr = of_get_mac_address(dp);
-       if (addr) {
-               ether_addr_copy(hw->mac.perm_addr, addr);
-               return;
-       }
-#endif /* CONFIG_OF */
-
-#ifdef CONFIG_SPARC
-       ether_addr_copy(hw->mac.perm_addr, idprom->id_ethaddr);
-#endif /* CONFIG_SPARC */
+       return false;
 }
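
The rewritten helper above checks the EEPROM capability word first for X540 and newer parts: IXGBE_DEVICE_CAPS_WOL_PORT0_1 advertises WoL on both ports, while IXGBE_DEVICE_CAPS_WOL_PORT0 advertises it on function 0 only; 82599 parts still fall through to the device/subdevice ID tables. A minimal sketch of that EEPROM decode follows; the constant values and names (EXAMPLE_WOL_*) are hypothetical stand-ins, not the driver's definitions.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the EEPROM WoL capability encoding. */
#define EXAMPLE_WOL_PORT0_1	0x4	/* WoL available on ports 0 and 1 */
#define EXAMPLE_WOL_PORT0	0x8	/* WoL available on port 0 only   */

static bool example_wol_supported(unsigned int wol_cap, unsigned int func)
{
	if (wol_cap == EXAMPLE_WOL_PORT0_1)
		return true;
	return wol_cap == EXAMPLE_WOL_PORT0 && func == 0;
}

int main(void)
{
	printf("port0-only cap, function 1: %d\n",
	       example_wol_supported(EXAMPLE_WOL_PORT0, 1));	/* 0 */
	printf("both-ports cap, function 1: %d\n",
	       example_wol_supported(EXAMPLE_WOL_PORT0_1, 1));	/* 1 */
	return 0;
}
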
 
 /**
@@ -9136,23 +9444,23 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
 
        /* Setup hw api */
-       memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
+       hw->mac.ops   = *ii->mac_ops;
        hw->mac.type  = ii->mac;
        hw->mvals     = ii->mvals;
 
        /* EEPROM */
-       memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
+       hw->eeprom.ops = *ii->eeprom_ops;
        eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
        if (ixgbe_removed(hw->hw_addr)) {
                err = -EIO;
                goto err_ioremap;
        }
        /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
-       if (!(eec & (1 << 8)))
+       if (!(eec & BIT(8)))
                hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
 
        /* PHY */
-       memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
+       hw->phy.ops = *ii->phy_ops;
        hw->phy.sfp_type = ixgbe_sfp_type_unknown;
        /* ixgbe_identify_phy_generic will set prtad and mmds properly */
        hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
@@ -9169,12 +9477,17 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (err)
                goto err_sw_init;
 
+       /* Make sure the SWFW semaphore is in a valid state */
+       if (hw->mac.ops.init_swfw_sync)
+               hw->mac.ops.init_swfw_sync(hw);
+
        /* Make it possible the adapter to be woken up via WOL */
        switch (adapter->hw.mac.type) {
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
                break;
        default:
@@ -9215,54 +9528,62 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto skip_sriov;
        /* Mailbox */
        ixgbe_init_mbx_params_pf(hw);
-       memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));
+       hw->mbx.ops = ii->mbx_ops;
        pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
        ixgbe_enable_sriov(adapter);
 skip_sriov:
 
 #endif
        netdev->features = NETIF_F_SG |
-                          NETIF_F_IP_CSUM |
-                          NETIF_F_IPV6_CSUM |
-                          NETIF_F_HW_VLAN_CTAG_TX |
-                          NETIF_F_HW_VLAN_CTAG_RX |
                           NETIF_F_TSO |
                           NETIF_F_TSO6 |
                           NETIF_F_RXHASH |
-                          NETIF_F_RXCSUM;
+                          NETIF_F_RXCSUM |
+                          NETIF_F_HW_CSUM;
 
-       netdev->hw_features = netdev->features | NETIF_F_HW_L2FW_DOFFLOAD;
+#define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+                                   NETIF_F_GSO_GRE_CSUM | \
+                                   NETIF_F_GSO_IPXIP4 | \
+                                   NETIF_F_GSO_IPXIP6 | \
+                                   NETIF_F_GSO_UDP_TUNNEL | \
+                                   NETIF_F_GSO_UDP_TUNNEL_CSUM)
 
-       switch (adapter->hw.mac.type) {
-       case ixgbe_mac_82599EB:
-       case ixgbe_mac_X540:
-       case ixgbe_mac_X550:
-       case ixgbe_mac_X550EM_x:
+       netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES;
+       netdev->features |= NETIF_F_GSO_PARTIAL |
+                           IXGBE_GSO_PARTIAL_FEATURES;
+
+       if (hw->mac.type >= ixgbe_mac_82599EB)
                netdev->features |= NETIF_F_SCTP_CRC;
-               netdev->hw_features |= NETIF_F_SCTP_CRC |
-                                      NETIF_F_NTUPLE |
+
+       /* copy netdev features into list of user selectable features */
+       netdev->hw_features |= netdev->features |
+                              NETIF_F_HW_VLAN_CTAG_RX |
+                              NETIF_F_HW_VLAN_CTAG_TX |
+                              NETIF_F_RXALL |
+                              NETIF_F_HW_L2FW_DOFFLOAD;
+
+       if (hw->mac.type >= ixgbe_mac_82599EB)
+               netdev->hw_features |= NETIF_F_NTUPLE |
                                       NETIF_F_HW_TC;
-               break;
-       default:
-               break;
-       }
 
-       netdev->hw_features |= NETIF_F_RXALL;
-       netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+       if (pci_using_dac)
+               netdev->features |= NETIF_F_HIGHDMA;
 
-       netdev->vlan_features |= NETIF_F_TSO;
-       netdev->vlan_features |= NETIF_F_TSO6;
-       netdev->vlan_features |= NETIF_F_IP_CSUM;
-       netdev->vlan_features |= NETIF_F_IPV6_CSUM;
-       netdev->vlan_features |= NETIF_F_SG;
+       netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
+       netdev->hw_enc_features |= netdev->vlan_features;
+       netdev->mpls_features |= NETIF_F_HW_CSUM;
 
-       netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+       /* set these bits last since they cannot be part of vlan_features */
+       netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+                           NETIF_F_HW_VLAN_CTAG_RX |
+                           NETIF_F_HW_VLAN_CTAG_TX;
 
        netdev->priv_flags |= IFF_UNICAST_FLT;
        netdev->priv_flags |= IFF_SUPP_NOFCS;
 
 #ifdef CONFIG_IXGBE_DCB
-       netdev->dcbnl_ops = &dcbnl_ops;
+       if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
+               netdev->dcbnl_ops = &dcbnl_ops;
 #endif
 
 #ifdef IXGBE_FCOE
@@ -9287,10 +9608,6 @@ skip_sriov:
                                         NETIF_F_FCOE_MTU;
        }
 #endif /* IXGBE_FCOE */
-       if (pci_using_dac) {
-               netdev->features |= NETIF_F_HIGHDMA;
-               netdev->vlan_features |= NETIF_F_HIGHDMA;
-       }
 
        if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
                netdev->hw_features |= NETIF_F_LRO;
@@ -9304,7 +9621,8 @@ skip_sriov:
                goto err_sw_init;
        }
 
-       ixgbe_get_platform_mac_addr(adapter);
+       eth_platform_get_mac_address(&adapter->pdev->dev,
+                                    adapter->hw.mac.perm_addr);
 
        memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
 
@@ -9455,6 +9773,7 @@ err_sw_init:
        ixgbe_disable_sriov(adapter);
        adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
        iounmap(adapter->io_addr);
+       kfree(adapter->jump_tables[0]);
        kfree(adapter->mac_table);
 err_ioremap:
        disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
@@ -9483,6 +9802,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
        struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev;
        bool disable_dev;
+       int i;
 
        /* if !adapter then we already cleaned up in probe */
        if (!adapter)
@@ -9532,6 +9852,14 @@ static void ixgbe_remove(struct pci_dev *pdev)
 
        e_dev_info("complete\n");
 
+       for (i = 0; i < IXGBE_MAX_LINK_HANDLE; i++) {
+               if (adapter->jump_tables[i]) {
+                       kfree(adapter->jump_tables[i]->input);
+                       kfree(adapter->jump_tables[i]->mask);
+               }
+               kfree(adapter->jump_tables[i]);
+       }
+
        kfree(adapter->mac_table);
        disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
        free_netdev(netdev);
@@ -9612,6 +9940,9 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
                case ixgbe_mac_X550EM_x:
                        device_id = IXGBE_DEV_ID_X550EM_X_VF;
                        break;
+               case ixgbe_mac_x550em_a:
+                       device_id = IXGBE_DEV_ID_X550EM_A_VF;
+                       break;
                default:
                        device_id = 0;
                        break;