Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author     David S. Miller <davem@davemloft.net>
           Fri, 2 Dec 2011 18:49:21 +0000 (13:49 -0500)
committer  David S. Miller <davem@davemloft.net>
           Fri, 2 Dec 2011 18:49:21 +0000 (13:49 -0500)
21 files changed:
MAINTAINERS
drivers/infiniband/core/addr.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/net/bonding/bond_main.c
drivers/net/ethernet/davicom/dm9000.c
drivers/net/ethernet/jme.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/virtio/virtio_mmio.c
drivers/virtio/virtio_pci.c
include/linux/netdevice.h
include/linux/virtio_config.h
include/net/inetpeer.h
net/core/dev.c
net/ipv4/route.c
net/ipv4/udp.c
net/ipv6/ipv6_sockglue.c
net/ipv6/udp.c
net/mac80211/agg-tx.c
net/netlabel/netlabel_kapi.c
net/sched/sch_teql.c
net/wireless/nl80211.c

diff --combined MAINTAINERS
@@@ -789,6 -789,7 +789,7 @@@ L: linux-arm-kernel@lists.infradead.or
  S:    Maintained
  T:    git git://git.pengutronix.de/git/imx/linux-2.6.git
  F:    arch/arm/mach-mx*/
+ F:    arch/arm/mach-imx/
  F:    arch/arm/plat-mxc/
  
  ARM/FREESCALE IMX51
@@@ -804,6 -805,13 +805,13 @@@ S:       Maintaine
  T:    git git://git.linaro.org/people/shawnguo/linux-2.6.git
  F:    arch/arm/mach-imx/*imx6*
  
+ ARM/FREESCALE MXS ARM ARCHITECTURE
+ M:    Shawn Guo <shawn.guo@linaro.org>
+ L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+ S:    Maintained
+ T:    git git://git.linaro.org/people/shawnguo/linux-2.6.git
+ F:    arch/arm/mach-mxs/
  ARM/GLOMATION GESBC9312SX MACHINE SUPPORT
  M:    Lennert Buytenhek <kernel@wantstofly.org>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@@ -1789,6 -1797,14 +1797,14 @@@ F:    include/net/cfg80211.
  F:    net/wireless/*
  X:    net/wireless/wext*
  
+ CHAR and MISC DRIVERS
+ M:    Arnd Bergmann <arnd@arndb.de>
+ M:    Greg Kroah-Hartman <greg@kroah.com>
+ T:    git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
+ S:    Maintained
+ F:    drivers/char/*
+ F:    drivers/misc/*
  CHECKPATCH
  M:    Andy Whitcroft <apw@canonical.com>
  S:    Supported
@@@ -3720,7 -3736,7 +3736,7 @@@ F:      fs/jbd2
  F:    include/linux/jbd2.h
  
  JSM Neo PCI based serial card
- M:    Breno Leitao <leitao@linux.vnet.ibm.com>
+ M:    Lucas Tavares <lucaskt@linux.vnet.ibm.com>
  L:    linux-serial@vger.kernel.org
  S:    Maintained
  F:    drivers/tty/serial/jsm/
@@@ -5659,7 -5675,6 +5675,6 @@@ F:      drivers/media/video/*7146
  F:    include/media/*7146*
  
  SAMSUNG AUDIO (ASoC) DRIVERS
- M:    Jassi Brar <jassisinghbrar@gmail.com>
  M:    Sangbeom Kim <sbkim73@samsung.com>
  L:    alsa-devel@alsa-project.org (moderated for non-subscribers)
  S:    Supported
@@@ -6495,13 -6510,6 +6510,13 @@@ W:    http://tcp-lp-mod.sourceforge.net
  S:    Maintained
  F:    net/ipv4/tcp_lp.c
  
 +TEAM DRIVER
 +M:    Jiri Pirko <jpirko@redhat.com>
 +L:    netdev@vger.kernel.org
 +S:    Supported
 +F:    drivers/net/team/
 +F:    include/linux/if_team.h
 +
  TEGRA SUPPORT
  M:    Colin Cross <ccross@android.com>
  M:    Olof Johansson <olof@lixom.net>
diff --combined drivers/infiniband/core/addr.c
@@@ -216,7 -216,9 +216,9 @@@ static int addr4_resolve(struct sockadd
  
        neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->dst.dev);
        if (!neigh || !(neigh->nud_state & NUD_VALID)) {
+               rcu_read_lock();
                neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
+               rcu_read_unlock();
                ret = -ENODATA;
                if (neigh)
                        goto release;
@@@ -243,8 -245,8 +245,8 @@@ static int addr6_resolve(struct sockadd
        int ret;
  
        memset(&fl6, 0, sizeof fl6);
 -      ipv6_addr_copy(&fl6.daddr, &dst_in->sin6_addr);
 -      ipv6_addr_copy(&fl6.saddr, &src_in->sin6_addr);
 +      fl6.daddr = dst_in->sin6_addr;
 +      fl6.saddr = src_in->sin6_addr;
        fl6.flowi6_oif = addr->bound_dev_if;
  
        dst = ip6_route_output(&init_net, NULL, &fl6);
                        goto put;
  
                src_in->sin6_family = AF_INET6;
 -              ipv6_addr_copy(&src_in->sin6_addr, &fl6.saddr);
 +              src_in->sin6_addr = fl6.saddr;
        }
  
        if (dst->dev->flags & IFF_LOOPBACK) {
                goto put;
        }
  
+       rcu_read_lock();
        neigh = dst_get_neighbour(dst);
        if (!neigh || !(neigh->nud_state & NUD_VALID)) {
                if (neigh)
                        neigh_event_send(neigh, NULL);
                ret = -ENODATA;
-               goto put;
+       } else {
+               ret = rdma_copy_addr(addr, dst->dev, neigh->ha);
        }
-       ret = rdma_copy_addr(addr, dst->dev, neigh->ha);
+       rcu_read_unlock();
  put:
        dst_release(dst);
        return ret;
diff --combined drivers/infiniband/ulp/ipoib/ipoib_main.c
@@@ -171,7 -171,7 +171,7 @@@ static int ipoib_stop(struct net_devic
        return 0;
  }
  
 -static u32 ipoib_fix_features(struct net_device *dev, u32 features)
 +static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features)
  {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
  
@@@ -432,7 -432,7 +432,7 @@@ static void path_rec_completion(int sta
  
        spin_lock_irqsave(&priv->lock, flags);
  
-       if (ah) {
+       if (!IS_ERR_OR_NULL(ah)) {
                path->pathrec = *pathrec;
  
                old_ah   = path->ah;
@@@ -555,6 -555,7 +555,7 @@@ static int path_rec_start(struct net_de
        return 0;
  }
  
+ /* called with rcu_read_lock */
  static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
  {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
@@@ -636,6 -637,7 +637,7 @@@ err_drop
        spin_unlock_irqrestore(&priv->lock, flags);
  }
  
+ /* called with rcu_read_lock */
  static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
  {
        struct ipoib_dev_priv *priv = netdev_priv(skb->dev);
@@@ -720,13 -722,14 +722,14 @@@ static int ipoib_start_xmit(struct sk_b
        struct neighbour *n = NULL;
        unsigned long flags;
  
+       rcu_read_lock();
        if (likely(skb_dst(skb)))
                n = dst_get_neighbour(skb_dst(skb));
  
        if (likely(n)) {
                if (unlikely(!*to_ipoib_neigh(n))) {
                        ipoib_path_lookup(skb, dev);
-                       return NETDEV_TX_OK;
+                       goto unlock;
                }
  
                neigh = *to_ipoib_neigh(n);
                        ipoib_neigh_free(dev, neigh);
                        spin_unlock_irqrestore(&priv->lock, flags);
                        ipoib_path_lookup(skb, dev);
-                       return NETDEV_TX_OK;
+                       goto unlock;
                }
  
                if (ipoib_cm_get(neigh)) {
                        if (ipoib_cm_up(neigh)) {
                                ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
-                               return NETDEV_TX_OK;
+                               goto unlock;
                        }
                } else if (neigh->ah) {
                        ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(n->ha));
-                       return NETDEV_TX_OK;
+                       goto unlock;
                }
  
                if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
                                           phdr->hwaddr + 4);
                                dev_kfree_skb_any(skb);
                                ++dev->stats.tx_dropped;
-                               return NETDEV_TX_OK;
+                               goto unlock;
                        }
  
                        unicast_arp_send(skb, dev, phdr);
                }
        }
+ unlock:
+       rcu_read_unlock();
        return NETDEV_TX_OK;
  }
  
@@@ -837,7 -841,7 +841,7 @@@ static int ipoib_hard_header(struct sk_
        dst = skb_dst(skb);
        n = NULL;
        if (dst)
-               n = dst_get_neighbour(dst);
+               n = dst_get_neighbour_raw(dst);
        if ((!dst || !n) && daddr) {
                struct ipoib_pseudoheader *phdr =
                        (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
@@@ -1218,8 -1222,6 +1222,8 @@@ static struct net_device *ipoib_add_por
        priv->dev->mtu  = IPOIB_UD_MTU(priv->max_ib_mtu);
        priv->mcast_mtu  = priv->admin_mtu = priv->dev->mtu;
  
 +      priv->dev->neigh_priv_len = sizeof(struct ipoib_neigh);
 +
        result = ib_query_pkey(hca, port, 0, &priv->pkey);
        if (result) {
                printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
diff --combined drivers/net/bonding/bond_main.c
@@@ -1325,12 -1325,11 +1325,12 @@@ static int bond_sethwaddr(struct net_de
        return 0;
  }
  
 -static u32 bond_fix_features(struct net_device *dev, u32 features)
 +static netdev_features_t bond_fix_features(struct net_device *dev,
 +      netdev_features_t features)
  {
        struct slave *slave;
        struct bonding *bond = netdev_priv(dev);
 -      u32 mask;
 +      netdev_features_t mask;
        int i;
  
        read_lock(&bond->lock);
@@@ -1364,7 -1363,7 +1364,7 @@@ static void bond_compute_features(struc
  {
        struct slave *slave;
        struct net_device *bond_dev = bond->dev;
 -      u32 vlan_features = BOND_VLAN_FEATURES;
 +      netdev_features_t vlan_features = BOND_VLAN_FEATURES;
        unsigned short max_hard_header_len = ETH_HLEN;
        int i;
  
@@@ -1898,7 -1897,7 +1898,7 @@@ int bond_release(struct net_device *bon
        struct bonding *bond = netdev_priv(bond_dev);
        struct slave *slave, *oldcurrent;
        struct sockaddr addr;
 -      u32 old_features = bond_dev->features;
 +      netdev_features_t old_features = bond_dev->features;
  
        /* slave is not a slave or master is not master of this slave */
        if (!(slave_dev->flags & IFF_SLAVE) ||
@@@ -2554,30 -2553,6 +2554,6 @@@ re_arm
        }
  }
  
- static __be32 bond_glean_dev_ip(struct net_device *dev)
- {
-       struct in_device *idev;
-       struct in_ifaddr *ifa;
-       __be32 addr = 0;
-       if (!dev)
-               return 0;
-       rcu_read_lock();
-       idev = __in_dev_get_rcu(dev);
-       if (!idev)
-               goto out;
-       ifa = idev->ifa_list;
-       if (!ifa)
-               goto out;
-       addr = ifa->ifa_local;
- out:
-       rcu_read_unlock();
-       return addr;
- }
  static int bond_has_this_ip(struct bonding *bond, __be32 ip)
  {
        struct vlan_entry *vlan;
@@@ -3323,6 -3298,10 +3299,10 @@@ static int bond_inetaddr_event(struct n
        struct bonding *bond;
        struct vlan_entry *vlan;
  
+       /* we only care about primary address */
+       if(ifa->ifa_flags & IFA_F_SECONDARY)
+               return NOTIFY_DONE;
        list_for_each_entry(bond, &bn->dev_list, bond_list) {
                if (bond->dev == event_dev) {
                        switch (event) {
                                bond->master_ip = ifa->ifa_local;
                                return NOTIFY_OK;
                        case NETDEV_DOWN:
-                               bond->master_ip = bond_glean_dev_ip(bond->dev);
+                               bond->master_ip = 0;
                                return NOTIFY_OK;
                        default:
                                return NOTIFY_DONE;
                                        vlan->vlan_ip = ifa->ifa_local;
                                        return NOTIFY_OK;
                                case NETDEV_DOWN:
-                                       vlan->vlan_ip =
-                                               bond_glean_dev_ip(vlan_dev);
+                                       vlan->vlan_ip = 0;
                                        return NOTIFY_OK;
                                default:
                                        return NOTIFY_DONE;
@@@ -4361,7 -4339,7 +4340,7 @@@ static void bond_setup(struct net_devic
                                NETIF_F_HW_VLAN_RX |
                                NETIF_F_HW_VLAN_FILTER;
  
 -      bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM);
 +      bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
        bond_dev->features |= bond_dev->hw_features;
  }
  
diff --combined drivers/net/ethernet/davicom/dm9000.c
@@@ -474,11 -474,10 +474,11 @@@ static int dm9000_nway_reset(struct net
        return mii_nway_restart(&dm->mii);
  }
  
 -static int dm9000_set_features(struct net_device *dev, u32 features)
 +static int dm9000_set_features(struct net_device *dev,
 +      netdev_features_t features)
  {
        board_info_t *dm = to_dm9000_board(dev);
 -      u32 changed = dev->features ^ features;
 +      netdev_features_t changed = dev->features ^ features;
        unsigned long flags;
  
        if (!(changed & NETIF_F_RXCSUM))
@@@ -614,7 -613,7 +614,7 @@@ static int dm9000_set_wol(struct net_de
  
                if (!dm->wake_state)
                        irq_set_irq_wake(dm->irq_wake, 1);
-               else if (dm->wake_state & !opts)
+               else if (dm->wake_state && !opts)
                        irq_set_irq_wake(dm->irq_wake, 0);
        }
  
diff --combined drivers/net/ethernet/jme.c
@@@ -1744,6 -1744,112 +1744,112 @@@ jme_phy_off(struct jme_adapter *jme
                jme_new_phy_off(jme);
  }
  
+ static int
+ jme_phy_specreg_read(struct jme_adapter *jme, u32 specreg)
+ {
+       u32 phy_addr;
+       phy_addr = JM_PHY_SPEC_REG_READ | specreg;
+       jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
+                       phy_addr);
+       return jme_mdio_read(jme->dev, jme->mii_if.phy_id,
+                       JM_PHY_SPEC_DATA_REG);
+ }
+ static void
+ jme_phy_specreg_write(struct jme_adapter *jme, u32 ext_reg, u32 phy_data)
+ {
+       u32 phy_addr;
+       phy_addr = JM_PHY_SPEC_REG_WRITE | ext_reg;
+       jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_DATA_REG,
+                       phy_data);
+       jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
+                       phy_addr);
+ }
+ static int
+ jme_phy_calibration(struct jme_adapter *jme)
+ {
+       u32 ctrl1000, phy_data;
+       jme_phy_off(jme);
+       jme_phy_on(jme);
+       /*  Enable PHY test mode 1 */
+       ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
+       ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
+       ctrl1000 |= PHY_GAD_TEST_MODE_1;
+       jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
+       phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
+       phy_data &= ~JM_PHY_EXT_COMM_2_CALI_MODE_0;
+       phy_data |= JM_PHY_EXT_COMM_2_CALI_LATCH |
+                       JM_PHY_EXT_COMM_2_CALI_ENABLE;
+       jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
+       msleep(20);
+       phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
+       phy_data &= ~(JM_PHY_EXT_COMM_2_CALI_ENABLE |
+                       JM_PHY_EXT_COMM_2_CALI_MODE_0 |
+                       JM_PHY_EXT_COMM_2_CALI_LATCH);
+       jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
+       /*  Disable PHY test mode */
+       ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
+       ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
+       jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
+       return 0;
+ }
+ static int
+ jme_phy_setEA(struct jme_adapter *jme)
+ {
+       u32 phy_comm0 = 0, phy_comm1 = 0;
+       u8 nic_ctrl;
+       pci_read_config_byte(jme->pdev, PCI_PRIV_SHARE_NICCTRL, &nic_ctrl);
+       if ((nic_ctrl & 0x3) == JME_FLAG_PHYEA_ENABLE)
+               return 0;
+       switch (jme->pdev->device) {
+       case PCI_DEVICE_ID_JMICRON_JMC250:
+               if (((jme->chip_main_rev == 5) &&
+                       ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
+                       (jme->chip_sub_rev == 3))) ||
+                       (jme->chip_main_rev >= 6)) {
+                       phy_comm0 = 0x008A;
+                       phy_comm1 = 0x4109;
+               }
+               if ((jme->chip_main_rev == 3) &&
+                       ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
+                       phy_comm0 = 0xE088;
+               break;
+       case PCI_DEVICE_ID_JMICRON_JMC260:
+               if (((jme->chip_main_rev == 5) &&
+                       ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
+                       (jme->chip_sub_rev == 3))) ||
+                       (jme->chip_main_rev >= 6)) {
+                       phy_comm0 = 0x008A;
+                       phy_comm1 = 0x4109;
+               }
+               if ((jme->chip_main_rev == 3) &&
+                       ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
+                       phy_comm0 = 0xE088;
+               if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 0))
+                       phy_comm0 = 0x608A;
+               if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 2))
+                       phy_comm0 = 0x408A;
+               break;
+       default:
+               return -ENODEV;
+       }
+       if (phy_comm0)
+               jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_0_REG, phy_comm0);
+       if (phy_comm1)
+               jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_1_REG, phy_comm1);
+       return 0;
+ }
  static int
  jme_open(struct net_device *netdev)
  {
                jme_set_settings(netdev, &jme->old_ecmd);
        else
                jme_reset_phy_processor(jme);
+       jme_phy_calibration(jme);
+       jme_phy_setEA(jme);
        jme_reset_link(jme);
  
        return 0;
@@@ -1883,7 -1990,7 +1990,7 @@@ jme_fill_tx_map(struct pci_dev *pdev
                struct page *page,
                u32 page_offset,
                u32 len,
 -              u8 hidma)
 +              bool hidma)
  {
        dma_addr_t dmaaddr;
  
@@@ -1917,7 -2024,7 +2024,7 @@@ jme_map_tx_skb(struct jme_adapter *jme
        struct jme_ring *txring = &(jme->txring[0]);
        struct txdesc *txdesc = txring->desc, *ctxdesc;
        struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
 -      u8 hidma = jme->dev->features & NETIF_F_HIGHDMA;
 +      bool hidma = jme->dev->features & NETIF_F_HIGHDMA;
        int i, nr_frags = skb_shinfo(skb)->nr_frags;
        int mask = jme->tx_ring_mask;
        const struct skb_frag_struct *frag;
@@@ -2292,9 -2399,9 +2399,9 @@@ jme_get_drvinfo(struct net_device *netd
  {
        struct jme_adapter *jme = netdev_priv(netdev);
  
 -      strcpy(info->driver, DRV_NAME);
 -      strcpy(info->version, DRV_VERSION);
 -      strcpy(info->bus_info, pci_name(jme->pdev));
 +      strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
 +      strlcpy(info->version, DRV_VERSION, sizeof(info->version));
 +      strlcpy(info->bus_info, pci_name(jme->pdev), sizeof(info->bus_info));
  }
  
  static int
@@@ -2620,8 -2727,8 +2727,8 @@@ jme_set_msglevel(struct net_device *net
        jme->msg_enable = value;
  }
  
 -static u32
 -jme_fix_features(struct net_device *netdev, u32 features)
 +static netdev_features_t
 +jme_fix_features(struct net_device *netdev, netdev_features_t features)
  {
        if (netdev->mtu > 1900)
                features &= ~(NETIF_F_ALL_TSO | NETIF_F_ALL_CSUM);
  }
  
  static int
 -jme_set_features(struct net_device *netdev, u32 features)
 +jme_set_features(struct net_device *netdev, netdev_features_t features)
  {
        struct jme_adapter *jme = netdev_priv(netdev);
  
@@@ -3184,7 -3291,8 +3291,8 @@@ jme_resume(struct device *dev
                jme_set_settings(netdev, &jme->old_ecmd);
        else
                jme_reset_phy_processor(jme);
+       jme_phy_calibration(jme);
+       jme_phy_setEA(jme);
        jme_start_irq(jme);
        netif_device_attach(netdev);
  
@@@ -3239,4 -3347,3 +3347,3 @@@ MODULE_DESCRIPTION("JMicron JMC2x0 PCI 
  MODULE_LICENSE("GPL");
  MODULE_VERSION(DRV_VERSION);
  MODULE_DEVICE_TABLE(pci, jme_pci_tbl);
diff --combined drivers/net/wireless/ath/ath9k/hw.c
@@@ -1827,7 -1827,8 +1827,8 @@@ static void ath9k_set_power_sleep(struc
        }
  
        /* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode. */
-       REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
+       if (AR_SREV_9300_20_OR_LATER(ah))
+               REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
  }
  
  /*
@@@ -2335,7 -2336,7 +2336,7 @@@ int ath9k_hw_fill_cap_info(struct ath_h
                        ah->enabled_cals |= TX_IQ_ON_AGC_CAL;
        }
        if (AR_SREV_9462(ah))
 -              pCap->hw_caps |= ATH9K_HW_CAP_RTT;
 +              pCap->hw_caps |= ATH9K_HW_CAP_RTT | ATH9K_HW_CAP_MCI;
  
        return 0;
  }
@@@ -2583,7 -2584,7 +2584,7 @@@ void ath9k_hw_set_txpowerlimit(struct a
        struct ath9k_channel *chan = ah->curchan;
        struct ieee80211_channel *channel = chan->chan;
  
 -      reg->power_limit = min_t(int, limit, MAX_RATE_POWER);
 +      reg->power_limit = min_t(u32, limit, MAX_RATE_POWER);
        if (test)
                channel->max_power = MAX_RATE_POWER / 2;
  
diff --combined drivers/virtio/virtio_mmio.c
@@@ -118,7 -118,7 +118,7 @@@ static void vm_finalize_features(struc
        vring_transport_features(vdev);
  
        for (i = 0; i < ARRAY_SIZE(vdev->features); i++) {
-               writel(i, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SET);
+               writel(i, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SEL);
                writel(vdev->features[i],
                                vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES);
        }
@@@ -361,12 -361,7 +361,12 @@@ static int vm_find_vqs(struct virtio_de
        return 0;
  }
  
 +static const char *vm_bus_name(struct virtio_device *vdev)
 +{
 +      struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
  
 +      return vm_dev->pdev->name;
 +}
  
  static struct virtio_config_ops virtio_mmio_config_ops = {
        .get            = vm_get,
        .del_vqs        = vm_del_vqs,
        .get_features   = vm_get_features,
        .finalize_features = vm_finalize_features,
 +      .bus_name       = vm_bus_name,
  };
  
  
diff --combined drivers/virtio/virtio_pci.c
@@@ -169,11 -169,29 +169,29 @@@ static void vp_set_status(struct virtio
        iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
  }
  
+ /* wait for pending irq handlers */
+ static void vp_synchronize_vectors(struct virtio_device *vdev)
+ {
+       struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+       int i;
+       if (vp_dev->intx_enabled)
+               synchronize_irq(vp_dev->pci_dev->irq);
+       for (i = 0; i < vp_dev->msix_vectors; ++i)
+               synchronize_irq(vp_dev->msix_entries[i].vector);
+ }
  static void vp_reset(struct virtio_device *vdev)
  {
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        /* 0 status means a reset. */
        iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
+       /* Flush out the status write, and flush in device writes,
+        * including MSI-X interrupts, if any. */
+       ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
+       /* Flush pending VQ/configuration callbacks. */
+       vp_synchronize_vectors(vdev);
  }
  
  /* the notify function used when creating a virt queue */
@@@ -580,13 -598,6 +598,13 @@@ static int vp_find_vqs(struct virtio_de
                                  false, false);
  }
  
 +static const char *vp_bus_name(struct virtio_device *vdev)
 +{
 +      struct virtio_pci_device *vp_dev = to_vp_device(vdev);
 +
 +      return pci_name(vp_dev->pci_dev);
 +}
 +
  static struct virtio_config_ops virtio_pci_config_ops = {
        .get            = vp_get,
        .set            = vp_set,
        .del_vqs        = vp_del_vqs,
        .get_features   = vp_get_features,
        .finalize_features = vp_finalize_features,
 +      .bus_name       = vp_bus_name,
  };
  
  static void virtio_pci_release_dev(struct device *_d)
diff --combined include/linux/netdevice.h
@@@ -43,7 -43,6 +43,7 @@@
  #include <linux/rculist.h>
  #include <linux/dmaengine.h>
  #include <linux/workqueue.h>
 +#include <linux/dynamic_queue_limits.h>
  
  #include <linux/ethtool.h>
  #include <net/net_namespace.h>
@@@ -51,9 -50,6 +51,9 @@@
  #ifdef CONFIG_DCB
  #include <net/dcbnl.h>
  #endif
 +#include <net/netprio_cgroup.h>
 +
 +#include <linux/netdev_features.h>
  
  struct vlan_group;
  struct netpoll_info;
@@@ -145,20 -141,22 +145,20 @@@ static inline bool dev_xmit_complete(in
   *    used.
   */
  
 -#if defined(CONFIG_WLAN) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
 +#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
  # if defined(CONFIG_MAC80211_MESH)
  #  define LL_MAX_HEADER 128
  # else
  #  define LL_MAX_HEADER 96
  # endif
 -#elif defined(CONFIG_TR) || defined(CONFIG_TR_MODULE)
 +#elif IS_ENABLED(CONFIG_TR)
  # define LL_MAX_HEADER 48
  #else
  # define LL_MAX_HEADER 32
  #endif
  
 -#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
 -    !defined(CONFIG_NET_IPGRE) &&  !defined(CONFIG_NET_IPGRE_MODULE) && \
 -    !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
 -    !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
 +#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
 +    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
  #define MAX_HEADER LL_MAX_HEADER
  #else
  #define MAX_HEADER (LL_MAX_HEADER + 48)
@@@ -214,11 -212,6 +214,11 @@@ enum 
  #include <linux/cache.h>
  #include <linux/skbuff.h>
  
 +#ifdef CONFIG_RPS
 +#include <linux/jump_label.h>
 +extern struct jump_label_key rps_needed;
 +#endif
 +
  struct neighbour;
  struct neigh_parms;
  struct sk_buff;
@@@ -279,11 -272,16 +279,11 @@@ struct hh_cache 
   *
   * We could use other alignment values, but we must maintain the
   * relationship HH alignment <= LL alignment.
 - *
 - * LL_ALLOCATED_SPACE also takes into account the tailroom the device
 - * may need.
   */
  #define LL_RESERVED_SPACE(dev) \
        ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
  #define LL_RESERVED_SPACE_EXTRA(dev,extra) \
        ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
 -#define LL_ALLOCATED_SPACE(dev) \
 -      ((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
  
  struct header_ops {
        int     (*create) (struct sk_buff *skb, struct net_device *dev,
@@@ -518,23 -516,11 +518,23 @@@ static inline void napi_synchronize(con
  #endif
  
  enum netdev_queue_state_t {
 -      __QUEUE_STATE_XOFF,
 +      __QUEUE_STATE_DRV_XOFF,
 +      __QUEUE_STATE_STACK_XOFF,
        __QUEUE_STATE_FROZEN,
 -#define QUEUE_STATE_XOFF_OR_FROZEN ((1 << __QUEUE_STATE_XOFF)         | \
 -                                  (1 << __QUEUE_STATE_FROZEN))
 +#define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF)           | \
 +                            (1 << __QUEUE_STATE_STACK_XOFF))
 +#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF          | \
 +                                      (1 << __QUEUE_STATE_FROZEN))
  };
 +/*
 + * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue.  The
 + * netif_tx_* functions below are used to manipulate this flag.  The
 + * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
 + * queue independently.  The netif_xmit_*stopped functions below are called
 + * to check if the queue has been stopped by the driver or stack (either
 + * of the XOFF bits are set in the state).  Drivers should not need to call
 + * netif_xmit*stopped functions, they should only be using netif_tx_*.
 + */
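As a quick illustration of the split described in the comment above (a minimal sketch, not taken from this patch; the foo_* ring helpers are hypothetical), a driver only ever touches the DRV_XOFF bit through the netif_tx_* helpers, while the stack owns STACK_XOFF and checks both bits via netif_xmit_stopped():

	#include <linux/netdevice.h>

	/* Hypothetical ring-occupancy helpers, assumed for this sketch only. */
	static bool foo_ring_full(struct net_device *dev);
	static bool foo_ring_has_room(struct net_device *dev);

	/* Hypothetical driver TX path: stop the queue when its ring fills up. */
	static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct netdev_queue *txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		/* ... post skb to the hardware ring ... */
		if (foo_ring_full(dev))
			netif_tx_stop_queue(txq);	/* sets __QUEUE_STATE_DRV_XOFF */
		return NETDEV_TX_OK;
	}

	/* Hypothetical completion handler: wake the queue once there is room again. */
	static void foo_tx_clean(struct net_device *dev, unsigned int q)
	{
		struct netdev_queue *txq = netdev_get_tx_queue(dev, q);

		if (netif_tx_queue_stopped(txq) && foo_ring_has_room(dev))
			netif_tx_wake_queue(txq);	/* clears only DRV_XOFF */
	}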
  
  struct netdev_queue {
  /*
   */
        struct net_device       *dev;
        struct Qdisc            *qdisc;
 -      unsigned long           state;
        struct Qdisc            *qdisc_sleeping;
 -#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
 +#ifdef CONFIG_SYSFS
        struct kobject          kobj;
  #endif
  #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
         * please use this field instead of dev->trans_start
         */
        unsigned long           trans_start;
 +
 +      /*
 +       * Number of TX timeouts for this queue
 +       * (/sys/class/net/DEV/Q/trans_timeout)
 +       */
 +      unsigned long           trans_timeout;
 +
 +      unsigned long           state;
 +
 +#ifdef CONFIG_BQL
 +      struct dql              dql;
 +#endif
  } ____cacheline_aligned_in_smp;
  
  static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
@@@ -870,13 -845,12 +870,13 @@@ struct netdev_tc_txq 
   *    Called to release previously enslaved netdev.
   *
   *      Feature/offload setting functions.
 - * u32 (*ndo_fix_features)(struct net_device *dev, u32 features);
 + * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
 + *            netdev_features_t features);
   *    Adjusts the requested feature flags according to device-specific
   *    constraints, and returns the resulting flags. Must not modify
   *    the device state.
   *
 - * int (*ndo_set_features)(struct net_device *dev, u32 features);
 + * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
   *    Called to update device configuration to new features. Passed
   *    feature set might be less than what was returned by ndo_fix_features()).
   *    Must return >0 or -errno if it changed dev->features itself.
@@@ -938,7 -912,7 +938,7 @@@ struct net_device_ops 
        int                     (*ndo_get_vf_port)(struct net_device *dev,
                                                   int vf, struct sk_buff *skb);
        int                     (*ndo_setup_tc)(struct net_device *dev, u8 tc);
 -#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
 +#if IS_ENABLED(CONFIG_FCOE)
        int                     (*ndo_fcoe_enable)(struct net_device *dev);
        int                     (*ndo_fcoe_disable)(struct net_device *dev);
        int                     (*ndo_fcoe_ddp_setup)(struct net_device *dev,
                                                       unsigned int sgc);
  #endif
  
 -#if defined(CONFIG_LIBFCOE) || defined(CONFIG_LIBFCOE_MODULE)
 +#if IS_ENABLED(CONFIG_LIBFCOE)
  #define NETDEV_FCOE_WWNN 0
  #define NETDEV_FCOE_WWPN 1
        int                     (*ndo_fcoe_get_wwn)(struct net_device *dev,
                                                 struct net_device *slave_dev);
        int                     (*ndo_del_slave)(struct net_device *dev,
                                                 struct net_device *slave_dev);
 -      u32                     (*ndo_fix_features)(struct net_device *dev,
 -                                                  u32 features);
 +      netdev_features_t       (*ndo_fix_features)(struct net_device *dev,
 +                                                  netdev_features_t features);
        int                     (*ndo_set_features)(struct net_device *dev,
 -                                                  u32 features);
 +                                                  netdev_features_t features);
 +      int                     (*ndo_neigh_construct)(struct neighbour *n);
 +      void                    (*ndo_neigh_destroy)(struct neighbour *n);
  };
  
  /*
@@@ -1025,13 -997,91 +1025,13 @@@ struct net_device 
        struct list_head        unreg_list;
  
        /* currently active device features */
 -      u32                     features;
 +      netdev_features_t       features;
        /* user-changeable features */
 -      u32                     hw_features;
 +      netdev_features_t       hw_features;
        /* user-requested features */
 -      u32                     wanted_features;
 +      netdev_features_t       wanted_features;
        /* mask of features inheritable by VLAN devices */
 -      u32                     vlan_features;
 -
 -      /* Net device feature bits; if you change something,
 -       * also update netdev_features_strings[] in ethtool.c */
 -
 -#define NETIF_F_SG            1       /* Scatter/gather IO. */
 -#define NETIF_F_IP_CSUM               2       /* Can checksum TCP/UDP over IPv4. */
 -#define NETIF_F_NO_CSUM               4       /* Does not require checksum. F.e. loopack. */
 -#define NETIF_F_HW_CSUM               8       /* Can checksum all the packets. */
 -#define NETIF_F_IPV6_CSUM     16      /* Can checksum TCP/UDP over IPV6 */
 -#define NETIF_F_HIGHDMA               32      /* Can DMA to high memory. */
 -#define NETIF_F_FRAGLIST      64      /* Scatter/gather IO. */
 -#define NETIF_F_HW_VLAN_TX    128     /* Transmit VLAN hw acceleration */
 -#define NETIF_F_HW_VLAN_RX    256     /* Receive VLAN hw acceleration */
 -#define NETIF_F_HW_VLAN_FILTER        512     /* Receive filtering on VLAN */
 -#define NETIF_F_VLAN_CHALLENGED       1024    /* Device cannot handle VLAN packets */
 -#define NETIF_F_GSO           2048    /* Enable software GSO. */
 -#define NETIF_F_LLTX          4096    /* LockLess TX - deprecated. Please */
 -                                      /* do not use LLTX in new drivers */
 -#define NETIF_F_NETNS_LOCAL   8192    /* Does not change network namespaces */
 -#define NETIF_F_GRO           16384   /* Generic receive offload */
 -#define NETIF_F_LRO           32768   /* large receive offload */
 -
 -/* the GSO_MASK reserves bits 16 through 23 */
 -#define NETIF_F_FCOE_CRC      (1 << 24) /* FCoE CRC32 */
 -#define NETIF_F_SCTP_CSUM     (1 << 25) /* SCTP checksum offload */
 -#define NETIF_F_FCOE_MTU      (1 << 26) /* Supports max FCoE MTU, 2158 bytes*/
 -#define NETIF_F_NTUPLE                (1 << 27) /* N-tuple filters supported */
 -#define NETIF_F_RXHASH                (1 << 28) /* Receive hashing offload */
 -#define NETIF_F_RXCSUM                (1 << 29) /* Receive checksumming offload */
 -#define NETIF_F_NOCACHE_COPY  (1 << 30) /* Use no-cache copyfromuser */
 -#define NETIF_F_LOOPBACK      (1 << 31) /* Enable loopback */
 -
 -      /* Segmentation offload features */
 -#define NETIF_F_GSO_SHIFT     16
 -#define NETIF_F_GSO_MASK      0x00ff0000
 -#define NETIF_F_TSO           (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
 -#define NETIF_F_UFO           (SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
 -#define NETIF_F_GSO_ROBUST    (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
 -#define NETIF_F_TSO_ECN               (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
 -#define NETIF_F_TSO6          (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
 -#define NETIF_F_FSO           (SKB_GSO_FCOE << NETIF_F_GSO_SHIFT)
 -
 -      /* Features valid for ethtool to change */
 -      /* = all defined minus driver/device-class-related */
 -#define NETIF_F_NEVER_CHANGE  (NETIF_F_VLAN_CHALLENGED | \
 -                                NETIF_F_LLTX | NETIF_F_NETNS_LOCAL)
 -#define NETIF_F_ETHTOOL_BITS  (0xff3fffff & ~NETIF_F_NEVER_CHANGE)
 -
 -      /* List of features with software fallbacks. */
 -#define NETIF_F_GSO_SOFTWARE  (NETIF_F_TSO | NETIF_F_TSO_ECN | \
 -                               NETIF_F_TSO6 | NETIF_F_UFO)
 -
 -
 -#define NETIF_F_GEN_CSUM      (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
 -#define NETIF_F_V4_CSUM               (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
 -#define NETIF_F_V6_CSUM               (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
 -#define NETIF_F_ALL_CSUM      (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
 -
 -#define NETIF_F_ALL_TSO       (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
 -
 -#define NETIF_F_ALL_FCOE      (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \
 -                               NETIF_F_FSO)
 -
 -      /*
 -       * If one device supports one of these features, then enable them
 -       * for all in netdev_increment_features.
 -       */
 -#define NETIF_F_ONE_FOR_ALL   (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
 -                               NETIF_F_SG | NETIF_F_HIGHDMA |         \
 -                               NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED)
 -      /*
 -       * If one device doesn't support one of these features, then disable it
 -       * for all in netdev_increment_features.
 -       */
 -#define NETIF_F_ALL_FOR_ALL   (NETIF_F_NOCACHE_COPY | NETIF_F_FSO)
 -
 -      /* changeable features with no special hardware requirements */
 -#define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO)
 +      netdev_features_t       vlan_features;
  
        /* Interface index. Unique device identifier    */
        int                     ifindex;
        unsigned char           perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
        unsigned char           addr_assign_type; /* hw address assignment type */
        unsigned char           addr_len;       /* hardware address length      */
 +      unsigned char           neigh_priv_len;
        unsigned short          dev_id;         /* for shared network cards */
  
        spinlock_t              addr_list_lock;
  
        /* Protocol specific pointers */
  
 -#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
 +#if IS_ENABLED(CONFIG_VLAN_8021Q)
        struct vlan_group __rcu *vlgrp;         /* VLAN group */
  #endif
 -#ifdef CONFIG_NET_DSA
 -      void                    *dsa_ptr;       /* dsa specific data */
 +#if IS_ENABLED(CONFIG_NET_DSA)
 +      struct dsa_switch_tree  *dsa_ptr;       /* dsa specific data */
  #endif
        void                    *atalk_ptr;     /* AppleTalk link       */
        struct in_device __rcu  *ip_ptr;        /* IPv4 specific data   */
  
        unsigned char           broadcast[MAX_ADDR_LEN];        /* hw bcast add */
  
 -#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
 +#ifdef CONFIG_SYSFS
        struct kset             *queues_kset;
 +#endif
  
 +#ifdef CONFIG_RPS
        struct netdev_rx_queue  *_rx;
  
        /* Number of RX queues allocated at register_netdev() time */
        struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
        u8 prio_tc_map[TC_BITMASK + 1];
  
 -#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
 +#if IS_ENABLED(CONFIG_FCOE)
        /* max exchange id for FCoE LRO by ddp */
        unsigned int            fcoe_ddp_xid;
 +#endif
 +#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
 +      struct netprio_map __rcu *priomap;
  #endif
        /* phy device may attach itself for hardware timestamping */
        struct phy_device *phydev;
@@@ -1471,7 -1515,7 +1471,7 @@@ struct packet_type 
                                         struct packet_type *,
                                         struct net_device *);
        struct sk_buff          *(*gso_segment)(struct sk_buff *skb,
 -                                              u32 features);
 +                                              netdev_features_t features);
        int                     (*gso_send_check)(struct sk_buff *skb);
        struct sk_buff          **(*gro_receive)(struct sk_buff **head,
                                               struct sk_buff *skb);
@@@ -1739,7 -1783,7 +1739,7 @@@ extern void __netif_schedule(struct Qdi
  
  static inline void netif_schedule_queue(struct netdev_queue *txq)
  {
 -      if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
 +      if (!(txq->state & QUEUE_STATE_ANY_XOFF))
                __netif_schedule(txq->qdisc);
  }
  
@@@ -1753,7 -1797,7 +1753,7 @@@ static inline void netif_tx_schedule_al
  
  static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
  {
 -      clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
 +      clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
  }
  
  /**
@@@ -1785,7 -1829,7 +1785,7 @@@ static inline void netif_tx_wake_queue(
                return;
        }
  #endif
 -      if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
 +      if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
                __netif_schedule(dev_queue->qdisc);
  }
  
@@@ -1817,7 -1861,7 +1817,7 @@@ static inline void netif_tx_stop_queue(
                pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
                return;
        }
 -      set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
 +      set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
  }
  
  /**
@@@ -1844,7 -1888,7 +1844,7 @@@ static inline void netif_tx_stop_all_qu
  
  static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
  {
 -      return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
 +      return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
  }
  
  /**
@@@ -1858,68 -1902,9 +1858,68 @@@ static inline int netif_queue_stopped(c
        return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
  }
  
 -static inline int netif_tx_queue_frozen_or_stopped(const struct netdev_queue *dev_queue)
 +static inline int netif_xmit_stopped(const struct netdev_queue *dev_queue)
  {
 -      return dev_queue->state & QUEUE_STATE_XOFF_OR_FROZEN;
 +      return dev_queue->state & QUEUE_STATE_ANY_XOFF;
 +}
 +
 +static inline int netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
 +{
 +      return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
 +}
 +
 +static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
 +                                      unsigned int bytes)
 +{
 +#ifdef CONFIG_BQL
 +      dql_queued(&dev_queue->dql, bytes);
 +      if (unlikely(dql_avail(&dev_queue->dql) < 0)) {
 +              set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
 +              if (unlikely(dql_avail(&dev_queue->dql) >= 0))
 +                      clear_bit(__QUEUE_STATE_STACK_XOFF,
 +                          &dev_queue->state);
 +      }
 +#endif
 +}
 +
 +static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
 +{
 +      netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
 +}
 +
 +static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
 +                                           unsigned pkts, unsigned bytes)
 +{
 +#ifdef CONFIG_BQL
 +      if (likely(bytes)) {
 +              dql_completed(&dev_queue->dql, bytes);
 +              if (unlikely(test_bit(__QUEUE_STATE_STACK_XOFF,
 +                  &dev_queue->state) &&
 +                  dql_avail(&dev_queue->dql) >= 0)) {
 +                      if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF,
 +                           &dev_queue->state))
 +                              netif_schedule_queue(dev_queue);
 +              }
 +      }
 +#endif
 +}
 +
 +static inline void netdev_completed_queue(struct net_device *dev,
 +                                        unsigned pkts, unsigned bytes)
 +{
 +      netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
 +}
 +
 +static inline void netdev_tx_reset_queue(struct netdev_queue *q)
 +{
 +#ifdef CONFIG_BQL
 +      dql_reset(&q->dql);
 +#endif
 +}
 +
 +static inline void netdev_reset_queue(struct net_device *dev_queue)
 +{
 +      netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
  }
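A minimal sketch of how a single-queue driver would be expected to wire up these byte-queue-limit helpers (hypothetical foo_* names, not part of this patch): report bytes as descriptors are posted, report them again as completions are reaped, and clear the accounting when the ring is torn down.

	#include <linux/netdevice.h>

	static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		/* ... map the skb and post it to the hardware ring ... */
		netdev_sent_queue(dev, skb->len);	/* may set __QUEUE_STATE_STACK_XOFF */
		return NETDEV_TX_OK;
	}

	static void foo_tx_complete(struct net_device *dev, unsigned int pkts,
				    unsigned int bytes)
	{
		/* ... free the completed skbs ... */
		netdev_completed_queue(dev, pkts, bytes);	/* may restart the queue */
	}

	static void foo_down(struct net_device *dev)
	{
		netdev_reset_queue(dev);	/* drop any in-flight byte accounting */
	}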
  
  /**
@@@ -2006,7 -1991,7 +2006,7 @@@ static inline void netif_wake_subqueue(
        if (netpoll_trap())
                return;
  #endif
 -      if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
 +      if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
                __netif_schedule(txq->qdisc);
  }
  
@@@ -2535,8 -2520,7 +2535,8 @@@ extern int              netdev_set_master(struct ne
  extern int netdev_set_bond_master(struct net_device *dev,
                                  struct net_device *master);
  extern int skb_checksum_help(struct sk_buff *skb);
 -extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features);
 +extern struct sk_buff *skb_gso_segment(struct sk_buff *skb,
 +      netdev_features_t features);
  #ifdef CONFIG_BUG
  extern void netdev_rx_csum_fault(struct net_device *dev);
  #else
@@@ -2552,6 -2536,8 +2552,8 @@@ extern void             net_disable_timestamp(void
  extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
  extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
  extern void dev_seq_stop(struct seq_file *seq, void *v);
+ extern int dev_seq_open_ops(struct inode *inode, struct file *file,
+                           const struct seq_operations *ops);
  #endif
  
  extern int netdev_class_create_file(struct class_attribute *class_attr);
@@@ -2563,13 -2549,11 +2565,13 @@@ extern const char *netdev_drivername(co
  
  extern void linkwatch_run_queue(void);
  
 -static inline u32 netdev_get_wanted_features(struct net_device *dev)
 +static inline netdev_features_t netdev_get_wanted_features(
 +      struct net_device *dev)
  {
        return (dev->features & ~dev->hw_features) | dev->wanted_features;
  }
 -u32 netdev_increment_features(u32 all, u32 one, u32 mask);
 +netdev_features_t netdev_increment_features(netdev_features_t all,
 +      netdev_features_t one, netdev_features_t mask);
  int __netdev_update_features(struct net_device *dev);
  void netdev_update_features(struct net_device *dev);
  void netdev_change_features(struct net_device *dev);
  void netif_stacked_transfer_operstate(const struct net_device *rootdev,
                                        struct net_device *dev);
  
 -u32 netif_skb_features(struct sk_buff *skb);
 +netdev_features_t netif_skb_features(struct sk_buff *skb);
  
 -static inline int net_gso_ok(u32 features, int gso_type)
 +static inline int net_gso_ok(netdev_features_t features, int gso_type)
  {
 -      int feature = gso_type << NETIF_F_GSO_SHIFT;
 +      netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;
 +
 +      /* check flags correspondence */
 +      BUILD_BUG_ON(SKB_GSO_TCPV4   != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
 +      BUILD_BUG_ON(SKB_GSO_UDP     != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
 +      BUILD_BUG_ON(SKB_GSO_DODGY   != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
 +      BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
 +      BUILD_BUG_ON(SKB_GSO_TCPV6   != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
 +      BUILD_BUG_ON(SKB_GSO_FCOE    != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
 +
        return (features & feature) == feature;
  }
  
 -static inline int skb_gso_ok(struct sk_buff *skb, u32 features)
 +static inline int skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
  {
        return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
               (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
  }
  
 -static inline int netif_needs_gso(struct sk_buff *skb, int features)
 +static inline int netif_needs_gso(struct sk_buff *skb,
 +      netdev_features_t features)
  {
        return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
                unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
@@@ -2620,6 -2594,22 +2622,6 @@@ static inline int netif_is_bond_slave(s
  
  extern struct pernet_operations __net_initdata loopback_net_ops;
  
 -static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev)
 -{
 -      if (dev->features & NETIF_F_RXCSUM)
 -              return 1;
 -      if (!dev->ethtool_ops || !dev->ethtool_ops->get_rx_csum)
 -              return 0;
 -      return dev->ethtool_ops->get_rx_csum(dev);
 -}
 -
 -static inline u32 dev_ethtool_get_flags(struct net_device *dev)
 -{
 -      if (!dev->ethtool_ops || !dev->ethtool_ops->get_flags)
 -              return 0;
 -      return dev->ethtool_ops->get_flags(dev);
 -}
 -
  /* Logging, debugging and troubleshooting/diagnostic helpers. */
  
  /* netdev_printk helpers, similar to dev_printk */
diff --combined include/linux/virtio_config.h
@@@ -85,6 -85,8 +85,8 @@@
   * @reset: reset the device
   *    vdev: the virtio device
   *    After this, status and feature negotiation must be done again
+  *    Device must not be reset from its vq/config callbacks, or in
+  *    parallel with being added/removed.
   * @find_vqs: find virtqueues and instantiate them.
   *    vdev: the virtio_device
   *    nvqs: the number of virtqueues to find
   *    vdev: the virtio_device
   *    This gives the final feature bits for the device: it can change
   *    the dev->feature bits if it wants.
 + * @bus_name: return the bus name associated with the device
 + *    vdev: the virtio_device
 + *      This returns a pointer to the bus name a la pci_name from which
 + *      the caller can then copy.
   */
  typedef void vq_callback_t(struct virtqueue *);
  struct virtio_config_ops {
        void (*del_vqs)(struct virtio_device *);
        u32 (*get_features)(struct virtio_device *vdev);
        void (*finalize_features)(struct virtio_device *vdev);
 +      const char *(*bus_name)(struct virtio_device *vdev);
  };
  
  /* If driver didn't advertise the feature, it will never appear. */
@@@ -187,14 -184,5 +189,14 @@@ struct virtqueue *virtio_find_single_vq
                return ERR_PTR(err);
        return vq;
  }
 +
 +static inline
 +const char *virtio_bus_name(struct virtio_device *vdev)
 +{
 +      if (!vdev->config->bus_name)
 +              return "virtio";
 +      return vdev->config->bus_name(vdev);
 +}
 +
  #endif /* __KERNEL__ */
  #endif /* _LINUX_VIRTIO_CONFIG_H */
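For reference, a small usage sketch (hypothetical driver code, not part of this patch): a virtio driver can label its resources with the transport's bus name through the virtio_bus_name() wrapper above, which falls back to "virtio" when a transport does not implement .bus_name.

	#include <linux/virtio.h>
	#include <linux/virtio_config.h>

	static int foo_probe(struct virtio_device *vdev)
	{
		/* e.g. use the bus name when naming interrupts or in log messages */
		dev_info(&vdev->dev, "found device on bus %s\n", virtio_bus_name(vdev));
		return 0;
	}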
diff --combined include/net/inetpeer.h
@@@ -35,6 -35,7 +35,7 @@@ struct inet_peer 
  
        u32                     metrics[RTAX_MAX];
        u32                     rate_tokens;    /* rate limiting for ICMP */
+       int                     redirect_genid;
        unsigned long           rate_last;
        unsigned long           pmtu_expires;
        u32                     pmtu_orig;
@@@ -86,7 -87,7 +87,7 @@@ static inline struct inet_peer *inet_ge
  {
        struct inetpeer_addr daddr;
  
 -      ipv6_addr_copy((struct in6_addr *)daddr.addr.a6, v6daddr);
 +      *(struct in6_addr *)daddr.addr.a6 = *v6daddr;
        daddr.family = AF_INET6;
        return inet_getpeer(&daddr, create);
  }
diff --combined net/core/dev.c
  #include <linux/pci.h>
  #include <linux/inetdevice.h>
  #include <linux/cpu_rmap.h>
 -#include <linux/if_tunnel.h>
 -#include <linux/if_pppox.h>
 -#include <linux/ppp_defs.h>
  #include <linux/net_tstamp.h>
 +#include <linux/jump_label.h>
 +#include <net/flow_keys.h>
  
  #include "net-sysfs.h"
  
@@@ -1319,6 -1320,8 +1319,6 @@@ EXPORT_SYMBOL(dev_close)
   */
  void dev_disable_lro(struct net_device *dev)
  {
 -      u32 flags;
 -
        /*
         * If we're trying to disable lro on a vlan device
         * use the underlying physical device instead
        if (is_vlan_dev(dev))
                dev = vlan_dev_real_dev(dev);
  
 -      if (dev->ethtool_ops && dev->ethtool_ops->get_flags)
 -              flags = dev->ethtool_ops->get_flags(dev);
 -      else
 -              flags = ethtool_op_get_flags(dev);
 +      dev->wanted_features &= ~NETIF_F_LRO;
 +      netdev_update_features(dev);
  
 -      if (!(flags & ETH_FLAG_LRO))
 -              return;
 -
 -      __ethtool_set_flags(dev, flags & ~ETH_FLAG_LRO);
        if (unlikely(dev->features & NETIF_F_LRO))
                netdev_WARN(dev, "failed to disable LRO!\n");
  }
@@@ -1387,7 -1396,7 +1387,7 @@@ rollback
        for_each_net(net) {
                for_each_netdev(net, dev) {
                        if (dev == last)
-                               break;
+                               goto outroll;
  
                        if (dev->flags & IFF_UP) {
                                nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
                }
        }
  
+ outroll:
        raw_notifier_chain_unregister(&netdev_chain, nb);
        goto unlock;
  }
@@@ -1440,55 -1450,34 +1441,55 @@@ int call_netdevice_notifiers(unsigned l
  }
  EXPORT_SYMBOL(call_netdevice_notifiers);
  
 -/* When > 0 there are consumers of rx skb time stamps */
 -static atomic_t netstamp_needed = ATOMIC_INIT(0);
 +static struct jump_label_key netstamp_needed __read_mostly;
 +#ifdef HAVE_JUMP_LABEL
 +/* We are not allowed to call jump_label_dec() from irq context
 + * If net_disable_timestamp() is called from irq context, defer the
 + * jump_label_dec() calls.
 + */
 +static atomic_t netstamp_needed_deferred;
 +#endif
  
  void net_enable_timestamp(void)
  {
 -      atomic_inc(&netstamp_needed);
 +#ifdef HAVE_JUMP_LABEL
 +      int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
 +
 +      if (deferred) {
 +              while (--deferred)
 +                      jump_label_dec(&netstamp_needed);
 +              return;
 +      }
 +#endif
 +      WARN_ON(in_interrupt());
 +      jump_label_inc(&netstamp_needed);
  }
  EXPORT_SYMBOL(net_enable_timestamp);
  
  void net_disable_timestamp(void)
  {
 -      atomic_dec(&netstamp_needed);
 +#ifdef HAVE_JUMP_LABEL
 +      if (in_interrupt()) {
 +              atomic_inc(&netstamp_needed_deferred);
 +              return;
 +      }
 +#endif
 +      jump_label_dec(&netstamp_needed);
  }
  EXPORT_SYMBOL(net_disable_timestamp);
  
  static inline void net_timestamp_set(struct sk_buff *skb)
  {
 -      if (atomic_read(&netstamp_needed))
 +      skb->tstamp.tv64 = 0;
 +      if (static_branch(&netstamp_needed))
                __net_timestamp(skb);
 -      else
 -              skb->tstamp.tv64 = 0;
  }
  
 -static inline void net_timestamp_check(struct sk_buff *skb)
 -{
 -      if (!skb->tstamp.tv64 && atomic_read(&netstamp_needed))
 -              __net_timestamp(skb);
 -}
 +#define net_timestamp_check(COND, SKB)                        \
 +      if (static_branch(&netstamp_needed)) {          \
 +              if ((COND) && !(SKB)->tstamp.tv64)      \
 +                      __net_timestamp(SKB);           \
 +      }                                               \
  
  static int net_hwtstamp_validate(struct ifreq *ifr)
  {
@@@ -1935,8 -1924,7 +1936,8 @@@ EXPORT_SYMBOL(skb_checksum_help)
   *    It may return NULL if the skb requires no segmentation.  This is
   *    only possible when GSO is used for verifying header integrity.
   */
 -struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features)
 +struct sk_buff *skb_gso_segment(struct sk_buff *skb,
 +      netdev_features_t features)
  {
        struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
        struct packet_type *ptype;
                if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
                        dev->ethtool_ops->get_drvinfo(dev, &info);
  
 -              WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d ip_summed=%d\n",
 -                   info.driver, dev ? dev->features : 0L,
 -                   skb->sk ? skb->sk->sk_route_caps : 0L,
 +              WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d ip_summed=%d\n",
 +                   info.driver, dev ? &dev->features : NULL,
 +                   skb->sk ? &skb->sk->sk_route_caps : NULL,
                     skb->len, skb->data_len, skb->ip_summed);
  
                if (skb_header_cloned(skb) &&
@@@ -2077,7 -2065,7 +2078,7 @@@ static void dev_gso_skb_destructor(stru
   *    This function segments the given skb and stores the list of segments
   *    in skb->next.
   */
 -static int dev_gso_segment(struct sk_buff *skb, int features)
 +static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
  {
        struct sk_buff *segs;
  
@@@ -2116,7 -2104,7 +2117,7 @@@ static inline void skb_orphan_try(struc
        }
  }
  
 -static bool can_checksum_protocol(unsigned long features, __be16 protocol)
 +static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
  {
        return ((features & NETIF_F_GEN_CSUM) ||
                ((features & NETIF_F_V4_CSUM) &&
                 protocol == htons(ETH_P_FCOE)));
  }
  
 -static u32 harmonize_features(struct sk_buff *skb, __be16 protocol, u32 features)
 +static netdev_features_t harmonize_features(struct sk_buff *skb,
 +      __be16 protocol, netdev_features_t features)
  {
        if (!can_checksum_protocol(features, protocol)) {
                features &= ~NETIF_F_ALL_CSUM;
        return features;
  }
  
 -u32 netif_skb_features(struct sk_buff *skb)
 +netdev_features_t netif_skb_features(struct sk_buff *skb)
  {
        __be16 protocol = skb->protocol;
 -      u32 features = skb->dev->features;
 +      netdev_features_t features = skb->dev->features;
  
        if (protocol == htons(ETH_P_8021Q)) {
                struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
@@@ -2189,7 -2176,7 +2190,7 @@@ int dev_hard_start_xmit(struct sk_buff 
        unsigned int skb_len;
  
        if (likely(!skb->next)) {
 -              u32 features;
 +              netdev_features_t features;
  
                /*
                 * If device doesn't need skb->dst, release it right now while
@@@ -2270,7 -2257,7 +2271,7 @@@ gso
                        return rc;
                }
                txq_trans_update(txq);
 -              if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
 +              if (unlikely(netif_xmit_stopped(txq) && skb->next))
                        return NETDEV_TX_BUSY;
        } while (skb->next);
  
@@@ -2470,18 -2457,6 +2471,18 @@@ static inline int __dev_xmit_skb(struc
        return rc;
  }
  
 +#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
 +static void skb_update_prio(struct sk_buff *skb)
 +{
 +      struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
 +
 +      if ((!skb->priority) && (skb->sk) && map)
 +              skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx];
 +}
 +#else
 +#define skb_update_prio(skb)
 +#endif
 +
  static DEFINE_PER_CPU(int, xmit_recursion);
  #define RECURSION_LIMIT 10
  
@@@ -2522,8 -2497,6 +2523,8 @@@ int dev_queue_xmit(struct sk_buff *skb
         */
        rcu_read_lock_bh();
  
 +      skb_update_prio(skb);
 +
        txq = dev_pick_tx(dev, skb);
        q = rcu_dereference_bh(txq->qdisc);
  
  
                        HARD_TX_LOCK(dev, txq, cpu);
  
 -                      if (!netif_tx_queue_stopped(txq)) {
 +                      if (!netif_xmit_stopped(txq)) {
                                __this_cpu_inc(xmit_recursion);
                                rc = dev_hard_start_xmit(skb, dev, txq);
                                __this_cpu_dec(xmit_recursion);
@@@ -2619,28 -2592,123 +2620,28 @@@ static inline void ____napi_schedule(st
   */
  void __skb_get_rxhash(struct sk_buff *skb)
  {
 -      int nhoff, hash = 0, poff;
 -      const struct ipv6hdr *ip6;
 -      const struct iphdr *ip;
 -      const struct vlan_hdr *vlan;
 -      u8 ip_proto;
 -      u32 addr1, addr2;
 -      u16 proto;
 -      union {
 -              u32 v32;
 -              u16 v16[2];
 -      } ports;
 -
 -      nhoff = skb_network_offset(skb);
 -      proto = skb->protocol;
 -
 -again:
 -      switch (proto) {
 -      case __constant_htons(ETH_P_IP):
 -ip:
 -              if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
 -                      goto done;
 -
 -              ip = (const struct iphdr *) (skb->data + nhoff);
 -              if (ip_is_fragment(ip))
 -                      ip_proto = 0;
 -              else
 -                      ip_proto = ip->protocol;
 -              addr1 = (__force u32) ip->saddr;
 -              addr2 = (__force u32) ip->daddr;
 -              nhoff += ip->ihl * 4;
 -              break;
 -      case __constant_htons(ETH_P_IPV6):
 -ipv6:
 -              if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
 -                      goto done;
 -
 -              ip6 = (const struct ipv6hdr *) (skb->data + nhoff);
 -              ip_proto = ip6->nexthdr;
 -              addr1 = (__force u32) ip6->saddr.s6_addr32[3];
 -              addr2 = (__force u32) ip6->daddr.s6_addr32[3];
 -              nhoff += 40;
 -              break;
 -      case __constant_htons(ETH_P_8021Q):
 -              if (!pskb_may_pull(skb, sizeof(*vlan) + nhoff))
 -                      goto done;
 -              vlan = (const struct vlan_hdr *) (skb->data + nhoff);
 -              proto = vlan->h_vlan_encapsulated_proto;
 -              nhoff += sizeof(*vlan);
 -              goto again;
 -      case __constant_htons(ETH_P_PPP_SES):
 -              if (!pskb_may_pull(skb, PPPOE_SES_HLEN + nhoff))
 -                      goto done;
 -              proto = *((__be16 *) (skb->data + nhoff +
 -                                    sizeof(struct pppoe_hdr)));
 -              nhoff += PPPOE_SES_HLEN;
 -              switch (proto) {
 -              case __constant_htons(PPP_IP):
 -                      goto ip;
 -              case __constant_htons(PPP_IPV6):
 -                      goto ipv6;
 -              default:
 -                      goto done;
 -              }
 -      default:
 -              goto done;
 -      }
 -
 -      switch (ip_proto) {
 -      case IPPROTO_GRE:
 -              if (pskb_may_pull(skb, nhoff + 16)) {
 -                      u8 *h = skb->data + nhoff;
 -                      __be16 flags = *(__be16 *)h;
 +      struct flow_keys keys;
 +      u32 hash;
  
 -                      /*
 -                       * Only look inside GRE if version zero and no
 -                       * routing
 -                       */
 -                      if (!(flags & (GRE_VERSION|GRE_ROUTING))) {
 -                              proto = *(__be16 *)(h + 2);
 -                              nhoff += 4;
 -                              if (flags & GRE_CSUM)
 -                                      nhoff += 4;
 -                              if (flags & GRE_KEY)
 -                                      nhoff += 4;
 -                              if (flags & GRE_SEQ)
 -                                      nhoff += 4;
 -                              goto again;
 -                      }
 -              }
 -              break;
 -      case IPPROTO_IPIP:
 -              goto again;
 -      default:
 -              break;
 -      }
 +      if (!skb_flow_dissect(skb, &keys))
 +              return;
  
 -      ports.v32 = 0;
 -      poff = proto_ports_offset(ip_proto);
 -      if (poff >= 0) {
 -              nhoff += poff;
 -              if (pskb_may_pull(skb, nhoff + 4)) {
 -                      ports.v32 = * (__force u32 *) (skb->data + nhoff);
 -                      if (ports.v16[1] < ports.v16[0])
 -                              swap(ports.v16[0], ports.v16[1]);
 -                      skb->l4_rxhash = 1;
 -              }
 +      if (keys.ports) {
 +              if ((__force u16)keys.port16[1] < (__force u16)keys.port16[0])
 +                      swap(keys.port16[0], keys.port16[1]);
 +              skb->l4_rxhash = 1;
        }
  
        /* get a consistent hash (same value on both flow directions) */
 -      if (addr2 < addr1)
 -              swap(addr1, addr2);
 +      if ((__force u32)keys.dst < (__force u32)keys.src)
 +              swap(keys.dst, keys.src);
  
 -      hash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
 +      hash = jhash_3words((__force u32)keys.dst,
 +                          (__force u32)keys.src,
 +                          (__force u32)keys.ports, hashrnd);
        if (!hash)
                hash = 1;
  
 -done:
        skb->rxhash = hash;
  }
  EXPORT_SYMBOL(__skb_get_rxhash);
  struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
  EXPORT_SYMBOL(rps_sock_flow_table);
  
 +struct jump_label_key rps_needed __read_mostly;
 +
  static struct rps_dev_flow *
  set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
            struct rps_dev_flow *rflow, u16 next_cpu)
@@@ -2932,11 -2998,12 +2933,11 @@@ int netif_rx(struct sk_buff *skb
        if (netpoll_rx(skb))
                return NET_RX_DROP;
  
 -      if (netdev_tstamp_prequeue)
 -              net_timestamp_check(skb);
 +      net_timestamp_check(netdev_tstamp_prequeue, skb);
  
        trace_netif_rx(skb);
  #ifdef CONFIG_RPS
 -      {
 +      if (static_branch(&rps_needed)) {
                struct rps_dev_flow voidflow, *rflow = &voidflow;
                int cpu;
  
  
                rcu_read_unlock();
                preempt_enable();
 -      }
 -#else
 +      } else
 +#endif
        {
                unsigned int qtail;
                ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
                put_cpu();
        }
 -#endif
        return ret;
  }
  EXPORT_SYMBOL(netif_rx);
@@@ -3163,7 -3231,8 +3164,7 @@@ static int __netif_receive_skb(struct s
        int ret = NET_RX_DROP;
        __be16 type;
  
 -      if (!netdev_tstamp_prequeue)
 -              net_timestamp_check(skb);
 +      net_timestamp_check(!netdev_tstamp_prequeue, skb);
  
        trace_netif_receive_skb(skb);
  
   */
  int netif_receive_skb(struct sk_buff *skb)
  {
 -      if (netdev_tstamp_prequeue)
 -              net_timestamp_check(skb);
 +      net_timestamp_check(netdev_tstamp_prequeue, skb);
  
        if (skb_defer_rx_timestamp(skb))
                return NET_RX_SUCCESS;
  
  #ifdef CONFIG_RPS
 -      {
 +      if (static_branch(&rps_needed)) {
                struct rps_dev_flow voidflow, *rflow = &voidflow;
                int cpu, ret;
  
                if (cpu >= 0) {
                        ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
                        rcu_read_unlock();
 -              } else {
 -                      rcu_read_unlock();
 -                      ret = __netif_receive_skb(skb);
 +                      return ret;
                }
 -
 -              return ret;
 +              rcu_read_unlock();
        }
 -#else
 -      return __netif_receive_skb(skb);
  #endif
 +      return __netif_receive_skb(skb);
  }
  EXPORT_SYMBOL(netif_receive_skb);
  
@@@ -4209,6 -4283,12 +4210,12 @@@ static int dev_seq_open(struct inode *i
                            sizeof(struct dev_iter_state));
  }
  
+ int dev_seq_open_ops(struct inode *inode, struct file *file,
+                    const struct seq_operations *ops)
+ {
+       return seq_open_net(inode, file, ops, sizeof(struct dev_iter_state));
+ }
+
  static const struct file_operations dev_seq_fops = {
        .owner   = THIS_MODULE,
        .open    = dev_seq_open,
@@@ -4459,7 -4539,7 +4466,7 @@@ static void dev_change_rx_flags(struct 
  
  static int __dev_set_promiscuity(struct net_device *dev, int inc)
  {
 -      unsigned short old_flags = dev->flags;
 +      unsigned int old_flags = dev->flags;
        uid_t uid;
        gid_t gid;
  
   */
  int dev_set_promiscuity(struct net_device *dev, int inc)
  {
 -      unsigned short old_flags = dev->flags;
 +      unsigned int old_flags = dev->flags;
        int err;
  
        err = __dev_set_promiscuity(dev, inc);
@@@ -4543,7 -4623,7 +4550,7 @@@ EXPORT_SYMBOL(dev_set_promiscuity)
  
  int dev_set_allmulti(struct net_device *dev, int inc)
  {
 -      unsigned short old_flags = dev->flags;
 +      unsigned int old_flags = dev->flags;
  
        ASSERT_RTNL();
  
@@@ -4646,7 -4726,7 +4653,7 @@@ EXPORT_SYMBOL(dev_get_flags)
  
  int __dev_change_flags(struct net_device *dev, unsigned int flags)
  {
 -      int old_flags = dev->flags;
 +      unsigned int old_flags = dev->flags;
        int ret;
  
        ASSERT_RTNL();
@@@ -4729,10 -4809,10 +4736,10 @@@ void __dev_notify_flags(struct net_devi
  *    Change settings on device based on state flags. The flags are
   *    in the userspace exported format.
   */
 -int dev_change_flags(struct net_device *dev, unsigned flags)
 +int dev_change_flags(struct net_device *dev, unsigned int flags)
  {
 -      int ret, changes;
 -      int old_flags = dev->flags;
 +      int ret;
 +      unsigned int changes, old_flags = dev->flags;
  
        ret = __dev_change_flags(dev, flags);
        if (ret < 0)
@@@ -5289,8 -5369,7 +5296,8 @@@ static void rollback_registered(struct 
        list_del(&single);
  }
  
 -static u32 netdev_fix_features(struct net_device *dev, u32 features)
 +static netdev_features_t netdev_fix_features(struct net_device *dev,
 +      netdev_features_t features)
  {
        /* Fix illegal checksum combinations */
        if ((features & NETIF_F_HW_CSUM) &&
                features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
        }
  
 -      if ((features & NETIF_F_NO_CSUM) &&
 -          (features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
 -              netdev_warn(dev, "mixed no checksumming and other settings.\n");
 -              features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
 -      }
 -
        /* Fix illegal SG+CSUM combinations. */
        if ((features & NETIF_F_SG) &&
            !(features & NETIF_F_ALL_CSUM)) {
  
  int __netdev_update_features(struct net_device *dev)
  {
 -      u32 features;
 +      netdev_features_t features;
        int err = 0;
  
        ASSERT_RTNL();
        if (dev->features == features)
                return 0;
  
 -      netdev_dbg(dev, "Features changed: 0x%08x -> 0x%08x\n",
 -              dev->features, features);
 +      netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
 +              &dev->features, &features);
  
        if (dev->netdev_ops->ndo_set_features)
                err = dev->netdev_ops->ndo_set_features(dev, features);
  
        if (unlikely(err < 0)) {
                netdev_err(dev,
 -                      "set_features() failed (%d); wanted 0x%08x, left 0x%08x\n",
 -                      err, features, dev->features);
 +                      "set_features() failed (%d); wanted %pNF, left %pNF\n",
 +                      err, &features, &dev->features);
                return -1;
        }
  
@@@ -5470,9 -5555,6 +5477,9 @@@ static void netdev_init_one_queue(struc
        queue->xmit_lock_owner = -1;
        netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
        queue->dev = dev;
 +#ifdef CONFIG_BQL
 +      dql_init(&queue->dql, HZ);
 +#endif
  }
  
  static int netif_alloc_netdev_queues(struct net_device *dev)
@@@ -5558,12 -5640,11 +5565,12 @@@ int register_netdevice(struct net_devic
        dev->wanted_features = dev->features & dev->hw_features;
  
        /* Turn on no cache copy if HW is doing checksum */
 -      dev->hw_features |= NETIF_F_NOCACHE_COPY;
 -      if ((dev->features & NETIF_F_ALL_CSUM) &&
 -          !(dev->features & NETIF_F_NO_CSUM)) {
 -              dev->wanted_features |= NETIF_F_NOCACHE_COPY;
 -              dev->features |= NETIF_F_NOCACHE_COPY;
 +      if (!(dev->flags & IFF_LOOPBACK)) {
 +              dev->hw_features |= NETIF_F_NOCACHE_COPY;
 +              if (dev->features & NETIF_F_ALL_CSUM) {
 +                      dev->wanted_features |= NETIF_F_NOCACHE_COPY;
 +                      dev->features |= NETIF_F_NOCACHE_COPY;
 +              }
        }
  
        /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
@@@ -6299,8 -6380,7 +6306,8 @@@ static int dev_cpu_callback(struct noti
   *    @one to the master device with current feature set @all.  Will not
   *    enable anything that is off in @mask. Returns the new feature set.
   */
 -u32 netdev_increment_features(u32 all, u32 one, u32 mask)
 +netdev_features_t netdev_increment_features(netdev_features_t all,
 +      netdev_features_t one, netdev_features_t mask)
  {
        if (mask & NETIF_F_GEN_CSUM)
                mask |= NETIF_F_ALL_CSUM;
        all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
        all &= one | ~NETIF_F_ALL_FOR_ALL;
  
 -      /* If device needs checksumming, downgrade to it. */
 -      if (all & (NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM))
 -              all &= ~NETIF_F_NO_CSUM;
 -
        /* If one device supports hw checksumming, set for all. */
        if (all & NETIF_F_GEN_CSUM)
                all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
diff --combined net/ipv4/route.c
  #ifdef CONFIG_SYSCTL
  #include <linux/sysctl.h>
  #endif
 -#include <net/atmclip.h>
  #include <net/secure_seq.h>
  
  #define RT_FL_TOS(oldflp4) \
@@@ -130,6 -131,7 +130,7 @@@ static int ip_rt_mtu_expires __read_mos
  static int ip_rt_min_pmtu __read_mostly               = 512 + 20 + 20;
  static int ip_rt_min_advmss __read_mostly     = 256;
  static int rt_chain_length_max __read_mostly  = 20;
+ static int redirect_genid;
  
  /*
   *    Interface to generic destination cache.
@@@ -415,9 -417,13 +416,13 @@@ static int rt_cache_seq_show(struct seq
        else {
                struct rtable *r = v;
                struct neighbour *n;
-               int len;
+               int len, HHUptod;
  
+               rcu_read_lock();
                n = dst_get_neighbour(&r->dst);
+               HHUptod = (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0;
+               rcu_read_unlock();
                seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
                              "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
                        r->dst.dev ? r->dst.dev->name : "*",
                              dst_metric(&r->dst, RTAX_RTTVAR)),
                        r->rt_key_tos,
                        -1,
-                       (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0,
+                       HHUptod,
                        r->rt_spec_dst, &len);
  
                seq_printf(seq, "%*s\n", 127 - len, "");
@@@ -836,6 -842,7 +841,7 @@@ static void rt_cache_invalidate(struct 
  
        get_random_bytes(&shuffle, sizeof(shuffle));
        atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
+       redirect_genid++;
  }
  
  /*
@@@ -1012,18 -1019,23 +1018,18 @@@ static int slow_chain_length(const stru
  
  static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const void *daddr)
  {
 -      struct neigh_table *tbl = &arp_tbl;
        static const __be32 inaddr_any = 0;
        struct net_device *dev = dst->dev;
        const __be32 *pkey = daddr;
        struct neighbour *n;
  
 -#if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE)
 -      if (dev->type == ARPHRD_ATM)
 -              tbl = clip_tbl_hook;
 -#endif
        if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
                pkey = &inaddr_any;
  
 -      n = __ipv4_neigh_lookup(tbl, dev, *(__force u32 *)pkey);
 +      n = __ipv4_neigh_lookup(&arp_tbl, dev, *(__force u32 *)pkey);
        if (n)
                return n;
 -      return neigh_create(tbl, pkey, dev);
 +      return neigh_create(&arp_tbl, pkey, dev);
  }
  
  static int rt_bind_neighbour(struct rtable *rt)
@@@ -1385,8 -1397,10 +1391,10 @@@ void ip_rt_redirect(__be32 old_gw, __be
  
                                peer = rt->peer;
                                if (peer) {
-                                       if (peer->redirect_learned.a4 != new_gw) {
+                                       if (peer->redirect_learned.a4 != new_gw ||
+                                           peer->redirect_genid != redirect_genid) {
                                                peer->redirect_learned.a4 = new_gw;
+                                               peer->redirect_genid = redirect_genid;
                                                atomic_inc(&__rt_peer_genid);
                                        }
                                        check_peer_redir(&rt->dst, peer);
@@@ -1679,12 -1693,8 +1687,8 @@@ static void ip_rt_update_pmtu(struct ds
  }
  
  
- static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
+ static struct rtable *ipv4_validate_peer(struct rtable *rt)
  {
-       struct rtable *rt = (struct rtable *) dst;
-       if (rt_is_expired(rt))
-               return NULL;
        if (rt->rt_peer_genid != rt_peer_genid()) {
                struct inet_peer *peer;
  
  
                peer = rt->peer;
                if (peer) {
-                       check_peer_pmtu(dst, peer);
+                       check_peer_pmtu(&rt->dst, peer);
  
+                       if (peer->redirect_genid != redirect_genid)
+                               peer->redirect_learned.a4 = 0;
                        if (peer->redirect_learned.a4 &&
                            peer->redirect_learned.a4 != rt->rt_gateway) {
-                               if (check_peer_redir(dst, peer))
+                               if (check_peer_redir(&rt->dst, peer))
                                        return NULL;
                        }
                }
  
                rt->rt_peer_genid = rt_peer_genid();
        }
+       return rt;
+ }
+ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
+ {
+       struct rtable *rt = (struct rtable *) dst;
+       if (rt_is_expired(rt))
+               return NULL;
+       dst = (struct dst_entry *) ipv4_validate_peer(rt);
        return dst;
  }
  
@@@ -1851,6 -1873,8 +1867,8 @@@ static void rt_init_metrics(struct rtab
                dst_init_metrics(&rt->dst, peer->metrics, false);
  
                check_peer_pmtu(&rt->dst, peer);
+               if (peer->redirect_genid != redirect_genid)
+                       peer->redirect_learned.a4 = 0;
                if (peer->redirect_learned.a4 &&
                    peer->redirect_learned.a4 != rt->rt_gateway) {
                        rt->rt_gateway = peer->redirect_learned.a4;
@@@ -2356,6 -2380,9 +2374,9 @@@ int ip_route_input_common(struct sk_buf
                    rth->rt_mark == skb->mark &&
                    net_eq(dev_net(rth->dst.dev), net) &&
                    !rt_is_expired(rth)) {
+                       rth = ipv4_validate_peer(rth);
+                       if (!rth)
+                               continue;
                        if (noref) {
                                dst_use_noref(&rth->dst, jiffies);
                                skb_dst_set_noref(skb, &rth->dst);
@@@ -2731,6 -2758,9 +2752,9 @@@ struct rtable *__ip_route_output_key(st
                            (IPTOS_RT_MASK | RTO_ONLINK)) &&
                    net_eq(dev_net(rth->dst.dev), net) &&
                    !rt_is_expired(rth)) {
+                       rth = ipv4_validate_peer(rth);
+                       if (!rth)
+                               continue;
                        dst_use(&rth->dst, jiffies);
                        RT_CACHE_STAT_INC(out_hit);
                        rcu_read_unlock_bh();
diff --combined net/ipv4/udp.c
@@@ -1164,7 -1164,7 +1164,7 @@@ int udp_recvmsg(struct kiocb *iocb, str
        struct inet_sock *inet = inet_sk(sk);
        struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
        struct sk_buff *skb;
-       unsigned int ulen;
+       unsigned int ulen, copied;
        int peeked;
        int err;
        int is_udplite = IS_UDPLITE(sk);
@@@ -1186,9 -1186,10 +1186,10 @@@ try_again
                goto out;
  
        ulen = skb->len - sizeof(struct udphdr);
-       if (len > ulen)
-               len = ulen;
-       else if (len < ulen)
+       copied = len;
+       if (copied > ulen)
+               copied = ulen;
+       else if (copied < ulen)
                msg->msg_flags |= MSG_TRUNC;
  
        /*
         * coverage checksum (UDP-Lite), do it before the copy.
         */
  
-       if (len < ulen || UDP_SKB_CB(skb)->partial_cov) {
+       if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
                if (udp_lib_checksum_complete(skb))
                        goto csum_copy_err;
        }
  
        if (skb_csum_unnecessary(skb))
                err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
-                                             msg->msg_iov, len);
+                                             msg->msg_iov, copied);
        else {
                err = skb_copy_and_csum_datagram_iovec(skb,
                                                       sizeof(struct udphdr),
        if (inet->cmsg_flags)
                ip_cmsg_recv(msg, skb);
  
-       err = len;
+       err = copied;
        if (flags & MSG_TRUNC)
                err = ulen;
  
@@@ -1357,7 -1358,7 +1358,7 @@@ static int __udp_queue_rcv_skb(struct s
        if (inet_sk(sk)->inet_daddr)
                sock_rps_save_rxhash(sk, skb);
  
 -      rc = ip_queue_rcv_skb(sk, skb);
 +      rc = sock_queue_rcv_skb(sk, skb);
        if (rc < 0) {
                int is_udplite = IS_UDPLITE(sk);
  
@@@ -1473,7 -1474,6 +1474,7 @@@ int udp_queue_rcv_skb(struct sock *sk, 
  
        rc = 0;
  
 +      ipv4_pktinfo_prepare(skb);
        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk))
                rc = __udp_queue_rcv_skb(sk, skb);
@@@ -2247,8 -2247,7 +2248,8 @@@ int udp4_ufo_send_check(struct sk_buff 
        return 0;
  }
  
 -struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, u32 features)
 +struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
 +      netdev_features_t features)
  {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        unsigned int mss;
diff --combined net/ipv6/ipv6_sockglue.c
@@@ -435,7 -435,7 +435,7 @@@ sticky_done
                        goto e_inval;
  
                np->sticky_pktinfo.ipi6_ifindex = pkt.ipi6_ifindex;
 -              ipv6_addr_copy(&np->sticky_pktinfo.ipi6_addr, &pkt.ipi6_addr);
 +              np->sticky_pktinfo.ipi6_addr = pkt.ipi6_addr;
                retv = 0;
                break;
        }
@@@ -503,7 -503,7 +503,7 @@@ done
                        goto e_inval;
                if (val > 255 || val < -1)
                        goto e_inval;
-               np->mcast_hops = val;
+               np->mcast_hops = (val == -1 ? IPV6_DEFAULT_MCASTHOPS : val);
                retv = 0;
                break;
  
@@@ -980,7 -980,8 +980,7 @@@ static int do_ipv6_getsockopt(struct so
                                struct in6_pktinfo src_info;
                                src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif :
                                        np->sticky_pktinfo.ipi6_ifindex;
 -                              np->mcast_oif? ipv6_addr_copy(&src_info.ipi6_addr, &np->daddr) :
 -                                      ipv6_addr_copy(&src_info.ipi6_addr, &(np->sticky_pktinfo.ipi6_addr));
 +                              src_info.ipi6_addr = np->mcast_oif ? np->daddr : np->sticky_pktinfo.ipi6_addr;
                                put_cmsg(&msg, SOL_IPV6, IPV6_PKTINFO, sizeof(src_info), &src_info);
                        }
                        if (np->rxopt.bits.rxhlim) {
                                struct in6_pktinfo src_info;
                                src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif :
                                        np->sticky_pktinfo.ipi6_ifindex;
 -                              np->mcast_oif? ipv6_addr_copy(&src_info.ipi6_addr, &np->daddr) :
 -                                      ipv6_addr_copy(&src_info.ipi6_addr, &(np->sticky_pktinfo.ipi6_addr));
 +                              src_info.ipi6_addr = np->mcast_oif ? np->daddr : np->sticky_pktinfo.ipi6_addr;
                                put_cmsg(&msg, SOL_IPV6, IPV6_2292PKTINFO, sizeof(src_info), &src_info);
                        }
                        if (np->rxopt.bits.rxohlim) {
diff --combined net/ipv6/udp.c
@@@ -340,7 -340,7 +340,7 @@@ int udpv6_recvmsg(struct kiocb *iocb, s
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct inet_sock *inet = inet_sk(sk);
        struct sk_buff *skb;
-       unsigned int ulen;
+       unsigned int ulen, copied;
        int peeked;
        int err;
        int is_udplite = IS_UDPLITE(sk);
@@@ -363,9 -363,10 +363,10 @@@ try_again
                goto out;
  
        ulen = skb->len - sizeof(struct udphdr);
-       if (len > ulen)
-               len = ulen;
-       else if (len < ulen)
+       copied = len;
+       if (copied > ulen)
+               copied = ulen;
+       else if (copied < ulen)
                msg->msg_flags |= MSG_TRUNC;
  
        is_udp4 = (skb->protocol == htons(ETH_P_IP));
         * coverage checksum (UDP-Lite), do it before the copy.
         */
  
-       if (len < ulen || UDP_SKB_CB(skb)->partial_cov) {
+       if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
                if (udp_lib_checksum_complete(skb))
                        goto csum_copy_err;
        }
  
        if (skb_csum_unnecessary(skb))
                err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
-                                             msg->msg_iov,len);
+                                             msg->msg_iov, copied);
        else {
                err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
                if (err == -EINVAL)
                        ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
                                               &sin6->sin6_addr);
                else {
 -                      ipv6_addr_copy(&sin6->sin6_addr,
 -                                     &ipv6_hdr(skb)->saddr);
 +                      sin6->sin6_addr = ipv6_hdr(skb)->saddr;
                        if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
                                sin6->sin6_scope_id = IP6CB(skb)->iif;
                }
                        datagram_recv_ctl(sk, msg, skb);
        }
  
-       err = len;
+       err = copied;
        if (flags & MSG_TRUNC)
                err = ulen;
  
@@@ -537,9 -539,7 +538,9 @@@ int udpv6_queue_rcv_skb(struct sock * s
                        goto drop;
        }
  
 -      if ((rc = ip_queue_rcv_skb(sk, skb)) < 0) {
 +      skb_dst_drop(skb);
 +      rc = sock_queue_rcv_skb(sk, skb);
 +      if (rc < 0) {
                /* Note that an ENOMEM error is charged twice */
                if (rc == -ENOMEM)
                        UDP6_INC_STATS_BH(sock_net(sk),
@@@ -1114,11 -1114,11 +1115,11 @@@ do_udp_sendmsg
  
        fl6.flowi6_proto = sk->sk_protocol;
        if (!ipv6_addr_any(daddr))
 -              ipv6_addr_copy(&fl6.daddr, daddr);
 +              fl6.daddr = *daddr;
        else
                fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
        if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
 -              ipv6_addr_copy(&fl6.saddr, &np->saddr);
 +              fl6.saddr = np->saddr;
        fl6.fl6_sport = inet->inet_sport;
  
        final_p = fl6_update_dst(&fl6, opt, &final);
@@@ -1299,8 -1299,7 +1300,8 @@@ static int udp6_ufo_send_check(struct s
        return 0;
  }
  
 -static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features)
 +static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
 +      netdev_features_t features)
  {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        unsigned int mss;
diff --combined net/mac80211/agg-tx.c
@@@ -78,8 -78,7 +78,8 @@@ static void ieee80211_send_addba_reques
        memcpy(mgmt->da, da, ETH_ALEN);
        memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
        if (sdata->vif.type == NL80211_IFTYPE_AP ||
 -          sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
 +          sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
 +          sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
                memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
        else if (sdata->vif.type == NL80211_IFTYPE_STATION)
                memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
@@@ -162,6 -161,12 +162,12 @@@ int ___ieee80211_stop_tx_ba_session(str
                return -ENOENT;
        }
  
+       /* if we're already stopping, ignore any new requests to stop */
+       if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
+               spin_unlock_bh(&sta->lock);
+               return -EALREADY;
+       }
+
        if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
                /* not even started yet! */
                ieee80211_assign_tid_tx(sta, tid, NULL);
                return 0;
        }
  
+       set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
        spin_unlock_bh(&sta->lock);
  
  #ifdef CONFIG_MAC80211_HT_DEBUG
               sta->sta.addr, tid);
  #endif /* CONFIG_MAC80211_HT_DEBUG */
  
-       set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
        del_timer_sync(&tid_tx->addba_resp_timer);
  
        /*
         */
        clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
  
+       /*
+        * There might be a few packets being processed right now (on
+        * another CPU) that have already gotten past the aggregation
+        * check when it was still OPERATIONAL and consequently have
+        * IEEE80211_TX_CTL_AMPDU set. In that case, this code might
+        * call into the driver at the same time or even before the
+        * TX paths calls into it, which could confuse the driver.
+        * TX path calls into it, which could confuse the driver.
+        * Wait for all currently running TX paths to finish before
+        * telling the driver. New packets will not go through since
+        * the aggregation session is no longer OPERATIONAL.
+        */
+       synchronize_net();
        tid_tx->stop_initiator = initiator;
        tid_tx->tx_stop = tx;
  
@@@ -373,8 -392,13 +393,8 @@@ int ieee80211_start_tx_ba_session(struc
               pubsta->addr, tid);
  #endif /* CONFIG_MAC80211_HT_DEBUG */
  
 -      /*
 -       * The aggregation code is not prepared to handle
 -       * anything but STA/AP due to the BSSID handling.
 -       * IBSS could work in the code but isn't supported
 -       * by drivers or the standard.
 -       */
        if (sdata->vif.type != NL80211_IFTYPE_STATION &&
 +          sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
            sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
            sdata->vif.type != NL80211_IFTYPE_AP)
                return -EINVAL;
@@@ -753,11 -777,27 +773,27 @@@ void ieee80211_process_addba_resp(struc
                goto out;
        }
  
-       del_timer(&tid_tx->addba_resp_timer);
+       del_timer_sync(&tid_tx->addba_resp_timer);
  
  #ifdef CONFIG_MAC80211_HT_DEBUG
        printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid);
  #endif
+       /*
+        * addba_resp_timer may have fired before we got here, and
+        * caused WANT_STOP to be set. If the stop then was already
+        * processed further, STOPPING might be set.
+        */
+       if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) ||
+           test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
+ #ifdef CONFIG_MAC80211_HT_DEBUG
+               printk(KERN_DEBUG
+                      "got addBA resp for tid %d but we already gave up\n",
+                      tid);
+ #endif
+               goto out;
+       }
+
        /*
         * IEEE 802.11-2007 7.3.1.14:
         * In an ADDBA Response frame, when the Status Code field
@@@ -111,8 -111,6 +111,6 @@@ int netlbl_cfg_unlbl_map_add(const cha
        struct netlbl_domaddr_map *addrmap = NULL;
        struct netlbl_domaddr4_map *map4 = NULL;
        struct netlbl_domaddr6_map *map6 = NULL;
-       const struct in_addr *addr4, *mask4;
-       const struct in6_addr *addr6, *mask6;
  
        entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
        if (entry == NULL)
                INIT_LIST_HEAD(&addrmap->list6);
  
                switch (family) {
-               case AF_INET:
-                       addr4 = addr;
-                       mask4 = mask;
+               case AF_INET: {
+                       const struct in_addr *addr4 = addr;
+                       const struct in_addr *mask4 = mask;
                        map4 = kzalloc(sizeof(*map4), GFP_ATOMIC);
                        if (map4 == NULL)
                                goto cfg_unlbl_map_add_failure;
                        if (ret_val != 0)
                                goto cfg_unlbl_map_add_failure;
                        break;
-               case AF_INET6:
-                       addr6 = addr;
-                       mask6 = mask;
+                       }
+ #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+               case AF_INET6: {
+                       const struct in6_addr *addr6 = addr;
+                       const struct in6_addr *mask6 = mask;
                        map6 = kzalloc(sizeof(*map6), GFP_ATOMIC);
                        if (map6 == NULL)
                                goto cfg_unlbl_map_add_failure;
                        map6->type = NETLBL_NLTYPE_UNLABELED;
 -                      ipv6_addr_copy(&map6->list.addr, addr6);
 +                      map6->list.addr = *addr6;
                        map6->list.addr.s6_addr32[0] &= mask6->s6_addr32[0];
                        map6->list.addr.s6_addr32[1] &= mask6->s6_addr32[1];
                        map6->list.addr.s6_addr32[2] &= mask6->s6_addr32[2];
                        map6->list.addr.s6_addr32[3] &= mask6->s6_addr32[3];
 -                      ipv6_addr_copy(&map6->list.mask, mask6);
 +                      map6->list.mask = *mask6;
                        map6->list.valid = 1;
                        ret_val = netlbl_af6list_add(&map6->list,
                                                     &addrmap->list6);
                        if (ret_val != 0)
                                goto cfg_unlbl_map_add_failure;
                        break;
+                       }
+ #endif /* IPv6 */
                default:
                        goto cfg_unlbl_map_add_failure;
                        break;
@@@ -225,9 -227,11 +227,11 @@@ int netlbl_cfg_unlbl_static_add(struct 
        case AF_INET:
                addr_len = sizeof(struct in_addr);
                break;
+ #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        case AF_INET6:
                addr_len = sizeof(struct in6_addr);
                break;
+ #endif /* IPv6 */
        default:
                return -EPFNOSUPPORT;
        }
@@@ -266,9 -270,11 +270,11 @@@ int netlbl_cfg_unlbl_static_del(struct 
        case AF_INET:
                addr_len = sizeof(struct in_addr);
                break;
+ #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        case AF_INET6:
                addr_len = sizeof(struct in6_addr);
                break;
+ #endif /* IPv6 */
        default:
                return -EPFNOSUPPORT;
        }
diff --combined net/sched/sch_teql.c
@@@ -225,11 -225,11 +225,11 @@@ static int teql_qdisc_init(struct Qdis
  
  
  static int
- __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev)
+ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res,
+              struct net_device *dev, struct netdev_queue *txq,
+              struct neighbour *mn)
  {
-       struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0);
-       struct teql_sched_data *q = qdisc_priv(dev_queue->qdisc);
-       struct neighbour *mn = dst_get_neighbour(skb_dst(skb));
+       struct teql_sched_data *q = qdisc_priv(txq->qdisc);
        struct neighbour *n = q->ncache;
  
        if (mn->tbl == NULL)
  }
  
  static inline int teql_resolve(struct sk_buff *skb,
-                              struct sk_buff *skb_res, struct net_device *dev)
+                              struct sk_buff *skb_res,
+                              struct net_device *dev,
+                              struct netdev_queue *txq)
  {
-       struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+       struct dst_entry *dst = skb_dst(skb);
+       struct neighbour *mn;
+       int res;
        if (txq->qdisc == &noop_qdisc)
                return -ENODEV;
  
-       if (dev->header_ops == NULL ||
-           skb_dst(skb) == NULL ||
-           dst_get_neighbour(skb_dst(skb)) == NULL)
+       if (!dev->header_ops || !dst)
                return 0;
-       return __teql_resolve(skb, skb_res, dev);
+       rcu_read_lock();
+       mn = dst_get_neighbour(dst);
+       res = mn ? __teql_resolve(skb, skb_res, dev, txq, mn) : 0;
+       rcu_read_unlock();
+       return res;
  }
  
  static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
@@@ -301,18 -310,18 +310,18 @@@ restart
  
                if (slave_txq->qdisc_sleeping != q)
                        continue;
 -              if (__netif_subqueue_stopped(slave, subq) ||
 +              if (netif_xmit_stopped(netdev_get_tx_queue(slave, subq)) ||
                    !netif_running(slave)) {
                        busy = 1;
                        continue;
                }
  
-               switch (teql_resolve(skb, skb_res, slave)) {
+               switch (teql_resolve(skb, skb_res, slave, slave_txq)) {
                case 0:
                        if (__netif_tx_trylock(slave_txq)) {
                                unsigned int length = qdisc_pkt_len(skb);
  
 -                              if (!netif_tx_queue_frozen_or_stopped(slave_txq) &&
 +                              if (!netif_xmit_frozen_or_stopped(slave_txq) &&
                                    slave_ops->ndo_start_xmit(skb, slave) == NETDEV_TX_OK) {
                                        txq_trans_update(slave_txq);
                                        __netif_tx_unlock(slave_txq);
                                }
                                __netif_tx_unlock(slave_txq);
                        }
 -                      if (netif_queue_stopped(dev))
 +                      if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)))
                                busy = 1;
                        break;
                case 1:
diff --combined net/wireless/nl80211.c
@@@ -89,8 -89,8 +89,8 @@@ static const struct nla_policy nl80211_
        [NL80211_ATTR_IFINDEX] = { .type = NLA_U32 },
        [NL80211_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 },
  
-       [NL80211_ATTR_MAC] = { .type = NLA_BINARY, .len = ETH_ALEN },
-       [NL80211_ATTR_PREV_BSSID] = { .type = NLA_BINARY, .len = ETH_ALEN },
+       [NL80211_ATTR_MAC] = { .len = ETH_ALEN },
+       [NL80211_ATTR_PREV_BSSID] = { .len = ETH_ALEN },
  
        [NL80211_ATTR_KEY] = { .type = NLA_NESTED, },
        [NL80211_ATTR_KEY_DATA] = { .type = NLA_BINARY,
@@@ -98,7 -98,7 +98,7 @@@
        [NL80211_ATTR_KEY_IDX] = { .type = NLA_U8 },
        [NL80211_ATTR_KEY_CIPHER] = { .type = NLA_U32 },
        [NL80211_ATTR_KEY_DEFAULT] = { .type = NLA_FLAG },
 -      [NL80211_ATTR_KEY_SEQ] = { .type = NLA_BINARY, .len = 8 },
 +      [NL80211_ATTR_KEY_SEQ] = { .type = NLA_BINARY, .len = 16 },
        [NL80211_ATTR_KEY_TYPE] = { .type = NLA_U32 },
  
        [NL80211_ATTR_BEACON_INTERVAL] = { .type = NLA_U32 },
        [NL80211_ATTR_TDLS_OPERATION] = { .type = NLA_U8 },
        [NL80211_ATTR_TDLS_SUPPORT] = { .type = NLA_FLAG },
        [NL80211_ATTR_TDLS_EXTERNAL_SETUP] = { .type = NLA_FLAG },
 +      [NL80211_ATTR_DONT_WAIT_FOR_ACK] = { .type = NLA_FLAG },
 +      [NL80211_ATTR_PROBE_RESP] = { .type = NLA_BINARY,
 +                                    .len = IEEE80211_MAX_DATA_LEN },
 +      [NL80211_ATTR_DFS_REGION] = { .type = NLA_U8 },
 +      [NL80211_ATTR_DISABLE_HT] = { .type = NLA_FLAG },
 +      [NL80211_ATTR_HT_CAPABILITY_MASK] = {
 +              .len = NL80211_HT_CAPABILITY_LEN
 +      },
  };
  
  /* policy for the key attributes */
@@@ -211,7 -203,7 +211,7 @@@ static const struct nla_policy nl80211_
        [NL80211_KEY_DATA] = { .type = NLA_BINARY, .len = WLAN_MAX_KEY_LEN },
        [NL80211_KEY_IDX] = { .type = NLA_U8 },
        [NL80211_KEY_CIPHER] = { .type = NLA_U32 },
 -      [NL80211_KEY_SEQ] = { .type = NLA_BINARY, .len = 8 },
 +      [NL80211_KEY_SEQ] = { .type = NLA_BINARY, .len = 16 },
        [NL80211_KEY_DEFAULT] = { .type = NLA_FLAG },
        [NL80211_KEY_DEFAULT_MGMT] = { .type = NLA_FLAG },
        [NL80211_KEY_TYPE] = { .type = NLA_U32 },
@@@ -766,10 -758,6 +766,10 @@@ static int nl80211_send_wiphy(struct sk
        NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX,
                    dev->wiphy.available_antennas_rx);
  
 +      if (dev->wiphy.flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD)
 +              NLA_PUT_U32(msg, NL80211_ATTR_PROBE_RESP_OFFLOAD,
 +                          dev->wiphy.probe_resp_offload);
 +
        if ((dev->wiphy.available_antennas_tx ||
             dev->wiphy.available_antennas_rx) && dev->ops->get_antenna) {
                u32 tx_ant = 0, rx_ant = 0;
        CMD(set_pmksa, SET_PMKSA);
        CMD(del_pmksa, DEL_PMKSA);
        CMD(flush_pmksa, FLUSH_PMKSA);
 -      CMD(remain_on_channel, REMAIN_ON_CHANNEL);
 +      if (dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL)
 +              CMD(remain_on_channel, REMAIN_ON_CHANNEL);
        CMD(set_bitrate_mask, SET_TX_BITRATE_MASK);
        CMD(mgmt_tx, FRAME);
        CMD(mgmt_tx_cancel_wait, FRAME_WAIT_CANCEL);
        }
        if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
                CMD(sched_scan_start, START_SCHED_SCAN);
 +      CMD(probe_client, PROBE_CLIENT);
 +      if (dev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS) {
 +              i++;
 +              NLA_PUT_U32(msg, i, NL80211_CMD_REGISTER_BEACONS);
 +      }
 +
 +#ifdef CONFIG_NL80211_TESTMODE
 +      CMD(testmode_cmd, TESTMODE);
 +#endif
  
  #undef CMD
  
  
        nla_nest_end(msg, nl_cmds);
  
 -      if (dev->ops->remain_on_channel)
 +      if (dev->ops->remain_on_channel &&
 +          dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL)
                NLA_PUT_U32(msg, NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION,
                            dev->wiphy.max_remain_on_channel_duration);
  
 -      if (dev->ops->mgmt_tx_cancel_wait)
 +      if (dev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX)
                NLA_PUT_FLAG(msg, NL80211_ATTR_OFFCHANNEL_TX_OK);
  
        if (mgmt_stypes) {
        if (nl80211_put_iface_combinations(&dev->wiphy, msg))
                goto nla_put_failure;
  
 +      if (dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME)
 +              NLA_PUT_U32(msg, NL80211_ATTR_DEVICE_AP_SME,
 +                          dev->wiphy.ap_sme_capa);
 +
 +      NLA_PUT_U32(msg, NL80211_ATTR_FEATURE_FLAGS, dev->wiphy.features);
 +
 +      if (dev->wiphy.ht_capa_mod_mask)
 +              NLA_PUT(msg, NL80211_ATTR_HT_CAPABILITY_MASK,
 +                      sizeof(*dev->wiphy.ht_capa_mod_mask),
 +                      dev->wiphy.ht_capa_mod_mask);
 +
        return genlmsg_end(msg, hdr);
  
   nla_put_failure:
@@@ -2189,13 -2155,6 +2189,13 @@@ static int nl80211_addset_beacon(struc
                        nla_len(info->attrs[NL80211_ATTR_IE_ASSOC_RESP]);
        }
  
 +      if (info->attrs[NL80211_ATTR_PROBE_RESP]) {
 +              params.probe_resp =
 +                      nla_data(info->attrs[NL80211_ATTR_PROBE_RESP]);
 +              params.probe_resp_len =
 +                      nla_len(info->attrs[NL80211_ATTR_PROBE_RESP]);
 +      }
 +
        err = call(&rdev->wiphy, dev, &params);
        if (!err && params.interval)
                wdev->beacon_interval = params.interval;
@@@ -2494,34 -2453,26 +2494,34 @@@ static int nl80211_get_station(struct s
  /*
   * Get vlan interface making sure it is running and on the right wiphy.
   */
 -static int get_vlan(struct genl_info *info,
 -                  struct cfg80211_registered_device *rdev,
 -                  struct net_device **vlan)
 +static struct net_device *get_vlan(struct genl_info *info,
 +                                 struct cfg80211_registered_device *rdev)
  {
        struct nlattr *vlanattr = info->attrs[NL80211_ATTR_STA_VLAN];
 -      *vlan = NULL;
 -
 -      if (vlanattr) {
 -              *vlan = dev_get_by_index(genl_info_net(info),
 -                                       nla_get_u32(vlanattr));
 -              if (!*vlan)
 -                      return -ENODEV;
 -              if (!(*vlan)->ieee80211_ptr)
 -                      return -EINVAL;
 -              if ((*vlan)->ieee80211_ptr->wiphy != &rdev->wiphy)
 -                      return -EINVAL;
 -              if (!netif_running(*vlan))
 -                      return -ENETDOWN;
 +      struct net_device *v;
 +      int ret;
 +
 +      if (!vlanattr)
 +              return NULL;
 +
 +      v = dev_get_by_index(genl_info_net(info), nla_get_u32(vlanattr));
 +      if (!v)
 +              return ERR_PTR(-ENODEV);
 +
 +      if (!v->ieee80211_ptr || v->ieee80211_ptr->wiphy != &rdev->wiphy) {
 +              ret = -EINVAL;
 +              goto error;
        }
 -      return 0;
 +
 +      if (!netif_running(v)) {
 +              ret = -ENETDOWN;
 +              goto error;
 +      }
 +
 +      return v;
 + error:
 +      dev_put(v);
 +      return ERR_PTR(ret);
  }
  
  static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
                params.plink_state =
                    nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_STATE]);
  
 -      err = get_vlan(info, rdev, &params.vlan);
 -      if (err)
 -              goto out;
 +      params.vlan = get_vlan(info, rdev);
 +      if (IS_ERR(params.vlan))
 +              return PTR_ERR(params.vlan);
  
        /* validate settings */
        err = 0;
@@@ -2741,9 -2692,9 +2741,9 @@@ static int nl80211_new_station(struct s
              (rdev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP)))
                return -EINVAL;
  
 -      err = get_vlan(info, rdev, &params.vlan);
 -      if (err)
 -              goto out;
 +      params.vlan = get_vlan(info, rdev);
 +      if (IS_ERR(params.vlan))
 +              return PTR_ERR(params.vlan);
  
        /* validate settings */
        err = 0;
@@@ -3406,9 -3357,6 +3406,9 @@@ static int nl80211_get_reg(struct sk_bu
  
        NLA_PUT_STRING(msg, NL80211_ATTR_REG_ALPHA2,
                cfg80211_regdomain->alpha2);
 +      if (cfg80211_regdomain->dfs_region)
 +              NLA_PUT_U8(msg, NL80211_ATTR_DFS_REGION,
 +                         cfg80211_regdomain->dfs_region);
  
        nl_reg_rules = nla_nest_start(msg, NL80211_ATTR_REG_RULES);
        if (!nl_reg_rules)
@@@ -3467,7 -3415,6 +3467,7 @@@ static int nl80211_set_reg(struct sk_bu
        char *alpha2 = NULL;
        int rem_reg_rules = 0, r = 0;
        u32 num_rules = 0, rule_idx = 0, size_of_regd;
 +      u8 dfs_region = 0;
        struct ieee80211_regdomain *rd = NULL;
  
        if (!info->attrs[NL80211_ATTR_REG_ALPHA2])
  
        alpha2 = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]);
  
 +      if (info->attrs[NL80211_ATTR_DFS_REGION])
 +              dfs_region = nla_get_u8(info->attrs[NL80211_ATTR_DFS_REGION]);
 +
        nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES],
                        rem_reg_rules) {
                num_rules++;
        rd->alpha2[0] = alpha2[0];
        rd->alpha2[1] = alpha2[1];
  
 +      /*
 +       * Disable DFS master mode if the DFS region is
 +       * not supported or known by this kernel.
 +       */
 +      if (reg_supported_dfs_region(dfs_region))
 +              rd->dfs_region = dfs_region;
 +
        nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES],
                        rem_reg_rules) {
                nla_parse(tb, NL80211_REG_RULE_ATTR_MAX,
@@@ -4422,9 -4359,6 +4422,9 @@@ static int nl80211_associate(struct sk_
        const u8 *bssid, *ssid, *ie = NULL, *prev_bssid = NULL;
        int err, ssid_len, ie_len = 0;
        bool use_mfp = false;
 +      u32 flags = 0;
 +      struct ieee80211_ht_cap *ht_capa = NULL;
 +      struct ieee80211_ht_cap *ht_capa_mask = NULL;
  
        if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
                return -EINVAL;
        if (info->attrs[NL80211_ATTR_PREV_BSSID])
                prev_bssid = nla_data(info->attrs[NL80211_ATTR_PREV_BSSID]);
  
 +      if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_HT]))
 +              flags |= ASSOC_REQ_DISABLE_HT;
 +
 +      if (info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK])
 +              ht_capa_mask =
 +                      nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]);
 +
 +      if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) {
 +              if (!ht_capa_mask)
 +                      return -EINVAL;
 +              ht_capa = nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);
 +      }
 +
        err = nl80211_crypto_settings(rdev, info, &crypto, 1);
        if (!err)
                err = cfg80211_mlme_assoc(rdev, dev, chan, bssid, prev_bssid,
                                          ssid, ssid_len, ie, ie_len, use_mfp,
 -                                        &crypto);
 +                                        &crypto, flags, ht_capa,
 +                                        ht_capa_mask);
  
        return err;
  }
@@@ -4976,22 -4896,6 +4976,22 @@@ static int nl80211_connect(struct sk_bu
                        return PTR_ERR(connkeys);
        }
  
 +      if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_HT]))
 +              connect.flags |= ASSOC_REQ_DISABLE_HT;
 +
 +      if (info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK])
 +              memcpy(&connect.ht_capa_mask,
 +                     nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]),
 +                     sizeof(connect.ht_capa_mask));
 +
 +      if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) {
 +              if (!info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK])
 +                      return -EINVAL;
 +              memcpy(&connect.ht_capa,
 +                     nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]),
 +                     sizeof(connect.ht_capa));
 +      }
 +
        err = cfg80211_connect(rdev, dev, &connect, connkeys);
        if (err)
                kfree(connkeys);
@@@ -5179,8 -5083,7 +5179,8 @@@ static int nl80211_remain_on_channel(st
            duration > rdev->wiphy.max_remain_on_channel_duration)
                return -EINVAL;
  
 -      if (!rdev->ops->remain_on_channel)
 +      if (!rdev->ops->remain_on_channel ||
 +          !(rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL))
                return -EOPNOTSUPP;
  
        if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
@@@ -5368,13 -5271,12 +5368,13 @@@ static int nl80211_tx_mgmt(struct sk_bu
        bool channel_type_valid = false;
        u32 freq;
        int err;
 -      void *hdr;
 +      void *hdr = NULL;
        u64 cookie;
 -      struct sk_buff *msg;
 +      struct sk_buff *msg = NULL;
        unsigned int wait = 0;
 -      bool offchan;
 -      bool no_cck;
 +      bool offchan, no_cck, dont_wait_for_ack;
 +
 +      dont_wait_for_ack = info->attrs[NL80211_ATTR_DONT_WAIT_FOR_ACK];
  
        if (!info->attrs[NL80211_ATTR_FRAME] ||
            !info->attrs[NL80211_ATTR_WIPHY_FREQ])
                return -EOPNOTSUPP;
  
        if (info->attrs[NL80211_ATTR_DURATION]) {
 -              if (!rdev->ops->mgmt_tx_cancel_wait)
 +              if (!(rdev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX))
                        return -EINVAL;
                wait = nla_get_u32(info->attrs[NL80211_ATTR_DURATION]);
        }
  
        offchan = info->attrs[NL80211_ATTR_OFFCHANNEL_TX_OK];
  
 +      if (offchan && !(rdev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX))
 +              return -EINVAL;
 +
        no_cck = nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]);
  
        freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
        if (chan == NULL)
                return -EINVAL;
  
 -      msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 -      if (!msg)
 -              return -ENOMEM;
 +      if (!dont_wait_for_ack) {
 +              msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 +              if (!msg)
 +                      return -ENOMEM;
  
 -      hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
 -                           NL80211_CMD_FRAME);
 +              hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
 +                                   NL80211_CMD_FRAME);
  
 -      if (IS_ERR(hdr)) {
 -              err = PTR_ERR(hdr);
 -              goto free_msg;
 +              if (IS_ERR(hdr)) {
 +                      err = PTR_ERR(hdr);
 +                      goto free_msg;
 +              }
        }
 +
        err = cfg80211_mlme_mgmt_tx(rdev, dev, chan, offchan, channel_type,
                                    channel_type_valid, wait,
                                    nla_data(info->attrs[NL80211_ATTR_FRAME]),
                                    nla_len(info->attrs[NL80211_ATTR_FRAME]),
 -                                  no_cck, &cookie);
 +                                  no_cck, dont_wait_for_ack, &cookie);
        if (err)
                goto free_msg;
  
 -      NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
 +      if (msg) {
 +              NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
  
 -      genlmsg_end(msg, hdr);
 -      return genlmsg_reply(msg, info);
 +              genlmsg_end(msg, hdr);
 +              return genlmsg_reply(msg, info);
 +      }
 +
 +      return 0;
  
   nla_put_failure:
        err = -ENOBUFS;
@@@ -5940,91 -5832,6 +5940,91 @@@ static int nl80211_set_rekey_data(struc
        return err;
  }
  
 +static int nl80211_register_unexpected_frame(struct sk_buff *skb,
 +                                           struct genl_info *info)
 +{
 +      struct net_device *dev = info->user_ptr[1];
 +      struct wireless_dev *wdev = dev->ieee80211_ptr;
 +
 +      if (wdev->iftype != NL80211_IFTYPE_AP &&
 +          wdev->iftype != NL80211_IFTYPE_P2P_GO)
 +              return -EINVAL;
 +
 +      if (wdev->ap_unexpected_nlpid)
 +              return -EBUSY;
 +
 +      wdev->ap_unexpected_nlpid = info->snd_pid;
 +      return 0;
 +}
 +
 +static int nl80211_probe_client(struct sk_buff *skb,
 +                              struct genl_info *info)
 +{
 +      struct cfg80211_registered_device *rdev = info->user_ptr[0];
 +      struct net_device *dev = info->user_ptr[1];
 +      struct wireless_dev *wdev = dev->ieee80211_ptr;
 +      struct sk_buff *msg;
 +      void *hdr;
 +      const u8 *addr;
 +      u64 cookie;
 +      int err;
 +
 +      if (wdev->iftype != NL80211_IFTYPE_AP &&
 +          wdev->iftype != NL80211_IFTYPE_P2P_GO)
 +              return -EOPNOTSUPP;
 +
 +      if (!info->attrs[NL80211_ATTR_MAC])
 +              return -EINVAL;
 +
 +      if (!rdev->ops->probe_client)
 +              return -EOPNOTSUPP;
 +
 +      msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 +      if (!msg)
 +              return -ENOMEM;
 +
 +      hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
 +                           NL80211_CMD_PROBE_CLIENT);
 +
 +      if (IS_ERR(hdr)) {
 +              err = PTR_ERR(hdr);
 +              goto free_msg;
 +      }
 +
 +      addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
 +
 +      err = rdev->ops->probe_client(&rdev->wiphy, dev, addr, &cookie);
 +      if (err)
 +              goto free_msg;
 +
 +      NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
 +
 +      genlmsg_end(msg, hdr);
 +
 +      return genlmsg_reply(msg, info);
 +
 + nla_put_failure:
 +      err = -ENOBUFS;
 + free_msg:
 +      nlmsg_free(msg);
 +      return err;
 +}
 +
 +static int nl80211_register_beacons(struct sk_buff *skb, struct genl_info *info)
 +{
 +      struct cfg80211_registered_device *rdev = info->user_ptr[0];
 +
 +      if (!(rdev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS))
 +              return -EOPNOTSUPP;
 +
 +      if (rdev->ap_beacons_nlpid)
 +              return -EBUSY;
 +
 +      rdev->ap_beacons_nlpid = info->snd_pid;
 +
 +      return 0;
 +}
 +
  #define NL80211_FLAG_NEED_WIPHY               0x01
  #define NL80211_FLAG_NEED_NETDEV      0x02
  #define NL80211_FLAG_NEED_RTNL                0x04
@@@ -6580,30 -6387,6 +6580,30 @@@ static struct genl_ops nl80211_ops[] = 
                .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
                                  NL80211_FLAG_NEED_RTNL,
        },
 +      {
 +              .cmd = NL80211_CMD_UNEXPECTED_FRAME,
 +              .doit = nl80211_register_unexpected_frame,
 +              .policy = nl80211_policy,
 +              .flags = GENL_ADMIN_PERM,
 +              .internal_flags = NL80211_FLAG_NEED_NETDEV |
 +                                NL80211_FLAG_NEED_RTNL,
 +      },
 +      {
 +              .cmd = NL80211_CMD_PROBE_CLIENT,
 +              .doit = nl80211_probe_client,
 +              .policy = nl80211_policy,
 +              .flags = GENL_ADMIN_PERM,
 +              .internal_flags = NL80211_FLAG_NEED_NETDEV |
 +                                NL80211_FLAG_NEED_RTNL,
 +      },
 +      {
 +              .cmd = NL80211_CMD_REGISTER_BEACONS,
 +              .doit = nl80211_register_beacons,
 +              .policy = nl80211_policy,
 +              .flags = GENL_ADMIN_PERM,
 +              .internal_flags = NL80211_FLAG_NEED_WIPHY |
 +                                NL80211_FLAG_NEED_RTNL,
 +      },
  };
  
  static struct genl_multicast_group nl80211_mlme_mcgrp = {
@@@ -6856,7 -6639,10 +6856,7 @@@ void nl80211_send_reg_change_event(stru
        if (wiphy_idx_valid(request->wiphy_idx))
                NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, request->wiphy_idx);
  
 -      if (genlmsg_end(msg, hdr) < 0) {
 -              nlmsg_free(msg);
 -              return;
 -      }
 +      genlmsg_end(msg, hdr);
  
        rcu_read_lock();
        genlmsg_multicast_allns(msg, 0, nl80211_regulatory_mcgrp.id,
@@@ -6892,7 -6678,10 +6892,7 @@@ static void nl80211_send_mlme_event(str
        NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
        NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf);
  
 -      if (genlmsg_end(msg, hdr) < 0) {
 -              nlmsg_free(msg);
 -              return;
 -      }
 +      genlmsg_end(msg, hdr);
  
        genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                                nl80211_mlme_mcgrp.id, gfp);
@@@ -6973,7 -6762,10 +6973,7 @@@ static void nl80211_send_mlme_timeout(s
        NLA_PUT_FLAG(msg, NL80211_ATTR_TIMED_OUT);
        NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
  
 -      if (genlmsg_end(msg, hdr) < 0) {
 -              nlmsg_free(msg);
 -              return;
 -      }
 +      genlmsg_end(msg, hdr);
  
        genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                                nl80211_mlme_mcgrp.id, gfp);
@@@ -7029,7 -6821,10 +7029,7 @@@ void nl80211_send_connect_result(struc
        if (resp_ie)
                NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie);
  
 -      if (genlmsg_end(msg, hdr) < 0) {
 -              nlmsg_free(msg);
 -              return;
 -      }
 +      genlmsg_end(msg, hdr);
  
        genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                                nl80211_mlme_mcgrp.id, gfp);
@@@ -7067,7 -6862,10 +7067,7 @@@ void nl80211_send_roamed(struct cfg8021
        if (resp_ie)
                NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie);
  
 -      if (genlmsg_end(msg, hdr) < 0) {
 -              nlmsg_free(msg);
 -              return;
 -      }
 +      genlmsg_end(msg, hdr);
  
        genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                                nl80211_mlme_mcgrp.id, gfp);
@@@ -7105,7 -6903,10 +7105,7 @@@ void nl80211_send_disconnected(struct c
        if (ie)
                NLA_PUT(msg, NL80211_ATTR_IE, ie_len, ie);
  
 -      if (genlmsg_end(msg, hdr) < 0) {
 -              nlmsg_free(msg);
 -              return;
 -      }
 +      genlmsg_end(msg, hdr);
  
        genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                                nl80211_mlme_mcgrp.id, GFP_KERNEL);
@@@ -7138,7 -6939,10 +7138,7 @@@ void nl80211_send_ibss_bssid(struct cfg
        NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
        NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid);
  
 -      if (genlmsg_end(msg, hdr) < 0) {
 -              nlmsg_free(msg);
 -              return;
 -      }
 +      genlmsg_end(msg, hdr);
  
        genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                                nl80211_mlme_mcgrp.id, gfp);
@@@ -7173,7 -6977,10 +7173,7 @@@ void nl80211_send_new_peer_candidate(st
        if (ie_len && ie)
                NLA_PUT(msg, NL80211_ATTR_IE, ie_len , ie);
  
 -      if (genlmsg_end(msg, hdr) < 0) {
 -              nlmsg_free(msg);
 -              return;
 -      }
 +      genlmsg_end(msg, hdr);
  
        genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                                nl80211_mlme_mcgrp.id, gfp);
@@@ -7212,7 -7019,10 +7212,7 @@@ void nl80211_michael_mic_failure(struc
        if (tsc)
                NLA_PUT(msg, NL80211_ATTR_KEY_SEQ, 6, tsc);
  
 -      if (genlmsg_end(msg, hdr) < 0) {
 -              nlmsg_free(msg);
 -              return;
 -      }
 +      genlmsg_end(msg, hdr);
  
        genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                                nl80211_mlme_mcgrp.id, gfp);
@@@ -7263,7 -7073,10 +7263,7 @@@ void nl80211_send_beacon_hint_event(str
                goto nla_put_failure;
        nla_nest_end(msg, nl_freq);
  
 -      if (genlmsg_end(msg, hdr) < 0) {
 -              nlmsg_free(msg);
 -              return;
 -      }
 +      genlmsg_end(msg, hdr);
  
        rcu_read_lock();
        genlmsg_multicast_allns(msg, 0, nl80211_regulatory_mcgrp.id,
@@@ -7306,7 -7119,10 +7306,7 @@@ static void nl80211_send_remain_on_chan
        if (cmd == NL80211_CMD_REMAIN_ON_CHANNEL)
                NLA_PUT_U32(msg, NL80211_ATTR_DURATION, duration);
  
 -      if (genlmsg_end(msg, hdr) < 0) {
 -              nlmsg_free(msg);
 -              return;
 -      }
 +      genlmsg_end(msg, hdr);
  
        genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                                nl80211_mlme_mcgrp.id, gfp);
@@@ -7377,7 -7193,10 +7377,7 @@@ void nl80211_send_sta_del_event(struct 
        NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
        NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr);
  
 -      if (genlmsg_end(msg, hdr) < 0) {
 -              nlmsg_free(msg);
 -              return;
 -      }
 +      genlmsg_end(msg, hdr);
  
        genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                                nl80211_mlme_mcgrp.id, gfp);
        nlmsg_free(msg);
  }
  
 +static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd,
 +                                     const u8 *addr, gfp_t gfp)
 +{
 +      struct wireless_dev *wdev = dev->ieee80211_ptr;
 +      struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
 +      struct sk_buff *msg;
 +      void *hdr;
 +      int err;
 +      u32 nlpid = ACCESS_ONCE(wdev->ap_unexpected_nlpid);
 +
 +      if (!nlpid)
 +              return false;
 +
 +      msg = nlmsg_new(100, gfp);
 +      if (!msg)
 +              return true;
 +
 +      hdr = nl80211hdr_put(msg, 0, 0, 0, cmd);
 +      if (!hdr) {
 +              nlmsg_free(msg);
 +              return true;
 +      }
 +
 +      NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
 +      NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
 +      NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
 +
 +      err = genlmsg_end(msg, hdr);
 +      if (err < 0) {
 +              nlmsg_free(msg);
 +              return true;
 +      }
 +
 +      genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid);
 +      return true;
 +
 + nla_put_failure:
 +      genlmsg_cancel(msg, hdr);
 +      nlmsg_free(msg);
 +      return true;
 +}
 +
 +bool nl80211_unexpected_frame(struct net_device *dev, const u8 *addr, gfp_t gfp)
 +{
 +      return __nl80211_unexpected_frame(dev, NL80211_CMD_UNEXPECTED_FRAME,
 +                                        addr, gfp);
 +}
 +
 +bool nl80211_unexpected_4addr_frame(struct net_device *dev,
 +                                  const u8 *addr, gfp_t gfp)
 +{
 +      return __nl80211_unexpected_frame(dev,
 +                                        NL80211_CMD_UNEXPECTED_4ADDR_FRAME,
 +                                        addr, gfp);
 +}
 +
  int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
                      struct net_device *netdev, u32 nlpid,
                      int freq, const u8 *buf, size_t len, gfp_t gfp)
  {
        struct sk_buff *msg;
        void *hdr;
 -      int err;
  
        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
        if (!msg)
        NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq);
        NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf);
  
 -      err = genlmsg_end(msg, hdr);
 -      if (err < 0) {
 -              nlmsg_free(msg);
 -              return err;
 -      }
 +      genlmsg_end(msg, hdr);
  
 -      err = genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid);
 -      if (err < 0)
 -              return err;
 -      return 0;
 +      return genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid);
  
   nla_put_failure:
        genlmsg_cancel(msg, hdr);
@@@ -7501,7 -7272,10 +7501,7 @@@ void nl80211_send_mgmt_tx_status(struc
        if (ack)
                NLA_PUT_FLAG(msg, NL80211_ATTR_ACK);
  
 -      if (genlmsg_end(msg, hdr) < 0) {
 -              nlmsg_free(msg);
 -              return;
 -      }
 +      genlmsg_end(msg, hdr);
  
        genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, gfp);
        return;
@@@ -7543,7 -7317,10 +7543,7 @@@ nl80211_send_cqm_rssi_notify(struct cfg
  
        nla_nest_end(msg, pinfoattr);
  
 -      if (genlmsg_end(msg, hdr) < 0) {
 -              nlmsg_free(msg);
 -              return;
 -      }
 +      genlmsg_end(msg, hdr);
  
        genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                                nl80211_mlme_mcgrp.id, gfp);
@@@ -7585,7 -7362,10 +7585,7 @@@ void nl80211_gtk_rekey_notify(struct cf
  
        nla_nest_end(msg, rekey_attr);
  
 -      if (genlmsg_end(msg, hdr) < 0) {
 -              nlmsg_free(msg);
 -              return;
 -      }
 +      genlmsg_end(msg, hdr);
  
        genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                                nl80211_mlme_mcgrp.id, gfp);
@@@ -7628,7 -7408,10 +7628,7 @@@ void nl80211_pmksa_candidate_notify(str
  
        nla_nest_end(msg, attr);
  
 -      if (genlmsg_end(msg, hdr) < 0) {
 -              nlmsg_free(msg);
 -              return;
 -      }
 +      genlmsg_end(msg, hdr);
  
        genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                                nl80211_mlme_mcgrp.id, gfp);
@@@ -7670,45 -7453,7 +7670,45 @@@ nl80211_send_cqm_pktloss_notify(struct 
  
        nla_nest_end(msg, pinfoattr);
  
 -      if (genlmsg_end(msg, hdr) < 0) {
 +      genlmsg_end(msg, hdr);
 +
 +      genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
 +                              nl80211_mlme_mcgrp.id, gfp);
 +      return;
 +
 + nla_put_failure:
 +      genlmsg_cancel(msg, hdr);
 +      nlmsg_free(msg);
 +}
 +
 +void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
 +                         u64 cookie, bool acked, gfp_t gfp)
 +{
 +      struct wireless_dev *wdev = dev->ieee80211_ptr;
 +      struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
 +      struct sk_buff *msg;
 +      void *hdr;
 +      int err;
 +
 +      msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
 +      if (!msg)
 +              return;
 +
 +      hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_PROBE_CLIENT);
 +      if (!hdr) {
 +              nlmsg_free(msg);
 +              return;
 +      }
 +
 +      NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
 +      NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
 +      NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
 +      NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
 +      if (acked)
 +              NLA_PUT_FLAG(msg, NL80211_ATTR_ACK);
 +
 +      err = genlmsg_end(msg, hdr);
 +      if (err < 0) {
                nlmsg_free(msg);
                return;
        }
        genlmsg_cancel(msg, hdr);
        nlmsg_free(msg);
  }
 +EXPORT_SYMBOL(cfg80211_probe_status);
 +
 +void cfg80211_report_obss_beacon(struct wiphy *wiphy,
 +                               const u8 *frame, size_t len,
 +                               int freq, gfp_t gfp)
 +{
 +      struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 +      struct sk_buff *msg;
 +      void *hdr;
 +      u32 nlpid = ACCESS_ONCE(rdev->ap_beacons_nlpid);
 +
 +      if (!nlpid)
 +              return;
 +
 +      msg = nlmsg_new(len + 100, gfp);
 +      if (!msg)
 +              return;
 +
 +      hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FRAME);
 +      if (!hdr) {
 +              nlmsg_free(msg);
 +              return;
 +      }
 +
 +      NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
 +      if (freq)
 +              NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq);
 +      NLA_PUT(msg, NL80211_ATTR_FRAME, len, frame);
 +
 +      genlmsg_end(msg, hdr);
 +
 +      genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid);
 +      return;
 +
 + nla_put_failure:
 +      genlmsg_cancel(msg, hdr);
 +      nlmsg_free(msg);
 +}
 +EXPORT_SYMBOL(cfg80211_report_obss_beacon);
  
  static int nl80211_netlink_notify(struct notifier_block * nb,
                                  unsigned long state,
  
        rcu_read_lock();
  
 -      list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list)
 +      list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) {
                list_for_each_entry_rcu(wdev, &rdev->netdev_list, list)
                        cfg80211_mlme_unregister_socket(wdev, notify->pid);
 +              if (rdev->ap_beacons_nlpid == notify->pid)
 +                      rdev->ap_beacons_nlpid = 0;
 +      }
  
        rcu_read_unlock();