Merge tag 'configfs-for-4.8-2' of git://git.infradead.org/users/hch/configfs
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 23 Sep 2016 16:45:15 +0000 (09:45 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 23 Sep 2016 16:45:15 +0000 (09:45 -0700)
Pull configfs fix from Christoph Hellwig:
 "One more trivial fix for the binary attribute code from Phil Turnbull"

* tag 'configfs-for-4.8-2' of git://git.infradead.org/users/hch/configfs:
  configfs: Return -EFBIG from configfs_write_bin_file.
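
For context, the subject line above concerns rejecting writes that would grow a binary attribute beyond its configured maximum. A minimal sketch of that error-handling pattern follows; the struct and names are hypothetical illustrations, not the code touched by the patch in fs/configfs/file.c.

    #include <errno.h>
    #include <stddef.h>
    #include <sys/types.h>

    /* Hypothetical buffer type; the field names are illustrative only. */
    struct bin_buf {
            size_t size;            /* bytes currently allocated */
            size_t max_size;        /* 0 means "no limit" */
    };

    /* Refuse to grow the buffer past max_size and report -EFBIG,
     * mirroring the kind of bounds check the commit title describes. */
    static ssize_t bin_buf_write(struct bin_buf *b, size_t count, off_t pos)
    {
            if (b->max_size && (size_t)pos + count > b->max_size)
                    return -EFBIG;
            /* ... (re)allocate and copy 'count' bytes at 'pos' ... */
            return (ssize_t)count;
    }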

217 files changed:
Documentation/devicetree/bindings/mmc/sdhci-st.txt
Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst
Documentation/media/uapi/cec/cec-ioc-dqevent.rst
MAINTAINERS
Makefile
arch/arm/boot/dts/bcm2835-rpi.dtsi
arch/arm/boot/dts/bcm283x.dtsi
arch/arm/boot/dts/stih407-family.dtsi
arch/arm/boot/dts/stih410.dtsi
arch/arm/common/locomo.c
arch/arm/common/sa1111.c
arch/arm/configs/keystone_defconfig
arch/arm/configs/multi_v7_defconfig
arch/arm/crypto/aes-ce-glue.c
arch/arm/include/asm/pgtable-2level-hwdef.h
arch/arm/include/asm/pgtable-3level-hwdef.h
arch/arm/mach-exynos/suspend.c
arch/arm/mach-pxa/lubbock.c
arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c
arch/arm/mm/mmu.c
arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
arch/arm64/boot/dts/apm/apm-storm.dtsi
arch/arm64/boot/dts/broadcom/bcm2835-rpi.dtsi [new symlink]
arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b.dts
arch/arm64/boot/dts/broadcom/bcm2837.dtsi
arch/arm64/boot/dts/broadcom/bcm283x-rpi-smsc9514.dtsi [new symlink]
arch/arm64/boot/dts/broadcom/bcm283x.dtsi [new symlink]
arch/arm64/boot/dts/broadcom/ns2.dtsi
arch/arm64/boot/dts/cavium/thunder-88xx.dtsi
arch/arm64/boot/dts/exynos/exynos7.dtsi
arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
arch/arm64/boot/dts/marvell/armada-ap806.dtsi
arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi
arch/arm64/boot/dts/xilinx/zynqmp.dtsi
arch/arm64/crypto/aes-glue.c
arch/avr32/lib/copy_user.S
arch/openrisc/include/asm/uaccess.h
arch/powerpc/include/asm/cpu_has_feature.h
arch/powerpc/kernel/idle_book3s.S
arch/powerpc/platforms/powernv/pci-ioda.c
arch/x86/events/amd/core.c
arch/x86/events/intel/core.c
arch/x86/events/intel/pt.c
arch/x86/kvm/pmu_amd.c
crypto/blkcipher.c
crypto/echainiv.c
drivers/base/power/runtime.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
drivers/gpu/drm/drm_ioc32.c
drivers/gpu/drm/exynos/exynos_drm_fb.c
drivers/gpu/drm/exynos/exynos_drm_fimc.c
drivers/gpu/drm/exynos/exynos_drm_g2d.c
drivers/gpu/drm/exynos/exynos_drm_gsc.c
drivers/gpu/drm/exynos/exynos_drm_rotator.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_vgpu.c
drivers/gpu/drm/i915/intel_dvo.c
drivers/gpu/drm/i915/intel_opregion.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_psr.c
drivers/gpu/drm/vc4/vc4_bo.c
drivers/gpu/drm/vc4/vc4_validate_shaders.c
drivers/infiniband/hw/mlx4/mad.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/mcg.c
drivers/infiniband/hw/mlx4/mlx4_ib.h
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/sw/rdmavt/mr.c
drivers/infiniband/sw/rxe/rxe.c
drivers/infiniband/sw/rxe/rxe_comp.c
drivers/infiniband/sw/rxe/rxe_net.c
drivers/infiniband/sw/rxe/rxe_net.h
drivers/infiniband/sw/rxe/rxe_recv.c
drivers/infiniband/sw/rxe/rxe_req.c
drivers/infiniband/sw/rxe/rxe_resp.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/irqchip/irq-atmel-aic.c
drivers/irqchip/irq-atmel-aic5.c
drivers/media/cec-edid.c
drivers/media/pci/cx23885/cx23885-417.c
drivers/media/pci/saa7134/saa7134-dvb.c
drivers/media/pci/saa7134/saa7134-empress.c
drivers/media/platform/Kconfig
drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.h
drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
drivers/media/platform/rcar-fcp.c
drivers/mmc/host/omap.c
drivers/mmc/host/omap_hsmmc.c
drivers/mmc/host/sdhci-st.c
drivers/net/can/flexcan.c
drivers/net/can/ifi_canfd/ifi_canfd.c
drivers/net/ethernet/broadcom/bnx2.c
drivers/net/ethernet/brocade/bna/bnad_ethtool.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
drivers/net/ethernet/ibm/emac/core.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mellanox/mlx4/eq.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/ethernet/qlogic/qed/qed_mcp.c
drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
drivers/net/phy/mdio-xgene.c
drivers/net/usb/r8152.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/xen-netback/xenbus.c
drivers/pcmcia/ds.c
drivers/pcmcia/pxa2xx_base.c
drivers/pcmcia/pxa2xx_base.h
drivers/pcmcia/sa1111_badge4.c
drivers/pcmcia/sa1111_generic.c
drivers/pcmcia/sa1111_jornada720.c
drivers/pcmcia/sa1111_lubbock.c
drivers/pcmcia/sa1111_neponset.c
drivers/pcmcia/sa11xx_base.c
drivers/pcmcia/soc_common.c
drivers/rapidio/rio_cm.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/s390/net/qeth_l3_sys.c
drivers/staging/media/cec/TODO
drivers/staging/media/cec/cec-adap.c
drivers/staging/media/cec/cec-api.c
drivers/staging/media/cec/cec-core.c
drivers/staging/media/pulse8-cec/pulse8-cec.c
drivers/usb/core/config.c
drivers/usb/musb/Kconfig
drivers/usb/serial/usb-serial-simple.c
fs/aio.c
fs/autofs4/expire.c
fs/cifs/cifsfs.c
fs/cifs/cifsproto.h
fs/cifs/connect.c
fs/notify/fanotify/fanotify.c
fs/notify/fanotify/fanotify_user.c
fs/notify/group.c
fs/notify/notification.c
fs/ocfs2/alloc.c
fs/ocfs2/cluster/tcp_internal.h
fs/ocfs2/dlm/dlmconvert.c
fs/ocfs2/file.c
fs/ocfs2/suballoc.c
fs/proc/kcore.c
fs/ramfs/file-mmu.c
include/linux/cec-funcs.h
include/linux/cec.h
include/linux/cpuhotplug.h
include/linux/fsnotify_backend.h
include/linux/irq.h
include/linux/pagemap.h
include/linux/uio.h
include/media/cec.h
include/net/netfilter/nf_conntrack_synproxy.h
include/net/sctp/sm.h
include/net/sock.h
include/net/xfrm.h
kernel/cgroup.c
lib/iov_iter.c
mm/debug.c
mm/khugepaged.c
mm/memcontrol.c
mm/memory_hotplug.c
mm/page_io.c
mm/swapfile.c
mm/usercopy.c
net/batman-adv/bat_v_elp.c
net/batman-adv/routing.c
net/core/sock.c
net/ipv4/ip_input.c
net/ipv4/ip_vti.c
net/ipv4/ipmr.c
net/ipv4/netfilter/nft_chain_route_ipv4.c
net/ipv4/route.c
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/ipv4/tcp_timer.c
net/ipv6/ip6_vti.c
net/ipv6/ip6mr.c
net/ipv6/netfilter/nft_chain_route_ipv6.c
net/ipv6/route.c
net/ipv6/xfrm6_input.c
net/ipv6/xfrm6_tunnel.c
net/irda/af_irda.c
net/mac80211/agg-rx.c
net/mac80211/agg-tx.c
net/mac80211/mesh_hwmp.c
net/mac80211/mesh_pathtbl.c
net/mac80211/sta_info.c
net/mac80211/tx.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_nat_core.c
net/netfilter/nf_tables_trace.c
net/sctp/input.c
net/sunrpc/auth_gss/svcauth_gss.c
net/wireless/nl80211.c
net/xfrm/xfrm_state.c
net/xfrm/xfrm_user.c
scripts/faddr2line [new file with mode: 0755]

index 88faa91..3cd4c43 100644 (file)
@@ -10,7 +10,7 @@ Required properties:
                        subsystem (mmcss) inside the FlashSS (available in STiH407 SoC
                        family).
 
-- clock-names:         Should be "mmc".
+- clock-names:         Should be "mmc" and "icn".  (NB: The latter is not compulsory)
                        See: Documentation/devicetree/bindings/resource-names.txt
 - clocks:              Phandle to the clock.
                        See: Documentation/devicetree/bindings/clock/clock-bindings.txt
index 04ee900..201d483 100644 (file)
@@ -144,7 +144,7 @@ logical address types are already defined will return with error ``EBUSY``.
 
        -  ``flags``
 
-       -  Flags. No flags are defined yet, so set this to 0.
+       -  Flags. See :ref:`cec-log-addrs-flags` for a list of available flags.
 
     -  .. row 7
 
@@ -201,6 +201,25 @@ logical address types are already defined will return with error ``EBUSY``.
           give the CEC framework more information about the device type, even
           though the framework won't use it directly in the CEC message.
 
+.. _cec-log-addrs-flags:
+
+.. flat-table:: Flags for struct cec_log_addrs
+    :header-rows:  0
+    :stub-columns: 0
+    :widths:       3 1 4
+
+
+    -  .. _`CEC-LOG-ADDRS-FL-ALLOW-UNREG-FALLBACK`:
+
+       -  ``CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK``
+
+       -  1
+
+       -  By default if no logical address of the requested type can be claimed, then
+         it will go back to the unconfigured state. If this flag is set, then it will
+         fallback to the Unregistered logical address. Note that if the Unregistered
+         logical address was explicitly requested, then this flag has no effect.
+
 .. _cec-versions:
 
 .. flat-table:: CEC Versions
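
(Illustrative note, not part of the patch: the new CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK flag documented above would be used from user space roughly as below. This assumes a CEC device node such as /dev/cec0 and the uapi definitions in linux/cec.h; treat it as a sketch rather than code from this series.)

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/cec.h>

    int main(void)
    {
            struct cec_log_addrs laddrs;
            int fd = open("/dev/cec0", O_RDWR);     /* device path is an assumption */

            if (fd < 0)
                    return 1;

            memset(&laddrs, 0, sizeof(laddrs));
            laddrs.cec_version = CEC_OP_CEC_VERSION_2_0;
            laddrs.num_log_addrs = 1;
            laddrs.vendor_id = CEC_VENDOR_ID_NONE;
            strcpy(laddrs.osd_name, "Example");
            laddrs.primary_device_type[0] = CEC_OP_PRIM_DEVTYPE_PLAYBACK;
            laddrs.log_addr_type[0] = CEC_LOG_ADDR_TYPE_PLAYBACK;
            laddrs.all_device_types[0] = CEC_OP_ALL_DEVTYPE_PLAYBACK;
            /* If no Playback logical address is free, claim Unregistered
             * instead of going back to the unconfigured state. */
            laddrs.flags = CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK;

            if (ioctl(fd, CEC_ADAP_S_LOG_ADDRS, &laddrs))
                    perror("CEC_ADAP_S_LOG_ADDRS");
            return 0;
    }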
index 7a6d6d0..2e1e739 100644 (file)
@@ -64,7 +64,8 @@ it is guaranteed that the state did change in between the two events.
 
        -  ``phys_addr``
 
-       -  The current physical address.
+       -  The current physical address. This is ``CEC_PHYS_ADDR_INVALID`` if no
+          valid physical address is set.
 
     -  .. row 2
 
@@ -72,7 +73,10 @@ it is guaranteed that the state did change in between the two events.
 
        -  ``log_addr_mask``
 
-       -  The current set of claimed logical addresses.
+       -  The current set of claimed logical addresses. This is 0 if no logical
+          addresses are claimed or if ``phys_addr`` is ``CEC_PHYS_ADDR_INVALID``.
+         If bit 15 is set (``1 << CEC_LOG_ADDR_UNREGISTERED``) then this device
+         has the unregistered logical address. In that case all other bits are 0.
 
 
 
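(Illustrative note, not part of the patch: the clarified meanings of phys_addr and log_addr_mask above show up when draining events with CEC_DQEVENT; a sketch under the same assumptions as before.)

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/cec.h>

    int main(void)
    {
            struct cec_event ev;
            int fd = open("/dev/cec0", O_RDWR);     /* device path is an assumption */

            if (fd < 0)
                    return 1;

            /* Blocks until an event such as CEC_EVENT_STATE_CHANGE is queued. */
            if (ioctl(fd, CEC_DQEVENT, &ev) == 0 && ev.event == CEC_EVENT_STATE_CHANGE) {
                    if (ev.state_change.phys_addr == CEC_PHYS_ADDR_INVALID)
                            printf("no valid physical address is set\n");
                    if (ev.state_change.log_addr_mask & (1 << CEC_LOG_ADDR_UNREGISTERED))
                            printf("device fell back to the Unregistered logical address\n");
            }
            return 0;
    }
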
index a5e1270..01bff8e 100644 (file)
@@ -1625,6 +1625,7 @@ N:        rockchip
 ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
 M:     Kukjin Kim <kgene@kernel.org>
 M:     Krzysztof Kozlowski <krzk@kernel.org>
+R:     Javier Martinez Canillas <javier@osg.samsung.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 S:     Maintained
@@ -2500,7 +2501,7 @@ S:        Supported
 F:     kernel/bpf/
 
 BROADCOM B44 10/100 ETHERNET DRIVER
-M:     Gary Zambrano <zambrano@broadcom.com>
+M:     Michael Chan <michael.chan@broadcom.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/broadcom/b44.*
@@ -6102,7 +6103,7 @@ S:        Supported
 F:     drivers/cpufreq/intel_pstate.c
 
 INTEL FRAMEBUFFER DRIVER (excluding 810 and 815)
-M:     Maik Broemme <mbroemme@plusserver.de>
+M:     Maik Broemme <mbroemme@libmpq.org>
 L:     linux-fbdev@vger.kernel.org
 S:     Maintained
 F:     Documentation/fb/intelfb.txt
@@ -8160,6 +8161,15 @@ S:       Maintained
 W:     https://fedorahosted.org/dropwatch/
 F:     net/core/drop_monitor.c
 
+NETWORKING [DSA]
+M:     Andrew Lunn <andrew@lunn.ch>
+M:     Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+M:     Florian Fainelli <f.fainelli@gmail.com>
+S:     Maintained
+F:     net/dsa/
+F:     include/net/dsa.h
+F:     drivers/net/dsa/
+
 NETWORKING [GENERAL]
 M:     "David S. Miller" <davem@davemloft.net>
 L:     netdev@vger.kernel.org
@@ -12568,7 +12578,7 @@ F:      include/linux/if_*vlan.h
 F:     net/8021q/
 
 VLYNQ BUS
-M:     Florian Fainelli <florian@openwrt.org>
+M:     Florian Fainelli <f.fainelli@gmail.com>
 L:     openwrt-devel@lists.openwrt.org (subscribers-only)
 S:     Maintained
 F:     drivers/vlynq/vlynq.c
index 1a8c8dd..74e22c2 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 8
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Psychotic Stoned Sheep
 
 # *DOCUMENTATION*
index caf2707..e9b47b2 100644 (file)
@@ -2,6 +2,7 @@
 
 / {
        memory {
+               device_type = "memory";
                reg = <0 0x10000000>;
        };
 
index b982522..445624a 100644 (file)
@@ -2,7 +2,6 @@
 #include <dt-bindings/clock/bcm2835.h>
 #include <dt-bindings/clock/bcm2835-aux.h>
 #include <dt-bindings/gpio/gpio.h>
-#include "skeleton.dtsi"
 
 /* This include file covers the common peripherals and configuration between
  * bcm2835 and bcm2836 implementations, leaving the CPU configuration to
@@ -13,6 +12,8 @@
        compatible = "brcm,bcm2835";
        model = "BCM2835";
        interrupt-parent = <&intc>;
+       #address-cells = <1>;
+       #size-cells = <1>;
 
        chosen {
                bootargs = "earlyprintk console=ttyAMA0";
index d294e82..8b063ab 100644 (file)
                        interrupt-names = "mmcirq";
                        pinctrl-names = "default";
                        pinctrl-0 = <&pinctrl_mmc0>;
-                       clock-names = "mmc";
-                       clocks = <&clk_s_c0_flexgen CLK_MMC_0>;
+                       clock-names = "mmc", "icn";
+                       clocks = <&clk_s_c0_flexgen CLK_MMC_0>,
+                                <&clk_s_c0_flexgen CLK_RX_ICN_HVA>;
                        bus-width = <8>;
                        non-removable;
                };
                        interrupt-names = "mmcirq";
                        pinctrl-names = "default";
                        pinctrl-0 = <&pinctrl_sd1>;
-                       clock-names = "mmc";
-                       clocks = <&clk_s_c0_flexgen CLK_MMC_1>;
+                       clock-names = "mmc", "icn";
+                       clocks = <&clk_s_c0_flexgen CLK_MMC_1>,
+                                <&clk_s_c0_flexgen CLK_RX_ICN_HVA>;
                        resets = <&softreset STIH407_MMC1_SOFTRESET>;
                        bus-width = <4>;
                };
index 18ed1ad..4031886 100644 (file)
@@ -41,7 +41,8 @@
                        compatible = "st,st-ohci-300x";
                        reg = <0x9a03c00 0x100>;
                        interrupts = <GIC_SPI 180 IRQ_TYPE_NONE>;
-                       clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
+                       clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
+                                <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
                        resets = <&powerdown STIH407_USB2_PORT0_POWERDOWN>,
                                 <&softreset STIH407_USB2_PORT0_SOFTRESET>;
                        reset-names = "power", "softreset";
@@ -57,7 +58,8 @@
                        interrupts = <GIC_SPI 151 IRQ_TYPE_NONE>;
                        pinctrl-names = "default";
                        pinctrl-0 = <&pinctrl_usb0>;
-                       clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
+                       clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
+                                <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
                        resets = <&powerdown STIH407_USB2_PORT0_POWERDOWN>,
                                 <&softreset STIH407_USB2_PORT0_SOFTRESET>;
                        reset-names = "power", "softreset";
@@ -71,7 +73,8 @@
                        compatible = "st,st-ohci-300x";
                        reg = <0x9a83c00 0x100>;
                        interrupts = <GIC_SPI 181 IRQ_TYPE_NONE>;
-                       clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
+                       clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
+                                <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
                        resets = <&powerdown STIH407_USB2_PORT1_POWERDOWN>,
                                 <&softreset STIH407_USB2_PORT1_SOFTRESET>;
                        reset-names = "power", "softreset";
@@ -87,7 +90,8 @@
                        interrupts = <GIC_SPI 153 IRQ_TYPE_NONE>;
                        pinctrl-names = "default";
                        pinctrl-0 = <&pinctrl_usb1>;
-                       clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
+                       clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
+                                <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
                        resets = <&powerdown STIH407_USB2_PORT1_POWERDOWN>,
                                 <&softreset STIH407_USB2_PORT1_SOFTRESET>;
                        reset-names = "power", "softreset";
index 0e97b4b..6c7b068 100644 (file)
@@ -140,7 +140,7 @@ static struct locomo_dev_info locomo_devices[] = {
 
 static void locomo_handler(struct irq_desc *desc)
 {
-       struct locomo *lchip = irq_desc_get_chip_data(desc);
+       struct locomo *lchip = irq_desc_get_handler_data(desc);
        int req, i;
 
        /* Acknowledge the parent IRQ */
@@ -200,8 +200,7 @@ static void locomo_setup_irq(struct locomo *lchip)
         * Install handler for IRQ_LOCOMO_HW.
         */
        irq_set_irq_type(lchip->irq, IRQ_TYPE_EDGE_FALLING);
-       irq_set_chip_data(lchip->irq, lchip);
-       irq_set_chained_handler(lchip->irq, locomo_handler);
+       irq_set_chained_handler_and_data(lchip->irq, locomo_handler, lchip);
 
        /* Install handlers for IRQ_LOCOMO_* */
        for ( ; irq <= lchip->irq_base + 3; irq++) {
index fb0a0a4..2e076c4 100644 (file)
@@ -472,8 +472,8 @@ static int sa1111_setup_irq(struct sa1111 *sachip, unsigned irq_base)
         * specifies that S0ReadyInt and S1ReadyInt should be '1'.
         */
        sa1111_writel(0, irqbase + SA1111_INTPOL0);
-       sa1111_writel(SA1111_IRQMASK_HI(IRQ_S0_READY_NINT) |
-                     SA1111_IRQMASK_HI(IRQ_S1_READY_NINT),
+       sa1111_writel(BIT(IRQ_S0_READY_NINT & 31) |
+                     BIT(IRQ_S1_READY_NINT & 31),
                      irqbase + SA1111_INTPOL1);
 
        /* clear all IRQs */
@@ -754,7 +754,7 @@ static int __sa1111_probe(struct device *me, struct resource *mem, int irq)
        if (sachip->irq != NO_IRQ) {
                ret = sa1111_setup_irq(sachip, pd->irq_base);
                if (ret)
-                       goto err_unmap;
+                       goto err_clk;
        }
 
 #ifdef CONFIG_ARCH_SA1100
@@ -799,6 +799,8 @@ static int __sa1111_probe(struct device *me, struct resource *mem, int irq)
 
        return 0;
 
+ err_clk:
+       clk_disable(sachip->clk);
  err_unmap:
        iounmap(sachip->base);
  err_clk_unprep:
@@ -869,9 +871,9 @@ struct sa1111_save_data {
 
 #ifdef CONFIG_PM
 
-static int sa1111_suspend(struct platform_device *dev, pm_message_t state)
+static int sa1111_suspend_noirq(struct device *dev)
 {
-       struct sa1111 *sachip = platform_get_drvdata(dev);
+       struct sa1111 *sachip = dev_get_drvdata(dev);
        struct sa1111_save_data *save;
        unsigned long flags;
        unsigned int val;
@@ -934,9 +936,9 @@ static int sa1111_suspend(struct platform_device *dev, pm_message_t state)
  *     restored by their respective drivers, and must be called
  *     via LDM after this function.
  */
-static int sa1111_resume(struct platform_device *dev)
+static int sa1111_resume_noirq(struct device *dev)
 {
-       struct sa1111 *sachip = platform_get_drvdata(dev);
+       struct sa1111 *sachip = dev_get_drvdata(dev);
        struct sa1111_save_data *save;
        unsigned long flags, id;
        void __iomem *base;
@@ -952,7 +954,7 @@ static int sa1111_resume(struct platform_device *dev)
        id = sa1111_readl(sachip->base + SA1111_SKID);
        if ((id & SKID_ID_MASK) != SKID_SA1111_ID) {
                __sa1111_remove(sachip);
-               platform_set_drvdata(dev, NULL);
+               dev_set_drvdata(dev, NULL);
                kfree(save);
                return 0;
        }
@@ -1003,8 +1005,8 @@ static int sa1111_resume(struct platform_device *dev)
 }
 
 #else
-#define sa1111_suspend NULL
-#define sa1111_resume  NULL
+#define sa1111_suspend_noirq NULL
+#define sa1111_resume_noirq  NULL
 #endif
 
 static int sa1111_probe(struct platform_device *pdev)
@@ -1017,7 +1019,7 @@ static int sa1111_probe(struct platform_device *pdev)
                return -EINVAL;
        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
-               return -ENXIO;
+               return irq;
 
        return __sa1111_probe(&pdev->dev, mem, irq);
 }
@@ -1038,6 +1040,11 @@ static int sa1111_remove(struct platform_device *pdev)
        return 0;
 }
 
+static struct dev_pm_ops sa1111_pm_ops = {
+       .suspend_noirq = sa1111_suspend_noirq,
+       .resume_noirq = sa1111_resume_noirq,
+};
+
 /*
  *     Not sure if this should be on the system bus or not yet.
  *     We really want some way to register a system device at
@@ -1050,10 +1057,9 @@ static int sa1111_remove(struct platform_device *pdev)
 static struct platform_driver sa1111_device_driver = {
        .probe          = sa1111_probe,
        .remove         = sa1111_remove,
-       .suspend        = sa1111_suspend,
-       .resume         = sa1111_resume,
        .driver         = {
                .name   = "sa1111",
+               .pm     = &sa1111_pm_ops,
        },
 };
 
index 71b42e6..78cd2f1 100644 (file)
@@ -161,6 +161,7 @@ CONFIG_USB_MON=y
 CONFIG_USB_XHCI_HCD=y
 CONFIG_USB_STORAGE=y
 CONFIG_USB_DWC3=y
+CONFIG_NOP_USB_XCEIV=y
 CONFIG_KEYSTONE_USB_PHY=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
index 2c8665c..ea3566f 100644 (file)
@@ -781,7 +781,7 @@ CONFIG_MXS_DMA=y
 CONFIG_DMA_BCM2835=y
 CONFIG_DMA_OMAP=y
 CONFIG_QCOM_BAM_DMA=y
-CONFIG_XILINX_VDMA=y
+CONFIG_XILINX_DMA=y
 CONFIG_DMA_SUN6I=y
 CONFIG_STAGING=y
 CONFIG_SENSORS_ISL29018=y
index da3c042..aef022a 100644 (file)
@@ -284,7 +284,7 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                err = blkcipher_walk_done(desc, &walk,
                                          walk.nbytes % AES_BLOCK_SIZE);
        }
-       if (nbytes) {
+       if (walk.nbytes % AES_BLOCK_SIZE) {
                u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
                u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
                u8 __aligned(8) tail[AES_BLOCK_SIZE];
index d0131ee..3f82e9d 100644 (file)
@@ -47,6 +47,7 @@
 #define PMD_SECT_WB            (PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
 #define PMD_SECT_MINICACHE     (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE)
 #define PMD_SECT_WBWA          (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
+#define PMD_SECT_CACHE_MASK    (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
 #define PMD_SECT_NONSHARED_DEV (PMD_SECT_TEX(2))
 
 /*
index f8f1cff..4cd664a 100644 (file)
@@ -62,6 +62,7 @@
 #define PMD_SECT_WT            (_AT(pmdval_t, 2) << 2) /* normal inner write-through */
 #define PMD_SECT_WB            (_AT(pmdval_t, 3) << 2) /* normal inner write-back */
 #define PMD_SECT_WBWA          (_AT(pmdval_t, 7) << 2) /* normal inner write-alloc */
+#define PMD_SECT_CACHE_MASK    (_AT(pmdval_t, 7) << 2)
 
 /*
  * + Level 3 descriptor (PTE)
index 3750575..06332f6 100644 (file)
@@ -255,6 +255,12 @@ static int __init exynos_pmu_irq_init(struct device_node *node,
                return -ENOMEM;
        }
 
+       /*
+        * Clear the OF_POPULATED flag set in of_irq_init so that
+        * later the Exynos PMU platform device won't be skipped.
+        */
+       of_node_clear_flag(node, OF_POPULATED);
+
        return 0;
 }
 
index 7245f33..d6159f8 100644 (file)
@@ -137,6 +137,18 @@ static struct pxa2xx_udc_mach_info udc_info __initdata = {
        // no D+ pullup; lubbock can't connect/disconnect in software
 };
 
+static void lubbock_init_pcmcia(void)
+{
+       struct clk *clk;
+
+       /* Add an alias for the SA1111 PCMCIA clock */
+       clk = clk_get_sys("pxa2xx-pcmcia", NULL);
+       if (!IS_ERR(clk)) {
+               clkdev_create(clk, NULL, "1800");
+               clk_put(clk);
+       }
+}
+
 static struct resource sa1111_resources[] = {
        [0] = {
                .start  = 0x10000000,
@@ -467,6 +479,8 @@ static void __init lubbock_init(void)
        pxa_set_btuart_info(NULL);
        pxa_set_stuart_info(NULL);
 
+       lubbock_init_pcmcia();
+
        clk_add_alias("SA1111_CLK", NULL, "GPIO11_CLK", NULL);
        pxa_set_udc_info(&udc_info);
        pxa_set_fb_info(NULL, &sharp_lm8v31);
index 62437b5..73e3adb 100644 (file)
 
 #define REGULATOR_IRQ_MASK     BIT(2)  /* IRQ2, active low */
 
-static void __iomem *irqc;
-
-static const u8 da9063_mask_regs[] = {
-       DA9063_REG_IRQ_MASK_A,
-       DA9063_REG_IRQ_MASK_B,
-       DA9063_REG_IRQ_MASK_C,
-       DA9063_REG_IRQ_MASK_D,
-};
-
-/* DA9210 System Control and Event Registers */
+/* start of DA9210 System Control and Event Registers */
 #define DA9210_REG_MASK_A              0x54
-#define DA9210_REG_MASK_B              0x55
-
-static const u8 da9210_mask_regs[] = {
-       DA9210_REG_MASK_A,
-       DA9210_REG_MASK_B,
-};
-
-static void da9xxx_mask_irqs(struct i2c_client *client, const u8 regs[],
-                            unsigned int nregs)
-{
-       unsigned int i;
 
-       dev_info(&client->dev, "Masking %s interrupt sources\n", client->name);
+static void __iomem *irqc;
 
-       for (i = 0; i < nregs; i++) {
-               int error = i2c_smbus_write_byte_data(client, regs[i], ~0);
-               if (error) {
-                       dev_err(&client->dev, "i2c error %d\n", error);
-                       return;
-               }
-       }
-}
+/* first byte sets the memory pointer, following are consecutive reg values */
+static u8 da9063_irq_clr[] = { DA9063_REG_IRQ_MASK_A, 0xff, 0xff, 0xff, 0xff };
+static u8 da9210_irq_clr[] = { DA9210_REG_MASK_A, 0xff, 0xff };
+
+static struct i2c_msg da9xxx_msgs[2] = {
+       {
+               .addr = 0x58,
+               .len = ARRAY_SIZE(da9063_irq_clr),
+               .buf = da9063_irq_clr,
+       }, {
+               .addr = 0x68,
+               .len = ARRAY_SIZE(da9210_irq_clr),
+               .buf = da9210_irq_clr,
+       },
+};
 
 static int regulator_quirk_notify(struct notifier_block *nb,
                                  unsigned long action, void *data)
@@ -93,12 +80,15 @@ static int regulator_quirk_notify(struct notifier_block *nb,
        client = to_i2c_client(dev);
        dev_dbg(dev, "Detected %s\n", client->name);
 
-       if ((client->addr == 0x58 && !strcmp(client->name, "da9063")))
-               da9xxx_mask_irqs(client, da9063_mask_regs,
-                                ARRAY_SIZE(da9063_mask_regs));
-       else if (client->addr == 0x68 && !strcmp(client->name, "da9210"))
-               da9xxx_mask_irqs(client, da9210_mask_regs,
-                                ARRAY_SIZE(da9210_mask_regs));
+       if ((client->addr == 0x58 && !strcmp(client->name, "da9063")) ||
+           (client->addr == 0x68 && !strcmp(client->name, "da9210"))) {
+               int ret;
+
+               dev_info(&client->dev, "clearing da9063/da9210 interrupts\n");
+               ret = i2c_transfer(client->adapter, da9xxx_msgs, ARRAY_SIZE(da9xxx_msgs));
+               if (ret != ARRAY_SIZE(da9xxx_msgs))
+                       dev_err(&client->dev, "i2c error %d\n", ret);
+       }
 
        mon = ioread32(irqc + IRQC_MONITOR);
        if (mon & REGULATOR_IRQ_MASK)
index 6344913..30fe03f 100644 (file)
@@ -137,7 +137,7 @@ void __init init_default_cache_policy(unsigned long pmd)
 
        initial_pmd_value = pmd;
 
-       pmd &= PMD_SECT_TEX(1) | PMD_SECT_BUFFERABLE | PMD_SECT_CACHEABLE;
+       pmd &= PMD_SECT_CACHE_MASK;
 
        for (i = 0; i < ARRAY_SIZE(cache_policies); i++)
                if (cache_policies[i].pmd == pmd) {
index 445aa67..c2b9bcb 100644 (file)
                /* Local timer */
                timer {
                        compatible = "arm,armv8-timer";
-                       interrupts = <1 13 0xf01>,
-                                    <1 14 0xf01>,
-                                    <1 11 0xf01>,
-                                    <1 10 0xf01>;
+                       interrupts = <1 13 0xf08>,
+                                    <1 14 0xf08>,
+                                    <1 11 0xf08>,
+                                    <1 10 0xf08>;
                };
 
                timer0: timer0@ffc03000 {
index e502c24..bf6c8d0 100644 (file)
        timer {
                compatible = "arm,armv8-timer";
                interrupts = <GIC_PPI 13
-                       (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_EDGE_RISING)>,
+                       (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>,
                             <GIC_PPI 14
-                       (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_EDGE_RISING)>,
+                       (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>,
                             <GIC_PPI 11
-                       (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_EDGE_RISING)>,
+                       (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>,
                             <GIC_PPI 10
-                       (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_EDGE_RISING)>;
+                       (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>;
        };
 
        xtal: xtal-clk {
index f1c2c71..c29dab9 100644 (file)
 
        timer {
                compatible = "arm,armv8-timer";
-               interrupts = <1 0 0xff01>,      /* Secure Phys IRQ */
-                            <1 13 0xff01>,     /* Non-secure Phys IRQ */
-                            <1 14 0xff01>,     /* Virt IRQ */
-                            <1 15 0xff01>;     /* Hyp IRQ */
+               interrupts = <1 0 0xff08>,      /* Secure Phys IRQ */
+                            <1 13 0xff08>,     /* Non-secure Phys IRQ */
+                            <1 14 0xff08>,     /* Virt IRQ */
+                            <1 15 0xff08>;     /* Hyp IRQ */
                clock-frequency = <50000000>;
        };
 
diff --git a/arch/arm64/boot/dts/broadcom/bcm2835-rpi.dtsi b/arch/arm64/boot/dts/broadcom/bcm2835-rpi.dtsi
new file mode 120000 (symlink)
index 0000000..3937b77
--- /dev/null
@@ -0,0 +1 @@
+../../../../arm/boot/dts/bcm2835-rpi.dtsi
\ No newline at end of file
index 6f47dd2..7841b72 100644 (file)
@@ -1,7 +1,7 @@
 /dts-v1/;
 #include "bcm2837.dtsi"
-#include "../../../../arm/boot/dts/bcm2835-rpi.dtsi"
-#include "../../../../arm/boot/dts/bcm283x-rpi-smsc9514.dtsi"
+#include "bcm2835-rpi.dtsi"
+#include "bcm283x-rpi-smsc9514.dtsi"
 
 / {
        compatible = "raspberrypi,3-model-b", "brcm,bcm2837";
index f2a31d0..8216bbb 100644 (file)
@@ -1,4 +1,4 @@
-#include "../../../../arm/boot/dts/bcm283x.dtsi"
+#include "bcm283x.dtsi"
 
 / {
        compatible = "brcm,bcm2836";
diff --git a/arch/arm64/boot/dts/broadcom/bcm283x-rpi-smsc9514.dtsi b/arch/arm64/boot/dts/broadcom/bcm283x-rpi-smsc9514.dtsi
new file mode 120000 (symlink)
index 0000000..dca7c05
--- /dev/null
@@ -0,0 +1 @@
+../../../../arm/boot/dts/bcm283x-rpi-smsc9514.dtsi
\ No newline at end of file
diff --git a/arch/arm64/boot/dts/broadcom/bcm283x.dtsi b/arch/arm64/boot/dts/broadcom/bcm283x.dtsi
new file mode 120000 (symlink)
index 0000000..5f54e4c
--- /dev/null
@@ -0,0 +1 @@
+../../../../arm/boot/dts/bcm283x.dtsi
\ No newline at end of file
index f53b095..d4a12fa 100644 (file)
        timer {
                compatible = "arm,armv8-timer";
                interrupts = <GIC_PPI 13 (GIC_CPU_MASK_RAW(0xff) |
-                             IRQ_TYPE_EDGE_RISING)>,
+                             IRQ_TYPE_LEVEL_LOW)>,
                             <GIC_PPI 14 (GIC_CPU_MASK_RAW(0xff) |
-                             IRQ_TYPE_EDGE_RISING)>,
+                             IRQ_TYPE_LEVEL_LOW)>,
                             <GIC_PPI 11 (GIC_CPU_MASK_RAW(0xff) |
-                             IRQ_TYPE_EDGE_RISING)>,
+                             IRQ_TYPE_LEVEL_LOW)>,
                             <GIC_PPI 10 (GIC_CPU_MASK_RAW(0xff) |
-                             IRQ_TYPE_EDGE_RISING)>;
+                             IRQ_TYPE_LEVEL_LOW)>;
        };
 
        pmu {
index 2eb9b22..04dc8a8 100644 (file)
 
        timer {
                compatible = "arm,armv8-timer";
-               interrupts = <1 13 0xff01>,
-                            <1 14 0xff01>,
-                            <1 11 0xff01>,
-                            <1 10 0xff01>;
+               interrupts = <1 13 4>,
+                            <1 14 4>,
+                            <1 11 4>,
+                            <1 10 4>;
        };
 
        pmu {
index ca663df..1628315 100644 (file)
 
                timer {
                        compatible = "arm,armv8-timer";
-                       interrupts = <1 13 0xff01>,
-                                    <1 14 0xff01>,
-                                    <1 11 0xff01>,
-                                    <1 10 0xff01>;
+                       interrupts = <1 13 0xff08>,
+                                    <1 14 0xff08>,
+                                    <1 11 0xff08>,
+                                    <1 10 0xff08>;
                };
 
                pmu_system_controller: system-controller@105c0000 {
index e669fbd..a67e210 100644 (file)
 
        timer {
                compatible = "arm,armv8-timer";
-               interrupts = <1 13 0x1>, /* Physical Secure PPI */
-                            <1 14 0x1>, /* Physical Non-Secure PPI */
-                            <1 11 0x1>, /* Virtual PPI */
-                            <1 10 0x1>; /* Hypervisor PPI */
+               interrupts = <1 13 0xf08>, /* Physical Secure PPI */
+                            <1 14 0xf08>, /* Physical Non-Secure PPI */
+                            <1 11 0xf08>, /* Virtual PPI */
+                            <1 10 0xf08>; /* Hypervisor PPI */
        };
 
        pmu {
index 21023a3..e3b6034 100644 (file)
 
        timer {
                compatible = "arm,armv8-timer";
-               interrupts = <1 13 0x8>, /* Physical Secure PPI, active-low */
-                            <1 14 0x8>, /* Physical Non-Secure PPI, active-low */
-                            <1 11 0x8>, /* Virtual PPI, active-low */
-                            <1 10 0x8>; /* Hypervisor PPI, active-low */
+               interrupts = <1 13 4>, /* Physical Secure PPI, active-low */
+                            <1 14 4>, /* Physical Non-Secure PPI, active-low */
+                            <1 11 4>, /* Virtual PPI, active-low */
+                            <1 10 4>; /* Hypervisor PPI, active-low */
        };
 
        pmu {
index eab1a42..c2a6745 100644 (file)
 
                        timer {
                                compatible = "arm,armv8-timer";
-                               interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>,
-                                            <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>,
-                                            <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>,
-                                            <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
+                               interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                                            <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                                            <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                                            <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
                        };
 
                        odmi: odmi@300000 {
index c223915..d73bdc8 100644 (file)
 
        timer {
                compatible = "arm,armv8-timer";
-               interrupts = <1 13 0xf01>,
-                            <1 14 0xf01>,
-                            <1 11 0xf01>,
-                            <1 10 0xf01>;
+               interrupts = <1 13 4>,
+                            <1 14 4>,
+                            <1 11 4>,
+                            <1 10 4>;
        };
 
        soc {
index e595f22..3e2e51f 100644 (file)
        timer {
                compatible = "arm,armv8-timer";
                interrupt-parent = <&gic>;
-               interrupts = <1 13 0xf01>,
-                            <1 14 0xf01>,
-                            <1 11 0xf01>,
-                            <1 10 0xf01>;
+               interrupts = <1 13 0xf08>,
+                            <1 14 0xf08>,
+                            <1 11 0xf08>,
+                            <1 10 0xf08>;
        };
 
        amba_apu {
index 5c88804..6b2aa0f 100644 (file)
@@ -216,7 +216,7 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                err = blkcipher_walk_done(desc, &walk,
                                          walk.nbytes % AES_BLOCK_SIZE);
        }
-       if (nbytes) {
+       if (walk.nbytes % AES_BLOCK_SIZE) {
                u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
                u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
                u8 __aligned(8) tail[AES_BLOCK_SIZE];
index 96a6de9..0753734 100644 (file)
@@ -23,8 +23,8 @@
         */
        .text
        .align  1
-       .global copy_from_user
-       .type   copy_from_user, @function
+       .global ___copy_from_user
+       .type   ___copy_from_user, @function
 ___copy_from_user:
        branch_if_kernel r8, __copy_user
        ret_if_privileged r8, r11, r10, r10
index cbad29b..5cc6b4f 100644 (file)
@@ -276,7 +276,7 @@ copy_from_user(void *to, const void *from, unsigned long n)
        unsigned long res = n;
 
        if (likely(access_ok(VERIFY_READ, from, n)))
-               n = __copy_tofrom_user(to, from, n);
+               res = __copy_tofrom_user(to, from, n);
        if (unlikely(res))
                memset(to + (n - res), 0, res);
        return res;
index 2ef55f8..b312b15 100644 (file)
@@ -15,7 +15,7 @@ static inline bool early_cpu_has_feature(unsigned long feature)
 #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
 #include <linux/jump_label.h>
 
-#define NUM_CPU_FTR_KEYS       64
+#define NUM_CPU_FTR_KEYS       BITS_PER_LONG
 
 extern struct static_key_true cpu_feature_keys[NUM_CPU_FTR_KEYS];
 
index 2265c63..bd739fe 100644 (file)
@@ -411,7 +411,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
  *
  * r13 - PACA
  * cr3 - gt if waking up with partial/complete hypervisor state loss
- * cr4 - eq if waking up from complete hypervisor state loss.
+ * cr4 - gt or eq if waking up from complete hypervisor state loss.
  */
 _GLOBAL(pnv_wakeup_tb_loss)
        ld      r1,PACAR1(r13)
@@ -453,7 +453,7 @@ lwarx_loop2:
         * At this stage
         * cr2 - eq if first thread to wakeup in core
         * cr3-  gt if waking up with partial/complete hypervisor state loss
-        * cr4 - eq if waking up from complete hypervisor state loss.
+        * cr4 - gt or eq if waking up from complete hypervisor state loss.
         */
 
        ori     r15,r15,PNV_CORE_IDLE_LOCK_BIT
@@ -481,7 +481,7 @@ first_thread_in_subcore:
         * If waking up from sleep, subcore state is not lost. Hence
         * skip subcore state restore
         */
-       bne     cr4,subcore_state_restored
+       blt     cr4,subcore_state_restored
 
        /* Restore per-subcore state */
        ld      r4,_SDR1(r1)
@@ -526,7 +526,7 @@ timebase_resync:
         * If waking up from sleep, per core state is not lost, skip to
         * clear_lock.
         */
-       bne     cr4,clear_lock
+       blt     cr4,clear_lock
 
        /*
         * First thread in the core to wake up and its waking up with
@@ -557,7 +557,7 @@ common_exit:
         * If waking up from sleep, hypervisor state is not lost. Hence
         * skip hypervisor state restore.
         */
-       bne     cr4,hypervisor_state_restored
+       blt     cr4,hypervisor_state_restored
 
        /* Waking up from winkle */
 
index c16d790..bc0c91e 100644 (file)
@@ -2217,7 +2217,7 @@ static long pnv_pci_ioda2_set_window(struct iommu_table_group *table_group,
 
        pnv_pci_link_table_and_group(phb->hose->node, num,
                        tbl, &pe->table_group);
-       pnv_pci_phb3_tce_invalidate_pe(pe);
+       pnv_pci_ioda2_tce_invalidate_pe(pe);
 
        return 0;
 }
@@ -2355,7 +2355,7 @@ static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group,
        if (ret)
                pe_warn(pe, "Unmapping failed, ret = %ld\n", ret);
        else
-               pnv_pci_phb3_tce_invalidate_pe(pe);
+               pnv_pci_ioda2_tce_invalidate_pe(pe);
 
        pnv_pci_unlink_table_and_group(table_group->tables[num], table_group);
 
@@ -3426,7 +3426,17 @@ static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe)
                }
        }
 
-       pnv_ioda_free_pe(pe);
+       /*
+        * The PE for root bus can be removed because of hotplug in EEH
+        * recovery for fenced PHB error. We need to mark the PE dead so
+        * that it can be populated again in PCI hot add path. The PE
+        * shouldn't be destroyed as it's the global reserved resource.
+        */
+       if (phb->ioda.root_pe_populated &&
+           phb->ioda.root_pe_idx == pe->pe_number)
+               phb->ioda.root_pe_populated = false;
+       else
+               pnv_ioda_free_pe(pe);
 }
 
 static void pnv_pci_release_device(struct pci_dev *pdev)
@@ -3442,7 +3452,17 @@ static void pnv_pci_release_device(struct pci_dev *pdev)
        if (!pdn || pdn->pe_number == IODA_INVALID_PE)
                return;
 
+       /*
+        * PCI hotplug can happen as part of EEH error recovery. The @pdn
+        * isn't removed and added afterwards in this scenario. We should
+        * set the PE number in @pdn to an invalid one. Otherwise, the PE's
+        * device count is decreased on removing devices while failing to
+        * be increased on adding devices. It leads to unbalanced PE's device
+        * count and eventually make normal PCI hotplug path broken.
+        */
        pe = &phb->ioda.pe_array[pdn->pe_number];
+       pdn->pe_number = IODA_INVALID_PE;
+
        WARN_ON(--pe->device_count < 0);
        if (pe->device_count == 0)
                pnv_ioda_release_pe(pe);
index e07a22b..f5f4b3f 100644 (file)
@@ -119,8 +119,8 @@ static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
 {
   [PERF_COUNT_HW_CPU_CYCLES]                   = 0x0076,
   [PERF_COUNT_HW_INSTRUCTIONS]                 = 0x00c0,
-  [PERF_COUNT_HW_CACHE_REFERENCES]             = 0x0080,
-  [PERF_COUNT_HW_CACHE_MISSES]                 = 0x0081,
+  [PERF_COUNT_HW_CACHE_REFERENCES]             = 0x077d,
+  [PERF_COUNT_HW_CACHE_MISSES]                 = 0x077e,
   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]          = 0x00c2,
   [PERF_COUNT_HW_BRANCH_MISSES]                        = 0x00c3,
   [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]      = 0x00d0, /* "Decoder empty" event */
index 2cbde2f..4c9a79b 100644 (file)
@@ -1730,9 +1730,11 @@ static __initconst const u64 knl_hw_cache_extra_regs
  * disabled state if called consecutively.
  *
  * During consecutive calls, the same disable value will be written to related
- * registers, so the PMU state remains unchanged. hw.state in
- * intel_bts_disable_local will remain PERF_HES_STOPPED too in consecutive
- * calls.
+ * registers, so the PMU state remains unchanged.
+ *
+ * intel_bts events don't coexist with intel PMU's BTS events because of
+ * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them
+ * disabled around intel PMU's event batching etc, only inside the PMI handler.
  */
 static void __intel_pmu_disable_all(void)
 {
@@ -1742,8 +1744,6 @@ static void __intel_pmu_disable_all(void)
 
        if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
                intel_pmu_disable_bts();
-       else
-               intel_bts_disable_local();
 
        intel_pmu_pebs_disable_all();
 }
@@ -1771,8 +1771,7 @@ static void __intel_pmu_enable_all(int added, bool pmi)
                        return;
 
                intel_pmu_enable_bts(event->hw.config);
-       } else
-               intel_bts_enable_local();
+       }
 }
 
 static void intel_pmu_enable_all(int added)
@@ -2073,6 +2072,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
         */
        if (!x86_pmu.late_ack)
                apic_write(APIC_LVTPC, APIC_DM_NMI);
+       intel_bts_disable_local();
        __intel_pmu_disable_all();
        handled = intel_pmu_drain_bts_buffer();
        handled += intel_bts_interrupt();
@@ -2172,6 +2172,7 @@ done:
        /* Only restore PMU state when it's active. See x86_pmu_disable(). */
        if (cpuc->enabled)
                __intel_pmu_enable_all(0, true);
+       intel_bts_enable_local();
 
        /*
         * Only unmask the NMI after the overflow counters
index 04bb5fb..861a7d9 100644 (file)
@@ -1074,6 +1074,11 @@ static void pt_addr_filters_fini(struct perf_event *event)
        event->hw.addr_filters = NULL;
 }
 
+static inline bool valid_kernel_ip(unsigned long ip)
+{
+       return virt_addr_valid(ip) && kernel_ip(ip);
+}
+
 static int pt_event_addr_filters_validate(struct list_head *filters)
 {
        struct perf_addr_filter *filter;
@@ -1081,11 +1086,16 @@ static int pt_event_addr_filters_validate(struct list_head *filters)
 
        list_for_each_entry(filter, filters, entry) {
                /* PT doesn't support single address triggers */
-               if (!filter->range)
+               if (!filter->range || !filter->size)
                        return -EOPNOTSUPP;
 
-               if (!filter->inode && !kernel_ip(filter->offset))
-                       return -EINVAL;
+               if (!filter->inode) {
+                       if (!valid_kernel_ip(filter->offset))
+                               return -EINVAL;
+
+                       if (!valid_kernel_ip(filter->offset + filter->size))
+                               return -EINVAL;
+               }
 
                if (++range > pt_cap_get(PT_CAP_num_address_ranges))
                        return -EOPNOTSUPP;
@@ -1111,7 +1121,7 @@ static void pt_event_addr_filters_sync(struct perf_event *event)
                } else {
                        /* apply the offset */
                        msr_a = filter->offset + offs[range];
-                       msr_b = filter->size + msr_a;
+                       msr_b = filter->size + msr_a - 1;
                }
 
                filters->filter[range].msr_a  = msr_a;
index 39b9112..cd94443 100644 (file)
@@ -23,8 +23,8 @@
 static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
        [0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
        [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
-       [2] = { 0x80, 0x00, PERF_COUNT_HW_CACHE_REFERENCES },
-       [3] = { 0x81, 0x00, PERF_COUNT_HW_CACHE_MISSES },
+       [2] = { 0x7d, 0x07, PERF_COUNT_HW_CACHE_REFERENCES },
+       [3] = { 0x7e, 0x07, PERF_COUNT_HW_CACHE_MISSES },
        [4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
        [5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
        [6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
index 3699995..a832426 100644 (file)
@@ -233,6 +233,8 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc,
                return blkcipher_walk_done(desc, walk, -EINVAL);
        }
 
+       bsize = min(walk->walk_blocksize, n);
+
        walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
                         BLKCIPHER_WALK_DIFF);
        if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
@@ -245,7 +247,6 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc,
                }
        }
 
-       bsize = min(walk->walk_blocksize, n);
        n = scatterwalk_clamp(&walk->in, n);
        n = scatterwalk_clamp(&walk->out, n);
 
index 1b01fe9..e3d889b 100644 (file)
@@ -1,8 +1,8 @@
 /*
  * echainiv: Encrypted Chain IV Generator
  *
- * This generator generates an IV based on a sequence number by xoring it
- * with a salt and then encrypting it with the same key as used to encrypt
+ * This generator generates an IV based on a sequence number by multiplying
+ * it with a salt and then encrypting it with the same key as used to encrypt
  * the plain text.  This algorithm requires that the block size be equal
  * to the IV size.  It is mainly useful for CBC.
  *
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
-#include <linux/mm.h>
 #include <linux/module.h>
-#include <linux/percpu.h>
-#include <linux/spinlock.h>
+#include <linux/slab.h>
 #include <linux/string.h>
 
-#define MAX_IV_SIZE 16
-
-static DEFINE_PER_CPU(u32 [MAX_IV_SIZE / sizeof(u32)], echainiv_iv);
-
-/* We don't care if we get preempted and read/write IVs from the next CPU. */
-static void echainiv_read_iv(u8 *dst, unsigned size)
-{
-       u32 *a = (u32 *)dst;
-       u32 __percpu *b = echainiv_iv;
-
-       for (; size >= 4; size -= 4) {
-               *a++ = this_cpu_read(*b);
-               b++;
-       }
-}
-
-static void echainiv_write_iv(const u8 *src, unsigned size)
-{
-       const u32 *a = (const u32 *)src;
-       u32 __percpu *b = echainiv_iv;
-
-       for (; size >= 4; size -= 4) {
-               this_cpu_write(*b, *a);
-               a++;
-               b++;
-       }
-}
-
-static void echainiv_encrypt_complete2(struct aead_request *req, int err)
-{
-       struct aead_request *subreq = aead_request_ctx(req);
-       struct crypto_aead *geniv;
-       unsigned int ivsize;
-
-       if (err == -EINPROGRESS)
-               return;
-
-       if (err)
-               goto out;
-
-       geniv = crypto_aead_reqtfm(req);
-       ivsize = crypto_aead_ivsize(geniv);
-
-       echainiv_write_iv(subreq->iv, ivsize);
-
-       if (req->iv != subreq->iv)
-               memcpy(req->iv, subreq->iv, ivsize);
-
-out:
-       if (req->iv != subreq->iv)
-               kzfree(subreq->iv);
-}
-
-static void echainiv_encrypt_complete(struct crypto_async_request *base,
-                                        int err)
-{
-       struct aead_request *req = base->data;
-
-       echainiv_encrypt_complete2(req, err);
-       aead_request_complete(req, err);
-}
-
 static int echainiv_encrypt(struct aead_request *req)
 {
        struct crypto_aead *geniv = crypto_aead_reqtfm(req);
        struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
        struct aead_request *subreq = aead_request_ctx(req);
-       crypto_completion_t compl;
-       void *data;
+       __be64 nseqno;
+       u64 seqno;
        u8 *info;
        unsigned int ivsize = crypto_aead_ivsize(geniv);
        int err;
@@ -108,8 +44,6 @@ static int echainiv_encrypt(struct aead_request *req)
 
        aead_request_set_tfm(subreq, ctx->child);
 
-       compl = echainiv_encrypt_complete;
-       data = req;
        info = req->iv;
 
        if (req->src != req->dst) {
@@ -127,29 +61,30 @@ static int echainiv_encrypt(struct aead_request *req)
                        return err;
        }
 
-       if (unlikely(!IS_ALIGNED((unsigned long)info,
-                                crypto_aead_alignmask(geniv) + 1))) {
-               info = kmalloc(ivsize, req->base.flags &
-                                      CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
-                                                                 GFP_ATOMIC);
-               if (!info)
-                       return -ENOMEM;
-
-               memcpy(info, req->iv, ivsize);
-       }
-
-       aead_request_set_callback(subreq, req->base.flags, compl, data);
+       aead_request_set_callback(subreq, req->base.flags,
+                                 req->base.complete, req->base.data);
        aead_request_set_crypt(subreq, req->dst, req->dst,
                               req->cryptlen, info);
        aead_request_set_ad(subreq, req->assoclen);
 
-       crypto_xor(info, ctx->salt, ivsize);
+       memcpy(&nseqno, info + ivsize - 8, 8);
+       seqno = be64_to_cpu(nseqno);
+       memset(info, 0, ivsize);
+
        scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
-       echainiv_read_iv(info, ivsize);
 
-       err = crypto_aead_encrypt(subreq);
-       echainiv_encrypt_complete2(req, err);
-       return err;
+       do {
+               u64 a;
+
+               memcpy(&a, ctx->salt + ivsize - 8, 8);
+
+               a |= 1;
+               a *= seqno;
+
+               memcpy(info + ivsize - 8, &a, 8);
+       } while ((ivsize -= 8));
+
+       return crypto_aead_encrypt(subreq);
 }
 
 static int echainiv_decrypt(struct aead_request *req)
@@ -196,8 +131,7 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
        alg = crypto_spawn_aead_alg(spawn);
 
        err = -EINVAL;
-       if (inst->alg.ivsize & (sizeof(u32) - 1) ||
-           inst->alg.ivsize > MAX_IV_SIZE)
+       if (inst->alg.ivsize & (sizeof(u64) - 1) || !inst->alg.ivsize)
                goto free_inst;
 
        inst->alg.encrypt = echainiv_encrypt;
@@ -206,7 +140,6 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
        inst->alg.init = aead_init_geniv;
        inst->alg.exit = aead_exit_geniv;
 
-       inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
        inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
        inst->alg.base.cra_ctxsize += inst->alg.ivsize;
 
index 17995fa..82a081e 100644 (file)
@@ -419,7 +419,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
        struct device *parent = NULL;
        int retval;
 
-       trace_rpm_suspend(dev, rpmflags);
+       trace_rpm_suspend_rcuidle(dev, rpmflags);
 
  repeat:
        retval = rpm_check_suspend_allowed(dev);
@@ -549,7 +549,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
        }
 
  out:
-       trace_rpm_return_int(dev, _THIS_IP_, retval);
+       trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
 
        return retval;
 
index a978381..9b17a66 100644 (file)
@@ -387,7 +387,7 @@ void atmel_hlcdc_crtc_irq(struct drm_crtc *c)
        atmel_hlcdc_crtc_finish_page_flip(drm_crtc_to_atmel_hlcdc_crtc(c));
 }
 
-void atmel_hlcdc_crtc_reset(struct drm_crtc *crtc)
+static void atmel_hlcdc_crtc_reset(struct drm_crtc *crtc)
 {
        struct atmel_hlcdc_crtc_state *state;
 
index 016c191..52c527f 100644 (file)
@@ -320,19 +320,19 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane,
                        u32 *coeff_tab = heo_upscaling_ycoef;
                        u32 max_memsize;
 
-                       if (state->crtc_w < state->src_w)
+                       if (state->crtc_h < state->src_h)
                                coeff_tab = heo_downscaling_ycoef;
                        for (i = 0; i < ARRAY_SIZE(heo_upscaling_ycoef); i++)
                                atmel_hlcdc_layer_update_cfg(&plane->layer,
                                                             33 + i,
                                                             0xffffffff,
                                                             coeff_tab[i]);
-                       factor = ((8 * 256 * state->src_w) - (256 * 4)) /
-                                state->crtc_w;
+                       factor = ((8 * 256 * state->src_h) - (256 * 4)) /
+                                state->crtc_h;
                        factor++;
-                       max_memsize = ((factor * state->crtc_w) + (256 * 4)) /
+                       max_memsize = ((factor * state->crtc_h) + (256 * 4)) /
                                      2048;
-                       if (max_memsize > state->src_w)
+                       if (max_memsize > state->src_h)
                                factor--;
                        factor_reg |= (factor << 16) | 0x80000000;
                }
index 57676f8..a628975 100644 (file)
@@ -1015,6 +1015,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
        return 0;
 }
 
+#if defined(CONFIG_X86) || defined(CONFIG_IA64)
 typedef struct drm_mode_fb_cmd232 {
        u32 fb_id;
        u32 width;
@@ -1071,6 +1072,7 @@ static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,
 
        return 0;
 }
+#endif
 
 static drm_ioctl_compat_t *drm_compat_ioctls[] = {
        [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
@@ -1104,7 +1106,9 @@ static drm_ioctl_compat_t *drm_compat_ioctls[] = {
        [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW32)] = compat_drm_update_draw,
 #endif
        [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank,
+#if defined(CONFIG_X86) || defined(CONFIG_IA64)
        [DRM_IOCTL_NR(DRM_IOCTL_MODE_ADDFB232)] = compat_drm_mode_addfb2,
+#endif
 };
 
 /**
index e016640..40ce841 100644 (file)
@@ -55,11 +55,11 @@ static int check_fb_gem_memory_type(struct drm_device *drm_dev,
        flags = exynos_gem->flags;
 
        /*
-        * without iommu support, not support physically non-continuous memory
-        * for framebuffer.
+        * Physically non-contiguous memory type for framebuffer is not
+        * supported without IOMMU.
         */
        if (IS_NONCONTIG_BUFFER(flags)) {
-               DRM_ERROR("cannot use this gem memory type for fb.\n");
+               DRM_ERROR("Non-contiguous GEM memory is not supported.\n");
                return -EINVAL;
        }
 
index 0525c56..147ef0d 100644 (file)
@@ -1753,32 +1753,6 @@ static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int fimc_suspend(struct device *dev)
-{
-       struct fimc_context *ctx = get_fimc_context(dev);
-
-       DRM_DEBUG_KMS("id[%d]\n", ctx->id);
-
-       if (pm_runtime_suspended(dev))
-               return 0;
-
-       return fimc_clk_ctrl(ctx, false);
-}
-
-static int fimc_resume(struct device *dev)
-{
-       struct fimc_context *ctx = get_fimc_context(dev);
-
-       DRM_DEBUG_KMS("id[%d]\n", ctx->id);
-
-       if (!pm_runtime_suspended(dev))
-               return fimc_clk_ctrl(ctx, true);
-
-       return 0;
-}
-#endif
-
 static int fimc_runtime_suspend(struct device *dev)
 {
        struct fimc_context *ctx = get_fimc_context(dev);
@@ -1799,7 +1773,8 @@ static int fimc_runtime_resume(struct device *dev)
 #endif
 
 static const struct dev_pm_ops fimc_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(fimc_suspend, fimc_resume)
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL)
 };
 
index 4bf00f5..6eca8bb 100644 (file)
@@ -1475,8 +1475,8 @@ static int g2d_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int g2d_suspend(struct device *dev)
+#ifdef CONFIG_PM
+static int g2d_runtime_suspend(struct device *dev)
 {
        struct g2d_data *g2d = dev_get_drvdata(dev);
 
@@ -1490,25 +1490,6 @@ static int g2d_suspend(struct device *dev)
 
        flush_work(&g2d->runqueue_work);
 
-       return 0;
-}
-
-static int g2d_resume(struct device *dev)
-{
-       struct g2d_data *g2d = dev_get_drvdata(dev);
-
-       g2d->suspended = false;
-       g2d_exec_runqueue(g2d);
-
-       return 0;
-}
-#endif
-
-#ifdef CONFIG_PM
-static int g2d_runtime_suspend(struct device *dev)
-{
-       struct g2d_data *g2d = dev_get_drvdata(dev);
-
        clk_disable_unprepare(g2d->gate_clk);
 
        return 0;
@@ -1523,12 +1504,16 @@ static int g2d_runtime_resume(struct device *dev)
        if (ret < 0)
                dev_warn(dev, "failed to enable clock.\n");
 
+       g2d->suspended = false;
+       g2d_exec_runqueue(g2d);
+
        return ret;
 }
 #endif
 
 static const struct dev_pm_ops g2d_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(g2d_suspend, g2d_resume)
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(g2d_runtime_suspend, g2d_runtime_resume, NULL)
 };
 
index 5d20da8..52a9d26 100644 (file)
@@ -1760,34 +1760,7 @@ static int gsc_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int gsc_suspend(struct device *dev)
-{
-       struct gsc_context *ctx = get_gsc_context(dev);
-
-       DRM_DEBUG_KMS("id[%d]\n", ctx->id);
-
-       if (pm_runtime_suspended(dev))
-               return 0;
-
-       return gsc_clk_ctrl(ctx, false);
-}
-
-static int gsc_resume(struct device *dev)
-{
-       struct gsc_context *ctx = get_gsc_context(dev);
-
-       DRM_DEBUG_KMS("id[%d]\n", ctx->id);
-
-       if (!pm_runtime_suspended(dev))
-               return gsc_clk_ctrl(ctx, true);
-
-       return 0;
-}
-#endif
-
-#ifdef CONFIG_PM
-static int gsc_runtime_suspend(struct device *dev)
+static int __maybe_unused gsc_runtime_suspend(struct device *dev)
 {
        struct gsc_context *ctx = get_gsc_context(dev);
 
@@ -1796,7 +1769,7 @@ static int gsc_runtime_suspend(struct device *dev)
        return  gsc_clk_ctrl(ctx, false);
 }
 
-static int gsc_runtime_resume(struct device *dev)
+static int __maybe_unused gsc_runtime_resume(struct device *dev)
 {
        struct gsc_context *ctx = get_gsc_context(dev);
 
@@ -1804,10 +1777,10 @@ static int gsc_runtime_resume(struct device *dev)
 
        return  gsc_clk_ctrl(ctx, true);
 }
-#endif
 
 static const struct dev_pm_ops gsc_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(gsc_suspend, gsc_resume)
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL)
 };
 
index 404367a..6591e40 100644 (file)
@@ -794,29 +794,6 @@ static int rotator_clk_crtl(struct rot_context *rot, bool enable)
        return 0;
 }
 
-
-#ifdef CONFIG_PM_SLEEP
-static int rotator_suspend(struct device *dev)
-{
-       struct rot_context *rot = dev_get_drvdata(dev);
-
-       if (pm_runtime_suspended(dev))
-               return 0;
-
-       return rotator_clk_crtl(rot, false);
-}
-
-static int rotator_resume(struct device *dev)
-{
-       struct rot_context *rot = dev_get_drvdata(dev);
-
-       if (!pm_runtime_suspended(dev))
-               return rotator_clk_crtl(rot, true);
-
-       return 0;
-}
-#endif
-
 static int rotator_runtime_suspend(struct device *dev)
 {
        struct rot_context *rot = dev_get_drvdata(dev);
@@ -833,7 +810,8 @@ static int rotator_runtime_resume(struct device *dev)
 #endif
 
 static const struct dev_pm_ops rotator_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(rotator_suspend, rotator_resume)
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(rotator_runtime_suspend, rotator_runtime_resume,
                                                                        NULL)
 };
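
The four Exynos DRM conversions above share one idiom: the driver keeps only its runtime PM callbacks and routes system sleep through them via pm_runtime_force_suspend()/pm_runtime_force_resume(). A rough, self-contained sketch (driver name and callback bodies are placeholders, not taken from these drivers):

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
	/* gate clocks, drop resources held only while active */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* re-enable clocks, restore hardware state */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	/* System sleep reuses the runtime PM path, so no second
	 * suspend/resume pair has to be kept in sync with it. */
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

With CONFIG_PM disabled the callbacks would otherwise be reported as unused, which is why the gsc change above switches to __maybe_unused instead of an #ifdef CONFIG_PM block.
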
index 95ddd56..5de36d8 100644 (file)
@@ -1281,6 +1281,11 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        intel_runtime_pm_enable(dev_priv);
 
+       /* Everything is in place, we can now relax! */
+       DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
+                driver.name, driver.major, driver.minor, driver.patchlevel,
+                driver.date, pci_name(pdev), dev_priv->drm.primary->index);
+
        intel_runtime_pm_put(dev_priv);
 
        return 0;
index 7a30af7..f38ceff 100644 (file)
@@ -122,8 +122,11 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
        has_full_48bit_ppgtt =
                IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9;
 
-       if (intel_vgpu_active(dev_priv))
-               has_full_ppgtt = false; /* emulation is too hard */
+       if (intel_vgpu_active(dev_priv)) {
+               /* emulation is too hard */
+               has_full_ppgtt = false;
+               has_full_48bit_ppgtt = false;
+       }
 
        if (!has_aliasing_ppgtt)
                return 0;
@@ -158,7 +161,7 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
                return 0;
        }
 
-       if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists)
+       if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists && has_full_ppgtt)
                return has_full_48bit_ppgtt ? 3 : 2;
        else
                return has_aliasing_ppgtt ? 1 : 0;
index f6acb5a..b81cfb3 100644 (file)
@@ -65,9 +65,6 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv)
 
        BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
 
-       if (!IS_HASWELL(dev_priv))
-               return;
-
        magic = __raw_i915_read64(dev_priv, vgtif_reg(magic));
        if (magic != VGT_MAGIC)
                return;
index 47bdf9d..b9e5a63 100644 (file)
@@ -554,7 +554,6 @@ void intel_dvo_init(struct drm_device *dev)
                return;
        }
 
-       drm_encoder_cleanup(&intel_encoder->base);
        kfree(intel_dvo);
        kfree(intel_connector);
 }
index adca262..7acbbbf 100644 (file)
@@ -1047,6 +1047,23 @@ err_out:
        return err;
 }
 
+static int intel_use_opregion_panel_type_callback(const struct dmi_system_id *id)
+{
+       DRM_INFO("Using panel type from OpRegion on %s\n", id->ident);
+       return 1;
+}
+
+static const struct dmi_system_id intel_use_opregion_panel_type[] = {
+       {
+               .callback = intel_use_opregion_panel_type_callback,
+               .ident = "Conrac GmbH IX45GM2",
+               .matches = {DMI_MATCH(DMI_SYS_VENDOR, "Conrac GmbH"),
+                           DMI_MATCH(DMI_PRODUCT_NAME, "IX45GM2"),
+               },
+       },
+       { }
+};
+
 int
 intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
 {
@@ -1072,6 +1089,16 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
                return -ENODEV;
        }
 
+       /*
+        * So far we know that some machines must use it, others must not use it.
+        * There doesn't seem to be any way to determine which way to go, except
+        * via a quirk list :(
+        */
+       if (!dmi_check_system(intel_use_opregion_panel_type)) {
+               DRM_DEBUG_KMS("Ignoring OpRegion panel type (%d)\n", ret - 1);
+               return -ENODEV;
+       }
+
        /*
         * FIXME On Dell XPS 13 9350 the OpRegion panel type (0) gives us
         * low vswing for eDP, whereas the VBT panel type (2) gives us normal
index 53e13c1..2d24813 100644 (file)
@@ -7859,6 +7859,7 @@ static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
        case GEN6_PCODE_ILLEGAL_CMD:
                return -ENXIO;
        case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+       case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
                return -EOVERFLOW;
        case GEN6_PCODE_TIMEOUT:
                return -ETIMEDOUT;
index 2b0d1ba..cf171b4 100644 (file)
@@ -255,14 +255,14 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
        struct drm_i915_private *dev_priv = to_i915(dev);
 
        uint32_t max_sleep_time = 0x1f;
-       /* Lately it was identified that depending on panel idle frame count
-        * calculated at HW can be off by 1. So let's use what came
-        * from VBT + 1.
-        * There are also other cases where panel demands at least 4
-        * but VBT is not being set. To cover these 2 cases lets use
-        * at least 5 when VBT isn't set to be on the safest side.
+       /*
+        * Respect VBT in case it asks for a higher idle_frame value.
+        * Use 6 as the minimum to cover all known cases, including the
+        * off-by-one issue that HW has in some cases, and the cases where
+        * the sink needs 5 or 6 idle patterns to be able to train.
         */
-       uint32_t idle_frames = dev_priv->vbt.psr.idle_frames + 1;
+       uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
        uint32_t val = EDP_PSR_ENABLE;
 
        val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
index 59adcf8..3f6704c 100644 (file)
@@ -144,7 +144,7 @@ static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
        return &vc4->bo_cache.size_list[page_index];
 }
 
-void vc4_bo_cache_purge(struct drm_device *dev)
+static void vc4_bo_cache_purge(struct drm_device *dev)
 {
        struct vc4_dev *vc4 = to_vc4_dev(dev);
 
index 46527e9..2543cf5 100644 (file)
@@ -309,8 +309,14 @@ validate_uniform_address_write(struct vc4_validated_shader_info *validated_shade
         * of uniforms on each side.  However, this scheme is easy to
         * validate so it's all we allow for now.
         */
-
-       if (QPU_GET_FIELD(inst, QPU_SIG) != QPU_SIG_NONE) {
+       switch (QPU_GET_FIELD(inst, QPU_SIG)) {
+       case QPU_SIG_NONE:
+       case QPU_SIG_SCOREBOARD_UNLOCK:
+       case QPU_SIG_COLOR_LOAD:
+       case QPU_SIG_LOAD_TMU0:
+       case QPU_SIG_LOAD_TMU1:
+               break;
+       default:
                DRM_ERROR("uniforms address change must be "
                          "normal math\n");
                return false;
index 9c2e53d..0f21c3a 100644 (file)
@@ -1128,6 +1128,27 @@ void handle_port_mgmt_change_event(struct work_struct *work)
 
                /* Generate GUID changed event */
                if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) {
+                       if (mlx4_is_master(dev->dev)) {
+                               union ib_gid gid;
+                               int err = 0;
+
+                               if (!eqe->event.port_mgmt_change.params.port_info.gid_prefix)
+                                       err = __mlx4_ib_query_gid(&dev->ib_dev, port, 0, &gid, 1);
+                               else
+                                       gid.global.subnet_prefix =
+                                               eqe->event.port_mgmt_change.params.port_info.gid_prefix;
+                               if (err) {
+                                       pr_warn("Could not change QP1 subnet prefix for port %d: query_gid error (%d)\n",
+                                               port, err);
+                               } else {
+                                       pr_debug("Changing QP1 subnet prefix for port %d. old=0x%llx. new=0x%llx\n",
+                                                port,
+                                                (u64)atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix),
+                                                be64_to_cpu(gid.global.subnet_prefix));
+                                       atomic64_set(&dev->sriov.demux[port - 1].subnet_prefix,
+                                                    be64_to_cpu(gid.global.subnet_prefix));
+                               }
+                       }
                        mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
                        /*if master, notify all slaves*/
                        if (mlx4_is_master(dev->dev))
@@ -2202,6 +2223,8 @@ int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
                if (err)
                        goto demux_err;
                dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id;
+               atomic64_set(&dev->sriov.demux[i].subnet_prefix,
+                            be64_to_cpu(gid.global.subnet_prefix));
                err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1,
                                      &dev->sriov.sqps[i]);
                if (err)
index 2af44c2..87ba9bc 100644 (file)
@@ -2202,6 +2202,9 @@ static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
        bool per_port = !!(ibdev->dev->caps.flags2 &
                MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT);
 
+       if (mlx4_is_slave(ibdev->dev))
+               return 0;
+
        for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
                /* i == 1 means we are building port counters */
                if (i && !per_port)
index 8f7ad07..097bfcc 100644 (file)
@@ -489,7 +489,7 @@ static u8 get_leave_state(struct mcast_group *group)
                if (!group->members[i])
                        leave_state |= (1 << i);
 
-       return leave_state & (group->rec.scope_join_state & 7);
+       return leave_state & (group->rec.scope_join_state & 0xf);
 }
 
 static int join_group(struct mcast_group *group, int slave, u8 join_mask)
@@ -564,8 +564,8 @@ static void mlx4_ib_mcg_timeout_handler(struct work_struct *work)
                } else
                        mcg_warn_group(group, "DRIVER BUG\n");
        } else if (group->state == MCAST_LEAVE_SENT) {
-               if (group->rec.scope_join_state & 7)
-                       group->rec.scope_join_state &= 0xf8;
+               if (group->rec.scope_join_state & 0xf)
+                       group->rec.scope_join_state &= 0xf0;
                group->state = MCAST_IDLE;
                mutex_unlock(&group->lock);
                if (release_group(group, 1))
@@ -605,7 +605,7 @@ static int handle_leave_req(struct mcast_group *group, u8 leave_mask,
 static int handle_join_req(struct mcast_group *group, u8 join_mask,
                           struct mcast_req *req)
 {
-       u8 group_join_state = group->rec.scope_join_state & 7;
+       u8 group_join_state = group->rec.scope_join_state & 0xf;
        int ref = 0;
        u16 status;
        struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
@@ -690,8 +690,8 @@ static void mlx4_ib_mcg_work_handler(struct work_struct *work)
                        u8 cur_join_state;
 
                        resp_join_state = ((struct ib_sa_mcmember_data *)
-                                               group->response_sa_mad.data)->scope_join_state & 7;
-                       cur_join_state = group->rec.scope_join_state & 7;
+                                               group->response_sa_mad.data)->scope_join_state & 0xf;
+                       cur_join_state = group->rec.scope_join_state & 0xf;
 
                        if (method == IB_MGMT_METHOD_GET_RESP) {
                                /* successful join */
@@ -710,7 +710,7 @@ process_requests:
                req = list_first_entry(&group->pending_list, struct mcast_req,
                                       group_list);
                sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
-               req_join_state = sa_data->scope_join_state & 0x7;
+               req_join_state = sa_data->scope_join_state & 0xf;
 
                /* For a leave request, we will immediately answer the VF, and
                 * update our internal counters. The actual leave will be sent
index 7c5832e..686ab48 100644 (file)
@@ -448,7 +448,7 @@ struct mlx4_ib_demux_ctx {
        struct workqueue_struct *wq;
        struct workqueue_struct *ud_wq;
        spinlock_t ud_lock;
-       __be64 subnet_prefix;
+       atomic64_t subnet_prefix;
        __be64 guid_cache[128];
        struct mlx4_ib_dev *dev;
        /* the following lock protects both mcg_table and mcg_mgid0_list */
index 768085f..7fb9629 100644 (file)
@@ -2493,24 +2493,27 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
                sqp->ud_header.grh.flow_label    =
                        ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
                sqp->ud_header.grh.hop_limit     = ah->av.ib.hop_limit;
-               if (is_eth)
+               if (is_eth) {
                        memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16);
-               else {
-               if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
-                       /* When multi-function is enabled, the ib_core gid
-                        * indexes don't necessarily match the hw ones, so
-                        * we must use our own cache */
-                       sqp->ud_header.grh.source_gid.global.subnet_prefix =
-                               to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
-                                                      subnet_prefix;
-                       sqp->ud_header.grh.source_gid.global.interface_id =
-                               to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
-                                              guid_cache[ah->av.ib.gid_index];
-               } else
-                       ib_get_cached_gid(ib_dev,
-                                         be32_to_cpu(ah->av.ib.port_pd) >> 24,
-                                         ah->av.ib.gid_index,
-                                         &sqp->ud_header.grh.source_gid, NULL);
+               } else {
+                       if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
+                               /* When multi-function is enabled, the ib_core gid
+                                * indexes don't necessarily match the hw ones, so
+                                * we must use our own cache
+                                */
+                               sqp->ud_header.grh.source_gid.global.subnet_prefix =
+                                       cpu_to_be64(atomic64_read(&(to_mdev(ib_dev)->sriov.
+                                                                   demux[sqp->qp.port - 1].
+                                                                   subnet_prefix)));
+                               sqp->ud_header.grh.source_gid.global.interface_id =
+                                       to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
+                                                      guid_cache[ah->av.ib.gid_index];
+                       } else {
+                               ib_get_cached_gid(ib_dev,
+                                                 be32_to_cpu(ah->av.ib.port_pd) >> 24,
+                                                 ah->av.ib.gid_index,
+                                                 &sqp->ud_header.grh.source_gid, NULL);
+                       }
                }
                memcpy(sqp->ud_header.grh.destination_gid.raw,
                       ah->av.ib.dgid, 16);
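
The subnet prefix cache read here is what the mlx4_ib.h change above turns into an atomic64_t, so the MAD event handler earlier in this series can update it and build_mlx_header() can read it without extra locking. A hedged sketch of the pattern (structure and helper names are illustrative, not from the driver):

#include <linux/atomic.h>
#include <linux/types.h>
#include <asm/byteorder.h>

struct prefix_cache {
	atomic64_t subnet_prefix;	/* kept in CPU byte order */
};

static void prefix_cache_set(struct prefix_cache *pc, __be64 prefix)
{
	atomic64_set(&pc->subnet_prefix, be64_to_cpu(prefix));
}

static __be64 prefix_cache_get(struct prefix_cache *pc)
{
	return cpu_to_be64(atomic64_read(&pc->subnet_prefix));
}
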
index 8150ea3..e19537c 100644 (file)
@@ -288,7 +288,9 @@ __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
 
 static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
 {
-       return !MLX5_CAP_GEN(dev->mdev, ib_virt);
+       if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
+               return !MLX5_CAP_GEN(dev->mdev, ib_virt);
+       return 0;
 }
 
 enum {
@@ -1428,6 +1430,13 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v,
                                             dmac_47_16),
                                ib_spec->eth.val.dst_mac);
 
+               ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+                                            smac_47_16),
+                               ib_spec->eth.mask.src_mac);
+               ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+                                            smac_47_16),
+                               ib_spec->eth.val.src_mac);
+
                if (ib_spec->eth.mask.vlan_tag) {
                        MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
                                 vlan_tag, 1);
index 80c4b6b..46b6497 100644 (file)
@@ -294,7 +294,7 @@ static void __rvt_free_mr(struct rvt_mr *mr)
 {
        rvt_deinit_mregion(&mr->mr);
        rvt_free_lkey(&mr->mr);
-       vfree(mr);
+       kfree(mr);
 }
 
 /**
index 55f0e8f..ddd5927 100644 (file)
@@ -362,15 +362,34 @@ static int __init rxe_module_init(void)
                return err;
        }
 
-       err = rxe_net_init();
+       err = rxe_net_ipv4_init();
        if (err) {
-               pr_err("rxe: unable to init\n");
+               pr_err("rxe: unable to init ipv4 tunnel\n");
                rxe_cache_exit();
-               return err;
+               goto exit;
+       }
+
+       err = rxe_net_ipv6_init();
+       if (err) {
+               pr_err("rxe: unable to init ipv6 tunnel\n");
+               rxe_cache_exit();
+               goto exit;
        }
+
+       err = register_netdevice_notifier(&rxe_net_notifier);
+       if (err) {
+               pr_err("rxe: Failed to register netdev notifier\n");
+               goto exit;
+       }
+
        pr_info("rxe: loaded\n");
 
        return 0;
+
+exit:
+       rxe_release_udp_tunnel(recv_sockets.sk4);
+       rxe_release_udp_tunnel(recv_sockets.sk6);
+       return err;
 }
 
 static void __exit rxe_module_exit(void)
index 36f67de..1c59ef2 100644 (file)
@@ -689,7 +689,14 @@ int rxe_completer(void *arg)
                                        qp->req.need_retry = 1;
                                        rxe_run_task(&qp->req.task, 1);
                                }
+
+                               if (pkt) {
+                                       rxe_drop_ref(pkt->qp);
+                                       kfree_skb(skb);
+                               }
+
                                goto exit;
+
                        } else {
                                wqe->status = IB_WC_RETRY_EXC_ERR;
                                state = COMPST_ERROR;
@@ -716,6 +723,12 @@ int rxe_completer(void *arg)
                case COMPST_ERROR:
                        do_complete(qp, wqe);
                        rxe_qp_error(qp);
+
+                       if (pkt) {
+                               rxe_drop_ref(pkt->qp);
+                               kfree_skb(skb);
+                       }
+
                        goto exit;
                }
        }
index 0b8d2ea..eedf2f1 100644 (file)
@@ -275,9 +275,10 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
        return sock;
 }
 
-static void rxe_release_udp_tunnel(struct socket *sk)
+void rxe_release_udp_tunnel(struct socket *sk)
 {
-       udp_tunnel_sock_release(sk);
+       if (sk)
+               udp_tunnel_sock_release(sk);
 }
 
 static void prepare_udp_hdr(struct sk_buff *skb, __be16 src_port,
@@ -658,51 +659,45 @@ out:
        return NOTIFY_OK;
 }
 
-static struct notifier_block rxe_net_notifier = {
+struct notifier_block rxe_net_notifier = {
        .notifier_call = rxe_notify,
 };
 
-int rxe_net_init(void)
+int rxe_net_ipv4_init(void)
 {
-       int err;
-
        spin_lock_init(&dev_list_lock);
 
-       recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net,
-                       htons(ROCE_V2_UDP_DPORT), true);
-       if (IS_ERR(recv_sockets.sk6)) {
-               recv_sockets.sk6 = NULL;
-               pr_err("rxe: Failed to create IPv6 UDP tunnel\n");
-               return -1;
-       }
-
        recv_sockets.sk4 = rxe_setup_udp_tunnel(&init_net,
-                       htons(ROCE_V2_UDP_DPORT), false);
+                               htons(ROCE_V2_UDP_DPORT), false);
        if (IS_ERR(recv_sockets.sk4)) {
-               rxe_release_udp_tunnel(recv_sockets.sk6);
                recv_sockets.sk4 = NULL;
-               recv_sockets.sk6 = NULL;
                pr_err("rxe: Failed to create IPv4 UDP tunnel\n");
                return -1;
        }
 
-       err = register_netdevice_notifier(&rxe_net_notifier);
-       if (err) {
-               rxe_release_udp_tunnel(recv_sockets.sk6);
-               rxe_release_udp_tunnel(recv_sockets.sk4);
-               pr_err("rxe: Failed to rigister netdev notifier\n");
-       }
-
-       return err;
+       return 0;
 }
 
-void rxe_net_exit(void)
+int rxe_net_ipv6_init(void)
 {
-       if (recv_sockets.sk6)
-               rxe_release_udp_tunnel(recv_sockets.sk6);
+#if IS_ENABLED(CONFIG_IPV6)
 
-       if (recv_sockets.sk4)
-               rxe_release_udp_tunnel(recv_sockets.sk4);
+       spin_lock_init(&dev_list_lock);
 
+       recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net,
+                                               htons(ROCE_V2_UDP_DPORT), true);
+       if (IS_ERR(recv_sockets.sk6)) {
+               recv_sockets.sk6 = NULL;
+               pr_err("rxe: Failed to create IPv6 UDP tunnel\n");
+               return -1;
+       }
+#endif
+       return 0;
+}
+
+void rxe_net_exit(void)
+{
+       rxe_release_udp_tunnel(recv_sockets.sk6);
+       rxe_release_udp_tunnel(recv_sockets.sk4);
        unregister_netdevice_notifier(&rxe_net_notifier);
 }
index 7b06f76..0daf7f0 100644 (file)
@@ -44,10 +44,13 @@ struct rxe_recv_sockets {
 };
 
 extern struct rxe_recv_sockets recv_sockets;
+extern struct notifier_block rxe_net_notifier;
+void rxe_release_udp_tunnel(struct socket *sk);
 
 struct rxe_dev *rxe_net_add(struct net_device *ndev);
 
-int rxe_net_init(void);
+int rxe_net_ipv4_init(void);
+int rxe_net_ipv6_init(void);
 void rxe_net_exit(void);
 
 #endif /* RXE_NET_H */
index 3d464c2..144d2f1 100644 (file)
@@ -312,7 +312,7 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
                 * make a copy of the skb to post to the next qp
                 */
                skb_copy = (mce->qp_list.next != &mcg->qp_list) ?
-                               skb_clone(skb, GFP_KERNEL) : NULL;
+                               skb_clone(skb, GFP_ATOMIC) : NULL;
 
                pkt->qp = qp;
                rxe_add_ref(qp);
index 33b2d9d..13a848a 100644 (file)
@@ -511,24 +511,21 @@ static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
 }
 
 static void update_wqe_state(struct rxe_qp *qp,
-                            struct rxe_send_wqe *wqe,
-                            struct rxe_pkt_info *pkt,
-                            enum wqe_state *prev_state)
+               struct rxe_send_wqe *wqe,
+               struct rxe_pkt_info *pkt)
 {
-       enum wqe_state prev_state_ = wqe->state;
-
        if (pkt->mask & RXE_END_MASK) {
                if (qp_type(qp) == IB_QPT_RC)
                        wqe->state = wqe_state_pending;
        } else {
                wqe->state = wqe_state_processing;
        }
-
-       *prev_state = prev_state_;
 }
 
-static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
-                        struct rxe_pkt_info *pkt, int payload)
+static void update_wqe_psn(struct rxe_qp *qp,
+                          struct rxe_send_wqe *wqe,
+                          struct rxe_pkt_info *pkt,
+                          int payload)
 {
        /* number of packets left to send including current one */
        int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;
@@ -546,9 +543,34 @@ static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
                qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK;
        else
                qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
+}
 
-       qp->req.opcode = pkt->opcode;
+static void save_state(struct rxe_send_wqe *wqe,
+                      struct rxe_qp *qp,
+                      struct rxe_send_wqe *rollback_wqe,
+                      struct rxe_qp *rollback_qp)
+{
+       rollback_wqe->state     = wqe->state;
+       rollback_wqe->first_psn = wqe->first_psn;
+       rollback_wqe->last_psn  = wqe->last_psn;
+       rollback_qp->req.psn    = qp->req.psn;
+}
 
+static void rollback_state(struct rxe_send_wqe *wqe,
+                          struct rxe_qp *qp,
+                          struct rxe_send_wqe *rollback_wqe,
+                          struct rxe_qp *rollback_qp)
+{
+       wqe->state     = rollback_wqe->state;
+       wqe->first_psn = rollback_wqe->first_psn;
+       wqe->last_psn  = rollback_wqe->last_psn;
+       qp->req.psn    = rollback_qp->req.psn;
+}
+
+static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+                        struct rxe_pkt_info *pkt, int payload)
+{
+       qp->req.opcode = pkt->opcode;
 
        if (pkt->mask & RXE_END_MASK)
                qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);
@@ -571,7 +593,8 @@ int rxe_requester(void *arg)
        int mtu;
        int opcode;
        int ret;
-       enum wqe_state prev_state;
+       struct rxe_qp rollback_qp;
+       struct rxe_send_wqe rollback_wqe;
 
 next_wqe:
        if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
@@ -688,13 +711,21 @@ next_wqe:
                goto err;
        }
 
-       update_wqe_state(qp, wqe, &pkt, &prev_state);
+       /*
+        * To prevent a race on wqe access between the requester and the
+        * completer, the wqe's state and psn members need to be set before
+        * calling rxe_xmit_packet().
+        * Otherwise, the completer might initiate an unjustified retry flow.
+        */
+       save_state(wqe, qp, &rollback_wqe, &rollback_qp);
+       update_wqe_state(qp, wqe, &pkt);
+       update_wqe_psn(qp, wqe, &pkt, payload);
        ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb);
        if (ret) {
                qp->need_req_skb = 1;
                kfree_skb(skb);
 
-               wqe->state = prev_state;
+               rollback_state(wqe, qp, &rollback_wqe, &rollback_qp);
 
                if (ret == -EAGAIN) {
                        rxe_run_task(&qp->req.task, 1);
index ebb03b4..3e0f0f2 100644 (file)
@@ -972,11 +972,13 @@ static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
        free_rd_atomic_resource(qp, res);
        rxe_advance_resp_resource(qp);
 
+       memcpy(SKB_TO_PKT(skb), &ack_pkt, sizeof(skb->cb));
+
        res->type = RXE_ATOMIC_MASK;
        res->atomic.skb = skb;
-       res->first_psn = qp->resp.psn;
-       res->last_psn = qp->resp.psn;
-       res->cur_psn = qp->resp.psn;
+       res->first_psn = ack_pkt.psn;
+       res->last_psn  = ack_pkt.psn;
+       res->cur_psn   = ack_pkt.psn;
 
        rc = rxe_xmit_packet(rxe, qp, &ack_pkt, skb_copy);
        if (rc) {
@@ -1116,8 +1118,7 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
                                rc = RESPST_CLEANUP;
                                goto out;
                        }
-                       bth_set_psn(SKB_TO_PKT(skb_copy),
-                                   qp->resp.psn - 1);
+
                        /* Resend the result. */
                        rc = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp,
                                             pkt, skb_copy);
index dc6d241..be11d5d 100644 (file)
@@ -1161,8 +1161,17 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
        }
 
        if (level == IPOIB_FLUSH_LIGHT) {
+               int oper_up;
                ipoib_mark_paths_invalid(dev);
+               /* Mark the IPoIB operation as down to prevent races between
+                * the flush flow, which leaves the MCG, and on-the-fly joins
+                * that can happen during that time. The mcast restart task
+                * should deal with any join requests we missed.
+                */
+               oper_up = test_and_clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
                ipoib_mcast_dev_flush(dev);
+               if (oper_up)
+                       set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
                ipoib_flush_ah(dev);
        }
 
index 112e17c..37f952d 100644 (file)
@@ -176,6 +176,7 @@ static int aic_irq_domain_xlate(struct irq_domain *d,
 {
        struct irq_domain_chip_generic *dgc = d->gc;
        struct irq_chip_generic *gc;
+       unsigned long flags;
        unsigned smr;
        int idx;
        int ret;
@@ -194,11 +195,11 @@ static int aic_irq_domain_xlate(struct irq_domain *d,
 
        gc = dgc->gc[idx];
 
-       irq_gc_lock(gc);
+       irq_gc_lock_irqsave(gc, flags);
        smr = irq_reg_readl(gc, AT91_AIC_SMR(*out_hwirq));
        aic_common_set_priority(intspec[2], &smr);
        irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq));
-       irq_gc_unlock(gc);
+       irq_gc_unlock_irqrestore(gc, flags);
 
        return ret;
 }
index 4f0d068..2a624d8 100644 (file)
@@ -258,6 +258,7 @@ static int aic5_irq_domain_xlate(struct irq_domain *d,
                                 unsigned int *out_type)
 {
        struct irq_chip_generic *bgc = irq_get_domain_generic_chip(d, 0);
+       unsigned long flags;
        unsigned smr;
        int ret;
 
@@ -269,12 +270,12 @@ static int aic5_irq_domain_xlate(struct irq_domain *d,
        if (ret)
                return ret;
 
-       irq_gc_lock(bgc);
+       irq_gc_lock_irqsave(bgc, flags);
        irq_reg_writel(bgc, *out_hwirq, AT91_AIC5_SSR);
        smr = irq_reg_readl(bgc, AT91_AIC5_SMR);
        aic_common_set_priority(intspec[2], &smr);
        irq_reg_writel(bgc, smr, AT91_AIC5_SMR);
-       irq_gc_unlock(bgc);
+       irq_gc_unlock_irqrestore(bgc, flags);
 
        return ret;
 }
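
Both AIC fixes above swap irq_gc_lock()/irq_gc_unlock() for the new irqsave variants so the generic-chip lock is taken with local interrupts disabled, avoiding a potential deadlock when the same lock is also taken from interrupt context. A minimal sketch of the pattern (the helper and register offset are made up):

#include <linux/irq.h>
#include <linux/types.h>

static void aic_sketch_update(struct irq_chip_generic *gc, u32 reg, u32 val)
{
	unsigned long flags;

	/* Disable local interrupts while holding the generic-chip lock,
	 * since the lock may also be taken from interrupt context. */
	irq_gc_lock_irqsave(gc, flags);
	irq_reg_writel(gc, val, reg);
	irq_gc_unlock_irqrestore(gc, flags);
}
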
index 7001824..5719b99 100644 (file)
@@ -70,7 +70,10 @@ static unsigned int cec_get_edid_spa_location(const u8 *edid, unsigned int size)
                                u8 tag = edid[i] >> 5;
                                u8 len = edid[i] & 0x1f;
 
-                               if (tag == 3 && len >= 5 && i + len <= end)
+                               if (tag == 3 && len >= 5 && i + len <= end &&
+                                   edid[i + 1] == 0x03 &&
+                                   edid[i + 2] == 0x0c &&
+                                   edid[i + 3] == 0x00)
                                        return i + 4;
                                i += len + 1;
                        } while (i < end);
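
The tightened test above only trusts a vendor-specific data block (tag 3) as the source of the CEC physical address when it carries the HDMI IEEE OUI 0x000c03, stored least-significant byte first at offsets 1..3. A standalone sketch of just that check (the function name is made up; bounds checking stays with the caller, as in the loop above):

#include <linux/types.h>

static bool sketch_db_is_hdmi_vsdb(const u8 *db)
{
	u8 tag = db[0] >> 5;
	u8 len = db[0] & 0x1f;

	/* tag 3 = vendor specific, payload of at least 5 bytes,
	 * and the HDMI OUI bytes 0x03 0x0c 0x00 */
	return tag == 3 && len >= 5 &&
	       db[1] == 0x03 && db[2] == 0x0c && db[3] == 0x00;
}
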
index efec2d1..4d080da 100644 (file)
@@ -1552,6 +1552,7 @@ int cx23885_417_register(struct cx23885_dev *dev)
        q->mem_ops = &vb2_dma_sg_memops;
        q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
        q->lock = &dev->lock;
+       q->dev = &dev->pci->dev;
 
        err = vb2_queue_init(q);
        if (err < 0)
index db987e5..59a4b5f 100644 (file)
@@ -1238,6 +1238,7 @@ static int dvb_init(struct saa7134_dev *dev)
        q->buf_struct_size = sizeof(struct saa7134_buf);
        q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
        q->lock = &dev->lock;
+       q->dev = &dev->pci->dev;
        ret = vb2_queue_init(q);
        if (ret) {
                vb2_dvb_dealloc_frontends(&dev->frontends);
index ca417a4..791a516 100644 (file)
@@ -295,6 +295,7 @@ static int empress_init(struct saa7134_dev *dev)
        q->buf_struct_size = sizeof(struct saa7134_buf);
        q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
        q->lock = &dev->lock;
+       q->dev = &dev->pci->dev;
        err = vb2_queue_init(q);
        if (err)
                return err;
index f25344b..552b635 100644 (file)
@@ -169,7 +169,7 @@ config VIDEO_MEDIATEK_VPU
 config VIDEO_MEDIATEK_VCODEC
        tristate "Mediatek Video Codec driver"
        depends on MTK_IOMMU || COMPILE_TEST
-       depends on VIDEO_DEV && VIDEO_V4L2
+       depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
        depends on ARCH_MEDIATEK || COMPILE_TEST
        select VIDEOBUF2_DMA_CONTIG
        select V4L2_MEM2MEM_DEV
index 94f0a42..3a8e695 100644 (file)
@@ -23,7 +23,6 @@
 #include <media/v4l2-ioctl.h>
 #include <media/videobuf2-core.h>
 
-#include "mtk_vcodec_util.h"
 
 #define MTK_VCODEC_DRV_NAME    "mtk_vcodec_drv"
 #define MTK_VCODEC_ENC_NAME    "mtk-vcodec-enc"
index 3ed3f2d..2c5719a 100644 (file)
@@ -487,7 +487,6 @@ static int vidioc_venc_s_fmt_out(struct file *file, void *priv,
        struct mtk_q_data *q_data;
        int ret, i;
        struct mtk_video_fmt *fmt;
-       unsigned int pitch_w_div16;
        struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
 
        vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
@@ -530,15 +529,6 @@ static int vidioc_venc_s_fmt_out(struct file *file, void *priv,
        q_data->coded_width = f->fmt.pix_mp.width;
        q_data->coded_height = f->fmt.pix_mp.height;
 
-       pitch_w_div16 = DIV_ROUND_UP(q_data->visible_width, 16);
-       if (pitch_w_div16 % 8 != 0) {
-               /* Adjust returned width/height, so application could correctly
-                * allocate hw required memory
-                */
-               q_data->visible_height += 32;
-               vidioc_try_fmt(f, q_data->fmt);
-       }
-
        q_data->field = f->fmt.pix_mp.field;
        ctx->colorspace = f->fmt.pix_mp.colorspace;
        ctx->ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
@@ -878,7 +868,8 @@ static int mtk_venc_encode_header(void *priv)
 {
        struct mtk_vcodec_ctx *ctx = priv;
        int ret;
-       struct vb2_buffer *dst_buf;
+       struct vb2_buffer *src_buf, *dst_buf;
+       struct vb2_v4l2_buffer *dst_vb2_v4l2, *src_vb2_v4l2;
        struct mtk_vcodec_mem bs_buf;
        struct venc_done_result enc_result;
 
@@ -911,6 +902,15 @@ static int mtk_venc_encode_header(void *priv)
                mtk_v4l2_err("venc_if_encode failed=%d", ret);
                return -EINVAL;
        }
+       src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+       if (src_buf) {
+               src_vb2_v4l2 = to_vb2_v4l2_buffer(src_buf);
+               dst_vb2_v4l2 = to_vb2_v4l2_buffer(dst_buf);
+               dst_buf->timestamp = src_buf->timestamp;
+               dst_vb2_v4l2->timecode = src_vb2_v4l2->timecode;
+       } else {
+               mtk_v4l2_err("No timestamp for the header buffer.");
+       }
 
        ctx->state = MTK_STATE_HEADER;
        dst_buf->planes[0].bytesused = enc_result.bs_size;
@@ -1003,7 +1003,7 @@ static void mtk_venc_worker(struct work_struct *work)
        struct mtk_vcodec_mem bs_buf;
        struct venc_done_result enc_result;
        int ret, i;
-       struct vb2_v4l2_buffer *vb2_v4l2;
+       struct vb2_v4l2_buffer *dst_vb2_v4l2, *src_vb2_v4l2;
 
        /* check dst_buf, dst_buf may be removed in device_run
         * to store the encode header, so we need to check dst_buf and
@@ -1043,9 +1043,14 @@ static void mtk_venc_worker(struct work_struct *work)
        ret = venc_if_encode(ctx, VENC_START_OPT_ENCODE_FRAME,
                             &frm_buf, &bs_buf, &enc_result);
 
-       vb2_v4l2 = container_of(dst_buf, struct vb2_v4l2_buffer, vb2_buf);
+       src_vb2_v4l2 = to_vb2_v4l2_buffer(src_buf);
+       dst_vb2_v4l2 = to_vb2_v4l2_buffer(dst_buf);
+
+       dst_buf->timestamp = src_buf->timestamp;
+       dst_vb2_v4l2->timecode = src_vb2_v4l2->timecode;
+
        if (enc_result.is_key_frm)
-               vb2_v4l2->flags |= V4L2_BUF_FLAG_KEYFRAME;
+               dst_vb2_v4l2->flags |= V4L2_BUF_FLAG_KEYFRAME;
 
        if (ret) {
                v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf),
@@ -1217,7 +1222,7 @@ int mtk_vcodec_enc_ctrls_setup(struct mtk_vcodec_ctx *ctx)
                        0, V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE);
        v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_MPEG_VIDEO_H264_PROFILE,
                        V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
-                       0, V4L2_MPEG_VIDEO_H264_PROFILE_MAIN);
+                       0, V4L2_MPEG_VIDEO_H264_PROFILE_HIGH);
        v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_MPEG_VIDEO_H264_LEVEL,
                        V4L2_MPEG_VIDEO_H264_LEVEL_4_2,
                        0, V4L2_MPEG_VIDEO_H264_LEVEL_4_0);
@@ -1288,5 +1293,10 @@ int mtk_venc_lock(struct mtk_vcodec_ctx *ctx)
 
 void mtk_vcodec_enc_release(struct mtk_vcodec_ctx *ctx)
 {
-       venc_if_deinit(ctx);
+       int ret = venc_if_deinit(ctx);
+
+       if (ret)
+               mtk_v4l2_err("venc_if_deinit failed=%d", ret);
+
+       ctx->state = MTK_STATE_FREE;
 }
index c7806ec..5cd2151 100644 (file)
@@ -218,11 +218,15 @@ static int fops_vcodec_release(struct file *file)
        mtk_v4l2_debug(1, "[%d] encoder", ctx->id);
        mutex_lock(&dev->dev_mutex);
 
+       /*
+        * Call v4l2_m2m_ctx_release to make sure the worker thread is not
+        * running after venc_if_deinit.
+        */
+       v4l2_m2m_ctx_release(ctx->m2m_ctx);
        mtk_vcodec_enc_release(ctx);
        v4l2_fh_del(&ctx->fh);
        v4l2_fh_exit(&ctx->fh);
        v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
-       v4l2_m2m_ctx_release(ctx->m2m_ctx);
 
        list_del_init(&ctx->list);
        dev->num_instances--;
index 33e890f..1213185 100644 (file)
@@ -16,7 +16,6 @@
 #define _MTK_VCODEC_INTR_H_
 
 #define MTK_INST_IRQ_RECEIVED          0x1
-#define MTK_INST_WORK_THREAD_ABORT_DONE        0x2
 
 struct mtk_vcodec_ctx;
 
index 9a60052..63d4be4 100644 (file)
@@ -61,6 +61,8 @@ enum venc_h264_bs_mode {
 
 /*
  * struct venc_h264_vpu_config - Structure for h264 encoder configuration
+ *                               AP-W/R : AP is writer/reader on this item
+ *                               VPU-W/R: VPU is writer/reader on this item
  * @input_fourcc: input fourcc
  * @bitrate: target bitrate (in bps)
  * @pic_w: picture width. Picture size is visible stream resolution, in pixels,
@@ -94,13 +96,13 @@ struct venc_h264_vpu_config {
 
 /*
  * struct venc_h264_vpu_buf - Structure for buffer information
- * @align: buffer alignment (in bytes)
+ *                            AP-W/R : AP is writer/reader on this item
+ *                            VPU-W/R: VPU is writer/reader on this item
  * @iova: IO virtual address
  * @vpua: VPU side memory addr which is used by RC_CODE
  * @size: buffer size (in bytes)
  */
 struct venc_h264_vpu_buf {
-       u32 align;
        u32 iova;
        u32 vpua;
        u32 size;
@@ -108,6 +110,8 @@ struct venc_h264_vpu_buf {
 
 /*
  * struct venc_h264_vsi - Structure for VPU driver control and info share
+ *                        AP-W/R : AP is writer/reader on this item
+ *                        VPU-W/R: VPU is writer/reader on this item
  * This structure is allocated in VPU side and shared to AP side.
  * @config: h264 encoder configuration
  * @work_bufs: working buffer information in VPU side
@@ -150,12 +154,6 @@ struct venc_h264_inst {
        struct mtk_vcodec_ctx *ctx;
 };
 
-static inline void h264_write_reg(struct venc_h264_inst *inst, u32 addr,
-                                 u32 val)
-{
-       writel(val, inst->hw_base + addr);
-}
-
 static inline u32 h264_read_reg(struct venc_h264_inst *inst, u32 addr)
 {
        return readl(inst->hw_base + addr);
@@ -214,6 +212,8 @@ static unsigned int h264_get_level(struct venc_h264_inst *inst,
                return 40;
        case V4L2_MPEG_VIDEO_H264_LEVEL_4_1:
                return 41;
+       case V4L2_MPEG_VIDEO_H264_LEVEL_4_2:
+               return 42;
        default:
                mtk_vcodec_debug(inst, "unsupported level %d", level);
                return 31;
index 60bbcd2..6d97584 100644 (file)
@@ -56,6 +56,8 @@ enum venc_vp8_vpu_work_buf {
 
 /*
  * struct venc_vp8_vpu_config - Structure for vp8 encoder configuration
+ *                              AP-W/R : AP is writer/reader on this item
+ *                              VPU-W/R: VPU is writer/reader on this item
  * @input_fourcc: input fourcc
  * @bitrate: target bitrate (in bps)
  * @pic_w: picture width. Picture size is visible stream resolution, in pixels,
@@ -83,14 +85,14 @@ struct venc_vp8_vpu_config {
 };
 
 /*
- * struct venc_vp8_vpu_buf -Structure for buffer information
- * @align: buffer alignment (in bytes)
+ * struct venc_vp8_vpu_buf - Structure for buffer information
+ *                           AP-W/R : AP is writer/reader on this item
+ *                           VPU-W/R: VPU is writer/reader on this item
  * @iova: IO virtual address
  * @vpua: VPU side memory addr which is used by RC_CODE
  * @size: buffer size (in bytes)
  */
 struct venc_vp8_vpu_buf {
-       u32 align;
        u32 iova;
        u32 vpua;
        u32 size;
@@ -98,6 +100,8 @@ struct venc_vp8_vpu_buf {
 
 /*
  * struct venc_vp8_vsi - Structure for VPU driver control and info share
+ *                       AP-W/R : AP is writer/reader on this item
+ *                       VPU-W/R: VPU is writer/reader on this item
  * This structure is allocated in VPU side and shared to AP side.
  * @config: vp8 encoder configuration
  * @work_bufs: working buffer information in VPU side
@@ -138,12 +142,6 @@ struct venc_vp8_inst {
        struct mtk_vcodec_ctx *ctx;
 };
 
-static inline void vp8_enc_write_reg(struct venc_vp8_inst *inst, u32 addr,
-                                    u32 val)
-{
-       writel(val, inst->hw_base + addr);
-}
-
 static inline u32 vp8_enc_read_reg(struct venc_vp8_inst *inst, u32 addr)
 {
        return readl(inst->hw_base + addr);
index 6a7bcc3..bc50c69 100644 (file)
@@ -99,10 +99,16 @@ EXPORT_SYMBOL_GPL(rcar_fcp_put);
  */
 int rcar_fcp_enable(struct rcar_fcp_device *fcp)
 {
+       int error;
+
        if (!fcp)
                return 0;
 
-       return pm_runtime_get_sync(fcp->dev);
+       error = pm_runtime_get_sync(fcp->dev);
+       if (error < 0)
+               return error;
+
+       return 0;
 }
 EXPORT_SYMBOL_GPL(rcar_fcp_enable);
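
The extra check above exists because pm_runtime_get_sync() returns a negative errno on failure but 0 or 1 on success (1 when the device was already active), so its raw return value must not be passed back as an error code. A sketch of the convention:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int sketch_enable(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0)
		return ret;	/* real error */

	return 0;		/* 0 and 1 both mean the device is powered */
}
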
 
index f23d65e..be3c49f 100644 (file)
@@ -1016,14 +1016,16 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
 
                /* Only reconfigure if we have a different burst size */
                if (*bp != burst) {
-                       struct dma_slave_config cfg;
-
-                       cfg.src_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
-                       cfg.dst_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
-                       cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
-                       cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
-                       cfg.src_maxburst = burst;
-                       cfg.dst_maxburst = burst;
+                       struct dma_slave_config cfg = {
+                               .src_addr = host->phys_base +
+                                           OMAP_MMC_REG(host, DATA),
+                               .dst_addr = host->phys_base +
+                                           OMAP_MMC_REG(host, DATA),
+                               .src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
+                               .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
+                               .src_maxburst = burst,
+                               .dst_maxburst = burst,
+                       };
 
                        if (dmaengine_slave_config(c, &cfg))
                                goto use_pio;
index 24ebc9a..5f2f24a 100644 (file)
@@ -1409,11 +1409,18 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
 static int omap_hsmmc_setup_dma_transfer(struct omap_hsmmc_host *host,
                                        struct mmc_request *req)
 {
-       struct dma_slave_config cfg;
        struct dma_async_tx_descriptor *tx;
        int ret = 0, i;
        struct mmc_data *data = req->data;
        struct dma_chan *chan;
+       struct dma_slave_config cfg = {
+               .src_addr = host->mapbase + OMAP_HSMMC_DATA,
+               .dst_addr = host->mapbase + OMAP_HSMMC_DATA,
+               .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+               .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+               .src_maxburst = data->blksz / 4,
+               .dst_maxburst = data->blksz / 4,
+       };
 
        /* Sanity check: all the SG entries must be aligned by block size. */
        for (i = 0; i < data->sg_len; i++) {
@@ -1433,13 +1440,6 @@ static int omap_hsmmc_setup_dma_transfer(struct omap_hsmmc_host *host,
 
        chan = omap_hsmmc_get_dma_chan(host, data);
 
-       cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA;
-       cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA;
-       cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-       cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-       cfg.src_maxburst = data->blksz / 4;
-       cfg.dst_maxburst = data->blksz / 4;
-
        ret = dmaengine_slave_config(chan, &cfg);
        if (ret)
                return ret;
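
Both OMAP MMC hunks above switch to a designated initializer so every dma_slave_config field that matters is set in one place and everything else is zero-initialized, instead of handing dmaengine_slave_config() a partially uninitialized struct. A reduced sketch (the FIFO address and burst size are placeholders):

#include <linux/dmaengine.h>

static int sketch_config_chan(struct dma_chan *chan, dma_addr_t fifo)
{
	struct dma_slave_config cfg = {
		.src_addr = fifo,
		.dst_addr = fifo,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = 16,
		.dst_maxburst = 16,
	};

	/* Fields not named above (direction, etc.) are implicitly zero. */
	return dmaengine_slave_config(chan, &cfg);
}
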
index c95ba83..ed92ce7 100644 (file)
@@ -28,6 +28,7 @@
 
 struct st_mmc_platform_data {
        struct  reset_control *rstc;
+       struct  clk *icnclk;
        void __iomem *top_ioaddr;
 };
 
@@ -353,7 +354,7 @@ static int sdhci_st_probe(struct platform_device *pdev)
        struct sdhci_host *host;
        struct st_mmc_platform_data *pdata;
        struct sdhci_pltfm_host *pltfm_host;
-       struct clk *clk;
+       struct clk *clk, *icnclk;
        int ret = 0;
        u16 host_version;
        struct resource *res;
@@ -365,6 +366,11 @@ static int sdhci_st_probe(struct platform_device *pdev)
                return PTR_ERR(clk);
        }
 
+       /* ICN clock isn't compulsory, but use it if it's provided. */
+       icnclk = devm_clk_get(&pdev->dev, "icn");
+       if (IS_ERR(icnclk))
+               icnclk = NULL;
+
        rstc = devm_reset_control_get(&pdev->dev, NULL);
        if (IS_ERR(rstc))
                rstc = NULL;
@@ -389,6 +395,7 @@ static int sdhci_st_probe(struct platform_device *pdev)
        }
 
        clk_prepare_enable(clk);
+       clk_prepare_enable(icnclk);
 
        /* Configure the FlashSS Top registers for setting eMMC TX/RX delay */
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -400,6 +407,7 @@ static int sdhci_st_probe(struct platform_device *pdev)
        }
 
        pltfm_host->clk = clk;
+       pdata->icnclk = icnclk;
 
        /* Configure the Arasan HC inside the flashSS */
        st_mmcss_cconfig(np, host);
@@ -422,6 +430,7 @@ static int sdhci_st_probe(struct platform_device *pdev)
        return 0;
 
 err_out:
+       clk_disable_unprepare(icnclk);
        clk_disable_unprepare(clk);
 err_of:
        sdhci_pltfm_free(pdev);
@@ -442,6 +451,8 @@ static int sdhci_st_remove(struct platform_device *pdev)
 
        ret = sdhci_pltfm_unregister(pdev);
 
+       clk_disable_unprepare(pdata->icnclk);
+
        if (rstc)
                reset_control_assert(rstc);
 
@@ -462,6 +473,7 @@ static int sdhci_st_suspend(struct device *dev)
        if (pdata->rstc)
                reset_control_assert(pdata->rstc);
 
+       clk_disable_unprepare(pdata->icnclk);
        clk_disable_unprepare(pltfm_host->clk);
 out:
        return ret;
@@ -475,6 +487,7 @@ static int sdhci_st_resume(struct device *dev)
        struct device_node *np = dev->of_node;
 
        clk_prepare_enable(pltfm_host->clk);
+       clk_prepare_enable(pdata->icnclk);
 
        if (pdata->rstc)
                reset_control_deassert(pdata->rstc);
index 41c0fc9..16f7cad 100644 (file)
@@ -1268,11 +1268,10 @@ static int __maybe_unused flexcan_suspend(struct device *device)
        struct flexcan_priv *priv = netdev_priv(dev);
        int err;
 
-       err = flexcan_chip_disable(priv);
-       if (err)
-               return err;
-
        if (netif_running(dev)) {
+               err = flexcan_chip_disable(priv);
+               if (err)
+                       return err;
                netif_stop_queue(dev);
                netif_device_detach(dev);
        }
@@ -1285,13 +1284,17 @@ static int __maybe_unused flexcan_resume(struct device *device)
 {
        struct net_device *dev = dev_get_drvdata(device);
        struct flexcan_priv *priv = netdev_priv(dev);
+       int err;
 
        priv->can.state = CAN_STATE_ERROR_ACTIVE;
        if (netif_running(dev)) {
                netif_device_attach(dev);
                netif_start_queue(dev);
+               err = flexcan_chip_enable(priv);
+               if (err)
+                       return err;
        }
-       return flexcan_chip_enable(priv);
+       return 0;
 }
 
 static SIMPLE_DEV_PM_OPS(flexcan_pm_ops, flexcan_suspend, flexcan_resume);
index 2d1d22e..368bb07 100644 (file)
 #define IFI_CANFD_TIME_SET_TIMEA_4_12_6_6      BIT(15)
 
 #define IFI_CANFD_TDELAY                       0x1c
+#define IFI_CANFD_TDELAY_DEFAULT               0xb
+#define IFI_CANFD_TDELAY_MASK                  0x3fff
+#define IFI_CANFD_TDELAY_ABS                   BIT(14)
+#define IFI_CANFD_TDELAY_EN                    BIT(15)
 
 #define IFI_CANFD_ERROR                                0x20
 #define IFI_CANFD_ERROR_TX_OFFSET              0
@@ -641,7 +645,7 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev)
        struct ifi_canfd_priv *priv = netdev_priv(ndev);
        const struct can_bittiming *bt = &priv->can.bittiming;
        const struct can_bittiming *dbt = &priv->can.data_bittiming;
-       u16 brp, sjw, tseg1, tseg2;
+       u16 brp, sjw, tseg1, tseg2, tdc;
 
        /* Configure bit timing */
        brp = bt->brp - 2;
@@ -664,6 +668,11 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev)
               (brp << IFI_CANFD_TIME_PRESCALE_OFF) |
               (sjw << IFI_CANFD_TIME_SJW_OFF_7_9_8_8),
               priv->base + IFI_CANFD_FTIME);
+
+       /* Configure transmitter delay */
+       tdc = (dbt->brp * (dbt->phase_seg1 + 1)) & IFI_CANFD_TDELAY_MASK;
+       writel(IFI_CANFD_TDELAY_EN | IFI_CANFD_TDELAY_ABS | tdc,
+              priv->base + IFI_CANFD_TDELAY);
 }
 
 static void ifi_canfd_set_filter(struct net_device *ndev, const u32 id,
index 8fc3f3c..505ceaf 100644 (file)
@@ -6356,10 +6356,6 @@ bnx2_open(struct net_device *dev)
        struct bnx2 *bp = netdev_priv(dev);
        int rc;
 
-       rc = bnx2_request_firmware(bp);
-       if (rc < 0)
-               goto out;
-
        netif_carrier_off(dev);
 
        bnx2_disable_int(bp);
@@ -6428,7 +6424,6 @@ open_err:
        bnx2_free_irq(bp);
        bnx2_free_mem(bp);
        bnx2_del_napi(bp);
-       bnx2_release_firmware(bp);
        goto out;
 }
 
@@ -8575,6 +8570,12 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        pci_set_drvdata(pdev, dev);
 
+       rc = bnx2_request_firmware(bp);
+       if (rc < 0)
+               goto error;
+
+
+       bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
        memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
 
        dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
@@ -8607,6 +8608,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        return 0;
 
 error:
+       bnx2_release_firmware(bp);
        pci_iounmap(pdev, bp->regview);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
index 0e4fdc3..31f61a7 100644 (file)
 #define BNAD_NUM_TXF_COUNTERS 12
 #define BNAD_NUM_RXF_COUNTERS 10
 #define BNAD_NUM_CQ_COUNTERS (3 + 5)
-#define BNAD_NUM_RXQ_COUNTERS 6
+#define BNAD_NUM_RXQ_COUNTERS 7
 #define BNAD_NUM_TXQ_COUNTERS 5
 
-#define BNAD_ETHTOOL_STATS_NUM                                         \
-       (sizeof(struct rtnl_link_stats64) / sizeof(u64) +       \
-       sizeof(struct bnad_drv_stats) / sizeof(u64) +           \
-       offsetof(struct bfi_enet_stats, rxf_stats[0]) / sizeof(u64))
-
-static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
+static const char *bnad_net_stats_strings[] = {
        "rx_packets",
        "tx_packets",
        "rx_bytes",
@@ -50,22 +45,10 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
        "tx_dropped",
        "multicast",
        "collisions",
-
        "rx_length_errors",
-       "rx_over_errors",
        "rx_crc_errors",
        "rx_frame_errors",
-       "rx_fifo_errors",
-       "rx_missed_errors",
-
-       "tx_aborted_errors",
-       "tx_carrier_errors",
        "tx_fifo_errors",
-       "tx_heartbeat_errors",
-       "tx_window_errors",
-
-       "rx_compressed",
-       "tx_compressed",
 
        "netif_queue_stop",
        "netif_queue_wakeup",
@@ -254,6 +237,8 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
        "fc_tx_fid_parity_errors",
 };
 
+#define BNAD_ETHTOOL_STATS_NUM ARRAY_SIZE(bnad_net_stats_strings)
+
 static int
 bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
 {
@@ -658,6 +643,8 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
                                string += ETH_GSTRING_LEN;
                                sprintf(string, "rxq%d_allocbuf_failed", q_num);
                                string += ETH_GSTRING_LEN;
+                               sprintf(string, "rxq%d_mapbuf_failed", q_num);
+                               string += ETH_GSTRING_LEN;
                                sprintf(string, "rxq%d_producer_index", q_num);
                                string += ETH_GSTRING_LEN;
                                sprintf(string, "rxq%d_consumer_index", q_num);
@@ -678,6 +665,9 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
                                        sprintf(string, "rxq%d_allocbuf_failed",
                                                                q_num);
                                        string += ETH_GSTRING_LEN;
+                                       sprintf(string, "rxq%d_mapbuf_failed",
+                                               q_num);
+                                       string += ETH_GSTRING_LEN;
                                        sprintf(string, "rxq%d_producer_index",
                                                                q_num);
                                        string += ETH_GSTRING_LEN;
@@ -854,9 +844,9 @@ bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
                       u64 *buf)
 {
        struct bnad *bnad = netdev_priv(netdev);
-       int i, j, bi;
+       int i, j, bi = 0;
        unsigned long flags;
-       struct rtnl_link_stats64 *net_stats64;
+       struct rtnl_link_stats64 net_stats64;
        u64 *stats64;
        u32 bmap;
 
@@ -871,14 +861,25 @@ bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
         * under the same lock
         */
        spin_lock_irqsave(&bnad->bna_lock, flags);
-       bi = 0;
-       memset(buf, 0, stats->n_stats * sizeof(u64));
-
-       net_stats64 = (struct rtnl_link_stats64 *)buf;
-       bnad_netdev_qstats_fill(bnad, net_stats64);
-       bnad_netdev_hwstats_fill(bnad, net_stats64);
 
-       bi = sizeof(*net_stats64) / sizeof(u64);
+       memset(&net_stats64, 0, sizeof(net_stats64));
+       bnad_netdev_qstats_fill(bnad, &net_stats64);
+       bnad_netdev_hwstats_fill(bnad, &net_stats64);
+
+       buf[bi++] = net_stats64.rx_packets;
+       buf[bi++] = net_stats64.tx_packets;
+       buf[bi++] = net_stats64.rx_bytes;
+       buf[bi++] = net_stats64.tx_bytes;
+       buf[bi++] = net_stats64.rx_errors;
+       buf[bi++] = net_stats64.tx_errors;
+       buf[bi++] = net_stats64.rx_dropped;
+       buf[bi++] = net_stats64.tx_dropped;
+       buf[bi++] = net_stats64.multicast;
+       buf[bi++] = net_stats64.collisions;
+       buf[bi++] = net_stats64.rx_length_errors;
+       buf[bi++] = net_stats64.rx_crc_errors;
+       buf[bi++] = net_stats64.rx_frame_errors;
+       buf[bi++] = net_stats64.tx_fifo_errors;
 
        /* Get netif_queue_stopped from stack */
        bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev);
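
Deriving BNAD_ETHTOOL_STATS_NUM from ARRAY_SIZE() of the string table, as the
hunks above do, keeps the advertised count in sync when rows such as
"rxq%d_mapbuf_failed" are added. The idiom in isolation, as a self-contained
sketch:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const char *stats_strings[] = {
        "rx_packets",
        "tx_packets",
        "rx_bytes",
        "tx_bytes",
        "rxq0_mapbuf_failed",   /* adding a row automatically bumps the count */
    };

    #define STATS_NUM ARRAY_SIZE(stats_strings)

    int main(void)
    {
        printf("exporting %zu stats\n", STATS_NUM);
        return 0;
    }
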
index 2e2aa9f..edd2338 100644 (file)
@@ -419,8 +419,8 @@ struct link_config {
        unsigned short supported;        /* link capabilities */
        unsigned short advertising;      /* advertised capabilities */
        unsigned short lp_advertising;   /* peer advertised capabilities */
-       unsigned short requested_speed;  /* speed user has requested */
-       unsigned short speed;            /* actual link speed */
+       unsigned int   requested_speed;  /* speed user has requested */
+       unsigned int   speed;            /* actual link speed */
        unsigned char  requested_fc;     /* flow control user has requested */
        unsigned char  fc;               /* actual link flow control */
        unsigned char  autoneg;          /* autonegotiating? */
index c762a8c..3ceafb5 100644 (file)
@@ -4305,10 +4305,17 @@ static const struct pci_error_handlers cxgb4_eeh = {
        .resume         = eeh_resume,
 };
 
+/* Return true if the Link Configuration supports "High Speeds" (those greater
+ * than 1Gb/s).
+ */
 static inline bool is_x_10g_port(const struct link_config *lc)
 {
-       return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
-              (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
+       unsigned int speeds, high_speeds;
+
+       speeds = FW_PORT_CAP_SPEED_V(FW_PORT_CAP_SPEED_G(lc->supported));
+       high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G);
+
+       return high_speeds != 0;
 }
 
 static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
@@ -4756,8 +4763,12 @@ static void print_port_info(const struct net_device *dev)
                bufp += sprintf(bufp, "1000/");
        if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
                bufp += sprintf(bufp, "10G/");
+       if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G)
+               bufp += sprintf(bufp, "25G/");
        if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
                bufp += sprintf(bufp, "40G/");
+       if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100G)
+               bufp += sprintf(bufp, "100G/");
        if (bufp != buf)
                --bufp;
        sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
index dc92c80..660204b 100644 (file)
@@ -3627,7 +3627,8 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
 }
 
 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
-                    FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
+                    FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_25G | \
+                    FW_PORT_CAP_SPEED_40G | FW_PORT_CAP_SPEED_100G | \
                     FW_PORT_CAP_ANEG)
 
 /**
@@ -7196,8 +7197,12 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
                speed = 1000;
        else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
                speed = 10000;
+       else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
+               speed = 25000;
        else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
                speed = 40000;
+       else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
+               speed = 100000;
 
        lc = &pi->link_cfg;
 
index a89b307..30507d4 100644 (file)
@@ -2265,6 +2265,12 @@ enum fw_port_cap {
        FW_PORT_CAP_802_3_ASM_DIR       = 0x8000,
 };
 
+#define FW_PORT_CAP_SPEED_S     0
+#define FW_PORT_CAP_SPEED_M     0x3f
+#define FW_PORT_CAP_SPEED_V(x)  ((x) << FW_PORT_CAP_SPEED_S)
+#define FW_PORT_CAP_SPEED_G(x) \
+       (((x) >> FW_PORT_CAP_SPEED_S) & FW_PORT_CAP_SPEED_M)
+
 enum fw_port_mdi {
        FW_PORT_CAP_MDI_UNCHANGED,
        FW_PORT_CAP_MDI_AUTO,
index 8ee5414..17a2bbc 100644 (file)
@@ -108,8 +108,8 @@ struct link_config {
        unsigned int   supported;        /* link capabilities */
        unsigned int   advertising;      /* advertised capabilities */
        unsigned short lp_advertising;   /* peer advertised capabilities */
-       unsigned short requested_speed;  /* speed user has requested */
-       unsigned short speed;            /* actual link speed */
+       unsigned int   requested_speed;  /* speed user has requested */
+       unsigned int   speed;            /* actual link speed */
        unsigned char  requested_fc;     /* flow control user has requested */
        unsigned char  fc;               /* actual link flow control */
        unsigned char  autoneg;          /* autonegotiating? */
@@ -271,10 +271,17 @@ static inline bool is_10g_port(const struct link_config *lc)
        return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
 }
 
+/* Return true if the Link Configuration supports "High Speeds" (those greater
+ * than 1Gb/s).
+ */
 static inline bool is_x_10g_port(const struct link_config *lc)
 {
-       return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
-               (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
+       unsigned int speeds, high_speeds;
+
+       speeds = FW_PORT_CAP_SPEED_V(FW_PORT_CAP_SPEED_G(lc->supported));
+       high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G);
+
+       return high_speeds != 0;
 }
 
 static inline unsigned int core_ticks_per_usec(const struct adapter *adapter)
index 427bfa7..b5622b1 100644 (file)
@@ -314,8 +314,9 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
 }
 
 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
-                    FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
-                    FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
+                    FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_25G | \
+                    FW_PORT_CAP_SPEED_40G | FW_PORT_CAP_SPEED_100G | \
+                    FW_PORT_CAP_ANEG)
 
 /**
  *     init_link_config - initialize a link's SW state
@@ -1712,8 +1713,12 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
                        speed = 1000;
                else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
                        speed = 10000;
+               else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
+                       speed = 25000;
                else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
                        speed = 40000;
+               else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
+                       speed = 100000;
 
                /*
                 * Scan all of our "ports" (Virtual Interfaces) looking for
index 4c9771d..7af09cb 100644 (file)
@@ -977,7 +977,37 @@ static void emac_set_multicast_list(struct net_device *ndev)
                dev->mcast_pending = 1;
                return;
        }
+
+       mutex_lock(&dev->link_lock);
        __emac_set_multicast_list(dev);
+       mutex_unlock(&dev->link_lock);
+}
+
+static int emac_set_mac_address(struct net_device *ndev, void *sa)
+{
+       struct emac_instance *dev = netdev_priv(ndev);
+       struct sockaddr *addr = sa;
+       struct emac_regs __iomem *p = dev->emacp;
+
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+
+       mutex_lock(&dev->link_lock);
+
+       memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+
+       emac_rx_disable(dev);
+       emac_tx_disable(dev);
+       out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
+       out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
+               (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
+               ndev->dev_addr[5]);
+       emac_tx_enable(dev);
+       emac_rx_enable(dev);
+
+       mutex_unlock(&dev->link_lock);
+
+       return 0;
 }
 
 static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
@@ -2686,7 +2716,7 @@ static const struct net_device_ops emac_netdev_ops = {
        .ndo_do_ioctl           = emac_ioctl,
        .ndo_tx_timeout         = emac_tx_timeout,
        .ndo_validate_addr      = eth_validate_addr,
-       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_set_mac_address    = emac_set_mac_address,
        .ndo_start_xmit         = emac_start_xmit,
        .ndo_change_mtu         = eth_change_mtu,
 };
@@ -2699,7 +2729,7 @@ static const struct net_device_ops emac_gige_netdev_ops = {
        .ndo_do_ioctl           = emac_ioctl,
        .ndo_tx_timeout         = emac_tx_timeout,
        .ndo_validate_addr      = eth_validate_addr,
-       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_set_mac_address    = emac_set_mac_address,
        .ndo_start_xmit         = emac_start_xmit_sg,
        .ndo_change_mtu         = emac_change_mtu,
 };
index d919915..3743af8 100644 (file)
@@ -1923,6 +1923,7 @@ const struct of_device_id of_mtk_match[] = {
        { .compatible = "mediatek,mt7623-eth" },
        {},
 };
+MODULE_DEVICE_TABLE(of, of_mtk_match);
 
 static struct platform_driver mtk_driver = {
        .probe = mtk_probe,
index f613977..cf8f8a7 100644 (file)
@@ -1305,8 +1305,8 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
        return 0;
 
 err_out_unmap:
-       while (i >= 0)
-               mlx4_free_eq(dev, &priv->eq_table.eq[i--]);
+       while (i > 0)
+               mlx4_free_eq(dev, &priv->eq_table.eq[--i]);
 #ifdef CONFIG_RFS_ACCEL
        for (i = 1; i <= dev->caps.num_ports; i++) {
                if (mlx4_priv(dev)->port[i].rmap) {
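
The one-line mlx4 fix above changes the unwind loop so that only event queues
0..i-1 (the ones actually created before the failure at index i) are freed, and
the failing slot itself is left untouched. The same unwind pattern in a
standalone form:

    #include <stdio.h>

    #define N 4

    static void release(int idx)
    {
        printf("releasing %d\n", idx);
    }

    int main(void)
    {
        int i;

        /* pretend setup succeeded for indices 0 and 1 and failed at i == 2 */
        for (i = 0; i < N; i++)
            if (i == 2)
                break;

        /* unwind only what was set up: releases 1, then 0, never index 2 */
        while (i > 0)
            release(--i);

        return 0;
    }
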
index 75dd2e3..7183ac4 100644 (file)
@@ -2970,6 +2970,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
                mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
                device_remove_file(&info->dev->persist->pdev->dev,
                                   &info->port_attr);
+               devlink_port_unregister(&info->devlink_port);
                info->port = -1;
        }
 
@@ -2984,6 +2985,8 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
        device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
        device_remove_file(&info->dev->persist->pdev->dev,
                           &info->port_mtu_attr);
+       devlink_port_unregister(&info->devlink_port);
+
 #ifdef CONFIG_RFS_ACCEL
        free_irq_cpu_rmap(info->rmap);
        info->rmap = NULL;
index 8b78f15..b247949 100644 (file)
@@ -1554,6 +1554,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
 
 abort:
        esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
+       esw->mode = SRIOV_NONE;
        return err;
 }
 
index 3dc83a9..7de40e6 100644 (file)
@@ -446,7 +446,7 @@ out:
 
 static int esw_offloads_start(struct mlx5_eswitch *esw)
 {
-       int err, num_vfs = esw->dev->priv.sriov.num_vfs;
+       int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
 
        if (esw->mode != SRIOV_LEGACY) {
                esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
@@ -455,8 +455,12 @@ static int esw_offloads_start(struct mlx5_eswitch *esw)
 
        mlx5_eswitch_disable_sriov(esw);
        err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
-       if (err)
-               esw_warn(esw->dev, "Failed set eswitch to offloads, err %d\n", err);
+       if (err) {
+               esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
+               err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
+               if (err1)
+                       esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
+       }
        return err;
 }
 
@@ -508,12 +512,16 @@ create_ft_err:
 
 static int esw_offloads_stop(struct mlx5_eswitch *esw)
 {
-       int err, num_vfs = esw->dev->priv.sriov.num_vfs;
+       int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
 
        mlx5_eswitch_disable_sriov(esw);
        err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
-       if (err)
-               esw_warn(esw->dev, "Failed set eswitch legacy mode. err %d\n", err);
+       if (err) {
+               esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
+               err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
+               if (err1)
+                       esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err1);
+       }
 
        return err;
 }
index 9134010..287ade1 100644 (file)
@@ -425,11 +425,11 @@ struct mlx5_cmd_fc_bulk *
 mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u16 id, int num)
 {
        struct mlx5_cmd_fc_bulk *b;
-       int outlen = sizeof(*b) +
+       int outlen =
                MLX5_ST_SZ_BYTES(query_flow_counter_out) +
                MLX5_ST_SZ_BYTES(traffic_counter) * num;
 
-       b = kzalloc(outlen, GFP_KERNEL);
+       b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL);
        if (!b)
                return NULL;
 
index 252e492..39dadfc 100644 (file)
@@ -2044,12 +2044,16 @@ static int nfp_net_netdev_open(struct net_device *netdev)
 
        nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings),
                               GFP_KERNEL);
-       if (!nn->rx_rings)
+       if (!nn->rx_rings) {
+               err = -ENOMEM;
                goto err_free_lsc;
+       }
        nn->tx_rings = kcalloc(nn->num_tx_rings, sizeof(*nn->tx_rings),
                               GFP_KERNEL);
-       if (!nn->tx_rings)
+       if (!nn->tx_rings) {
+               err = -ENOMEM;
                goto err_free_rx_rings;
+       }
 
        for (r = 0; r < nn->num_r_vecs; r++) {
                err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
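
The nfp hunk above makes each failed ring allocation set err = -ENOMEM before
jumping to the cleanup labels, so the function no longer returns whatever value
err happened to hold from the previous step. A tiny userspace sketch of that
error-path discipline using calloc():

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int open_rings(size_t nrx, size_t ntx)
    {
        void *rx, *tx;
        int err;

        rx = calloc(nrx, 64);
        if (!rx) {
            err = -ENOMEM;      /* set the code before jumping out */
            goto err_out;
        }
        tx = calloc(ntx, 64);
        if (!tx) {
            err = -ENOMEM;
            goto err_free_rx;
        }

        free(tx);
        free(rx);
        return 0;

    err_free_rx:
        free(rx);
    err_out:
        return err;
    }

    int main(void)
    {
        printf("open_rings() = %d\n", open_rings(4, 4));
        return 0;
    }
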
index a240f26..f776a77 100644 (file)
@@ -1153,8 +1153,8 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
        p_drv_version = &union_data.drv_version;
        p_drv_version->version = p_ver->version;
 
-       for (i = 0; i < MCP_DRV_VER_STR_SIZE - 1; i += 4) {
-               val = cpu_to_be32(p_ver->name[i]);
+       for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
+               val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
                *(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val;
        }
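
The qed change above copies the driver-version string four bytes at a time and
byte-swaps each 32-bit chunk, instead of promoting a single character per
iteration as the old loop did. An equivalent userspace sketch, with htonl()
standing in for cpu_to_be32():

    #include <arpa/inet.h>  /* htonl() stands in for cpu_to_be32() */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define VER_STR_SIZE 16

    int main(void)
    {
        char name[VER_STR_SIZE] = "1.2.3.4";
        uint32_t out[VER_STR_SIZE / 4];
        size_t i;

        for (i = 0; i < (VER_STR_SIZE - 4) / sizeof(uint32_t); i++) {
            uint32_t val;

            /* copy a whole 4-byte chunk, then convert it to big-endian */
            memcpy(&val, &name[i * sizeof(uint32_t)], sizeof(val));
            out[i] = htonl(val);
        }

        printf("first word: 0x%08x (%zu words copied)\n",
               (unsigned int)out[0], i);
        return 0;
    }
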
 
index cbefe9e..885a5e6 100644 (file)
@@ -261,7 +261,7 @@ static void dwmac1000_pmt(struct mac_device_info *hw, unsigned long mode)
        }
        if (mode & WAKE_UCAST) {
                pr_debug("GMAC: WOL on global unicast\n");
-               pmt |= global_unicast;
+               pmt |= power_down | global_unicast | wake_up_frame_en;
        }
 
        writel(pmt, ioaddr + GMAC_PMT);
index df5580d..51019b7 100644 (file)
@@ -102,7 +102,7 @@ static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
        }
        if (mode & WAKE_UCAST) {
                pr_debug("GMAC: WOL on global unicast\n");
-               pmt |= global_unicast;
+               pmt |= power_down | global_unicast | wake_up_frame_en;
        }
 
        writel(pmt, ioaddr + GMAC_PMT);
index 7756748..92af182 100644 (file)
@@ -424,10 +424,8 @@ static int xgene_mdio_remove(struct platform_device *pdev)
        mdiobus_unregister(mdio_bus);
        mdiobus_free(mdio_bus);
 
-       if (dev->of_node) {
-               if (IS_ERR(pdata->clk))
-                       clk_disable_unprepare(pdata->clk);
-       }
+       if (dev->of_node)
+               clk_disable_unprepare(pdata->clk);
 
        return 0;
 }
index f41a8ad..c254248 100644 (file)
@@ -32,7 +32,7 @@
 #define NETNEXT_VERSION                "08"
 
 /* Information for net */
-#define NET_VERSION            "5"
+#define NET_VERSION            "6"
 
 #define DRIVER_VERSION         "v1." NETNEXT_VERSION "." NET_VERSION
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -2552,6 +2552,77 @@ static void r8152_aldps_en(struct r8152 *tp, bool enable)
        }
 }
 
+static inline void r8152_mmd_indirect(struct r8152 *tp, u16 dev, u16 reg)
+{
+       ocp_reg_write(tp, OCP_EEE_AR, FUN_ADDR | dev);
+       ocp_reg_write(tp, OCP_EEE_DATA, reg);
+       ocp_reg_write(tp, OCP_EEE_AR, FUN_DATA | dev);
+}
+
+static u16 r8152_mmd_read(struct r8152 *tp, u16 dev, u16 reg)
+{
+       u16 data;
+
+       r8152_mmd_indirect(tp, dev, reg);
+       data = ocp_reg_read(tp, OCP_EEE_DATA);
+       ocp_reg_write(tp, OCP_EEE_AR, 0x0000);
+
+       return data;
+}
+
+static void r8152_mmd_write(struct r8152 *tp, u16 dev, u16 reg, u16 data)
+{
+       r8152_mmd_indirect(tp, dev, reg);
+       ocp_reg_write(tp, OCP_EEE_DATA, data);
+       ocp_reg_write(tp, OCP_EEE_AR, 0x0000);
+}
+
+static void r8152_eee_en(struct r8152 *tp, bool enable)
+{
+       u16 config1, config2, config3;
+       u32 ocp_data;
+
+       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
+       config1 = ocp_reg_read(tp, OCP_EEE_CONFIG1) & ~sd_rise_time_mask;
+       config2 = ocp_reg_read(tp, OCP_EEE_CONFIG2);
+       config3 = ocp_reg_read(tp, OCP_EEE_CONFIG3) & ~fast_snr_mask;
+
+       if (enable) {
+               ocp_data |= EEE_RX_EN | EEE_TX_EN;
+               config1 |= EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN | RX_QUIET_EN;
+               config1 |= sd_rise_time(1);
+               config2 |= RG_DACQUIET_EN | RG_LDVQUIET_EN;
+               config3 |= fast_snr(42);
+       } else {
+               ocp_data &= ~(EEE_RX_EN | EEE_TX_EN);
+               config1 &= ~(EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN |
+                            RX_QUIET_EN);
+               config1 |= sd_rise_time(7);
+               config2 &= ~(RG_DACQUIET_EN | RG_LDVQUIET_EN);
+               config3 |= fast_snr(511);
+       }
+
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
+       ocp_reg_write(tp, OCP_EEE_CONFIG1, config1);
+       ocp_reg_write(tp, OCP_EEE_CONFIG2, config2);
+       ocp_reg_write(tp, OCP_EEE_CONFIG3, config3);
+}
+
+static void r8152b_enable_eee(struct r8152 *tp)
+{
+       r8152_eee_en(tp, true);
+       r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, MDIO_EEE_100TX);
+}
+
+static void r8152b_enable_fc(struct r8152 *tp)
+{
+       u16 anar;
+
+       anar = r8152_mdio_read(tp, MII_ADVERTISE);
+       anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+       r8152_mdio_write(tp, MII_ADVERTISE, anar);
+}
+
 static void rtl8152_disable(struct r8152 *tp)
 {
        r8152_aldps_en(tp, false);
@@ -2561,13 +2632,9 @@ static void rtl8152_disable(struct r8152 *tp)
 
 static void r8152b_hw_phy_cfg(struct r8152 *tp)
 {
-       u16 data;
-
-       data = r8152_mdio_read(tp, MII_BMCR);
-       if (data & BMCR_PDOWN) {
-               data &= ~BMCR_PDOWN;
-               r8152_mdio_write(tp, MII_BMCR, data);
-       }
+       r8152b_enable_eee(tp);
+       r8152_aldps_en(tp, true);
+       r8152b_enable_fc(tp);
 
        set_bit(PHY_RESET, &tp->flags);
 }
@@ -2701,20 +2768,52 @@ static void r8152b_enter_oob(struct r8152 *tp)
        ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
 }
 
+static void r8153_aldps_en(struct r8152 *tp, bool enable)
+{
+       u16 data;
+
+       data = ocp_reg_read(tp, OCP_POWER_CFG);
+       if (enable) {
+               data |= EN_ALDPS;
+               ocp_reg_write(tp, OCP_POWER_CFG, data);
+       } else {
+               data &= ~EN_ALDPS;
+               ocp_reg_write(tp, OCP_POWER_CFG, data);
+               msleep(20);
+       }
+}
+
+static void r8153_eee_en(struct r8152 *tp, bool enable)
+{
+       u32 ocp_data;
+       u16 config;
+
+       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
+       config = ocp_reg_read(tp, OCP_EEE_CFG);
+
+       if (enable) {
+               ocp_data |= EEE_RX_EN | EEE_TX_EN;
+               config |= EEE10_EN;
+       } else {
+               ocp_data &= ~(EEE_RX_EN | EEE_TX_EN);
+               config &= ~EEE10_EN;
+       }
+
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
+       ocp_reg_write(tp, OCP_EEE_CFG, config);
+}
+
 static void r8153_hw_phy_cfg(struct r8152 *tp)
 {
        u32 ocp_data;
        u16 data;
 
-       if (tp->version == RTL_VER_03 || tp->version == RTL_VER_04 ||
-           tp->version == RTL_VER_05)
-               ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L);
+       /* disable ALDPS before updating the PHY parameters */
+       r8153_aldps_en(tp, false);
 
-       data = r8152_mdio_read(tp, MII_BMCR);
-       if (data & BMCR_PDOWN) {
-               data &= ~BMCR_PDOWN;
-               r8152_mdio_write(tp, MII_BMCR, data);
-       }
+       /* disable EEE before updating the PHY parameters */
+       r8153_eee_en(tp, false);
+       ocp_reg_write(tp, OCP_EEE_ADV, 0);
 
        if (tp->version == RTL_VER_03) {
                data = ocp_reg_read(tp, OCP_EEE_CFG);
@@ -2745,6 +2844,12 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
        sram_write(tp, SRAM_10M_AMP1, 0x00af);
        sram_write(tp, SRAM_10M_AMP2, 0x0208);
 
+       r8153_eee_en(tp, true);
+       ocp_reg_write(tp, OCP_EEE_ADV, MDIO_EEE_1000T | MDIO_EEE_100TX);
+
+       r8153_aldps_en(tp, true);
+       r8152b_enable_fc(tp);
+
        set_bit(PHY_RESET, &tp->flags);
 }
 
@@ -2866,21 +2971,6 @@ static void r8153_enter_oob(struct r8152 *tp)
        ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
 }
 
-static void r8153_aldps_en(struct r8152 *tp, bool enable)
-{
-       u16 data;
-
-       data = ocp_reg_read(tp, OCP_POWER_CFG);
-       if (enable) {
-               data |= EN_ALDPS;
-               ocp_reg_write(tp, OCP_POWER_CFG, data);
-       } else {
-               data &= ~EN_ALDPS;
-               ocp_reg_write(tp, OCP_POWER_CFG, data);
-               msleep(20);
-       }
-}
-
 static void rtl8153_disable(struct r8152 *tp)
 {
        r8153_aldps_en(tp, false);
@@ -3246,103 +3336,6 @@ static int rtl8152_close(struct net_device *netdev)
        return res;
 }
 
-static inline void r8152_mmd_indirect(struct r8152 *tp, u16 dev, u16 reg)
-{
-       ocp_reg_write(tp, OCP_EEE_AR, FUN_ADDR | dev);
-       ocp_reg_write(tp, OCP_EEE_DATA, reg);
-       ocp_reg_write(tp, OCP_EEE_AR, FUN_DATA | dev);
-}
-
-static u16 r8152_mmd_read(struct r8152 *tp, u16 dev, u16 reg)
-{
-       u16 data;
-
-       r8152_mmd_indirect(tp, dev, reg);
-       data = ocp_reg_read(tp, OCP_EEE_DATA);
-       ocp_reg_write(tp, OCP_EEE_AR, 0x0000);
-
-       return data;
-}
-
-static void r8152_mmd_write(struct r8152 *tp, u16 dev, u16 reg, u16 data)
-{
-       r8152_mmd_indirect(tp, dev, reg);
-       ocp_reg_write(tp, OCP_EEE_DATA, data);
-       ocp_reg_write(tp, OCP_EEE_AR, 0x0000);
-}
-
-static void r8152_eee_en(struct r8152 *tp, bool enable)
-{
-       u16 config1, config2, config3;
-       u32 ocp_data;
-
-       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
-       config1 = ocp_reg_read(tp, OCP_EEE_CONFIG1) & ~sd_rise_time_mask;
-       config2 = ocp_reg_read(tp, OCP_EEE_CONFIG2);
-       config3 = ocp_reg_read(tp, OCP_EEE_CONFIG3) & ~fast_snr_mask;
-
-       if (enable) {
-               ocp_data |= EEE_RX_EN | EEE_TX_EN;
-               config1 |= EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN | RX_QUIET_EN;
-               config1 |= sd_rise_time(1);
-               config2 |= RG_DACQUIET_EN | RG_LDVQUIET_EN;
-               config3 |= fast_snr(42);
-       } else {
-               ocp_data &= ~(EEE_RX_EN | EEE_TX_EN);
-               config1 &= ~(EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN |
-                            RX_QUIET_EN);
-               config1 |= sd_rise_time(7);
-               config2 &= ~(RG_DACQUIET_EN | RG_LDVQUIET_EN);
-               config3 |= fast_snr(511);
-       }
-
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
-       ocp_reg_write(tp, OCP_EEE_CONFIG1, config1);
-       ocp_reg_write(tp, OCP_EEE_CONFIG2, config2);
-       ocp_reg_write(tp, OCP_EEE_CONFIG3, config3);
-}
-
-static void r8152b_enable_eee(struct r8152 *tp)
-{
-       r8152_eee_en(tp, true);
-       r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, MDIO_EEE_100TX);
-}
-
-static void r8153_eee_en(struct r8152 *tp, bool enable)
-{
-       u32 ocp_data;
-       u16 config;
-
-       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
-       config = ocp_reg_read(tp, OCP_EEE_CFG);
-
-       if (enable) {
-               ocp_data |= EEE_RX_EN | EEE_TX_EN;
-               config |= EEE10_EN;
-       } else {
-               ocp_data &= ~(EEE_RX_EN | EEE_TX_EN);
-               config &= ~EEE10_EN;
-       }
-
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
-       ocp_reg_write(tp, OCP_EEE_CFG, config);
-}
-
-static void r8153_enable_eee(struct r8152 *tp)
-{
-       r8153_eee_en(tp, true);
-       ocp_reg_write(tp, OCP_EEE_ADV, MDIO_EEE_1000T | MDIO_EEE_100TX);
-}
-
-static void r8152b_enable_fc(struct r8152 *tp)
-{
-       u16 anar;
-
-       anar = r8152_mdio_read(tp, MII_ADVERTISE);
-       anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
-       r8152_mdio_write(tp, MII_ADVERTISE, anar);
-}
-
 static void rtl_tally_reset(struct r8152 *tp)
 {
        u32 ocp_data;
@@ -3355,10 +3348,17 @@ static void rtl_tally_reset(struct r8152 *tp)
 static void r8152b_init(struct r8152 *tp)
 {
        u32 ocp_data;
+       u16 data;
 
        if (test_bit(RTL8152_UNPLUG, &tp->flags))
                return;
 
+       data = r8152_mdio_read(tp, MII_BMCR);
+       if (data & BMCR_PDOWN) {
+               data &= ~BMCR_PDOWN;
+               r8152_mdio_write(tp, MII_BMCR, data);
+       }
+
        r8152_aldps_en(tp, false);
 
        if (tp->version == RTL_VER_01) {
@@ -3380,9 +3380,6 @@ static void r8152b_init(struct r8152 *tp)
                   SPDWN_RXDV_MSK | SPDWN_LINKCHG_MSK;
        ocp_write_word(tp, MCU_TYPE_PLA, PLA_GPHY_INTR_IMR, ocp_data);
 
-       r8152b_enable_eee(tp);
-       r8152_aldps_en(tp, true);
-       r8152b_enable_fc(tp);
        rtl_tally_reset(tp);
 
        /* enable rx aggregation */
@@ -3394,12 +3391,12 @@ static void r8152b_init(struct r8152 *tp)
 static void r8153_init(struct r8152 *tp)
 {
        u32 ocp_data;
+       u16 data;
        int i;
 
        if (test_bit(RTL8152_UNPLUG, &tp->flags))
                return;
 
-       r8153_aldps_en(tp, false);
        r8153_u1u2en(tp, false);
 
        for (i = 0; i < 500; i++) {
@@ -3416,6 +3413,23 @@ static void r8153_init(struct r8152 *tp)
                msleep(20);
        }
 
+       if (tp->version == RTL_VER_03 || tp->version == RTL_VER_04 ||
+           tp->version == RTL_VER_05)
+               ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L);
+
+       data = r8152_mdio_read(tp, MII_BMCR);
+       if (data & BMCR_PDOWN) {
+               data &= ~BMCR_PDOWN;
+               r8152_mdio_write(tp, MII_BMCR, data);
+       }
+
+       for (i = 0; i < 500; i++) {
+               ocp_data = ocp_reg_read(tp, OCP_PHY_STATUS) & PHY_STAT_MASK;
+               if (ocp_data == PHY_STAT_LAN_ON)
+                       break;
+               msleep(20);
+       }
+
        usb_disable_lpm(tp->udev);
        r8153_u2p3en(tp, false);
 
@@ -3483,9 +3497,6 @@ static void r8153_init(struct r8152 *tp)
        ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0);
        ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0);
 
-       r8153_enable_eee(tp);
-       r8153_aldps_en(tp, true);
-       r8152b_enable_fc(tp);
        rtl_tally_reset(tp);
        r8153_u2p3en(tp, true);
 }
index c6585ab..b3a87a3 100644 (file)
@@ -513,6 +513,15 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
        int hdrlen = ieee80211_hdrlen(hdr->frame_control);
        int queue;
 
+       /* IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used
+        * in 2 different types of vifs, P2P & STATION. P2P uses the offchannel
+        * queue. STATION (HS2.0) uses the auxiliary context of the FW,
+        * and hence needs to be sent on the aux queue
+        */
+       if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
+           skb_info->control.vif->type == NL80211_IFTYPE_STATION)
+               IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;
+
        memcpy(&info, skb->cb, sizeof(info));
 
        if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
@@ -526,16 +535,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
        /* This holds the amsdu headers length */
        skb_info->driver_data[0] = (void *)(uintptr_t)0;
 
-       /*
-        * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used
-        * in 2 different types of vifs, P2P & STATION. P2P uses the offchannel
-        * queue. STATION (HS2.0) uses the auxiliary context of the FW,
-        * and hence needs to be sent on the aux queue
-        */
-       if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
-           info.control.vif->type == NL80211_IFTYPE_STATION)
-               IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;
-
        queue = info.hw_queue;
 
        /*
index 6a31f26..daf4c78 100644 (file)
@@ -271,6 +271,11 @@ static int netback_probe(struct xenbus_device *dev,
        be->dev = dev;
        dev_set_drvdata(&dev->dev, be);
 
+       be->state = XenbusStateInitialising;
+       err = xenbus_switch_state(dev, XenbusStateInitialising);
+       if (err)
+               goto fail;
+
        sg = 1;
 
        do {
@@ -383,11 +388,6 @@ static int netback_probe(struct xenbus_device *dev,
 
        be->hotplug_script = script;
 
-       err = xenbus_switch_state(dev, XenbusStateInitWait);
-       if (err)
-               goto fail;
-
-       be->state = XenbusStateInitWait;
 
        /* This kicks hotplug scripts, so do it immediately. */
        err = backend_create_xenvif(be);
@@ -492,20 +492,20 @@ static inline void backend_switch_state(struct backend_info *be,
 
 /* Handle backend state transitions:
  *
- * The backend state starts in InitWait and the following transitions are
+ * The backend state starts in Initialising and the following transitions are
  * allowed.
  *
- * InitWait -> Connected
- *
- *    ^    \         |
- *    |     \        |
- *    |      \       |
- *    |       \      |
- *    |        \     |
- *    |         \    |
- *    |          V   V
+ * Initialising -> InitWait -> Connected
+ *          \
+ *           \        ^    \         |
+ *            \       |     \        |
+ *             \      |      \       |
+ *              \     |       \      |
+ *               \    |        \     |
+ *                \   |         \    |
+ *                 V  |          V   V
  *
- *  Closed  <-> Closing
+ *                  Closed  <-> Closing
  *
  * The state argument specifies the eventual state of the backend and the
  * function transitions to that state via the shortest path.
@@ -515,6 +515,20 @@ static void set_backend_state(struct backend_info *be,
 {
        while (be->state != state) {
                switch (be->state) {
+               case XenbusStateInitialising:
+                       switch (state) {
+                       case XenbusStateInitWait:
+                       case XenbusStateConnected:
+                       case XenbusStateClosing:
+                               backend_switch_state(be, XenbusStateInitWait);
+                               break;
+                       case XenbusStateClosed:
+                               backend_switch_state(be, XenbusStateClosed);
+                               break;
+                       default:
+                               BUG();
+                       }
+                       break;
                case XenbusStateClosed:
                        switch (state) {
                        case XenbusStateInitWait:
index 489ea10..69b5e81 100644 (file)
@@ -977,7 +977,7 @@ static int pcmcia_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
 
 /************************ runtime PM support ***************************/
 
-static int pcmcia_dev_suspend(struct device *dev, pm_message_t state);
+static int pcmcia_dev_suspend(struct device *dev);
 static int pcmcia_dev_resume(struct device *dev);
 
 static int runtime_suspend(struct device *dev)
@@ -985,7 +985,7 @@ static int runtime_suspend(struct device *dev)
        int rc;
 
        device_lock(dev);
-       rc = pcmcia_dev_suspend(dev, PMSG_SUSPEND);
+       rc = pcmcia_dev_suspend(dev);
        device_unlock(dev);
        return rc;
 }
@@ -1135,7 +1135,7 @@ ATTRIBUTE_GROUPS(pcmcia_dev);
 
 /* PM support, also needed for reset */
 
-static int pcmcia_dev_suspend(struct device *dev, pm_message_t state)
+static int pcmcia_dev_suspend(struct device *dev)
 {
        struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
        struct pcmcia_driver *p_drv = NULL;
@@ -1410,6 +1410,9 @@ static struct class_interface pcmcia_bus_interface __refdata = {
        .remove_dev = &pcmcia_bus_remove_socket,
 };
 
+static const struct dev_pm_ops pcmcia_bus_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(pcmcia_dev_suspend, pcmcia_dev_resume)
+};
 
 struct bus_type pcmcia_bus_type = {
        .name = "pcmcia",
@@ -1418,8 +1421,7 @@ struct bus_type pcmcia_bus_type = {
        .dev_groups = pcmcia_dev_groups,
        .probe = pcmcia_device_probe,
        .remove = pcmcia_device_remove,
-       .suspend = pcmcia_dev_suspend,
-       .resume = pcmcia_dev_resume,
+       .pm = &pcmcia_bus_pm_ops,
 };
 
 
index 483f919..91b5f57 100644 (file)
@@ -214,9 +214,8 @@ pxa2xx_pcmcia_frequency_change(struct soc_pcmcia_socket *skt,
 }
 #endif
 
-void pxa2xx_configure_sockets(struct device *dev)
+void pxa2xx_configure_sockets(struct device *dev, struct pcmcia_low_level *ops)
 {
-       struct pcmcia_low_level *ops = dev->platform_data;
        /*
         * We have at least one socket, so set MECR:CIT
         * (Card Is There)
@@ -322,7 +321,7 @@ static int pxa2xx_drv_pcmcia_probe(struct platform_device *dev)
                        goto err1;
        }
 
-       pxa2xx_configure_sockets(&dev->dev);
+       pxa2xx_configure_sockets(&dev->dev, ops);
        dev_set_drvdata(&dev->dev, sinfo);
 
        return 0;
@@ -348,7 +347,9 @@ static int pxa2xx_drv_pcmcia_remove(struct platform_device *dev)
 
 static int pxa2xx_drv_pcmcia_resume(struct device *dev)
 {
-       pxa2xx_configure_sockets(dev);
+       struct pcmcia_low_level *ops = dev->platform_data;
+
+       pxa2xx_configure_sockets(dev, ops);
        return 0;
 }
 
index b609b45..e58c7a4 100644 (file)
@@ -1,4 +1,4 @@
 int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt);
 void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops);
-void pxa2xx_configure_sockets(struct device *dev);
+void pxa2xx_configure_sockets(struct device *dev, struct pcmcia_low_level *ops);
 
index 12f0dd0..2f49093 100644 (file)
@@ -134,20 +134,14 @@ static struct pcmcia_low_level badge4_pcmcia_ops = {
 
 int pcmcia_badge4_init(struct sa1111_dev *dev)
 {
-       int ret = -ENODEV;
-
-       if (machine_is_badge4()) {
-               printk(KERN_INFO
-                      "%s: badge4_pcmvcc=%d, badge4_pcmvpp=%d, badge4_cfvcc=%d\n",
-                      __func__,
-                      badge4_pcmvcc, badge4_pcmvpp, badge4_cfvcc);
-
-               sa11xx_drv_pcmcia_ops(&badge4_pcmcia_ops);
-               ret = sa1111_pcmcia_add(dev, &badge4_pcmcia_ops,
-                               sa11xx_drv_pcmcia_add_one);
-       }
-
-       return ret;
+       printk(KERN_INFO
+              "%s: badge4_pcmvcc=%d, badge4_pcmvpp=%d, badge4_cfvcc=%d\n",
+              __func__,
+              badge4_pcmvcc, badge4_pcmvpp, badge4_cfvcc);
+
+       sa11xx_drv_pcmcia_ops(&badge4_pcmcia_ops);
+       return sa1111_pcmcia_add(dev, &badge4_pcmcia_ops,
+                                sa11xx_drv_pcmcia_add_one);
 }
 
 static int __init pcmv_setup(char *s)
index a1531fe..3d95dff 100644 (file)
@@ -18,6 +18,7 @@
 
 #include <mach/hardware.h>
 #include <asm/hardware/sa1111.h>
+#include <asm/mach-types.h>
 #include <asm/irq.h>
 
 #include "sa1111_generic.h"
@@ -203,19 +204,30 @@ static int pcmcia_probe(struct sa1111_dev *dev)
        sa1111_writel(PCSSR_S0_SLEEP | PCSSR_S1_SLEEP, base + PCSSR);
        sa1111_writel(PCCR_S0_FLT | PCCR_S1_FLT, base + PCCR);
 
+       ret = -ENODEV;
 #ifdef CONFIG_SA1100_BADGE4
-       pcmcia_badge4_init(dev);
+       if (machine_is_badge4())
+               ret = pcmcia_badge4_init(dev);
 #endif
 #ifdef CONFIG_SA1100_JORNADA720
-       pcmcia_jornada720_init(dev);
+       if (machine_is_jornada720())
+               ret = pcmcia_jornada720_init(dev);
 #endif
 #ifdef CONFIG_ARCH_LUBBOCK
-       pcmcia_lubbock_init(dev);
+       if (machine_is_lubbock())
+               ret = pcmcia_lubbock_init(dev);
 #endif
 #ifdef CONFIG_ASSABET_NEPONSET
-       pcmcia_neponset_init(dev);
+       if (machine_is_assabet())
+               ret = pcmcia_neponset_init(dev);
 #endif
-       return 0;
+
+       if (ret) {
+               release_mem_region(dev->res.start, 512);
+               sa1111_disable_device(dev);
+       }
+
+       return ret;
 }
 
 static int pcmcia_remove(struct sa1111_dev *dev)
index c2c3058..480a3ed 100644 (file)
@@ -94,22 +94,17 @@ static struct pcmcia_low_level jornada720_pcmcia_ops = {
 
 int pcmcia_jornada720_init(struct sa1111_dev *sadev)
 {
-       int ret = -ENODEV;
+       unsigned int pin = GPIO_A0 | GPIO_A1 | GPIO_A2 | GPIO_A3;
 
-       if (machine_is_jornada720()) {
-               unsigned int pin = GPIO_A0 | GPIO_A1 | GPIO_A2 | GPIO_A3;
+       /* Fixme: why messing around with SA11x0's GPIO1? */
+       GRER |= 0x00000002;
 
-               GRER |= 0x00000002;
+       /* Set GPIO_A<3:1> to be outputs for PCMCIA/CF power controller: */
+       sa1111_set_io_dir(sadev, pin, 0, 0);
+       sa1111_set_io(sadev, pin, 0);
+       sa1111_set_sleep_io(sadev, pin, 0);
 
-               /* Set GPIO_A<3:1> to be outputs for PCMCIA/CF power controller: */
-               sa1111_set_io_dir(sadev, pin, 0, 0);
-               sa1111_set_io(sadev, pin, 0);
-               sa1111_set_sleep_io(sadev, pin, 0);
-
-               sa11xx_drv_pcmcia_ops(&jornada720_pcmcia_ops);
-               ret = sa1111_pcmcia_add(sadev, &jornada720_pcmcia_ops,
-                               sa11xx_drv_pcmcia_add_one);
-       }
-
-       return ret;
+       sa11xx_drv_pcmcia_ops(&jornada720_pcmcia_ops);
+       return sa1111_pcmcia_add(sadev, &jornada720_pcmcia_ops,
+                                sa11xx_drv_pcmcia_add_one);
 }
index c5caf57..e741f49 100644 (file)
@@ -210,27 +210,21 @@ static struct pcmcia_low_level lubbock_pcmcia_ops = {
 
 int pcmcia_lubbock_init(struct sa1111_dev *sadev)
 {
-       int ret = -ENODEV;
-
-       if (machine_is_lubbock()) {
-               /*
-                * Set GPIO_A<3:0> to be outputs for the MAX1600,
-                * and switch to standby mode.
-                */
-               sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0);
-               sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
-               sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
-
-               /* Set CF Socket 1 power to standby mode. */
-               lubbock_set_misc_wr((1 << 15) | (1 << 14), 0);
+       /*
+        * Set GPIO_A<3:0> to be outputs for the MAX1600,
+        * and switch to standby mode.
+        */
+       sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0);
+       sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
+       sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
 
-               pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops);
-               pxa2xx_configure_sockets(&sadev->dev);
-               ret = sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops,
-                               pxa2xx_drv_pcmcia_add_one);
-       }
+       /* Set CF Socket 1 power to standby mode. */
+       lubbock_set_misc_wr((1 << 15) | (1 << 14), 0);
 
-       return ret;
+       pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops);
+       pxa2xx_configure_sockets(&sadev->dev, &lubbock_pcmcia_ops);
+       return sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops,
+                                pxa2xx_drv_pcmcia_add_one);
 }
 
 MODULE_LICENSE("GPL");
index 1d78739..019c395 100644 (file)
@@ -110,20 +110,14 @@ static struct pcmcia_low_level neponset_pcmcia_ops = {
 
 int pcmcia_neponset_init(struct sa1111_dev *sadev)
 {
-       int ret = -ENODEV;
-
-       if (machine_is_assabet()) {
-               /*
-                * Set GPIO_A<3:0> to be outputs for the MAX1600,
-                * and switch to standby mode.
-                */
-               sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0);
-               sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
-               sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
-               sa11xx_drv_pcmcia_ops(&neponset_pcmcia_ops);
-               ret = sa1111_pcmcia_add(sadev, &neponset_pcmcia_ops,
-                               sa11xx_drv_pcmcia_add_one);
-       }
-
-       return ret;
+       /*
+        * Set GPIO_A<3:0> to be outputs for the MAX1600,
+        * and switch to standby mode.
+        */
+       sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0);
+       sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
+       sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
+       sa11xx_drv_pcmcia_ops(&neponset_pcmcia_ops);
+       return sa1111_pcmcia_add(sadev, &neponset_pcmcia_ops,
+                                sa11xx_drv_pcmcia_add_one);
 }
index 9f6ec87..48140ac 100644 (file)
@@ -144,19 +144,19 @@ static int
 sa1100_pcmcia_show_timing(struct soc_pcmcia_socket *skt, char *buf)
 {
        struct soc_pcmcia_timing timing;
-       unsigned int clock = clk_get_rate(skt->clk);
+       unsigned int clock = clk_get_rate(skt->clk) / 1000;
        unsigned long mecr = MECR;
        char *p = buf;
 
        soc_common_pcmcia_get_timing(skt, &timing);
 
-       p+=sprintf(p, "I/O      : %u (%u)\n", timing.io,
+       p+=sprintf(p, "I/O      : %uns (%uns)\n", timing.io,
                   sa1100_pcmcia_cmd_time(clock, MECR_BSIO_GET(mecr, skt->nr)));
 
-       p+=sprintf(p, "attribute: %u (%u)\n", timing.attr,
+       p+=sprintf(p, "attribute: %uns (%uns)\n", timing.attr,
                   sa1100_pcmcia_cmd_time(clock, MECR_BSA_GET(mecr, skt->nr)));
 
-       p+=sprintf(p, "common   : %u (%u)\n", timing.mem,
+       p+=sprintf(p, "common   : %uns (%uns)\n", timing.mem,
                   sa1100_pcmcia_cmd_time(clock, MECR_BSM_GET(mecr, skt->nr)));
 
        return p - buf;
index eed5e9c..d5ca760 100644 (file)
@@ -235,7 +235,7 @@ static unsigned int soc_common_pcmcia_skt_state(struct soc_pcmcia_socket *skt)
        stat |= skt->cs_state.Vcc ? SS_POWERON : 0;
 
        if (skt->cs_state.flags & SS_IOCARD)
-               stat |= state.bvd1 ? SS_STSCHG : 0;
+               stat |= state.bvd1 ? 0 : SS_STSCHG;
        else {
                if (state.bvd1 == 0)
                        stat |= SS_BATDEAD;
index 3fa17ac..cebc296 100644 (file)
@@ -2247,17 +2247,30 @@ static int rio_cm_shutdown(struct notifier_block *nb, unsigned long code,
 {
        struct rio_channel *ch;
        unsigned int i;
+       LIST_HEAD(list);
 
        riocm_debug(EXIT, ".");
 
+       /*
+        * If there are any channels left in connected state, send a
+        * close notification to the connection partner.
+        * First build a list of channels that require a closing
+        * notification, because riocm_send_close() should be called
+        * outside of spinlock-protected code.
+        */
        spin_lock_bh(&idr_lock);
        idr_for_each_entry(&ch_idr, ch, i) {
-               riocm_debug(EXIT, "close ch %d", ch->id);
-               if (ch->state == RIO_CM_CONNECTED)
-                       riocm_send_close(ch);
+               if (ch->state == RIO_CM_CONNECTED) {
+                       riocm_debug(EXIT, "close ch %d", ch->id);
+                       idr_remove(&ch_idr, ch->id);
+                       list_add(&ch->ch_node, &list);
+               }
        }
        spin_unlock_bh(&idr_lock);
 
+       list_for_each_entry(ch, &list, ch_node)
+               riocm_send_close(ch);
+
        return NOTIFY_DONE;
 }
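
The rio_cm shutdown fix above gathers the connected channels onto a private
list while holding the spinlock and only calls riocm_send_close() after
dropping it, since that routine cannot run in spinlock-protected context. A
userspace analogue of the same two-phase pattern, with a mutex and a plain
array standing in for the IDR and list_head machinery:

    #include <pthread.h>
    #include <stdio.h>

    #define NCH 4

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int connected[NCH] = { 1, 0, 1, 1 };

    static void send_close(int id)  /* may block; must run unlocked */
    {
        printf("sending close for channel %d\n", id);
    }

    int main(void)
    {
        int to_close[NCH], n = 0, i;

        /* phase 1: under the lock, only record which channels need closing */
        pthread_mutex_lock(&lock);
        for (i = 0; i < NCH; i++) {
            if (connected[i]) {
                connected[i] = 0;
                to_close[n++] = i;
            }
        }
        pthread_mutex_unlock(&lock);

        /* phase 2: issue the notifications outside the lock */
        for (i = 0; i < n; i++)
            send_close(to_close[i]);

        return 0;
    }
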
 
index bf40063..6d4b68c 100644 (file)
@@ -999,6 +999,7 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
                                                 __u16, __u16,
                                                 enum qeth_prot_versions);
 int qeth_set_features(struct net_device *, netdev_features_t);
+int qeth_recover_features(struct net_device *);
 netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
 
 /* exports for OSN */
index 7dba6c8..20cf296 100644 (file)
@@ -3619,7 +3619,8 @@ static void qeth_qdio_cq_handler(struct qeth_card *card,
                int e;
 
                e = 0;
-               while (buffer->element[e].addr) {
+               while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
+                      buffer->element[e].addr) {
                        unsigned long phys_aob_addr;
 
                        phys_aob_addr = (unsigned long) buffer->element[e].addr;
@@ -6131,6 +6132,35 @@ static int qeth_set_ipa_tso(struct qeth_card *card, int on)
        return rc;
 }
 
+/* try to restore device features after a recovery */
+int qeth_recover_features(struct net_device *dev)
+{
+       struct qeth_card *card = dev->ml_priv;
+       netdev_features_t recover = dev->features;
+
+       if (recover & NETIF_F_IP_CSUM) {
+               if (qeth_set_ipa_csum(card, 1, IPA_OUTBOUND_CHECKSUM))
+                       recover ^= NETIF_F_IP_CSUM;
+       }
+       if (recover & NETIF_F_RXCSUM) {
+               if (qeth_set_ipa_csum(card, 1, IPA_INBOUND_CHECKSUM))
+                       recover ^= NETIF_F_RXCSUM;
+       }
+       if (recover & NETIF_F_TSO) {
+               if (qeth_set_ipa_tso(card, 1))
+                       recover ^= NETIF_F_TSO;
+       }
+
+       if (recover == dev->features)
+               return 0;
+
+       dev_warn(&card->gdev->dev,
+                "Device recovery failed to restore all offload features\n");
+       dev->features = recover;
+       return -EIO;
+}
+EXPORT_SYMBOL_GPL(qeth_recover_features);
+
 int qeth_set_features(struct net_device *dev, netdev_features_t features)
 {
        struct qeth_card *card = dev->ml_priv;
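
qeth_recover_features() above walks the wanted feature mask after a recovery,
tries to switch each offload back on, clears the bits the hardware refuses, and
warns once if the resulting mask differs. A reduced model of that toggle-back
logic, with made-up feature bits and a stub enable call that pretends TSO
cannot be restored:

    #include <stdio.h>

    #define F_TX_CSUM (1u << 0)
    #define F_RX_CSUM (1u << 1)
    #define F_TSO     (1u << 2)

    /* hypothetical hardware call: pretend TSO cannot be re-enabled */
    static int hw_enable(unsigned int feature)
    {
        return feature == F_TSO ? -1 : 0;
    }

    static unsigned int recover_features(unsigned int wanted)
    {
        unsigned int recovered = wanted, f;

        for (f = 1; f; f <<= 1)
            if ((recovered & f) && hw_enable(f))
                recovered ^= f;     /* drop what could not be restored */

        if (recovered != wanted)
            fprintf(stderr, "recovery did not restore all features\n");
        return recovered;
    }

    int main(void)
    {
        printf("recovered mask: %#x\n",
               recover_features(F_TX_CSUM | F_RX_CSUM | F_TSO));
        return 0;
    }
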
index 7bc20c5..bb27058 100644 (file)
@@ -1124,14 +1124,11 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
                        card->dev->hw_features |= NETIF_F_RXCSUM;
                        card->dev->vlan_features |= NETIF_F_RXCSUM;
                }
-               /* Turn on SG per default */
-               card->dev->features |= NETIF_F_SG;
        }
        card->info.broadcast_capable = 1;
        qeth_l2_request_initial_mac(card);
        card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
                                  PAGE_SIZE;
-       card->dev->gso_max_segs = (QETH_MAX_BUFFER_ELEMENTS(card) - 1);
        SET_NETDEV_DEV(card->dev, &card->gdev->dev);
        netif_napi_add(card->dev, &card->napi, qeth_l2_poll, QETH_NAPI_WEIGHT);
        netif_carrier_off(card->dev);
@@ -1246,6 +1243,9 @@ contin:
                }
                /* this also sets saved unicast addresses */
                qeth_l2_set_rx_mode(card->dev);
+               rtnl_lock();
+               qeth_recover_features(card->dev);
+               rtnl_unlock();
        }
        /* let user_space know that device is online */
        kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
index 7293466..272d9e7 100644 (file)
@@ -257,6 +257,11 @@ int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
        if (addr->in_progress)
                return -EINPROGRESS;
 
+       if (!qeth_card_hw_is_reachable(card)) {
+               addr->disp_flag = QETH_DISP_ADDR_DELETE;
+               return 0;
+       }
+
        rc = qeth_l3_deregister_addr_entry(card, addr);
 
        hash_del(&addr->hnode);
@@ -296,6 +301,11 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
                hash_add(card->ip_htable, &addr->hnode,
                                qeth_l3_ipaddr_hash(addr));
 
+               if (!qeth_card_hw_is_reachable(card)) {
+                       addr->disp_flag = QETH_DISP_ADDR_ADD;
+                       return 0;
+               }
+
                /* qeth_l3_register_addr_entry can go to sleep
                 * if we add a IPV4 addr. It is caused by the reason
                 * that SETIP ipa cmd starts ARP staff for IPV4 addr.
@@ -390,12 +400,16 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
        int i;
        int rc;
 
-       QETH_CARD_TEXT(card, 4, "recoverip");
+       QETH_CARD_TEXT(card, 4, "recovrip");
 
        spin_lock_bh(&card->ip_lock);
 
        hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
-               if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
+               if (addr->disp_flag == QETH_DISP_ADDR_DELETE) {
+                       qeth_l3_deregister_addr_entry(card, addr);
+                       hash_del(&addr->hnode);
+                       kfree(addr);
+               } else if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
                        if (addr->proto == QETH_PROT_IPV4) {
                                addr->in_progress = 1;
                                spin_unlock_bh(&card->ip_lock);
@@ -407,10 +421,8 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
 
                        if (!rc) {
                                addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
-                               if (addr->ref_counter < 1) {
+                               if (addr->ref_counter < 1)
                                        qeth_l3_delete_ip(card, addr);
-                                       kfree(addr);
-                               }
                        } else {
                                hash_del(&addr->hnode);
                                kfree(addr);
@@ -689,7 +701,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
 
        spin_lock_bh(&card->ip_lock);
 
-       if (!qeth_l3_ip_from_hash(card, ipaddr))
+       if (qeth_l3_ip_from_hash(card, ipaddr))
                rc = -EEXIST;
        else
                qeth_l3_add_ip(card, ipaddr);
@@ -757,7 +769,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
 
        spin_lock_bh(&card->ip_lock);
 
-       if (!qeth_l3_ip_from_hash(card, ipaddr))
+       if (qeth_l3_ip_from_hash(card, ipaddr))
                rc = -EEXIST;
        else
                qeth_l3_add_ip(card, ipaddr);
@@ -3108,7 +3120,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
                                card->dev->vlan_features = NETIF_F_SG |
                                        NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
                                        NETIF_F_TSO;
-                               card->dev->features = NETIF_F_SG;
                        }
                }
        } else if (card->info.type == QETH_CARD_TYPE_IQD) {
@@ -3136,7 +3147,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
        netif_keep_dst(card->dev);
        card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
                                  PAGE_SIZE;
-       card->dev->gso_max_segs = (QETH_MAX_BUFFER_ELEMENTS(card) - 1);
 
        SET_NETDEV_DEV(card->dev, &card->gdev->dev);
        netif_napi_add(card->dev, &card->napi, qeth_l3_poll, QETH_NAPI_WEIGHT);
@@ -3269,6 +3279,7 @@ contin:
                else
                        dev_open(card->dev);
                qeth_l3_set_multicast_list(card->dev);
+               qeth_recover_features(card->dev);
                rtnl_unlock();
        }
        qeth_trace_features(card);
index 65645b1..0e00a5c 100644 (file)
@@ -297,7 +297,9 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
                addr->u.a6.pfxlen = 0;
                addr->type = QETH_IP_TYPE_NORMAL;
 
+               spin_lock_bh(&card->ip_lock);
                qeth_l3_delete_ip(card, addr);
+               spin_unlock_bh(&card->ip_lock);
                kfree(addr);
        }
 
@@ -329,7 +331,10 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
                addr->type = QETH_IP_TYPE_NORMAL;
        } else
                return -ENOMEM;
+
+       spin_lock_bh(&card->ip_lock);
        qeth_l3_add_ip(card, addr);
+       spin_unlock_bh(&card->ip_lock);
        kfree(addr);
 
        return count;
index a10d4f8..1322469 100644 (file)
@@ -12,6 +12,7 @@ Hopefully this will happen later in 2016.
 
 Other TODOs:
 
+- There are two possible replies to CEC_MSG_INITIATE_ARC. How to handle that?
 - Add a flag to inhibit passing CEC RC messages to the rc subsystem.
   Applications should be able to choose this when calling S_LOG_ADDRS.
 - If the reply field of cec_msg is set then when the reply arrives it
index b2393bb..946986f 100644 (file)
@@ -124,10 +124,10 @@ static void cec_queue_event(struct cec_adapter *adap,
        u64 ts = ktime_get_ns();
        struct cec_fh *fh;
 
-       mutex_lock(&adap->devnode.fhs_lock);
+       mutex_lock(&adap->devnode.lock);
        list_for_each_entry(fh, &adap->devnode.fhs, list)
                cec_queue_event_fh(fh, ev, ts);
-       mutex_unlock(&adap->devnode.fhs_lock);
+       mutex_unlock(&adap->devnode.lock);
 }
 
 /*
@@ -191,12 +191,12 @@ static void cec_queue_msg_monitor(struct cec_adapter *adap,
        u32 monitor_mode = valid_la ? CEC_MODE_MONITOR :
                                      CEC_MODE_MONITOR_ALL;
 
-       mutex_lock(&adap->devnode.fhs_lock);
+       mutex_lock(&adap->devnode.lock);
        list_for_each_entry(fh, &adap->devnode.fhs, list) {
                if (fh->mode_follower >= monitor_mode)
                        cec_queue_msg_fh(fh, msg);
        }
-       mutex_unlock(&adap->devnode.fhs_lock);
+       mutex_unlock(&adap->devnode.lock);
 }
 
 /*
@@ -207,12 +207,12 @@ static void cec_queue_msg_followers(struct cec_adapter *adap,
 {
        struct cec_fh *fh;
 
-       mutex_lock(&adap->devnode.fhs_lock);
+       mutex_lock(&adap->devnode.lock);
        list_for_each_entry(fh, &adap->devnode.fhs, list) {
                if (fh->mode_follower == CEC_MODE_FOLLOWER)
                        cec_queue_msg_fh(fh, msg);
        }
-       mutex_unlock(&adap->devnode.fhs_lock);
+       mutex_unlock(&adap->devnode.lock);
 }
 
 /* Notify userspace of an adapter state change. */
@@ -851,6 +851,9 @@ void cec_received_msg(struct cec_adapter *adap, struct cec_msg *msg)
        if (!valid_la || msg->len <= 1)
                return;
 
+       if (adap->log_addrs.log_addr_mask == 0)
+               return;
+
        /*
         * Process the message on the protocol level. If is_reply is true,
         * then cec_receive_notify() won't pass on the reply to the listener(s)
@@ -1047,11 +1050,17 @@ static int cec_config_thread_func(void *arg)
                        dprintk(1, "could not claim LA %d\n", i);
        }
 
+       if (adap->log_addrs.log_addr_mask == 0 &&
+           !(las->flags & CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK))
+               goto unconfigure;
+
 configured:
        if (adap->log_addrs.log_addr_mask == 0) {
                /* Fall back to unregistered */
                las->log_addr[0] = CEC_LOG_ADDR_UNREGISTERED;
                las->log_addr_mask = 1 << las->log_addr[0];
+               for (i = 1; i < las->num_log_addrs; i++)
+                       las->log_addr[i] = CEC_LOG_ADDR_INVALID;
        }
        adap->is_configured = true;
        adap->is_configuring = false;
@@ -1070,6 +1079,8 @@ configured:
                        cec_report_features(adap, i);
                cec_report_phys_addr(adap, i);
        }
+       for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
+               las->log_addr[i] = CEC_LOG_ADDR_INVALID;
        mutex_lock(&adap->lock);
        adap->kthread_config = NULL;
        mutex_unlock(&adap->lock);
@@ -1398,7 +1409,6 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
        u8 init_laddr = cec_msg_initiator(msg);
        u8 devtype = cec_log_addr2dev(adap, dest_laddr);
        int la_idx = cec_log_addr2idx(adap, dest_laddr);
-       bool is_directed = la_idx >= 0;
        bool from_unregistered = init_laddr == 0xf;
        struct cec_msg tx_cec_msg = { };
 
@@ -1560,7 +1570,7 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
                 * Unprocessed messages are aborted if userspace isn't doing
                 * any processing either.
                 */
-               if (is_directed && !is_reply && !adap->follower_cnt &&
+               if (!is_broadcast && !is_reply && !adap->follower_cnt &&
                    !adap->cec_follower && msg->msg[1] != CEC_MSG_FEATURE_ABORT)
                        return cec_feature_abort(adap, msg);
                break;
index 7be7615..e274e2f 100644 (file)
@@ -162,7 +162,7 @@ static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh,
                return -ENOTTY;
        if (copy_from_user(&log_addrs, parg, sizeof(log_addrs)))
                return -EFAULT;
-       log_addrs.flags = 0;
+       log_addrs.flags &= CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK;
        mutex_lock(&adap->lock);
        if (!adap->is_configuring &&
            (!log_addrs.num_log_addrs || !adap->is_configured) &&
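
This hunk starts accepting the new CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK flag (defined further down in the uapi header). A hedged userspace sketch of how a caller might set it, assuming the then-staging <linux/cec.h> header is available and with most struct cec_log_addrs fields trimmed: with the flag, an adapter that cannot claim any logical address falls back to the unregistered address instead of returning to the unconfigured state.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/cec.h>

/* Request one playback logical address, allowing fallback to unregistered. */
static int request_playback_la(int cec_fd)
{
        struct cec_log_addrs laddrs;

        memset(&laddrs, 0, sizeof(laddrs));
        strcpy(laddrs.osd_name, "demo");
        laddrs.vendor_id = CEC_VENDOR_ID_NONE;
        laddrs.cec_version = CEC_OP_CEC_VERSION_1_4;
        laddrs.num_log_addrs = 1;
        laddrs.log_addr_type[0] = CEC_LOG_ADDR_TYPE_PLAYBACK;
        laddrs.primary_device_type[0] = CEC_OP_PRIM_DEVTYPE_PLAYBACK;
        laddrs.all_device_types[0] = CEC_OP_ALL_DEVTYPE_PLAYBACK;
        laddrs.flags = CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK;

        return ioctl(cec_fd, CEC_ADAP_S_LOG_ADDRS, &laddrs);
}
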
@@ -435,7 +435,7 @@ static long cec_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        void __user *parg = (void __user *)arg;
 
        if (!devnode->registered)
-               return -EIO;
+               return -ENODEV;
 
        switch (cmd) {
        case CEC_ADAP_G_CAPS:
@@ -508,14 +508,14 @@ static int cec_open(struct inode *inode, struct file *filp)
 
        filp->private_data = fh;
 
-       mutex_lock(&devnode->fhs_lock);
+       mutex_lock(&devnode->lock);
        /* Queue up initial state events */
        ev_state.state_change.phys_addr = adap->phys_addr;
        ev_state.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
        cec_queue_event_fh(fh, &ev_state, 0);
 
        list_add(&fh->list, &devnode->fhs);
-       mutex_unlock(&devnode->fhs_lock);
+       mutex_unlock(&devnode->lock);
 
        return 0;
 }
@@ -540,9 +540,9 @@ static int cec_release(struct inode *inode, struct file *filp)
                cec_monitor_all_cnt_dec(adap);
        mutex_unlock(&adap->lock);
 
-       mutex_lock(&devnode->fhs_lock);
+       mutex_lock(&devnode->lock);
        list_del(&fh->list);
-       mutex_unlock(&devnode->fhs_lock);
+       mutex_unlock(&devnode->lock);
 
        /* Unhook pending transmits from this filehandle. */
        mutex_lock(&adap->lock);
index 112a5fa..3b1e4d2 100644 (file)
@@ -51,31 +51,29 @@ int cec_get_device(struct cec_devnode *devnode)
 {
        /*
         * Check if the cec device is available. This needs to be done with
-        * the cec_devnode_lock held to prevent an open/unregister race:
+        * the devnode->lock held to prevent an open/unregister race:
         * without the lock, the device could be unregistered and freed between
         * the devnode->registered check and get_device() calls, leading to
         * a crash.
         */
-       mutex_lock(&cec_devnode_lock);
+       mutex_lock(&devnode->lock);
        /*
         * return ENXIO if the cec device has been removed
         * already or if it is not registered anymore.
         */
        if (!devnode->registered) {
-               mutex_unlock(&cec_devnode_lock);
+               mutex_unlock(&devnode->lock);
                return -ENXIO;
        }
        /* and increase the device refcount */
        get_device(&devnode->dev);
-       mutex_unlock(&cec_devnode_lock);
+       mutex_unlock(&devnode->lock);
        return 0;
 }
 
 void cec_put_device(struct cec_devnode *devnode)
 {
-       mutex_lock(&cec_devnode_lock);
        put_device(&devnode->dev);
-       mutex_unlock(&cec_devnode_lock);
 }
 
 /* Called when the last user of the cec device exits. */
@@ -84,11 +82,10 @@ static void cec_devnode_release(struct device *cd)
        struct cec_devnode *devnode = to_cec_devnode(cd);
 
        mutex_lock(&cec_devnode_lock);
-
        /* Mark device node number as free */
        clear_bit(devnode->minor, cec_devnode_nums);
-
        mutex_unlock(&cec_devnode_lock);
+
        cec_delete_adapter(to_cec_adapter(devnode));
 }
 
@@ -117,7 +114,7 @@ static int __must_check cec_devnode_register(struct cec_devnode *devnode,
 
        /* Initialization */
        INIT_LIST_HEAD(&devnode->fhs);
-       mutex_init(&devnode->fhs_lock);
+       mutex_init(&devnode->lock);
 
        /* Part 1: Find a free minor number */
        mutex_lock(&cec_devnode_lock);
@@ -160,7 +157,9 @@ static int __must_check cec_devnode_register(struct cec_devnode *devnode,
 cdev_del:
        cdev_del(&devnode->cdev);
 clr_bit:
+       mutex_lock(&cec_devnode_lock);
        clear_bit(devnode->minor, cec_devnode_nums);
+       mutex_unlock(&cec_devnode_lock);
        return ret;
 }
 
@@ -177,17 +176,21 @@ static void cec_devnode_unregister(struct cec_devnode *devnode)
 {
        struct cec_fh *fh;
 
+       mutex_lock(&devnode->lock);
+
        /* Check if devnode was never registered or already unregistered */
-       if (!devnode->registered || devnode->unregistered)
+       if (!devnode->registered || devnode->unregistered) {
+               mutex_unlock(&devnode->lock);
                return;
+       }
 
-       mutex_lock(&devnode->fhs_lock);
        list_for_each_entry(fh, &devnode->fhs, list)
                wake_up_interruptible(&fh->wait);
-       mutex_unlock(&devnode->fhs_lock);
 
        devnode->registered = false;
        devnode->unregistered = true;
+       mutex_unlock(&devnode->lock);
+
        device_del(&devnode->dev);
        cdev_del(&devnode->cdev);
        put_device(&devnode->dev);
index 94f8590..ed8bd95 100644 (file)
@@ -114,14 +114,11 @@ static void pulse8_irq_work_handler(struct work_struct *work)
                cec_transmit_done(pulse8->adap, CEC_TX_STATUS_OK,
                                  0, 0, 0, 0);
                break;
-       case MSGCODE_TRANSMIT_FAILED_LINE:
-               cec_transmit_done(pulse8->adap, CEC_TX_STATUS_ARB_LOST,
-                                 1, 0, 0, 0);
-               break;
        case MSGCODE_TRANSMIT_FAILED_ACK:
                cec_transmit_done(pulse8->adap, CEC_TX_STATUS_NACK,
                                  0, 1, 0, 0);
                break;
+       case MSGCODE_TRANSMIT_FAILED_LINE:
        case MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA:
        case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE:
                cec_transmit_done(pulse8->adap, CEC_TX_STATUS_ERROR,
@@ -170,6 +167,9 @@ static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data,
                case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE:
                        schedule_work(&pulse8->work);
                        break;
+               case MSGCODE_HIGH_ERROR:
+               case MSGCODE_LOW_ERROR:
+               case MSGCODE_RECEIVE_FAILED:
                case MSGCODE_TIMEOUT_ERROR:
                        break;
                case MSGCODE_COMMAND_ACCEPTED:
@@ -388,7 +388,7 @@ static int pulse8_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
        int err;
 
        cmd[0] = MSGCODE_TRANSMIT_IDLETIME;
-       cmd[1] = 3;
+       cmd[1] = signal_free_time;
        err = pulse8_send_and_wait(pulse8, cmd, 2,
                                   MSGCODE_COMMAND_ACCEPTED, 1);
        cmd[0] = MSGCODE_TRANSMIT_ACK_POLARITY;
index 15ce4ab..a2d90ac 100644 (file)
@@ -240,8 +240,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
        memcpy(&endpoint->desc, d, n);
        INIT_LIST_HEAD(&endpoint->urb_list);
 
-       /* Fix up bInterval values outside the legal range. Use 32 ms if no
-        * proper value can be guessed. */
+       /*
+        * Fix up bInterval values outside the legal range.
+        * Use 10 or 8 ms if no proper value can be guessed.
+        */
        i = 0;          /* i = min, j = max, n = default */
        j = 255;
        if (usb_endpoint_xfer_int(d)) {
@@ -250,13 +252,15 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
                case USB_SPEED_SUPER_PLUS:
                case USB_SPEED_SUPER:
                case USB_SPEED_HIGH:
-                       /* Many device manufacturers are using full-speed
+                       /*
+                        * Many device manufacturers are using full-speed
                         * bInterval values in high-speed interrupt endpoint
-                        * descriptors. Try to fix those and fall back to a
-                        * 32 ms default value otherwise. */
+                        * descriptors. Try to fix those and fall back to an
+                        * 8-ms default value otherwise.
+                        */
                        n = fls(d->bInterval*8);
                        if (n == 0)
-                               n = 9;  /* 32 ms = 2^(9-1) uframes */
+                               n = 7;  /* 8 ms = 2^(7-1) uframes */
                        j = 16;
 
                        /*
@@ -271,10 +275,12 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
                        }
                        break;
                default:                /* USB_SPEED_FULL or _LOW */
-                       /* For low-speed, 10 ms is the official minimum.
+                       /*
+                        * For low-speed, 10 ms is the official minimum.
                         * But some "overclocked" devices might want faster
-                        * polling so we'll allow it. */
-                       n = 32;
+                        * polling so we'll allow it.
+                        */
+                       n = 10;
                        break;
                }
        } else if (usb_endpoint_xfer_isoc(d)) {
@@ -282,10 +288,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
                j = 16;
                switch (to_usb_device(ddev)->speed) {
                case USB_SPEED_HIGH:
-                       n = 9;          /* 32 ms = 2^(9-1) uframes */
+                       n = 7;          /* 8 ms = 2^(7-1) uframes */
                        break;
                default:                /* USB_SPEED_FULL */
-                       n = 6;          /* 32 ms = 2^(6-1) frames */
+                       n = 4;          /* 8 ms = 2^(4-1) frames */
                        break;
                }
        }
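
The replacement defaults in the hunks above follow directly from how bInterval is encoded: high-speed interrupt and isochronous endpoints use 2^(bInterval-1) microframes of 125 us, full-speed isochronous uses 2^(bInterval-1) frames of 1 ms, so n = 7 and n = 4 both come out to 8 ms where the old n = 9 and n = 6 meant 32 ms. A small standalone check of that arithmetic:

#include <stdio.h>

static unsigned int hs_interval_ms(unsigned int bInterval)
{
        /* 2^(bInterval-1) microframes of 125 us each */
        return (1u << (bInterval - 1)) * 125 / 1000;
}

static unsigned int fs_isoc_interval_ms(unsigned int bInterval)
{
        /* 2^(bInterval-1) frames of 1 ms each */
        return 1u << (bInterval - 1);
}

int main(void)
{
        printf("high speed: n=9 -> %u ms, n=7 -> %u ms\n",
               hs_interval_ms(9), hs_interval_ms(7));
        printf("full-speed isoc: n=6 -> %u ms, n=4 -> %u ms\n",
               fs_isoc_interval_ms(6), fs_isoc_interval_ms(4));
        return 0;
}
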
index 886526b..73cfa13 100644 (file)
@@ -87,7 +87,7 @@ config USB_MUSB_DA8XX
 config USB_MUSB_TUSB6010
        tristate "TUSB6010"
        depends on HAS_IOMEM
-       depends on ARCH_OMAP2PLUS || COMPILE_TEST
+       depends on (ARCH_OMAP2PLUS || COMPILE_TEST) && !BLACKFIN
        depends on NOP_USB_XCEIV = USB_MUSB_HDRC # both built-in or both modules
 
 config USB_MUSB_OMAP2PLUS
index a204782..e98b6e5 100644 (file)
@@ -54,7 +54,8 @@ DEVICE(funsoft, FUNSOFT_IDS);
 /* Infineon Flashloader driver */
 #define FLASHLOADER_IDS()              \
        { USB_DEVICE_INTERFACE_CLASS(0x058b, 0x0041, USB_CLASS_CDC_DATA) }, \
-       { USB_DEVICE(0x8087, 0x0716) }
+       { USB_DEVICE(0x8087, 0x0716) }, \
+       { USB_DEVICE(0x8087, 0x0801) }
 DEVICE(flashloader, FLASHLOADER_IDS);
 
 /* Google Serial USB SubClass */
index fb8e45b..4fe81d1 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -239,7 +239,12 @@ static struct dentry *aio_mount(struct file_system_type *fs_type,
        static const struct dentry_operations ops = {
                .d_dname        = simple_dname,
        };
-       return mount_pseudo(fs_type, "aio:", NULL, &ops, AIO_RING_MAGIC);
+       struct dentry *root = mount_pseudo(fs_type, "aio:", NULL, &ops,
+                                          AIO_RING_MAGIC);
+
+       if (!IS_ERR(root))
+               root->d_sb->s_iflags |= SB_I_NOEXEC;
+       return root;
 }
 
 /* aio_setup
index b493909..d8e6d42 100644 (file)
@@ -417,6 +417,7 @@ static struct dentry *should_expire(struct dentry *dentry,
        }
        return NULL;
 }
+
 /*
  * Find an eligible tree to time-out
  * A tree is eligible if :-
@@ -432,6 +433,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
        struct dentry *root = sb->s_root;
        struct dentry *dentry;
        struct dentry *expired;
+       struct dentry *found;
        struct autofs_info *ino;
 
        if (!root)
@@ -442,31 +444,46 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
 
        dentry = NULL;
        while ((dentry = get_next_positive_subdir(dentry, root))) {
+               int flags = how;
+
                spin_lock(&sbi->fs_lock);
                ino = autofs4_dentry_ino(dentry);
-               if (ino->flags & AUTOFS_INF_WANT_EXPIRE)
-                       expired = NULL;
-               else
-                       expired = should_expire(dentry, mnt, timeout, how);
-               if (!expired) {
+               if (ino->flags & AUTOFS_INF_WANT_EXPIRE) {
                        spin_unlock(&sbi->fs_lock);
                        continue;
                }
+               spin_unlock(&sbi->fs_lock);
+
+               expired = should_expire(dentry, mnt, timeout, flags);
+               if (!expired)
+                       continue;
+
+               spin_lock(&sbi->fs_lock);
                ino = autofs4_dentry_ino(expired);
                ino->flags |= AUTOFS_INF_WANT_EXPIRE;
                spin_unlock(&sbi->fs_lock);
                synchronize_rcu();
-               spin_lock(&sbi->fs_lock);
-               if (should_expire(expired, mnt, timeout, how)) {
-                       if (expired != dentry)
-                               dput(dentry);
-                       goto found;
-               }
 
+               /* Make sure a reference is not taken on found if
+                * things have changed.
+                */
+               flags &= ~AUTOFS_EXP_LEAVES;
+               found = should_expire(expired, mnt, timeout, how);
+               if (!found || found != expired)
+                       /* Something has changed, continue */
+                       goto next;
+
+               if (expired != dentry)
+                       dput(dentry);
+
+               spin_lock(&sbi->fs_lock);
+               goto found;
+next:
+               spin_lock(&sbi->fs_lock);
                ino->flags &= ~AUTOFS_INF_WANT_EXPIRE;
+               spin_unlock(&sbi->fs_lock);
                if (expired != dentry)
                        dput(expired);
-               spin_unlock(&sbi->fs_lock);
        }
        return NULL;
 
@@ -483,6 +500,7 @@ int autofs4_expire_wait(struct dentry *dentry, int rcu_walk)
        struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
        struct autofs_info *ino = autofs4_dentry_ino(dentry);
        int status;
+       int state;
 
        /* Block on any pending expire */
        if (!(ino->flags & AUTOFS_INF_WANT_EXPIRE))
@@ -490,8 +508,19 @@ int autofs4_expire_wait(struct dentry *dentry, int rcu_walk)
        if (rcu_walk)
                return -ECHILD;
 
+retry:
        spin_lock(&sbi->fs_lock);
-       if (ino->flags & AUTOFS_INF_EXPIRING) {
+       state = ino->flags & (AUTOFS_INF_WANT_EXPIRE | AUTOFS_INF_EXPIRING);
+       if (state == AUTOFS_INF_WANT_EXPIRE) {
+               spin_unlock(&sbi->fs_lock);
+               /*
+                * Possibly being selected for expire, wait until
+                * it's selected or not.
+                */
+               schedule_timeout_uninterruptible(HZ/10);
+               goto retry;
+       }
+       if (state & AUTOFS_INF_EXPIRING) {
                spin_unlock(&sbi->fs_lock);
 
                pr_debug("waiting for expire %p name=%pd\n", dentry, dentry);
index 6bbec5e..14ae4b8 100644 (file)
@@ -609,6 +609,9 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
        char *s, *p;
        char sep;
 
+       if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
+               return dget(sb->s_root);
+
        full_path = cifs_build_path_to_root(vol, cifs_sb,
                                            cifs_sb_master_tcon(cifs_sb));
        if (full_path == NULL)
@@ -686,26 +689,22 @@ cifs_do_mount(struct file_system_type *fs_type,
        cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
        if (cifs_sb->mountdata == NULL) {
                root = ERR_PTR(-ENOMEM);
-               goto out_cifs_sb;
+               goto out_free;
        }
 
-       if (volume_info->prepath) {
-               cifs_sb->prepath = kstrdup(volume_info->prepath, GFP_KERNEL);
-               if (cifs_sb->prepath == NULL) {
-                       root = ERR_PTR(-ENOMEM);
-                       goto out_cifs_sb;
-               }
+       rc = cifs_setup_cifs_sb(volume_info, cifs_sb);
+       if (rc) {
+               root = ERR_PTR(rc);
+               goto out_free;
        }
 
-       cifs_setup_cifs_sb(volume_info, cifs_sb);
-
        rc = cifs_mount(cifs_sb, volume_info);
        if (rc) {
                if (!(flags & MS_SILENT))
                        cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
                                 rc);
                root = ERR_PTR(rc);
-               goto out_mountdata;
+               goto out_free;
        }
 
        mnt_data.vol = volume_info;
@@ -735,11 +734,7 @@ cifs_do_mount(struct file_system_type *fs_type,
                sb->s_flags |= MS_ACTIVE;
        }
 
-       if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
-               root = dget(sb->s_root);
-       else
-               root = cifs_get_root(volume_info, sb);
-
+       root = cifs_get_root(volume_info, sb);
        if (IS_ERR(root))
                goto out_super;
 
@@ -752,9 +747,9 @@ out:
        cifs_cleanup_volume_info(volume_info);
        return root;
 
-out_mountdata:
+out_free:
+       kfree(cifs_sb->prepath);
        kfree(cifs_sb->mountdata);
-out_cifs_sb:
        kfree(cifs_sb);
 out_nls:
        unload_nls(volume_info->local_nls);
index 1243bd3..95dab43 100644 (file)
@@ -184,7 +184,7 @@ extern int cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
                                 unsigned int to_read);
 extern int cifs_read_page_from_socket(struct TCP_Server_Info *server,
                                      struct page *page, unsigned int to_read);
-extern void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
+extern int cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
                               struct cifs_sb_info *cifs_sb);
 extern int cifs_match_super(struct super_block *, void *);
 extern void cifs_cleanup_volume_info(struct smb_vol *pvolume_info);
index 7ae0328..2e4f4ba 100644 (file)
@@ -2781,6 +2781,24 @@ compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
        return 1;
 }
 
+static int
+match_prepath(struct super_block *sb, struct cifs_mnt_data *mnt_data)
+{
+       struct cifs_sb_info *old = CIFS_SB(sb);
+       struct cifs_sb_info *new = mnt_data->cifs_sb;
+
+       if (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) {
+               if (!(new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH))
+                       return 0;
+               /* The prepath should be null terminated strings */
+               if (strcmp(new->prepath, old->prepath))
+                       return 0;
+
+               return 1;
+       }
+       return 0;
+}
+
 int
 cifs_match_super(struct super_block *sb, void *data)
 {
@@ -2808,7 +2826,8 @@ cifs_match_super(struct super_block *sb, void *data)
 
        if (!match_server(tcp_srv, volume_info) ||
            !match_session(ses, volume_info) ||
-           !match_tcon(tcon, volume_info->UNC)) {
+           !match_tcon(tcon, volume_info->UNC) ||
+           !match_prepath(sb, mnt_data)) {
                rc = 0;
                goto out;
        }
@@ -3222,7 +3241,7 @@ void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon,
        }
 }
 
-void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
+int cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
                        struct cifs_sb_info *cifs_sb)
 {
        INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks);
@@ -3316,6 +3335,14 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
 
        if ((pvolume_info->cifs_acl) && (pvolume_info->dynperm))
                cifs_dbg(VFS, "mount option dynperm ignored if cifsacl mount option supported\n");
+
+       if (pvolume_info->prepath) {
+               cifs_sb->prepath = kstrdup(pvolume_info->prepath, GFP_KERNEL);
+               if (cifs_sb->prepath == NULL)
+                       return -ENOMEM;
+       }
+
+       return 0;
 }
 
 static void
index d2f97ec..e0e5f7c 100644 (file)
@@ -67,18 +67,7 @@ static int fanotify_get_response(struct fsnotify_group *group,
 
        pr_debug("%s: group=%p event=%p\n", __func__, group, event);
 
-       wait_event(group->fanotify_data.access_waitq, event->response ||
-                               atomic_read(&group->fanotify_data.bypass_perm));
-
-       if (!event->response) { /* bypass_perm set */
-               /*
-                * Event was canceled because group is being destroyed. Remove
-                * it from group's event list because we are responsible for
-                * freeing the permission event.
-                */
-               fsnotify_remove_event(group, &event->fae.fse);
-               return 0;
-       }
+       wait_event(group->fanotify_data.access_waitq, event->response);
 
        /* userspace responded, convert to something usable */
        switch (event->response) {
index 8e8e6bc..a643138 100644 (file)
@@ -358,16 +358,20 @@ static int fanotify_release(struct inode *ignored, struct file *file)
 
 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
        struct fanotify_perm_event_info *event, *next;
+       struct fsnotify_event *fsn_event;
 
        /*
-        * There may be still new events arriving in the notification queue
-        * but since userspace cannot use fanotify fd anymore, no event can
-        * enter or leave access_list by now.
+        * Stop new events from arriving in the notification queue. Since
+        * userspace cannot use fanotify fd anymore, no event can enter or
+        * leave access_list by now either.
         */
-       spin_lock(&group->fanotify_data.access_lock);
-
-       atomic_inc(&group->fanotify_data.bypass_perm);
+       fsnotify_group_stop_queueing(group);
 
+       /*
+        * Process all permission events on access_list and notification queue
+        * and simulate reply from userspace.
+        */
+       spin_lock(&group->fanotify_data.access_lock);
        list_for_each_entry_safe(event, next, &group->fanotify_data.access_list,
                                 fae.fse.list) {
                pr_debug("%s: found group=%p event=%p\n", __func__, group,
@@ -379,12 +383,21 @@ static int fanotify_release(struct inode *ignored, struct file *file)
        spin_unlock(&group->fanotify_data.access_lock);
 
        /*
-        * Since bypass_perm is set, newly queued events will not wait for
-        * access response. Wake up the already sleeping ones now.
-        * synchronize_srcu() in fsnotify_destroy_group() will wait for all
-        * processes sleeping in fanotify_handle_event() waiting for access
-        * response and thus also for all permission events to be freed.
+        * Destroy all non-permission events. For permission events just
+        * dequeue them and set the response. They will be freed once the
+        * response is consumed and fanotify_get_response() returns.
         */
+       mutex_lock(&group->notification_mutex);
+       while (!fsnotify_notify_queue_is_empty(group)) {
+               fsn_event = fsnotify_remove_first_event(group);
+               if (!(fsn_event->mask & FAN_ALL_PERM_EVENTS))
+                       fsnotify_destroy_event(group, fsn_event);
+               else
+                       FANOTIFY_PE(fsn_event)->response = FAN_ALLOW;
+       }
+       mutex_unlock(&group->notification_mutex);
+
+       /* Response for all permission events is set, wakeup waiters */
        wake_up(&group->fanotify_data.access_waitq);
 #endif
 
@@ -755,7 +768,6 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
        spin_lock_init(&group->fanotify_data.access_lock);
        init_waitqueue_head(&group->fanotify_data.access_waitq);
        INIT_LIST_HEAD(&group->fanotify_data.access_list);
-       atomic_set(&group->fanotify_data.bypass_perm, 0);
 #endif
        switch (flags & FAN_ALL_CLASS_BITS) {
        case FAN_CLASS_NOTIF:
index 3e2dd85..b47f7cf 100644 (file)
@@ -39,6 +39,17 @@ static void fsnotify_final_destroy_group(struct fsnotify_group *group)
        kfree(group);
 }
 
+/*
+ * Stop queueing new events for this group. Once this function returns
+ * fsnotify_add_event() will not add any new events to the group's queue.
+ */
+void fsnotify_group_stop_queueing(struct fsnotify_group *group)
+{
+       mutex_lock(&group->notification_mutex);
+       group->shutdown = true;
+       mutex_unlock(&group->notification_mutex);
+}
+
 /*
  * Trying to get rid of a group. Remove all marks, flush all events and release
  * the group reference.
@@ -47,6 +58,14 @@ static void fsnotify_final_destroy_group(struct fsnotify_group *group)
  */
 void fsnotify_destroy_group(struct fsnotify_group *group)
 {
+       /*
+        * Stop queueing new events. The code below is careful enough to not
+        * require this but fanotify needs to stop queuing events even before
+        * fsnotify_destroy_group() is called and this makes the other callers
+        * of fsnotify_destroy_group() to see the same behavior.
+        * of fsnotify_destroy_group() see the same behavior.
+       fsnotify_group_stop_queueing(group);
+
        /* clear all inode marks for this group, attach them to destroy_list */
        fsnotify_detach_group_marks(group);
 
index a95d8e0..e455e83 100644 (file)
@@ -82,7 +82,8 @@ void fsnotify_destroy_event(struct fsnotify_group *group,
  * Add an event to the group notification queue.  The group can later pull this
  * event off the queue to deal with.  The function returns 0 if the event was
  * added to the queue, 1 if the event was merged with some other queued event,
- * 2 if the queue of events has overflown.
+ * 2 if the event was not queued - either the queue of events has overflown
+ * or the group is shutting down.
  */
 int fsnotify_add_event(struct fsnotify_group *group,
                       struct fsnotify_event *event,
@@ -96,6 +97,11 @@ int fsnotify_add_event(struct fsnotify_group *group,
 
        mutex_lock(&group->notification_mutex);
 
+       if (group->shutdown) {
+               mutex_unlock(&group->notification_mutex);
+               return 2;
+       }
+
        if (group->q_len >= group->max_events) {
                ret = 2;
                /* Queue overflow event only if it isn't already queued */
@@ -125,21 +131,6 @@ queue:
        return ret;
 }
 
-/*
- * Remove @event from group's notification queue. It is the responsibility of
- * the caller to destroy the event.
- */
-void fsnotify_remove_event(struct fsnotify_group *group,
-                          struct fsnotify_event *event)
-{
-       mutex_lock(&group->notification_mutex);
-       if (!list_empty(&event->list)) {
-               list_del_init(&event->list);
-               group->q_len--;
-       }
-       mutex_unlock(&group->notification_mutex);
-}
-
 /*
  * Remove and return the first event from the notification list.  It is the
  * responsibility of the caller to destroy the obtained event
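
The group.c and notification.c hunks above establish a stop-then-drain contract: fsnotify_group_stop_queueing() flips group->shutdown under notification_mutex, fsnotify_add_event() reports 2 for any later attempt, and the fanotify release path can then drain the queue without racing new permission events. A generic user-space illustration of that pattern (pthreads, hypothetical names, not the fsnotify code itself):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool shutdown_flag;
static int queued;

static int enqueue(void)
{
        int ret = 0;

        pthread_mutex_lock(&lock);
        if (shutdown_flag)
                ret = 2;                /* mirrors fsnotify_add_event() */
        else
                queued++;
        pthread_mutex_unlock(&lock);
        return ret;
}

static void stop_and_drain(void)
{
        pthread_mutex_lock(&lock);
        shutdown_flag = true;           /* like fsnotify_group_stop_queueing() */
        queued = 0;                     /* drain under the same lock */
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        enqueue();
        stop_and_drain();
        printf("late enqueue returns %d, queued=%d\n", enqueue(), queued);
        return 0;
}
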
index 7dabbc3..f165f86 100644 (file)
@@ -5922,7 +5922,6 @@ bail:
 }
 
 static int ocfs2_replay_truncate_records(struct ocfs2_super *osb,
-                                        handle_t *handle,
                                         struct inode *data_alloc_inode,
                                         struct buffer_head *data_alloc_bh)
 {
@@ -5935,11 +5934,19 @@ static int ocfs2_replay_truncate_records(struct ocfs2_super *osb,
        struct ocfs2_truncate_log *tl;
        struct inode *tl_inode = osb->osb_tl_inode;
        struct buffer_head *tl_bh = osb->osb_tl_bh;
+       handle_t *handle;
 
        di = (struct ocfs2_dinode *) tl_bh->b_data;
        tl = &di->id2.i_dealloc;
        i = le16_to_cpu(tl->tl_used) - 1;
        while (i >= 0) {
+               handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_FLUSH_ONE_REC);
+               if (IS_ERR(handle)) {
+                       status = PTR_ERR(handle);
+                       mlog_errno(status);
+                       goto bail;
+               }
+
                /* Caller has given us at least enough credits to
                 * update the truncate log dinode */
                status = ocfs2_journal_access_di(handle, INODE_CACHE(tl_inode), tl_bh,
@@ -5974,12 +5981,7 @@ static int ocfs2_replay_truncate_records(struct ocfs2_super *osb,
                        }
                }
 
-               status = ocfs2_extend_trans(handle,
-                               OCFS2_TRUNCATE_LOG_FLUSH_ONE_REC);
-               if (status < 0) {
-                       mlog_errno(status);
-                       goto bail;
-               }
+               ocfs2_commit_trans(osb, handle);
                i--;
        }
 
@@ -5994,7 +5996,6 @@ int __ocfs2_flush_truncate_log(struct ocfs2_super *osb)
 {
        int status;
        unsigned int num_to_flush;
-       handle_t *handle;
        struct inode *tl_inode = osb->osb_tl_inode;
        struct inode *data_alloc_inode = NULL;
        struct buffer_head *tl_bh = osb->osb_tl_bh;
@@ -6038,21 +6039,11 @@ int __ocfs2_flush_truncate_log(struct ocfs2_super *osb)
                goto out_mutex;
        }
 
-       handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_FLUSH_ONE_REC);
-       if (IS_ERR(handle)) {
-               status = PTR_ERR(handle);
-               mlog_errno(status);
-               goto out_unlock;
-       }
-
-       status = ocfs2_replay_truncate_records(osb, handle, data_alloc_inode,
+       status = ocfs2_replay_truncate_records(osb, data_alloc_inode,
                                               data_alloc_bh);
        if (status < 0)
                mlog_errno(status);
 
-       ocfs2_commit_trans(osb, handle);
-
-out_unlock:
        brelse(data_alloc_bh);
        ocfs2_inode_unlock(data_alloc_inode, 1);
 
@@ -6413,43 +6404,34 @@ static int ocfs2_free_cached_blocks(struct ocfs2_super *osb,
                goto out_mutex;
        }
 
-       handle = ocfs2_start_trans(osb, OCFS2_SUBALLOC_FREE);
-       if (IS_ERR(handle)) {
-               ret = PTR_ERR(handle);
-               mlog_errno(ret);
-               goto out_unlock;
-       }
-
        while (head) {
                if (head->free_bg)
                        bg_blkno = head->free_bg;
                else
                        bg_blkno = ocfs2_which_suballoc_group(head->free_blk,
                                                              head->free_bit);
+               handle = ocfs2_start_trans(osb, OCFS2_SUBALLOC_FREE);
+               if (IS_ERR(handle)) {
+                       ret = PTR_ERR(handle);
+                       mlog_errno(ret);
+                       goto out_unlock;
+               }
+
                trace_ocfs2_free_cached_blocks(
                     (unsigned long long)head->free_blk, head->free_bit);
 
                ret = ocfs2_free_suballoc_bits(handle, inode, di_bh,
                                               head->free_bit, bg_blkno, 1);
-               if (ret) {
+               if (ret)
                        mlog_errno(ret);
-                       goto out_journal;
-               }
 
-               ret = ocfs2_extend_trans(handle, OCFS2_SUBALLOC_FREE);
-               if (ret) {
-                       mlog_errno(ret);
-                       goto out_journal;
-               }
+               ocfs2_commit_trans(osb, handle);
 
                tmp = head;
                head = head->free_next;
                kfree(tmp);
        }
 
-out_journal:
-       ocfs2_commit_trans(osb, handle);
-
 out_unlock:
        ocfs2_inode_unlock(inode, 1);
        brelse(di_bh);
index 94b1836..b95e7df 100644 (file)
@@ -44,9 +44,6 @@
  * version here in tcp_internal.h should not need to be bumped for
  * filesystem locking changes.
  *
- * New in version 12
- *     - Negotiate hb timeout when storage is down.
- *
  * New in version 11
  *     - Negotiation of filesystem locking in the dlm join.
  *
@@ -78,7 +75,7 @@
  *     - full 64 bit i_size in the metadata lock lvbs
  *     - introduction of "rw" lock and pushing meta/data locking down
  */
-#define O2NET_PROTOCOL_VERSION 12ULL
+#define O2NET_PROTOCOL_VERSION 11ULL
 struct o2net_handshake {
        __be64  protocol_version;
        __be64  connector_id;
index cdeafb4..0bb1286 100644 (file)
@@ -268,7 +268,6 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
                                  struct dlm_lock *lock, int flags, int type)
 {
        enum dlm_status status;
-       u8 old_owner = res->owner;
 
        mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type,
             lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS);
@@ -335,7 +334,6 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
 
        spin_lock(&res->spinlock);
        res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
-       lock->convert_pending = 0;
        /* if it failed, move it back to granted queue.
         * if master returns DLM_NORMAL and then down before sending ast,
         * it may have already been moved to granted queue, reset to
@@ -344,12 +342,14 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
                if (status != DLM_NOTQUEUED)
                        dlm_error(status);
                dlm_revert_pending_convert(res, lock);
-       } else if ((res->state & DLM_LOCK_RES_RECOVERING) ||
-                       (old_owner != res->owner)) {
-               mlog(0, "res %.*s is in recovering or has been recovered.\n",
-                               res->lockname.len, res->lockname.name);
+       } else if (!lock->convert_pending) {
+               mlog(0, "%s: res %.*s, owner died and lock has been moved back "
+                               "to granted list, retry convert.\n",
+                               dlm->name, res->lockname.len, res->lockname.name);
                status = DLM_RECOVERING;
        }
+
+       lock->convert_pending = 0;
 bail:
        spin_unlock(&res->spinlock);
 
index 4e7b0dc..0b055bf 100644 (file)
@@ -1506,7 +1506,8 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
                                       u64 start, u64 len)
 {
        int ret = 0;
-       u64 tmpend, end = start + len;
+       u64 tmpend = 0;
+       u64 end = start + len;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        unsigned int csize = osb->s_clustersize;
        handle_t *handle;
@@ -1538,18 +1539,31 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
        }
 
        /*
-        * We want to get the byte offset of the end of the 1st cluster.
+        * If start is on a cluster boundary and end is somewhere in another
+        * cluster, we have not COWed the cluster starting at start, unless
+        * end is also within the same cluster. So, in this case, we skip this
+        * first call to ocfs2_zero_range_for_truncate() and move on
+        * to the next one.
         */
-       tmpend = (u64)osb->s_clustersize + (start & ~(osb->s_clustersize - 1));
-       if (tmpend > end)
-               tmpend = end;
+       if ((start & (csize - 1)) != 0) {
+               /*
+                * We want to get the byte offset of the end of the 1st
+                * cluster.
+                */
+               tmpend = (u64)osb->s_clustersize +
+                       (start & ~(osb->s_clustersize - 1));
+               if (tmpend > end)
+                       tmpend = end;
 
-       trace_ocfs2_zero_partial_clusters_range1((unsigned long long)start,
-                                                (unsigned long long)tmpend);
+               trace_ocfs2_zero_partial_clusters_range1(
+                       (unsigned long long)start,
+                       (unsigned long long)tmpend);
 
-       ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend);
-       if (ret)
-               mlog_errno(ret);
+               ret = ocfs2_zero_range_for_truncate(inode, handle, start,
+                                                   tmpend);
+               if (ret)
+                       mlog_errno(ret);
+       }
 
        if (tmpend < end) {
                /*
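
A worked example of the cluster-boundary test added above, with hypothetical numbers (cluster size 4096 bytes): only an unaligned start still needs the first zeroing call, and tmpend is capped at end.

#include <stdio.h>

int main(void)
{
        unsigned long long csize = 4096;        /* stands in for osb->s_clustersize */
        unsigned long long start = 6000, end = 20000;
        unsigned long long tmpend;

        if (start & (csize - 1)) {
                tmpend = csize + (start & ~(csize - 1));
                if (tmpend > end)
                        tmpend = end;
                printf("unaligned start: zero [%llu, %llu) first\n", start, tmpend);
        } else {
                printf("cluster-aligned start: skip the first zeroing call\n");
        }
        return 0;
}
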
index ea47120..6ad3533 100644 (file)
@@ -1199,14 +1199,24 @@ retry:
                        inode_unlock((*ac)->ac_inode);
 
                        ret = ocfs2_try_to_free_truncate_log(osb, bits_wanted);
-                       if (ret == 1)
+                       if (ret == 1) {
+                               iput((*ac)->ac_inode);
+                               (*ac)->ac_inode = NULL;
                                goto retry;
+                       }
 
                        if (ret < 0)
                                mlog_errno(ret);
 
                        inode_lock((*ac)->ac_inode);
-                       ocfs2_inode_lock((*ac)->ac_inode, NULL, 1);
+                       ret = ocfs2_inode_lock((*ac)->ac_inode, NULL, 1);
+                       if (ret < 0) {
+                               mlog_errno(ret);
+                               inode_unlock((*ac)->ac_inode);
+                               iput((*ac)->ac_inode);
+                               (*ac)->ac_inode = NULL;
+                               goto bail;
+                       }
                }
                if (status < 0) {
                        if (status != -ENOSPC)
index a939f5e..5c89a07 100644 (file)
@@ -430,6 +430,7 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
 static ssize_t
 read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 {
+       char *buf = file->private_data;
        ssize_t acc = 0;
        size_t size, tsz;
        size_t elf_buflen;
@@ -500,23 +501,20 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
                        if (clear_user(buffer, tsz))
                                return -EFAULT;
                } else if (is_vmalloc_or_module_addr((void *)start)) {
-                       char * elf_buf;
-
-                       elf_buf = kzalloc(tsz, GFP_KERNEL);
-                       if (!elf_buf)
-                               return -ENOMEM;
-                       vread(elf_buf, (char *)start, tsz);
+                       vread(buf, (char *)start, tsz);
                        /* we have to zero-fill user buffer even if no read */
-                       if (copy_to_user(buffer, elf_buf, tsz)) {
-                               kfree(elf_buf);
+                       if (copy_to_user(buffer, buf, tsz))
                                return -EFAULT;
-                       }
-                       kfree(elf_buf);
                } else {
                        if (kern_addr_valid(start)) {
                                unsigned long n;
 
-                               n = copy_to_user(buffer, (char *)start, tsz);
+                               /*
+                                * Using bounce buffer to bypass the
+                                * hardened user copy kernel text checks.
+                                */
+                               memcpy(buf, (char *) start, tsz);
+                               n = copy_to_user(buffer, buf, tsz);
                                /*
                                 * We cannot distinguish between fault on source
                                 * and fault on destination. When this happens
@@ -549,6 +547,11 @@ static int open_kcore(struct inode *inode, struct file *filp)
 {
        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;
+
+       filp->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL);
+       if (!filp->private_data)
+               return -ENOMEM;
+
        if (kcore_need_update)
                kcore_update_ram();
        if (i_size_read(inode) != proc_root_kcore->size) {
@@ -559,10 +562,16 @@ static int open_kcore(struct inode *inode, struct file *filp)
        return 0;
 }
 
+static int release_kcore(struct inode *inode, struct file *file)
+{
+       kfree(file->private_data);
+       return 0;
+}
 
 static const struct file_operations proc_kcore_operations = {
        .read           = read_kcore,
        .open           = open_kcore,
+       .release        = release_kcore,
        .llseek         = default_llseek,
 };
 
index 183a212..12af049 100644 (file)
 #include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/ramfs.h>
+#include <linux/sched.h>
 
 #include "internal.h"
 
+static unsigned long ramfs_mmu_get_unmapped_area(struct file *file,
+               unsigned long addr, unsigned long len, unsigned long pgoff,
+               unsigned long flags)
+{
+       return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+}
+
 const struct file_operations ramfs_file_operations = {
        .read_iter      = generic_file_read_iter,
        .write_iter     = generic_file_write_iter,
@@ -38,6 +46,7 @@ const struct file_operations ramfs_file_operations = {
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
        .llseek         = generic_file_llseek,
+       .get_unmapped_area      = ramfs_mmu_get_unmapped_area,
 };
 
 const struct inode_operations ramfs_file_inode_operations = {
index 82c3d3b..138bbf7 100644 (file)
@@ -162,10 +162,11 @@ static inline void cec_msg_standby(struct cec_msg *msg)
 
 
 /* One Touch Record Feature */
-static inline void cec_msg_record_off(struct cec_msg *msg)
+static inline void cec_msg_record_off(struct cec_msg *msg, bool reply)
 {
        msg->len = 2;
        msg->msg[1] = CEC_MSG_RECORD_OFF;
+       msg->reply = reply ? CEC_MSG_RECORD_STATUS : 0;
 }
 
 struct cec_op_arib_data {
@@ -227,7 +228,7 @@ static inline void cec_set_digital_service_id(__u8 *msg,
        if (digital->service_id_method == CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL) {
                *msg++ = (digital->channel.channel_number_fmt << 2) |
                         (digital->channel.major >> 8);
-               *msg++ = digital->channel.major && 0xff;
+               *msg++ = digital->channel.major & 0xff;
                *msg++ = digital->channel.minor >> 8;
                *msg++ = digital->channel.minor & 0xff;
                *msg++ = 0;
@@ -323,6 +324,7 @@ static inline void cec_msg_record_on_phys_addr(struct cec_msg *msg,
 }
 
 static inline void cec_msg_record_on(struct cec_msg *msg,
+                                    bool reply,
                                     const struct cec_op_record_src *rec_src)
 {
        switch (rec_src->type) {
@@ -346,6 +348,7 @@ static inline void cec_msg_record_on(struct cec_msg *msg,
                                            rec_src->ext_phys_addr.phys_addr);
                break;
        }
+       msg->reply = reply ? CEC_MSG_RECORD_STATUS : 0;
 }
 
 static inline void cec_ops_record_on(const struct cec_msg *msg,
@@ -1141,6 +1144,75 @@ static inline void cec_msg_give_device_vendor_id(struct cec_msg *msg,
        msg->reply = reply ? CEC_MSG_DEVICE_VENDOR_ID : 0;
 }
 
+static inline void cec_msg_vendor_command(struct cec_msg *msg,
+                                         __u8 size, const __u8 *vendor_cmd)
+{
+       if (size > 14)
+               size = 14;
+       msg->len = 2 + size;
+       msg->msg[1] = CEC_MSG_VENDOR_COMMAND;
+       memcpy(msg->msg + 2, vendor_cmd, size);
+}
+
+static inline void cec_ops_vendor_command(const struct cec_msg *msg,
+                                         __u8 *size,
+                                         const __u8 **vendor_cmd)
+{
+       *size = msg->len - 2;
+
+       if (*size > 14)
+               *size = 14;
+       *vendor_cmd = msg->msg + 2;
+}
+
+static inline void cec_msg_vendor_command_with_id(struct cec_msg *msg,
+                                                 __u32 vendor_id, __u8 size,
+                                                 const __u8 *vendor_cmd)
+{
+       if (size > 11)
+               size = 11;
+       msg->len = 5 + size;
+       msg->msg[1] = CEC_MSG_VENDOR_COMMAND_WITH_ID;
+       msg->msg[2] = vendor_id >> 16;
+       msg->msg[3] = (vendor_id >> 8) & 0xff;
+       msg->msg[4] = vendor_id & 0xff;
+       memcpy(msg->msg + 5, vendor_cmd, size);
+}
+
+static inline void cec_ops_vendor_command_with_id(const struct cec_msg *msg,
+                                                 __u32 *vendor_id,  __u8 *size,
+                                                 const __u8 **vendor_cmd)
+{
+       *size = msg->len - 5;
+
+       if (*size > 11)
+               *size = 11;
+       *vendor_id = (msg->msg[2] << 16) | (msg->msg[3] << 8) | msg->msg[4];
+       *vendor_cmd = msg->msg + 5;
+}
+
+static inline void cec_msg_vendor_remote_button_down(struct cec_msg *msg,
+                                                    __u8 size,
+                                                    const __u8 *rc_code)
+{
+       if (size > 14)
+               size = 14;
+       msg->len = 2 + size;
+       msg->msg[1] = CEC_MSG_VENDOR_REMOTE_BUTTON_DOWN;
+       memcpy(msg->msg + 2, rc_code, size);
+}
+
+static inline void cec_ops_vendor_remote_button_down(const struct cec_msg *msg,
+                                                    __u8 *size,
+                                                    const __u8 **rc_code)
+{
+       *size = msg->len - 2;
+
+       if (*size > 14)
+               *size = 14;
+       *rc_code = msg->msg + 2;
+}
+
 static inline void cec_msg_vendor_remote_button_up(struct cec_msg *msg)
 {
        msg->len = 2;
@@ -1277,7 +1349,7 @@ static inline void cec_msg_user_control_pressed(struct cec_msg *msg,
                msg->len += 4;
                msg->msg[3] = (ui_cmd->channel_identifier.channel_number_fmt << 2) |
                              (ui_cmd->channel_identifier.major >> 8);
-               msg->msg[4] = ui_cmd->channel_identifier.major && 0xff;
+               msg->msg[4] = ui_cmd->channel_identifier.major & 0xff;
                msg->msg[5] = ui_cmd->channel_identifier.minor >> 8;
                msg->msg[6] = ui_cmd->channel_identifier.minor & 0xff;
                break;
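
The two one-character fixes in this header ('major && 0xff' becoming 'major & 0xff') are worth spelling out: a logical AND collapses the major channel number to 0 or 1, while the bitwise AND keeps its low byte as intended. A quick standalone demonstration:

#include <stdio.h>

int main(void)
{
        unsigned int major = 0x1234;

        printf("major && 0xff = 0x%02x (old, always 0 or 1)\n", major && 0xff);
        printf("major &  0xff = 0x%02x (fixed, low byte)\n", major & 0xff);
        return 0;
}
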
index b3e2289..851968e 100644 (file)
@@ -364,7 +364,7 @@ struct cec_caps {
  * @num_log_addrs: how many logical addresses should be claimed. Set by the
  *     caller.
  * @vendor_id: the vendor ID of the device. Set by the caller.
- * @flags: set to 0.
+ * @flags: flags.
  * @osd_name: the OSD name of the device. Set by the caller.
  * @primary_device_type: the primary device type for each logical address.
  *     Set by the caller.
@@ -389,6 +389,9 @@ struct cec_log_addrs {
        __u8 features[CEC_MAX_LOG_ADDRS][12];
 };
 
+/* Allow a fallback to unregistered */
+#define CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK  (1 << 0)
+
 /* Events */
 
 /* Event that occurs when the adapter state changes */
index 242bf53..34bd805 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef __CPUHOTPLUG_H
 #define __CPUHOTPLUG_H
 
+#include <linux/types.h>
+
 enum cpuhp_state {
        CPUHP_OFFLINE,
        CPUHP_CREATE_THREADS,
index 58205f3..7268ed0 100644 (file)
@@ -148,6 +148,7 @@ struct fsnotify_group {
        #define FS_PRIO_1       1 /* fanotify content based access control */
        #define FS_PRIO_2       2 /* fanotify pre-content access */
        unsigned int priority;
+       bool shutdown;          /* group is being shut down, don't queue more events */
 
        /* stores all fastpath marks assoc with this group so they can be cleaned on unregister */
        struct mutex mark_mutex;        /* protect marks_list */
@@ -179,7 +180,6 @@ struct fsnotify_group {
                        spinlock_t access_lock;
                        struct list_head access_list;
                        wait_queue_head_t access_waitq;
-                       atomic_t bypass_perm;
 #endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */
                        int f_flags;
                        unsigned int max_marks;
@@ -292,6 +292,8 @@ extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *op
 extern void fsnotify_get_group(struct fsnotify_group *group);
 /* drop reference on a group from fsnotify_alloc_group */
 extern void fsnotify_put_group(struct fsnotify_group *group);
+/* group destruction begins, stop queuing new events */
+extern void fsnotify_group_stop_queueing(struct fsnotify_group *group);
 /* destroy group */
 extern void fsnotify_destroy_group(struct fsnotify_group *group);
 /* fasync handler function */
@@ -304,8 +306,6 @@ extern int fsnotify_add_event(struct fsnotify_group *group,
                              struct fsnotify_event *event,
                              int (*merge)(struct list_head *,
                                           struct fsnotify_event *));
-/* Remove passed event from groups notification queue */
-extern void fsnotify_remove_event(struct fsnotify_group *group, struct fsnotify_event *event);
 /* true if the group notification queue is empty */
 extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
 /* return, but do not dequeue the first event on the notification queue */
index b52424e..0ac26c8 100644 (file)
@@ -945,6 +945,16 @@ static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
 static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
 #endif
 
+/*
+ * The irqsave variants are for usage in non interrupt code. Do not use
+ * them in irq_chip callbacks. Use irq_gc_lock() instead.
+ */
+#define irq_gc_lock_irqsave(gc, flags) \
+       raw_spin_lock_irqsave(&(gc)->lock, flags)
+
+#define irq_gc_unlock_irqrestore(gc, flags)    \
+       raw_spin_unlock_irqrestore(&(gc)->lock, flags)
+
 static inline void irq_reg_writel(struct irq_chip_generic *gc,
                                  u32 val, int reg_offset)
 {
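
Per the comment in the hunk, the new helpers are for task-context paths that also need interrupts disabled, while irq_chip callbacks keep using irq_gc_lock(). A hedged kernel-context sketch (the function name and register access are hypothetical, not from this patch):

#include <linux/irq.h>

/* Example of a non-interrupt path, e.g. suspend/resume, taking the lock. */
static void demo_gc_quiesce(struct irq_chip_generic *gc)
{
        unsigned long flags;

        irq_gc_lock_irqsave(gc, flags);
        /* safely touch registers behind gc->reg_base from task context */
        irq_gc_unlock_irqrestore(gc, flags);
}
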
index 66a1260..7e3d537 100644 (file)
@@ -571,56 +571,56 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
  */
 static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
 {
-       int ret = 0;
        char __user *end = uaddr + size - 1;
 
        if (unlikely(size == 0))
-               return ret;
+               return 0;
 
+       if (unlikely(uaddr > end))
+               return -EFAULT;
        /*
         * Writing zeroes into userspace here is OK, because we know that if
         * the zero gets there, we'll be overwriting it.
         */
-       while (uaddr <= end) {
-               ret = __put_user(0, uaddr);
-               if (ret != 0)
-                       return ret;
+       do {
+               if (unlikely(__put_user(0, uaddr) != 0))
+                       return -EFAULT;
                uaddr += PAGE_SIZE;
-       }
+       } while (uaddr <= end);
 
        /* Check whether the range spilled into the next page. */
        if (((unsigned long)uaddr & PAGE_MASK) ==
                        ((unsigned long)end & PAGE_MASK))
-               ret = __put_user(0, end);
+               return __put_user(0, end);
 
-       return ret;
+       return 0;
 }
 
 static inline int fault_in_multipages_readable(const char __user *uaddr,
                                               int size)
 {
        volatile char c;
-       int ret = 0;
        const char __user *end = uaddr + size - 1;
 
        if (unlikely(size == 0))
-               return ret;
+               return 0;
 
-       while (uaddr <= end) {
-               ret = __get_user(c, uaddr);
-               if (ret != 0)
-                       return ret;
+       if (unlikely(uaddr > end))
+               return -EFAULT;
+
+       do {
+               if (unlikely(__get_user(c, uaddr) != 0))
+                       return -EFAULT;
                uaddr += PAGE_SIZE;
-       }
+       } while (uaddr <= end);
 
        /* Check whether the range spilled into the next page. */
        if (((unsigned long)uaddr & PAGE_MASK) ==
                        ((unsigned long)end & PAGE_MASK)) {
-               ret = __get_user(c, end);
-               (void)c;
+               return __get_user(c, end);
        }
 
-       return ret;
+       return 0;
 }
 
 int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
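
The new 'uaddr > end' guards close an overflow hole: if uaddr + size - 1 wraps past the top of the address space, the old 'while (uaddr <= end)' loops never ran and the functions returned 0 without faulting in a single page. A standalone illustration of the wrap, using nothing beyond standard C:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uintptr_t uaddr = UINTPTR_MAX - 0x0fff;  /* start of the last page */
        size_t size = 0x2000;                    /* range spills past the top */
        uintptr_t end = uaddr + size - 1;        /* wraps to a low address */

        printf("uaddr = %#" PRIxPTR ", end = %#" PRIxPTR "\n", uaddr, end);
        printf("old loop body would run: %s\n", uaddr <= end ? "yes" : "no");
        printf("new check returns -EFAULT: %s\n", uaddr > end ? "yes" : "no");
        return 0;
}
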
index 1b5d1cd..75b4aaf 100644 (file)
@@ -76,7 +76,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes);
 void iov_iter_advance(struct iov_iter *i, size_t bytes);
 int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
-int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes);
+#define iov_iter_fault_in_multipages_readable iov_iter_fault_in_readable
 size_t iov_iter_single_seg_count(const struct iov_iter *i);
 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i);
index dc7854b..fdb5d60 100644 (file)
@@ -57,8 +57,8 @@ struct cec_devnode {
        int minor;
        bool registered;
        bool unregistered;
-       struct mutex fhs_lock;
        struct list_head fhs;
+       struct mutex lock;
 };
 
 struct cec_adapter;
index 6793614..e693731 100644 (file)
@@ -27,6 +27,20 @@ static inline struct nf_conn_synproxy *nfct_synproxy_ext_add(struct nf_conn *ct)
 #endif
 }
 
+static inline bool nf_ct_add_synproxy(struct nf_conn *ct,
+                                     const struct nf_conn *tmpl)
+{
+       if (tmpl && nfct_synproxy(tmpl)) {
+               if (!nfct_seqadj_ext_add(ct))
+                       return false;
+
+               if (!nfct_synproxy_ext_add(ct))
+                       return false;
+       }
+
+       return true;
+}
+
 struct synproxy_stats {
        unsigned int                    syn_received;
        unsigned int                    cookie_invalid;
index efc0174..bafe2a0 100644 (file)
@@ -382,7 +382,7 @@ enum {
        ADDIP_SERIAL_SIGN_BIT = (1<<31)
 };
 
-static inline int ADDIP_SERIAL_gte(__u16 s, __u16 t)
+static inline int ADDIP_SERIAL_gte(__u32 s, __u32 t)
 {
        return ((s) == (t)) || (((t) - (s)) & ADDIP_SERIAL_SIGN_BIT);
 }
index ff5be7e..8741988 100644 (file)
@@ -1332,6 +1332,16 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
        if (!sk_has_account(sk))
                return;
        sk->sk_forward_alloc += size;
+
+       /* Avoid a possible overflow.
+        * TCP send queues can make this happen, if sk_mem_reclaim()
+        * is not called and more than 2 GBytes are released at once.
+        *
+        * If we reach 2 MBytes, reclaim 1 MBytes right now, there is
+        * no need to hold that much forward allocation anyway.
+        */
+       if (unlikely(sk->sk_forward_alloc >= 1 << 21))
+               __sk_mem_reclaim(sk, 1 << 20);
 }
 
 static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
index adfebd6..1793431 100644 (file)
@@ -1540,8 +1540,10 @@ int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
 void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
 int xfrm6_extract_header(struct sk_buff *skb);
 int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
-int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi);
+int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi,
+                 struct ip6_tnl *t);
 int xfrm6_transport_finish(struct sk_buff *skb, int async);
+int xfrm6_rcv_tnl(struct sk_buff *skb, struct ip6_tnl *t);
 int xfrm6_rcv(struct sk_buff *skb);
 int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
                     xfrm_address_t *saddr, u8 proto);
index d1c51b7..5e8dab5 100644 (file)
@@ -6270,6 +6270,12 @@ void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
        if (cgroup_sk_alloc_disabled)
                return;
 
+       /* Socket clone path */
+       if (skcd->val) {
+               cgroup_get(sock_cgroup_ptr(skcd));
+               return;
+       }
+
        rcu_read_lock();
 
        while (true) {
index 9e8c738..7e3138c 100644 (file)
@@ -290,26 +290,6 @@ done:
        return wanted - bytes;
 }
 
-/*
- * Fault in the first iovec of the given iov_iter, to a maximum length
- * of bytes. Returns 0 on success, or non-zero if the memory could not be
- * accessed (ie. because it is an invalid address).
- *
- * writev-intensive code may want this to prefault several iovecs -- that
- * would be possible (callers must not rely on the fact that _only_ the
- * first iovec will be faulted with the current implementation).
- */
-int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
-{
-       if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
-               char __user *buf = i->iov->iov_base + i->iov_offset;
-               bytes = min(bytes, i->iov->iov_len - i->iov_offset);
-               return fault_in_pages_readable(buf, bytes);
-       }
-       return 0;
-}
-EXPORT_SYMBOL(iov_iter_fault_in_readable);
-
 /*
  * Fault in one or more iovecs of the given iov_iter, to a maximum length of
  * bytes.  For each iovec, fault in each page that constitutes the iovec.
@@ -317,7 +297,7 @@ EXPORT_SYMBOL(iov_iter_fault_in_readable);
  * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
  * because it is an invalid address).
  */
-int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes)
+int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
 {
        size_t skip = i->iov_offset;
        const struct iovec *iov;
@@ -334,7 +314,7 @@ int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes)
        }
        return 0;
 }
-EXPORT_SYMBOL(iov_iter_fault_in_multipages_readable);
+EXPORT_SYMBOL(iov_iter_fault_in_readable);
 
 void iov_iter_init(struct iov_iter *i, int direction,
                        const struct iovec *iov, unsigned long nr_segs,
index 8865bfb..74c7cae 100644 (file)
@@ -42,9 +42,11 @@ const struct trace_print_flags vmaflag_names[] = {
 
 void __dump_page(struct page *page, const char *reason)
 {
+       int mapcount = PageSlab(page) ? 0 : page_mapcount(page);
+
        pr_emerg("page:%p count:%d mapcount:%d mapping:%p index:%#lx",
-                 page, page_ref_count(page), page_mapcount(page),
-                 page->mapping, page->index);
+                 page, page_ref_count(page), mapcount,
+                 page->mapping, page_to_pgoff(page));
        if (PageCompound(page))
                pr_cont(" compound_mapcount: %d", compound_mapcount(page));
        pr_cont("\n");
index 79c52d0..728d779 100644 (file)
@@ -838,7 +838,8 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
  * value (scan code).
  */
 
-static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address)
+static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
+               struct vm_area_struct **vmap)
 {
        struct vm_area_struct *vma;
        unsigned long hstart, hend;
@@ -846,7 +847,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address)
        if (unlikely(khugepaged_test_exit(mm)))
                return SCAN_ANY_PROCESS;
 
-       vma = find_vma(mm, address);
+       *vmap = vma = find_vma(mm, address);
        if (!vma)
                return SCAN_VMA_NULL;
 
@@ -881,6 +882,11 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
                .pmd = pmd,
        };
 
+       /* we only decide to swapin, if there is enough young ptes */
+       if (referenced < HPAGE_PMD_NR/2) {
+               trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
+               return false;
+       }
        fe.pte = pte_offset_map(pmd, address);
        for (; fe.address < address + HPAGE_PMD_NR*PAGE_SIZE;
                        fe.pte++, fe.address += PAGE_SIZE) {
@@ -888,17 +894,12 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
                if (!is_swap_pte(pteval))
                        continue;
                swapped_in++;
-               /* we only decide to swapin, if there is enough young ptes */
-               if (referenced < HPAGE_PMD_NR/2) {
-                       trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
-                       return false;
-               }
                ret = do_swap_page(&fe, pteval);
 
                /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
                if (ret & VM_FAULT_RETRY) {
                        down_read(&mm->mmap_sem);
-                       if (hugepage_vma_revalidate(mm, address)) {
+                       if (hugepage_vma_revalidate(mm, address, &fe.vma)) {
                                /* vma is no longer available, don't continue to swapin */
                                trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
                                return false;
@@ -923,7 +924,6 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
 static void collapse_huge_page(struct mm_struct *mm,
                                   unsigned long address,
                                   struct page **hpage,
-                                  struct vm_area_struct *vma,
                                   int node, int referenced)
 {
        pmd_t *pmd, _pmd;
@@ -933,6 +933,7 @@ static void collapse_huge_page(struct mm_struct *mm,
        spinlock_t *pmd_ptl, *pte_ptl;
        int isolated = 0, result = 0;
        struct mem_cgroup *memcg;
+       struct vm_area_struct *vma;
        unsigned long mmun_start;       /* For mmu_notifiers */
        unsigned long mmun_end;         /* For mmu_notifiers */
        gfp_t gfp;
@@ -961,7 +962,7 @@ static void collapse_huge_page(struct mm_struct *mm,
        }
 
        down_read(&mm->mmap_sem);
-       result = hugepage_vma_revalidate(mm, address);
+       result = hugepage_vma_revalidate(mm, address, &vma);
        if (result) {
                mem_cgroup_cancel_charge(new_page, memcg, true);
                up_read(&mm->mmap_sem);
@@ -994,7 +995,7 @@ static void collapse_huge_page(struct mm_struct *mm,
         * handled by the anon_vma lock + PG_lock.
         */
        down_write(&mm->mmap_sem);
-       result = hugepage_vma_revalidate(mm, address);
+       result = hugepage_vma_revalidate(mm, address, &vma);
        if (result)
                goto out;
        /* check if the pmd is still valid */
@@ -1202,7 +1203,7 @@ out_unmap:
        if (ret) {
                node = khugepaged_find_target_node();
                /* collapse_huge_page will return with the mmap_sem released */
-               collapse_huge_page(mm, address, hpage, vma, node, referenced);
+               collapse_huge_page(mm, address, hpage, node, referenced);
        }
 out:
        trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
index 9a6a51a..4be518d 100644 (file)
@@ -1740,17 +1740,22 @@ static DEFINE_MUTEX(percpu_charge_mutex);
 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
        struct memcg_stock_pcp *stock;
+       unsigned long flags;
        bool ret = false;
 
        if (nr_pages > CHARGE_BATCH)
                return ret;
 
-       stock = &get_cpu_var(memcg_stock);
+       local_irq_save(flags);
+
+       stock = this_cpu_ptr(&memcg_stock);
        if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
                stock->nr_pages -= nr_pages;
                ret = true;
        }
-       put_cpu_var(memcg_stock);
+
+       local_irq_restore(flags);
+
        return ret;
 }
 
@@ -1771,15 +1776,18 @@ static void drain_stock(struct memcg_stock_pcp *stock)
        stock->cached = NULL;
 }
 
-/*
- * This must be called under preempt disabled or must be called by
- * a thread which is pinned to local cpu.
- */
 static void drain_local_stock(struct work_struct *dummy)
 {
-       struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock);
+       struct memcg_stock_pcp *stock;
+       unsigned long flags;
+
+       local_irq_save(flags);
+
+       stock = this_cpu_ptr(&memcg_stock);
        drain_stock(stock);
        clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
+
+       local_irq_restore(flags);
 }
 
 /*
@@ -1788,14 +1796,19 @@ static void drain_local_stock(struct work_struct *dummy)
  */
 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
-       struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
+       struct memcg_stock_pcp *stock;
+       unsigned long flags;
+
+       local_irq_save(flags);
 
+       stock = this_cpu_ptr(&memcg_stock);
        if (stock->cached != memcg) { /* reset if necessary */
                drain_stock(stock);
                stock->cached = memcg;
        }
        stock->nr_pages += nr_pages;
-       put_cpu_var(memcg_stock);
+
+       local_irq_restore(flags);
 }
 
 /*
index 41266dc..b58906b 100644 (file)
@@ -1567,7 +1567,9 @@ static struct page *new_node_page(struct page *page, unsigned long private,
                return alloc_huge_page_node(page_hstate(compound_head(page)),
                                        next_node_in(nid, nmask));
 
-       node_clear(nid, nmask);
+       if (nid != next_node_in(nid, nmask))
+               node_clear(nid, nmask);
+
        if (PageHighMem(page)
            || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
                gfp_mask |= __GFP_HIGHMEM;
index 16bd82f..eafe5dd 100644 (file)
@@ -264,6 +264,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
        int ret;
        struct swap_info_struct *sis = page_swap_info(page);
 
+       BUG_ON(!PageSwapCache(page));
        if (sis->flags & SWP_FILE) {
                struct kiocb kiocb;
                struct file *swap_file = sis->swap_file;
@@ -337,6 +338,7 @@ int swap_readpage(struct page *page)
        int ret = 0;
        struct swap_info_struct *sis = page_swap_info(page);
 
+       BUG_ON(!PageSwapCache(page));
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageUptodate(page), page);
        if (frontswap_load(page) == 0) {
@@ -386,6 +388,7 @@ int swap_set_page_dirty(struct page *page)
 
        if (sis->flags & SWP_FILE) {
                struct address_space *mapping = sis->swap_file->f_mapping;
+               BUG_ON(!PageSwapCache(page));
                return mapping->a_ops->set_page_dirty(page);
        } else {
                return __set_page_dirty_no_writeback(page);
index 78cfa29..2657acc 100644 (file)
@@ -2724,7 +2724,6 @@ int swapcache_prepare(swp_entry_t entry)
 struct swap_info_struct *page_swap_info(struct page *page)
 {
        swp_entry_t swap = { .val = page_private(page) };
-       BUG_ON(!PageSwapCache(page));
        return swap_info[swp_type(swap)];
 }
 
index 089328f..3c8da0a 100644 (file)
@@ -207,8 +207,11 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n,
         * Some architectures (arm64) return true for virt_addr_valid() on
         * vmalloced addresses. Work around this by checking for vmalloc
         * first.
+        *
+        * We also need to check for module addresses explicitly since we
+        * may copy static data from modules to userspace
         */
-       if (is_vmalloc_addr(ptr))
+       if (is_vmalloc_or_module_addr(ptr))
                return NULL;
 
        if (!virt_addr_valid(ptr))
index 7d17001..ee08540 100644 (file)
@@ -335,7 +335,7 @@ int batadv_v_elp_iface_enable(struct batadv_hard_iface *hard_iface)
                goto out;
 
        skb_reserve(hard_iface->bat_v.elp_skb, ETH_HLEN + NET_IP_ALIGN);
-       elp_buff = skb_push(hard_iface->bat_v.elp_skb, BATADV_ELP_HLEN);
+       elp_buff = skb_put(hard_iface->bat_v.elp_skb, BATADV_ELP_HLEN);
        elp_packet = (struct batadv_elp_packet *)elp_buff;
        memset(elp_packet, 0, BATADV_ELP_HLEN);
 
index 7602c00..3d19947 100644 (file)
@@ -469,6 +469,29 @@ static int batadv_check_unicast_packet(struct batadv_priv *bat_priv,
        return 0;
 }
 
+/**
+ * batadv_last_bonding_get - Get last_bonding_candidate of orig_node
+ * @orig_node: originator node whose last bonding candidate should be retrieved
+ *
+ * Return: last bonding candidate of router or NULL if not found
+ *
+ * The object is returned with refcounter increased by 1.
+ */
+static struct batadv_orig_ifinfo *
+batadv_last_bonding_get(struct batadv_orig_node *orig_node)
+{
+       struct batadv_orig_ifinfo *last_bonding_candidate;
+
+       spin_lock_bh(&orig_node->neigh_list_lock);
+       last_bonding_candidate = orig_node->last_bonding_candidate;
+
+       if (last_bonding_candidate)
+               kref_get(&last_bonding_candidate->refcount);
+       spin_unlock_bh(&orig_node->neigh_list_lock);
+
+       return last_bonding_candidate;
+}
+
 /**
  * batadv_last_bonding_replace - Replace last_bonding_candidate of orig_node
  * @orig_node: originator node whose bonding candidates should be replaced
@@ -539,7 +562,7 @@ batadv_find_router(struct batadv_priv *bat_priv,
         * router - obviously there are no other candidates.
         */
        rcu_read_lock();
-       last_candidate = orig_node->last_bonding_candidate;
+       last_candidate = batadv_last_bonding_get(orig_node);
        if (last_candidate)
                last_cand_router = rcu_dereference(last_candidate->router);
 
@@ -631,6 +654,9 @@ next:
                batadv_orig_ifinfo_put(next_candidate);
        }
 
+       if (last_candidate)
+               batadv_orig_ifinfo_put(last_candidate);
+
        return router;
 }
 
index 25dab8b..fd7b41e 100644 (file)
@@ -1362,7 +1362,6 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
                if (!try_module_get(prot->owner))
                        goto out_free_sec;
                sk_tx_queue_clear(sk);
-               cgroup_sk_alloc(&sk->sk_cgrp_data);
        }
 
        return sk;
@@ -1422,6 +1421,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
                sock_net_set(sk, net);
                atomic_set(&sk->sk_wmem_alloc, 1);
 
+               cgroup_sk_alloc(&sk->sk_cgrp_data);
                sock_update_classid(&sk->sk_cgrp_data);
                sock_update_netprioidx(&sk->sk_cgrp_data);
        }
@@ -1566,6 +1566,9 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
                newsk->sk_priority = 0;
                newsk->sk_incoming_cpu = raw_smp_processor_id();
                atomic64_set(&newsk->sk_cookie, 0);
+
+               cgroup_sk_alloc(&newsk->sk_cgrp_data);
+
                /*
                 * Before updating sk_refcnt, we must commit prior changes to memory
                 * (Documentation/RCU/rculist_nulls.txt for details)
index 4b351af..d6feabb 100644 (file)
@@ -312,6 +312,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
        const struct iphdr *iph = ip_hdr(skb);
        struct rtable *rt;
+       struct net_device *dev = skb->dev;
 
        /* if ingress device is enslaved to an L3 master device pass the
         * skb to its handler for processing
@@ -341,7 +342,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
         */
        if (!skb_valid_dst(skb)) {
                int err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
-                                              iph->tos, skb->dev);
+                                              iph->tos, dev);
                if (unlikely(err)) {
                        if (err == -EXDEV)
                                __NET_INC_STATS(net, LINUX_MIB_IPRPFILTER);
@@ -370,7 +371,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
                __IP_UPD_PO_STATS(net, IPSTATS_MIB_INBCAST, skb->len);
        } else if (skb->pkt_type == PACKET_BROADCAST ||
                   skb->pkt_type == PACKET_MULTICAST) {
-               struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
+               struct in_device *in_dev = __in_dev_get_rcu(dev);
 
                /* RFC 1122 3.3.6:
                 *
index cc701fa..5d7944f 100644 (file)
@@ -88,6 +88,7 @@ static int vti_rcv_cb(struct sk_buff *skb, int err)
        struct net_device *dev;
        struct pcpu_sw_netstats *tstats;
        struct xfrm_state *x;
+       struct xfrm_mode *inner_mode;
        struct ip_tunnel *tunnel = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4;
        u32 orig_mark = skb->mark;
        int ret;
@@ -105,7 +106,19 @@ static int vti_rcv_cb(struct sk_buff *skb, int err)
        }
 
        x = xfrm_input_state(skb);
-       family = x->inner_mode->afinfo->family;
+
+       inner_mode = x->inner_mode;
+
+       if (x->sel.family == AF_UNSPEC) {
+               inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
+               if (inner_mode == NULL) {
+                       XFRM_INC_STATS(dev_net(skb->dev),
+                                      LINUX_MIB_XFRMINSTATEMODEERROR);
+                       return -EINVAL;
+               }
+       }
+
+       family = inner_mode->afinfo->family;
 
        skb->mark = be32_to_cpu(tunnel->parms.i_key);
        ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family);
index 2625332..a87bcd2 100644 (file)
@@ -2076,6 +2076,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
        struct rta_mfc_stats mfcs;
        struct nlattr *mp_attr;
        struct rtnexthop *nhp;
+       unsigned long lastuse;
        int ct;
 
        /* If cache is unresolved, don't try to parse IIF and OIF */
@@ -2105,12 +2106,14 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
 
        nla_nest_end(skb, mp_attr);
 
+       lastuse = READ_ONCE(c->mfc_un.res.lastuse);
+       lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;
+
        mfcs.mfcs_packets = c->mfc_un.res.pkt;
        mfcs.mfcs_bytes = c->mfc_un.res.bytes;
        mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
        if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
-           nla_put_u64_64bit(skb, RTA_EXPIRES,
-                             jiffies_to_clock_t(c->mfc_un.res.lastuse),
+           nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
                              RTA_PAD))
                return -EMSGSIZE;
 
index 2375b0a..30493be 100644 (file)
@@ -31,6 +31,7 @@ static unsigned int nf_route_table_hook(void *priv,
        __be32 saddr, daddr;
        u_int8_t tos;
        const struct iphdr *iph;
+       int err;
 
        /* root is playing with raw sockets. */
        if (skb->len < sizeof(struct iphdr) ||
@@ -46,15 +47,17 @@ static unsigned int nf_route_table_hook(void *priv,
        tos = iph->tos;
 
        ret = nft_do_chain(&pkt, priv);
-       if (ret != NF_DROP && ret != NF_QUEUE) {
+       if (ret != NF_DROP && ret != NF_STOLEN) {
                iph = ip_hdr(skb);
 
                if (iph->saddr != saddr ||
                    iph->daddr != daddr ||
                    skb->mark != mark ||
-                   iph->tos != tos)
-                       if (ip_route_me_harder(state->net, skb, RTN_UNSPEC))
-                               ret = NF_DROP;
+                   iph->tos != tos) {
+                       err = ip_route_me_harder(state->net, skb, RTN_UNSPEC);
+                       if (err < 0)
+                               ret = NF_DROP_ERR(err);
+               }
        }
        return ret;
 }
index a1f2830..b5b47a2 100644 (file)
@@ -476,12 +476,18 @@ u32 ip_idents_reserve(u32 hash, int segs)
        atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
        u32 old = ACCESS_ONCE(*p_tstamp);
        u32 now = (u32)jiffies;
-       u32 delta = 0;
+       u32 new, delta = 0;
 
        if (old != now && cmpxchg(p_tstamp, old, now) == old)
                delta = prandom_u32_max(now - old);
 
-       return atomic_add_return(segs + delta, p_id) - segs;
+       /* Do not use atomic_add_return() as it makes UBSAN unhappy */
+       do {
+               old = (u32)atomic_read(p_id);
+               new = old + delta + segs;
+       } while (atomic_cmpxchg(p_id, old, new) != old);
+
+       return new - segs;
 }
 EXPORT_SYMBOL(ip_idents_reserve);
 
index 3ebf45b..08323bd 100644 (file)
@@ -5885,7 +5885,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
                 * so release it.
                 */
                if (req) {
-                       tp->total_retrans = req->num_retrans;
+                       inet_csk(sk)->icsk_retransmits = 0;
                        reqsk_fastopen_remove(sk, req, false);
                } else {
                        /* Make sure socket is routed, for correct metrics. */
index bdaef7f..5288cec 100644 (file)
@@ -2605,7 +2605,8 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
         * copying overhead: fragmentation, tunneling, mangling etc.
         */
        if (atomic_read(&sk->sk_wmem_alloc) >
-           min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
+           min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2),
+                 sk->sk_sndbuf))
                return -EAGAIN;
 
        if (skb_still_in_host_queue(sk, skb))
@@ -2830,7 +2831,7 @@ begin_fwd:
                if (tcp_retransmit_skb(sk, skb, segs))
                        return;
 
-               NET_INC_STATS(sock_net(sk), mib_idx);
+               NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb));
 
                if (tcp_in_cwnd_reduction(sk))
                        tp->prr_out += tcp_skb_pcount(skb);
@@ -3567,6 +3568,8 @@ int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
        if (!res) {
                __TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
+               if (unlikely(tcp_passive_fastopen(sk)))
+                       tcp_sk(sk)->total_retrans++;
        }
        return res;
 }
index d84930b..f712b41 100644 (file)
@@ -384,6 +384,7 @@ static void tcp_fastopen_synack_timer(struct sock *sk)
         */
        inet_rtx_syn_ack(sk, req);
        req->num_timeout++;
+       icsk->icsk_retransmits++;
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                          TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
 }
index d90a11f..5bd3afd 100644 (file)
@@ -321,11 +321,9 @@ static int vti6_rcv(struct sk_buff *skb)
                        goto discard;
                }
 
-               XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = t;
-
                rcu_read_unlock();
 
-               return xfrm6_rcv(skb);
+               return xfrm6_rcv_tnl(skb, t);
        }
        rcu_read_unlock();
        return -EINVAL;
@@ -340,6 +338,7 @@ static int vti6_rcv_cb(struct sk_buff *skb, int err)
        struct net_device *dev;
        struct pcpu_sw_netstats *tstats;
        struct xfrm_state *x;
+       struct xfrm_mode *inner_mode;
        struct ip6_tnl *t = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6;
        u32 orig_mark = skb->mark;
        int ret;
@@ -357,7 +356,19 @@ static int vti6_rcv_cb(struct sk_buff *skb, int err)
        }
 
        x = xfrm_input_state(skb);
-       family = x->inner_mode->afinfo->family;
+
+       inner_mode = x->inner_mode;
+
+       if (x->sel.family == AF_UNSPEC) {
+               inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
+               if (inner_mode == NULL) {
+                       XFRM_INC_STATS(dev_net(skb->dev),
+                                      LINUX_MIB_XFRMINSTATEMODEERROR);
+                       return -EINVAL;
+               }
+       }
+
+       family = inner_mode->afinfo->family;
 
        skb->mark = be32_to_cpu(t->parms.i_key);
        ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family);
index 6122f9c..fccb5dd 100644 (file)
@@ -2239,6 +2239,7 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
        struct rta_mfc_stats mfcs;
        struct nlattr *mp_attr;
        struct rtnexthop *nhp;
+       unsigned long lastuse;
        int ct;
 
        /* If cache is unresolved, don't try to parse IIF and OIF */
@@ -2269,12 +2270,14 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
 
        nla_nest_end(skb, mp_attr);
 
+       lastuse = READ_ONCE(c->mfc_un.res.lastuse);
+       lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;
+
        mfcs.mfcs_packets = c->mfc_un.res.pkt;
        mfcs.mfcs_bytes = c->mfc_un.res.bytes;
        mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
        if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
-           nla_put_u64_64bit(skb, RTA_EXPIRES,
-                             jiffies_to_clock_t(c->mfc_un.res.lastuse),
+           nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
                              RTA_PAD))
                return -EMSGSIZE;
 
index 71d995f..2535223 100644 (file)
@@ -31,6 +31,7 @@ static unsigned int nf_route_table_hook(void *priv,
        struct in6_addr saddr, daddr;
        u_int8_t hop_limit;
        u32 mark, flowlabel;
+       int err;
 
        /* malformed packet, drop it */
        if (nft_set_pktinfo_ipv6(&pkt, skb, state) < 0)
@@ -46,13 +47,16 @@ static unsigned int nf_route_table_hook(void *priv,
        flowlabel = *((u32 *)ipv6_hdr(skb));
 
        ret = nft_do_chain(&pkt, priv);
-       if (ret != NF_DROP && ret != NF_QUEUE &&
+       if (ret != NF_DROP && ret != NF_STOLEN &&
            (memcmp(&ipv6_hdr(skb)->saddr, &saddr, sizeof(saddr)) ||
             memcmp(&ipv6_hdr(skb)->daddr, &daddr, sizeof(daddr)) ||
             skb->mark != mark ||
             ipv6_hdr(skb)->hop_limit != hop_limit ||
-            flowlabel != *((u_int32_t *)ipv6_hdr(skb))))
-               return ip6_route_me_harder(state->net, skb) == 0 ? ret : NF_DROP;
+            flowlabel != *((u_int32_t *)ipv6_hdr(skb)))) {
+               err = ip6_route_me_harder(state->net, skb);
+               if (err < 0)
+                       ret = NF_DROP_ERR(err);
+       }
 
        return ret;
 }
index 4981755..e3a224b 100644 (file)
@@ -1986,9 +1986,18 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
                        if (!(gwa_type & IPV6_ADDR_UNICAST))
                                goto out;
 
-                       if (cfg->fc_table)
+                       if (cfg->fc_table) {
                                grt = ip6_nh_lookup_table(net, cfg, gw_addr);
 
+                               if (grt) {
+                                       if (grt->rt6i_flags & RTF_GATEWAY ||
+                                           (dev && dev != grt->dst.dev)) {
+                                               ip6_rt_put(grt);
+                                               grt = NULL;
+                                       }
+                               }
+                       }
+
                        if (!grt)
                                grt = rt6_lookup(net, gw_addr, NULL,
                                                 cfg->fc_ifindex, 1);
index 00a2d40..b578956 100644 (file)
@@ -21,9 +21,10 @@ int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb)
        return xfrm6_extract_header(skb);
 }
 
-int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
+int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi,
+                 struct ip6_tnl *t)
 {
-       XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
+       XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = t;
        XFRM_SPI_SKB_CB(skb)->family = AF_INET6;
        XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
        return xfrm_input(skb, nexthdr, spi, 0);
@@ -49,13 +50,18 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)
        return -1;
 }
 
-int xfrm6_rcv(struct sk_buff *skb)
+int xfrm6_rcv_tnl(struct sk_buff *skb, struct ip6_tnl *t)
 {
        return xfrm6_rcv_spi(skb, skb_network_header(skb)[IP6CB(skb)->nhoff],
-                            0);
+                            0, t);
 }
-EXPORT_SYMBOL(xfrm6_rcv);
+EXPORT_SYMBOL(xfrm6_rcv_tnl);
 
+int xfrm6_rcv(struct sk_buff *skb)
+{
+       return xfrm6_rcv_tnl(skb, NULL);
+}
+EXPORT_SYMBOL(xfrm6_rcv);
 int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
                     xfrm_address_t *saddr, u8 proto)
 {
index 5743044..e1c0bbe 100644 (file)
@@ -236,7 +236,7 @@ static int xfrm6_tunnel_rcv(struct sk_buff *skb)
        __be32 spi;
 
        spi = xfrm6_tunnel_spi_lookup(net, (const xfrm_address_t *)&iph->saddr);
-       return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi);
+       return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi, NULL);
 }
 
 static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
index 8d2f7c9..ccc2444 100644 (file)
@@ -832,7 +832,7 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
        struct sock *sk = sock->sk;
        struct irda_sock *new, *self = irda_sk(sk);
        struct sock *newsk;
-       struct sk_buff *skb;
+       struct sk_buff *skb = NULL;
        int err;
 
        err = irda_create(sock_net(sk), newsock, sk->sk_protocol, 0);
@@ -900,7 +900,6 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
        err = -EPERM; /* value does not seem to make sense. -arnd */
        if (!new->tsap) {
                pr_debug("%s(), dup failed!\n", __func__);
-               kfree_skb(skb);
                goto out;
        }
 
@@ -919,7 +918,6 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
        /* Clean up the original one to keep it in listen state */
        irttp_listen(self->tsap);
 
-       kfree_skb(skb);
        sk->sk_ack_backlog--;
 
        newsock->state = SS_CONNECTED;
@@ -927,6 +925,7 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
        irda_connect_response(new);
        err = 0;
 out:
+       kfree_skb(skb);
        release_sock(sk);
        return err;
 }
index a9aff60..afa9468 100644 (file)
@@ -261,10 +261,16 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
                .timeout = timeout,
                .ssn = start_seq_num,
        };
-
        int i, ret = -EOPNOTSUPP;
        u16 status = WLAN_STATUS_REQUEST_DECLINED;
 
+       if (tid >= IEEE80211_FIRST_TSPEC_TSID) {
+               ht_dbg(sta->sdata,
+                      "STA %pM requests BA session on unsupported tid %d\n",
+                      sta->sta.addr, tid);
+               goto end_no_lock;
+       }
+
        if (!sta->sta.ht_cap.ht_supported) {
                ht_dbg(sta->sdata,
                       "STA %pM erroneously requests BA session on tid %d w/o QoS\n",
index 5650c46..45319cc 100644 (file)
@@ -584,6 +584,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
            ieee80211_hw_check(&local->hw, TX_AMPDU_SETUP_IN_HW))
                return -EINVAL;
 
+       if (WARN_ON(tid >= IEEE80211_FIRST_TSPEC_TSID))
+               return -EINVAL;
+
        ht_dbg(sdata, "Open BA session requested for %pM tid %u\n",
               pubsta->addr, tid);
 
index 8f9c3bd..faccef9 100644 (file)
@@ -746,6 +746,7 @@ static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
                sta = next_hop_deref_protected(mpath);
                if (mpath->flags & MESH_PATH_ACTIVE &&
                    ether_addr_equal(ta, sta->sta.addr) &&
+                   !(mpath->flags & MESH_PATH_FIXED) &&
                    (!(mpath->flags & MESH_PATH_SN_VALID) ||
                    SN_GT(target_sn, mpath->sn)  || target_sn == 0)) {
                        mpath->flags &= ~MESH_PATH_ACTIVE;
@@ -1012,7 +1013,7 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
                goto enddiscovery;
 
        spin_lock_bh(&mpath->state_lock);
-       if (mpath->flags & MESH_PATH_DELETED) {
+       if (mpath->flags & (MESH_PATH_DELETED | MESH_PATH_FIXED)) {
                spin_unlock_bh(&mpath->state_lock);
                goto enddiscovery;
        }
index 6db2ddf..f0e6175 100644 (file)
@@ -826,7 +826,7 @@ void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
        mpath->metric = 0;
        mpath->hop_count = 0;
        mpath->exp_time = 0;
-       mpath->flags |= MESH_PATH_FIXED;
+       mpath->flags = MESH_PATH_FIXED | MESH_PATH_SN_VALID;
        mesh_path_activate(mpath);
        spin_unlock_bh(&mpath->state_lock);
        mesh_path_tx_pending(mpath);
index 76b737d..aa58df8 100644 (file)
@@ -1616,7 +1616,6 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta,
 
                sta_info_recalc_tim(sta);
        } else {
-               unsigned long tids = sta->txq_buffered_tids & driver_release_tids;
                int tid;
 
                /*
@@ -1648,7 +1647,8 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta,
                for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) {
                        struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]);
 
-                       if (!(tids & BIT(tid)) || txqi->tin.backlog_packets)
+                       if (!(driver_release_tids & BIT(tid)) ||
+                           txqi->tin.backlog_packets)
                                continue;
 
                        sta_info_recalc_tim(sta);
index 5023966..18b285e 100644 (file)
@@ -796,6 +796,36 @@ static __le16 ieee80211_tx_next_seq(struct sta_info *sta, int tid)
        return ret;
 }
 
+static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
+                                         struct ieee80211_vif *vif,
+                                         struct ieee80211_sta *pubsta,
+                                         struct sk_buff *skb)
+{
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_txq *txq = NULL;
+
+       if ((info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) ||
+           (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
+               return NULL;
+
+       if (!ieee80211_is_data(hdr->frame_control))
+               return NULL;
+
+       if (pubsta) {
+               u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+
+               txq = pubsta->txq[tid];
+       } else if (vif) {
+               txq = vif->txq;
+       }
+
+       if (!txq)
+               return NULL;
+
+       return to_txq_info(txq);
+}
+
 static ieee80211_tx_result debug_noinline
 ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
 {
@@ -853,7 +883,8 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
        tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
        tx->sta->tx_stats.msdu[tid]++;
 
-       if (!tx->sta->sta.txq[0])
+       if (!ieee80211_get_txq(tx->local, info->control.vif, &tx->sta->sta,
+                              tx->skb))
                hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid);
 
        return TX_CONTINUE;
@@ -1243,36 +1274,6 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
        return TX_CONTINUE;
 }
 
-static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
-                                         struct ieee80211_vif *vif,
-                                         struct ieee80211_sta *pubsta,
-                                         struct sk_buff *skb)
-{
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_txq *txq = NULL;
-
-       if ((info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) ||
-           (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
-               return NULL;
-
-       if (!ieee80211_is_data(hdr->frame_control))
-               return NULL;
-
-       if (pubsta) {
-               u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
-
-               txq = pubsta->txq[tid];
-       } else if (vif) {
-               txq = vif->txq;
-       }
-
-       if (!txq)
-               return NULL;
-
-       return to_txq_info(txq);
-}
-
 static void ieee80211_set_skb_enqueue_time(struct sk_buff *skb)
 {
        IEEE80211_SKB_CB(skb)->control.enqueue_time = codel_get_time();
@@ -1514,8 +1515,12 @@ out:
        spin_unlock_bh(&fq->lock);
 
        if (skb && skb_has_frag_list(skb) &&
-           !ieee80211_hw_check(&local->hw, TX_FRAG_LIST))
-               skb_linearize(skb);
+           !ieee80211_hw_check(&local->hw, TX_FRAG_LIST)) {
+               if (skb_linearize(skb)) {
+                       ieee80211_free_txskb(&local->hw, skb);
+                       return NULL;
+               }
+       }
 
        return skb;
 }
@@ -3264,7 +3269,7 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
 
        if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
                *ieee80211_get_qos_ctl(hdr) = tid;
-               if (!sta->sta.txq[0])
+               if (!ieee80211_get_txq(local, &sdata->vif, &sta->sta, skb))
                        hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid);
        } else {
                info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
index dd2c43a..9934b0c 100644 (file)
@@ -1035,9 +1035,9 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
        if (IS_ERR(ct))
                return (struct nf_conntrack_tuple_hash *)ct;
 
-       if (tmpl && nfct_synproxy(tmpl)) {
-               nfct_seqadj_ext_add(ct);
-               nfct_synproxy_ext_add(ct);
+       if (!nf_ct_add_synproxy(ct, tmpl)) {
+               nf_conntrack_free(ct);
+               return ERR_PTR(-ENOMEM);
        }
 
        timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
index de31818..ecee105 100644 (file)
@@ -441,7 +441,8 @@ nf_nat_setup_info(struct nf_conn *ct,
                        ct->status |= IPS_DST_NAT;
 
                if (nfct_help(ct))
-                       nfct_seqadj_ext_add(ct);
+                       if (!nfct_seqadj_ext_add(ct))
+                               return NF_DROP;
        }
 
        if (maniptype == NF_NAT_MANIP_SRC) {
@@ -807,7 +808,7 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct,
        if (err < 0)
                return err;
 
-       return nf_nat_setup_info(ct, &range, manip);
+       return nf_nat_setup_info(ct, &range, manip) == NF_DROP ? -ENOMEM : 0;
 }
 #else
 static int
index 39eb1cc..fa24a5b 100644 (file)
@@ -237,7 +237,7 @@ void nft_trace_notify(struct nft_traceinfo *info)
                break;
        case NFT_TRACETYPE_POLICY:
                if (nla_put_be32(skb, NFTA_TRACE_POLICY,
-                                info->basechain->policy))
+                                htonl(info->basechain->policy)))
                        goto nla_put_failure;
                break;
        }
index 69444d3..1555fb8 100644 (file)
@@ -796,27 +796,34 @@ struct sctp_hash_cmp_arg {
 static inline int sctp_hash_cmp(struct rhashtable_compare_arg *arg,
                                const void *ptr)
 {
+       struct sctp_transport *t = (struct sctp_transport *)ptr;
        const struct sctp_hash_cmp_arg *x = arg->key;
-       const struct sctp_transport *t = ptr;
-       struct sctp_association *asoc = t->asoc;
-       const struct net *net = x->net;
+       struct sctp_association *asoc;
+       int err = 1;
 
        if (!sctp_cmp_addr_exact(&t->ipaddr, x->paddr))
-               return 1;
-       if (!net_eq(sock_net(asoc->base.sk), net))
-               return 1;
+               return err;
+       if (!sctp_transport_hold(t))
+               return err;
+
+       asoc = t->asoc;
+       if (!net_eq(sock_net(asoc->base.sk), x->net))
+               goto out;
        if (x->ep) {
                if (x->ep != asoc->ep)
-                       return 1;
+                       goto out;
        } else {
                if (x->laddr->v4.sin_port != htons(asoc->base.bind_addr.port))
-                       return 1;
+                       goto out;
                if (!sctp_bind_addr_match(&asoc->base.bind_addr,
                                          x->laddr, sctp_sk(asoc->base.sk)))
-                       return 1;
+                       goto out;
        }
 
-       return 0;
+       err = 0;
+out:
+       sctp_transport_put(t);
+       return err;
 }
 
 static inline u32 sctp_hash_obj(const void *data, u32 len, u32 seed)
index 1d28181..d858202 100644 (file)
@@ -569,9 +569,10 @@ gss_svc_searchbyctx(struct cache_detail *cd, struct xdr_netobj *handle)
        struct rsc *found;
 
        memset(&rsci, 0, sizeof(rsci));
-       rsci.handle.data = handle->data;
-       rsci.handle.len = handle->len;
+       if (dup_to_netobj(&rsci.handle, handle->data, handle->len))
+               return NULL;
        found = rsc_lookup(cd, &rsci);
+       rsc_free(&rsci);
        if (!found)
                return NULL;
        if (cache_check(cd, &found->h, NULL))
index f02653a..4809f4d 100644 (file)
@@ -6978,7 +6978,7 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
 
                params.n_counter_offsets_presp = len / sizeof(u16);
                if (rdev->wiphy.max_num_csa_counters &&
-                   (params.n_counter_offsets_beacon >
+                   (params.n_counter_offsets_presp >
                     rdev->wiphy.max_num_csa_counters))
                        return -EINVAL;
 
index 9895a8c..a30f898 100644 (file)
@@ -332,6 +332,7 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x)
 {
        tasklet_hrtimer_cancel(&x->mtimer);
        del_timer_sync(&x->rtimer);
+       kfree(x->aead);
        kfree(x->aalg);
        kfree(x->ealg);
        kfree(x->calg);
index cb65d91..0889209 100644 (file)
@@ -581,9 +581,12 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
        if (err)
                goto error;
 
-       if (attrs[XFRMA_SEC_CTX] &&
-           security_xfrm_state_alloc(x, nla_data(attrs[XFRMA_SEC_CTX])))
-               goto error;
+       if (attrs[XFRMA_SEC_CTX]) {
+               err = security_xfrm_state_alloc(x,
+                                               nla_data(attrs[XFRMA_SEC_CTX]));
+               if (err)
+                       goto error;
+       }
 
        if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn,
                                               attrs[XFRMA_REPLAY_ESN_VAL])))
diff --git a/scripts/faddr2line b/scripts/faddr2line
new file mode 100755 (executable)
index 0000000..450b332
--- /dev/null
@@ -0,0 +1,177 @@
+#!/bin/bash
+#
+# Translate stack dump function offsets.
+#
+# addr2line doesn't work with KASLR addresses.  This works similarly to
+# addr2line, but instead takes the 'func+0x123' format as input:
+#
+#   $ ./scripts/faddr2line ~/k/vmlinux meminfo_proc_show+0x5/0x568
+#   meminfo_proc_show+0x5/0x568:
+#   meminfo_proc_show at fs/proc/meminfo.c:27
+#
+# If the address is part of an inlined function, the full inline call chain is
+# printed:
+#
+#   $ ./scripts/faddr2line ~/k/vmlinux native_write_msr+0x6/0x27
+#   native_write_msr+0x6/0x27:
+#   arch_static_branch at arch/x86/include/asm/msr.h:121
+#    (inlined by) static_key_false at include/linux/jump_label.h:125
+#    (inlined by) native_write_msr at arch/x86/include/asm/msr.h:125
+#
+# The function size after the '/' in the input is optional, but recommended.
+# It's used to help disambiguate any duplicate symbol names, which can occur
+# rarely.  If the size is omitted for a duplicate symbol then it's possible for
+# multiple code sites to be printed:
+#
+#   $ ./scripts/faddr2line ~/k/vmlinux raw_ioctl+0x5
+#   raw_ioctl+0x5/0x20:
+#   raw_ioctl at drivers/char/raw.c:122
+#
+#   raw_ioctl+0x5/0xb1:
+#   raw_ioctl at net/ipv4/raw.c:876
+#
+# Multiple addresses can be specified on a single command line:
+#
+#   $ ./scripts/faddr2line ~/k/vmlinux type_show+0x10/45 free_reserved_area+0x90
+#   type_show+0x10/0x2d:
+#   type_show at drivers/video/backlight/backlight.c:213
+#
+#   free_reserved_area+0x90/0x123:
+#   free_reserved_area at mm/page_alloc.c:6429 (discriminator 2)
+
+
+set -o errexit
+set -o nounset
+
+usage() {
+       echo "usage: faddr2line <object file> <func+offset> <func+offset>..." >&2
+       exit 1
+}
+
+warn() {
+       echo "$1" >&2
+}
+
+die() {
+       echo "ERROR: $1" >&2
+       exit 1
+}
+
+command -v awk >/dev/null 2>&1 || die "awk isn't installed"
+command -v readelf >/dev/null 2>&1 || die "readelf isn't installed"
+command -v addr2line >/dev/null 2>&1 || die "addr2line isn't installed"
+
+# Try to figure out the source directory prefix so we can remove it from the
+# addr2line output.  HACK ALERT: This assumes that start_kernel() is in
+# init/main.c!  This only works for vmlinux.  Otherwise it falls back to
+# printing the absolute path.
+find_dir_prefix() {
+       local objfile=$1
+
+       local start_kernel_addr=$(readelf -sW $objfile | awk '$8 == "start_kernel" {printf "0x%s", $2}')
+       [[ -z $start_kernel_addr ]] && return
+
+       local file_line=$(addr2line -e $objfile $start_kernel_addr)
+       [[ -z $file_line ]] && return
+
+       local prefix=${file_line%init/main.c:*}
+       if [[ -z $prefix ]] || [[ $prefix = $file_line ]]; then
+               return
+       fi
+
+       DIR_PREFIX=$prefix
+       return 0
+}
+
+__faddr2line() {
+       local objfile=$1
+       local func_addr=$2
+       local dir_prefix=$3
+       local print_warnings=$4
+
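+       # split "func+0x123[/0x456]" into the function name, hex offset and optional size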
+       local func=${func_addr%+*}
+       local offset=${func_addr#*+}
+       offset=${offset%/*}
+       local size=
+       [[ $func_addr =~ "/" ]] && size=${func_addr#*/}
+
+       if [[ -z $func ]] || [[ -z $offset ]] || [[ $func = $func_addr ]]; then
+               warn "bad func+offset $func_addr"
+               DONE=1
+               return
+       fi
+
+       # Go through each of the object's symbols which match the func name.
+       # In rare cases there might be duplicates.
+       while read symbol; do
+               local fields=($symbol)
+               local sym_base=0x${fields[1]}
+               local sym_size=${fields[2]}
+               local sym_type=${fields[3]}
+
+               # calculate the address
+               local addr=$(($sym_base + $offset))
+               if [[ -z $addr ]] || [[ $addr = 0 ]]; then
+                       warn "bad address: $sym_base + $offset"
+                       DONE=1
+                       return
+               fi
+               local hexaddr=0x$(printf %x $addr)
+
+               # weed out non-function symbols
+               if [[ $sym_type != "FUNC" ]]; then
+                       [[ $print_warnings = 1 ]] &&
+                               echo "skipping $func address at $hexaddr due to non-function symbol"
+                       continue
+               fi
+
+               # if the user provided a size, make sure it matches the symbol's size
+               if [[ -n $size ]] && [[ $size -ne $sym_size ]]; then
+                       [[ $print_warnings = 1 ]] &&
+                               echo "skipping $func address at $hexaddr due to size mismatch ($size != $sym_size)"
+                       continue;
+               fi
+
+               # make sure the provided offset is within the symbol's range
+               if [[ $offset -gt $sym_size ]]; then
+                       [[ $print_warnings = 1 ]] &&
+                               echo "skipping $func address at $hexaddr due to size mismatch ($offset > $sym_size)"
+                       continue
+               fi
+
+               # separate multiple entries with a blank line
+               [[ $FIRST = 0 ]] && echo
+               FIRST=0
+
+               local hexsize=0x$(printf %x $sym_size)
+               echo "$func+$offset/$hexsize:"
+               addr2line -fpie $objfile $hexaddr | sed "s; $dir_prefix\(\./\)*; ;"
+               DONE=1
+
+       done < <(readelf -sW $objfile | awk -v f=$func '$8 == f {print}')
+}
+
+[[ $# -lt 2 ]] && usage
+
+objfile=$1
+[[ ! -f $objfile ]] && die "can't find objfile $objfile"
+shift
+
+DIR_PREFIX=supercalifragilisticexpialidocious
+find_dir_prefix $objfile
+
+FIRST=1
+while [[ $# -gt 0 ]]; do
+       func_addr=$1
+       shift
+
+       # print any matches found
+       DONE=0
+       __faddr2line $objfile $func_addr $DIR_PREFIX 0
+
+       # if no match was found, print warnings
+       if [[ $DONE = 0 ]]; then
+               __faddr2line $objfile $func_addr $DIR_PREFIX 1
+               warn "no match for $func_addr"
+       fi
+done