Merge tag 'irqchip-core-4.8-2' of git://git.infradead.org/users/jcooper/linux into...
author Thomas Gleixner <tglx@linutronix.de>
Sat, 2 Jul 2016 09:42:56 +0000 (11:42 +0200)
committer Thomas Gleixner <tglx@linutronix.de>
Sat, 2 Jul 2016 09:42:56 +0000 (11:42 +0200)
Pull irqchip core changes for v4.8 (second set) from Jason Cooper:

 - Add Aspeed VIC driver

273 files changed:
Documentation/DocBook/device-drivers.tmpl
Documentation/arm64/silicon-errata.txt
Documentation/devicetree/bindings/display/imx/ldb.txt
Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt
Documentation/filesystems/devpts.txt
Documentation/kdump/gdbmacros.txt
Documentation/networking/dsa/dsa.txt
Documentation/networking/ip-sysctl.txt
Documentation/security/keys.txt
MAINTAINERS
Makefile
arch/arm/kernel/ptrace.c
arch/arm64/Kconfig
arch/arm64/Kconfig.debug
arch/arm64/Makefile
arch/arm64/include/asm/elf.h
arch/arm64/include/asm/memory.h
arch/arm64/include/asm/page.h
arch/arm64/include/asm/uaccess.h
arch/arm64/include/asm/unistd.h
arch/arm64/include/asm/unistd32.h
arch/arm64/kernel/cpuinfo.c
arch/arm64/kernel/traps.c
arch/arm64/kvm/hyp/vgic-v3-sr.c
arch/arm64/kvm/sys_regs.c
arch/arm64/mm/dump.c
arch/arm64/mm/hugetlbpage.c
arch/parisc/include/asm/traps.h
arch/parisc/kernel/processor.c
arch/parisc/kernel/time.c
arch/parisc/kernel/unaligned.c
arch/parisc/kernel/unwind.c
arch/powerpc/include/asm/reg.h
arch/powerpc/kernel/prom_init.c
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/mm/pgtable-book3s64.c
arch/powerpc/mm/pgtable-radix.c
arch/powerpc/platforms/pseries/eeh_pseries.c
arch/s390/configs/default_defconfig
arch/s390/configs/gcov_defconfig
arch/s390/configs/performance_defconfig
arch/s390/configs/zfcpdump_defconfig
arch/s390/defconfig
arch/s390/mm/fault.c
arch/s390/net/bpf_jit.h
arch/s390/net/bpf_jit_comp.c
arch/sparc/include/asm/head_64.h
arch/sparc/include/asm/ttable.h
arch/sparc/kernel/Makefile
arch/sparc/kernel/rtrap_64.S
arch/sparc/kernel/signal32.c
arch/sparc/kernel/signal_32.c
arch/sparc/kernel/signal_64.c
arch/sparc/kernel/sigutil_32.c
arch/sparc/kernel/sigutil_64.c
arch/sparc/kernel/urtt_fill.S [new file with mode: 0644]
arch/sparc/mm/init_64.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/mmu.c
arch/x86/kvm/x86.c
arch/x86/pci/vmd.c
crypto/asymmetric_keys/Kconfig
drivers/acpi/acpi_processor.c
drivers/acpi/acpi_video.c
drivers/acpi/acpica/hwregs.c
drivers/acpi/processor_throttling.c
drivers/atm/firestream.c
drivers/atm/iphase.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/crypto/ccp/ccp-crypto-aes-xts.c
drivers/crypto/omap-sham.c
drivers/dma-buf/dma-buf.c
drivers/dma-buf/reservation.c
drivers/gpio/gpio-lpc32xx.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/arm/hdlcd_crtc.c
drivers/gpu/drm/arm/hdlcd_drv.c
drivers/gpu/drm/arm/hdlcd_drv.h
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_fb_cma_helper.c
drivers/gpu/drm/drm_gem_cma_helper.c
drivers/gpu/drm/drm_modes.c
drivers/gpu/drm/imx/imx-drm-core.c
drivers/gpu/drm/imx/imx-drm.h
drivers/gpu/drm/imx/imx-ldb.c
drivers/gpu/drm/imx/imx-tve.c
drivers/gpu/drm/imx/ipuv3-crtc.c
drivers/gpu/drm/imx/ipuv3-plane.c
drivers/gpu/drm/imx/parallel-display.c
drivers/gpu/drm/mediatek/mtk_dpi.c
drivers/gpu/drm/mediatek/mtk_dsi.c
drivers/gpu/drm/mgag200/mgag200_mode.c
drivers/gpu/drm/omapdrm/Kconfig
drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
drivers/gpu/drm/omapdrm/displays/panel-dpi.c
drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
drivers/gpu/drm/omapdrm/dss/dsi.c
drivers/gpu/drm/omapdrm/dss/dss.c
drivers/gpu/drm/omapdrm/dss/hdmi4.c
drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
drivers/gpu/drm/omapdrm/dss/hdmi5.c
drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
drivers/gpu/drm/omapdrm/omap_debugfs.c
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
drivers/gpu/drm/omapdrm/omap_fb.c
drivers/gpu/drm/omapdrm/omap_gem.c
drivers/gpu/drm/sti/sti_crtc.c
drivers/irqchip/Kconfig
drivers/irqchip/Makefile
drivers/irqchip/irq-gic-common.c
drivers/irqchip/irq-gic-pm.c [new file with mode: 0644]
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-gic-v3.c
drivers/irqchip/irq-gic.c
drivers/irqchip/irq-mips-gic.c
drivers/irqchip/irq-pic32-evic.c
drivers/mmc/core/mmc.c
drivers/mmc/host/sunxi-mmc.c
drivers/net/ethernet/arc/emac_mdio.c
drivers/net/ethernet/atheros/alx/alx.h
drivers/net/ethernet/atheros/alx/main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/ezchip/nps_enet.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
drivers/net/ethernet/marvell/mvneta_bm.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_port.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/qlogic/qed/qed_dcbx.c
drivers/net/ethernet/qlogic/qed/qed_dev.c
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
drivers/net/ethernet/qlogic/qede/qede_main.c
drivers/net/ethernet/qlogic/qlge/qlge_main.c
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/rx.c
drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
drivers/net/team/team.c
drivers/net/usb/pegasus.c
drivers/net/usb/smsc95xx.c
drivers/net/virtio_net.c
drivers/net/vxlan.c
drivers/perf/arm_pmu.c
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
drivers/pinctrl/nomadik/pinctrl-nomadik.c
drivers/ptp/ptp_chardev.c
drivers/scsi/aacraid/aacraid.h
drivers/scsi/aacraid/linit.c
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/scsi_lib.c
drivers/scsi/sd.c
drivers/thermal/int340x_thermal/int3406_thermal.c
drivers/tty/Kconfig
drivers/tty/pty.c
drivers/vfio/pci/vfio_pci_config.c
drivers/vfio/pci/vfio_pci_intrs.c
drivers/vfio/vfio_iommu_type1.c
drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/inode.c
fs/btrfs/ordered-data.c
fs/btrfs/ordered-data.h
fs/btrfs/reada.c
fs/btrfs/scrub.c
fs/btrfs/volumes.c
fs/cachefiles/interface.c
fs/ceph/addr.c
fs/ceph/cache.c
fs/ceph/cache.h
fs/ceph/caps.c
fs/ceph/file.c
fs/ceph/super.h
fs/devpts/inode.c
fs/fscache/page.c
fs/namei.c
include/acpi/video.h
include/linux/ceph/osd_client.h
include/linux/ceph/osdmap.h
include/linux/devpts_fs.h
include/linux/dma-buf.h
include/linux/fence.h
include/linux/fscache-cache.h
include/linux/irq.h
include/linux/irqchip/arm-gic-v3.h
include/linux/irqchip/arm-gic.h
include/linux/irqdomain.h
include/linux/namei.h
include/linux/page_idle.h
include/linux/reservation.h
include/linux/sctp.h
include/linux/timekeeping.h
include/net/ip6_tunnel.h
include/net/pkt_sched.h
include/uapi/linux/ethtool.h
include/uapi/linux/pkt_cls.h
kernel/bpf/inode.c
kernel/irq/chip.c
kernel/irq/handle.c
kernel/irq/internals.h
kernel/irq/ipi.c
kernel/irq/irqdomain.c
kernel/irq/manage.c
kernel/irq/proc.c
kernel/time/hrtimer.c
lib/Kconfig.debug
lib/Makefile
lib/test_uuid.c [new file with mode: 0644]
lib/uuid.c
mm/memcontrol.c
mm/oom_kill.c
mm/page_alloc.c
mm/page_owner.c
mm/page_poison.c
mm/vmalloc.c
mm/vmstat.c
mm/z3fold.c
net/8021q/vlan.c
net/8021q/vlan.h
net/8021q/vlan_dev.c
net/atm/signaling.c
net/atm/svc.c
net/ceph/osd_client.c
net/ceph/osdmap.c
net/core/hwbm.c
net/core/pktgen.c
net/ieee802154/nl802154.c
net/ipv4/af_inet.c
net/ipv4/sysctl_net_ipv4.c
net/ipv6/Kconfig
net/ipv6/Makefile
net/ipv6/fou6.c
net/ipv6/ip6_gre.c
net/l2tp/l2tp_ip6.c
net/lapb/lapb_in.c
net/lapb/lapb_out.c
net/lapb/lapb_subr.c
net/openvswitch/actions.c
net/sched/act_police.c
net/sched/sch_api.c
net/sched/sch_htb.c
net/sctp/sctp_diag.c
net/sctp/socket.c
net/tipc/netlink_compat.c
scripts/checkpatch.pl
security/keys/compat.c
security/keys/dh.c
security/keys/internal.h
security/keys/keyctl.c
virt/kvm/arm/hyp/vgic-v2-sr.c
virt/kvm/arm/vgic/vgic-mmio.c
virt/kvm/arm/vgic/vgic-v2.c
virt/kvm/arm/vgic/vgic-v3.c
virt/kvm/irqchip.c
virt/kvm/kvm_main.c

index de79efd..8c68768 100644 (file)
@@ -128,16 +128,44 @@ X!Edrivers/base/interface.c
 !Edrivers/base/platform.c
 !Edrivers/base/bus.c
      </sect1>
-     <sect1><title>Device Drivers DMA Management</title>
+     <sect1>
+       <title>Buffer Sharing and Synchronization</title>
+       <para>
+         The dma-buf subsystem provides the framework for sharing buffers
+         for hardware (DMA) access across multiple device drivers and
+         subsystems, and for synchronizing asynchronous hardware access.
+       </para>
+       <para>
+         This is used, for example, by drm "prime" multi-GPU support, but
+         is of course not limited to GPU use cases.
+       </para>
+       <para>
+         The three main components of this are: (1) dma-buf, representing
+         a sg_table and exposed to userspace as a file descriptor to allow
+         passing between devices, (2) fence, which provides a mechanism
+         to signal when one device has finished access, and (3) reservation,
+         which manages the shared or exclusive fence(s) associated with
+         the buffer.
+       </para>
+       <sect2><title>dma-buf</title>
 !Edrivers/dma-buf/dma-buf.c
+!Iinclude/linux/dma-buf.h
+       </sect2>
+       <sect2><title>reservation</title>
+!Pdrivers/dma-buf/reservation.c Reservation Object Overview
+!Edrivers/dma-buf/reservation.c
+!Iinclude/linux/reservation.h
+       </sect2>
+       <sect2><title>fence</title>
 !Edrivers/dma-buf/fence.c
-!Edrivers/dma-buf/seqno-fence.c
 !Iinclude/linux/fence.h
+!Edrivers/dma-buf/seqno-fence.c
 !Iinclude/linux/seqno-fence.h
-!Edrivers/dma-buf/reservation.c
-!Iinclude/linux/reservation.h
 !Edrivers/dma-buf/sync_file.c
 !Iinclude/linux/sync_file.h
+       </sect2>
+     </sect1>
+     <sect1><title>Device Drivers DMA Management</title>
 !Edrivers/base/dma-coherent.c
 !Edrivers/base/dma-mapping.c
      </sect1>
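
For orientation, here is a minimal exporter-side sketch of the dma-buf flow the new section documents. This is illustrative only and not part of the patch: the my_dev context, the ops argument and the error handling are assumptions; only the dma-buf calls themselves come from include/linux/dma-buf.h.

	#include <linux/dma-buf.h>
	#include <linux/err.h>
	#include <linux/fcntl.h>

	struct my_dev;	/* hypothetical driver context */

	static int my_share_buffer(struct my_dev *dev, size_t size,
				   const struct dma_buf_ops *ops)
	{
		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
		struct dma_buf *dmabuf;

		exp_info.ops   = ops;		/* attach/map/release callbacks */
		exp_info.size  = size;		/* size of the backing storage */
		exp_info.flags = O_CLOEXEC;
		exp_info.priv  = dev;

		dmabuf = dma_buf_export(&exp_info);
		if (IS_ERR(dmabuf))
			return PTR_ERR(dmabuf);

		/* Hand the fd to userspace; an importer can then attach and map. */
		return dma_buf_fd(dmabuf, O_CLOEXEC);
	}

The fence and reservation objects described above then coordinate when each importer's DMA against that buffer may actually run.
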
index c6938e5..4da60b4 100644 (file)
@@ -56,6 +56,7 @@ stable kernels.
 | ARM            | MMU-500         | #841119,#826419 | N/A                     |
 |                |                 |                 |                         |
 | Cavium         | ThunderX ITS    | #22375, #24313  | CAVIUM_ERRATUM_22375    |
+| Cavium         | ThunderX ITS    | #23144          | CAVIUM_ERRATUM_23144    |
 | Cavium         | ThunderX GICv3  | #23154          | CAVIUM_ERRATUM_23154    |
 | Cavium         | ThunderX Core   | #27456          | CAVIUM_ERRATUM_27456    |
 | Cavium         | ThunderX SMMUv2 | #27704          | N/A                    |
index 0a175d9..a407462 100644 (file)
@@ -62,6 +62,7 @@ Required properties:
    display-timings are used instead.
 
 Optional properties (required if display-timings are used):
+ - ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
  - display-timings : A node that describes the display timings as defined in
    Documentation/devicetree/bindings/display/display-timing.txt.
  - fsl,data-mapping : should be "spwg" or "jeida"
index 793c20f..5393e2a 100644 (file)
@@ -21,6 +21,7 @@ Main node required properties:
        "arm,pl390"
        "arm,tc11mp-gic"
        "brcm,brahma-b15-gic"
+       "nvidia,tegra210-agic"
        "qcom,msm-8660-qgic"
        "qcom,msm-qgic2"
 - interrupt-controller : Identifies the node as an interrupt controller
@@ -68,7 +69,7 @@ Optional
        "ic_clk" (for "arm,arm11mp-gic")
        "PERIPHCLKEN" (for "arm,cortex-a15-gic")
        "PERIPHCLK", "PERIPHCLKEN" (for "arm,cortex-a9-gic")
-       "clk" (for "arm,gic-400")
+       "clk" (for "arm,gic-400" and "nvidia,tegra210")
        "gclk" (for "arm,pl390")
 
 - power-domains : A phandle and PM domain specifier as defined by bindings of
index 30d2fcb..9f94fe2 100644 (file)
+Each mount of the devpts filesystem is now distinct such that ptys
+and their indices allocated in one mount are independent from ptys
+and their indices in all other mounts.
 
-To support containers, we now allow multiple instances of devpts filesystem,
-such that indices of ptys allocated in one instance are independent of indices
-allocated in other instances of devpts.
+All mounts of the devpts filesystem now create a /dev/pts/ptmx node
+with permissions 0000.
 
-To preserve backward compatibility, this support for multiple instances is
-enabled only if:
+To retain backwards compatibility, a ptmx device node (i.e. any node
+created with "mknod name c 5 2"), when opened, will look for an instance
+of devpts under the name "pts" in the same directory as the ptmx device
+node.
 
-       - CONFIG_DEVPTS_MULTIPLE_INSTANCES=y, and
-       - '-o newinstance' mount option is specified while mounting devpts
-
-IOW, devpts now supports both single-instance and multi-instance semantics.
-
-If CONFIG_DEVPTS_MULTIPLE_INSTANCES=n, there is no change in behavior and
-this referred to as the "legacy" mode. In this mode, the new mount options
-(-o newinstance and -o ptmxmode) will be ignored with a 'bogus option' message
-on console.
-
-If CONFIG_DEVPTS_MULTIPLE_INSTANCES=y and devpts is mounted without the
-'newinstance' option (as in current start-up scripts) the new mount binds
-to the initial kernel mount of devpts. This mode is referred to as the
-'single-instance' mode and the current, single-instance semantics are
-preserved, i.e PTYs are common across the system.
-
-The only difference between this single-instance mode and the legacy mode
-is the presence of new, '/dev/pts/ptmx' node with permissions 0000, which
-can safely be ignored.
-
-If CONFIG_DEVPTS_MULTIPLE_INSTANCES=y and 'newinstance' option is specified,
-the mount is considered to be in the multi-instance mode and a new instance
-of the devpts fs is created. Any ptys created in this instance are independent
-of ptys in other instances of devpts. Like in the single-instance mode, the
-/dev/pts/ptmx node is present. To effectively use the multi-instance mode,
-open of /dev/ptmx must be a redirected to '/dev/pts/ptmx' using a symlink or
-bind-mount.
-
-Eg: A container startup script could do the following:
-
-       $ chmod 0666 /dev/pts/ptmx
-       $ rm /dev/ptmx
-       $ ln -s pts/ptmx /dev/ptmx
-       $ ns_exec -cm /bin/bash
-
-       # We are now in new container
-
-       $ umount /dev/pts
-       $ mount -t devpts -o newinstance lxcpts /dev/pts
-       $ sshd -p 1234
-
-where 'ns_exec -cm /bin/bash' calls clone() with CLONE_NEWNS flag and execs
-/bin/bash in the child process.  A pty created by the sshd is not visible in
-the original mount of /dev/pts.
+As an option, instead of placing a /dev/ptmx device node at /dev/ptmx,
+it is possible to place a symlink to /dev/pts/ptmx at /dev/ptmx or
+to bind mount /dev/pts/ptmx to /dev/ptmx.  If you opt for using
+the devpts filesystem in this manner, devpts should be mounted with
+ptmxmode=0666, or chmod 0666 /dev/pts/ptmx should be called.
 
 Total count of pty pairs in all instances is limited by sysctls:
 kernel.pty.max = 4096          - global limit
-kernel.pty.reserve = 1024      - reserve for initial instance
+kernel.pty.reserve = 1024      - reserved for filesystems mounted from the initial mount namespace
 kernel.pty.nr                  - current count of ptys
 
 Per-instance limit could be set by adding mount option "max=<count>".
 This feature was added in kernel 3.4 together with sysctl kernel.pty.reserve.
 In kernels older than 3.4 sysctl kernel.pty.max works as per-instance limit.
-
-User-space changes
-------------------
-
-In multi-instance mode (i.e '-o newinstance' mount option is specified at least
-once), following user-space issues should be noted.
-
-1. If -o newinstance mount option is never used, /dev/pts/ptmx can be ignored
-   and no change is needed to system-startup scripts.
-
-2. To effectively use multi-instance mode (i.e -o newinstance is specified)
-   administrators or startup scripts should "redirect" open of /dev/ptmx to
-   /dev/pts/ptmx using either a bind mount or symlink.
-
-       $ mount -t devpts -o newinstance devpts /dev/pts
-
-   followed by either
-
-       $ rm /dev/ptmx
-       $ ln -s pts/ptmx /dev/ptmx
-       $ chmod 666 /dev/pts/ptmx
-   or
-       $ mount -o bind /dev/pts/ptmx /dev/ptmx
-
-3. The '/dev/ptmx -> pts/ptmx' symlink is the preferred method since it
-   enables better error-reporting and treats both single-instance and
-   multi-instance mounts similarly.
-
-   But this method requires that system-startup scripts set the mode of
-   /dev/pts/ptmx correctly (default mode is 0000). The scripts can set the
-   mode by, either
-
-       - adding ptmxmode mount option to devpts entry in /etc/fstab, or
-       - using 'chmod 0666 /dev/pts/ptmx'
-
-4. If multi-instance mode mount is needed for containers, but the system
-   startup scripts have not yet been updated, container-startup scripts
-   should bind mount /dev/ptmx to /dev/pts/ptmx to avoid breaking single-
-   instance mounts.
-
-   Or, in general, container-startup scripts should use:
-
-       mount -t devpts -o newinstance -o ptmxmode=0666 devpts /dev/pts
-       if [ ! -L /dev/ptmx ]; then
-               mount -o bind /dev/pts/ptmx /dev/ptmx
-       fi
-
-   When all devpts mounts are multi-instance, /dev/ptmx can permanently be
-   a symlink to pts/ptmx and the bind mount can be ignored.
-
-5. A multi-instance mount that is not accompanied by the /dev/ptmx to
-   /dev/pts/ptmx redirection would result in an unusable/unreachable pty.
-
-       mount -t devpts -o newinstance lxcpts /dev/pts
-
-   immediately followed by:
-
-       open("/dev/ptmx")
-
-    would create a pty, say /dev/pts/7, in the initial kernel mount.
-    But /dev/pts/7 would be invisible in the new mount.
-
-6. The permissions for /dev/pts/ptmx node should be specified when mounting
-   /dev/pts, using the '-o ptmxmode=%o' mount option (default is 0000).
-
-       mount -t devpts -o newinstance -o ptmxmode=0644 devpts /dev/pts
-
-   The permissions can be later be changed as usual with 'chmod'.
-
-       chmod 666 /dev/pts/ptmx
-
-7. A mount of devpts without the 'newinstance' option results in binding to
-   initial kernel mount.  This behavior while preserving legacy semantics,
-   does not provide strict isolation in a container environment. i.e by
-   mounting devpts without the 'newinstance' option, a container could
-   get visibility into the 'host' or root container's devpts.
-   
-   To workaround this and have strict isolation, all mounts of devpts,
-   including the mount in the root container, should use the newinstance
-   option.
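
A small userspace sketch of the behaviour described above (illustrative only, not part of the patch): opening a mount's /dev/pts/ptmx node allocates a pty index that is local to that devpts mount, and the slave appears under that same mount.

	#define _XOPEN_SOURCE 600
	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		/* Assumes ptmxmode=0666 (or a prior chmod), as noted above. */
		int master = open("/dev/pts/ptmx", O_RDWR | O_NOCTTY);

		if (master < 0 || grantpt(master) < 0 || unlockpt(master) < 0)
			return 1;

		/* The slave node lives in the mount this ptmx node belongs to. */
		printf("slave pty: %s\n", ptsname(master));
		return 0;
	}
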
index 35f6a98..220d0a8 100644 (file)
@@ -170,21 +170,92 @@ document trapinfo
        address the kernel panicked.
 end
 
+define dump_log_idx
+       set $idx = $arg0
+       if ($argc > 1)
+               set $prev_flags = $arg1
+       else
+               set $prev_flags = 0
+       end
+       set $msg = ((struct printk_log *) (log_buf + $idx))
+       set $prefix = 1
+       set $newline = 1
+       set $log = log_buf + $idx + sizeof(*$msg)
 
-define dmesg
-       set $i = 0
-       set $end_idx = (log_end - 1) & (log_buf_len - 1)
+       # prev & LOG_CONT && !(msg->flags & LOG_PREFIX)
+       if (($prev_flags & 8) && !($msg->flags & 4))
+               set $prefix = 0
+       end
+
+       # msg->flags & LOG_CONT
+       if ($msg->flags & 8)
+               # (prev & LOG_CONT && !(prev & LOG_NEWLINE))
+               if (($prev_flags & 8) && !($prev_flags & 2))
+                       set $prefix = 0
+               end
+               # (!(msg->flags & LOG_NEWLINE))
+               if (!($msg->flags & 2))
+                       set $newline = 0
+               end
+       end
+
+       if ($prefix)
+               printf "[%5lu.%06lu] ", $msg->ts_nsec / 1000000000, $msg->ts_nsec % 1000000000
+       end
+       if ($msg->text_len != 0)
+               eval "printf \"%%%d.%ds\", $log", $msg->text_len, $msg->text_len
+       end
+       if ($newline)
+               printf "\n"
+       end
+       if ($msg->dict_len > 0)
+               set $dict = $log + $msg->text_len
+               set $idx = 0
+               set $line = 1
+               while ($idx < $msg->dict_len)
+                       if ($line)
+                               printf " "
+                               set $line = 0
+                       end
+                       set $c = $dict[$idx]
+                       if ($c == '\0')
+                               printf "\n"
+                               set $line = 1
+                       else
+                               if ($c < ' ' || $c >= 127 || $c == '\\')
+                                       printf "\\x%02x", $c
+                               else
+                                       printf "%c", $c
+                               end
+                       end
+                       set $idx = $idx + 1
+               end
+               printf "\n"
+       end
+end
+document dump_log_idx
+       Dump a single log given its index in the log buffer.  The first
+       parameter is the index into log_buf, the second is optional and
+       specifies the previous log entry's flags, used for properly
+       formatting continued lines.
+end
 
-       while ($i < logged_chars)
-               set $idx = (log_end - 1 - logged_chars + $i) & (log_buf_len - 1)
+define dmesg
+       set $i = log_first_idx
+       set $end_idx = log_first_idx
+       set $prev_flags = 0
 
-               if ($idx + 100 <= $end_idx) || \
-                  ($end_idx <= $idx && $idx + 100 < log_buf_len)
-                       printf "%.100s", &log_buf[$idx]
-                       set $i = $i + 100
+       while (1)
+               set $msg = ((struct printk_log *) (log_buf + $i))
+               if ($msg->len == 0)
+                       set $i = 0
                else
-                       printf "%c", log_buf[$idx]
-                       set $i = $i + 1
+                       dump_log_idx $i $prev_flags
+                       set $i = $i + $msg->len
+                       set $prev_flags = $msg->flags
+               end
+               if ($i == $end_idx)
+                       loop_break
                end
        end
 end
index 631b0f7..9d05ed7 100644 (file)
@@ -369,8 +369,6 @@ does not allocate any driver private context space.
 Switch configuration
 --------------------
 
-- priv_size: additional size needed by the switch driver for its private context
-
 - tag_protocol: this is to indicate what kind of tagging protocol is supported,
   should be a valid value from the dsa_tag_protocol enum
 
@@ -416,11 +414,6 @@ PHY devices and link management
   to the switch port MDIO registers. If unavailable return a negative error
   code.
 
-- poll_link: Function invoked by DSA to query the link state of the switch
-  builtin Ethernet PHYs, per port. This function is responsible for calling
-  netif_carrier_{on,off} when appropriate, and can be used to poll all ports in a
-  single call. Executes from workqueue context.
-
 - adjust_link: Function invoked by the PHY library when a slave network device
   is attached to a PHY device. This function is responsible for appropriately
   configuring the switch port link parameters: speed, duplex, pause based on
@@ -542,6 +535,16 @@ Bridge layer
 Bridge VLAN filtering
 ---------------------
 
+- port_vlan_filtering: bridge layer function invoked when the bridge gets
+  configured for turning on or off VLAN filtering. If nothing specific needs to
+  be done at the hardware level, this callback does not need to be implemented.
+  When VLAN filtering is turned on, the hardware must be programmed with
+  When VLAN filtering is turned on, the hardware must be programmed to
+  reject 802.1Q frames which have VLAN IDs outside of the programmed allowed
+  untagged frames must be rejected as well. When turned off the switch must
+  accept any 802.1Q frames irrespective of their VLAN ID, and untagged frames are
+  allowed.
+
 - port_vlan_prepare: bridge layer function invoked when the bridge prepares the
   configuration of a VLAN on the given port. If the operation is not supported
   by the hardware, this function should return -EOPNOTSUPP to inform the bridge
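
The new port_vlan_filtering hook can be pictured as below. This is a sketch, not part of the patch: the callback signature is the one declared in include/net/dsa.h at this point, while my_priv and my_hw_set_8021q_mode() are invented driver helpers.

	static int my_port_vlan_filtering(struct dsa_switch *ds, int port,
					  bool vlan_filtering)
	{
		struct my_priv *priv = ds->priv;

		/*
		 * With filtering on, the hardware should drop 802.1Q frames
		 * whose VID is outside the allowed map (and untagged frames
		 * if no PVID is programmed); with filtering off it should
		 * accept tagged and untagged frames alike.
		 */
		return my_hw_set_8021q_mode(priv, port, vlan_filtering);
	}
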
index 6c7f365..9ae9293 100644 (file)
@@ -1036,15 +1036,17 @@ proxy_arp_pvlan - BOOLEAN
 
 shared_media - BOOLEAN
        Send(router) or accept(host) RFC1620 shared media redirects.
-       Overrides ip_secure_redirects.
+       Overrides secure_redirects.
        shared_media for the interface will be enabled if at least one of
        conf/{all,interface}/shared_media is set to TRUE,
        it will be disabled otherwise
        default TRUE
 
 secure_redirects - BOOLEAN
-       Accept ICMP redirect messages only for gateways,
-       listed in default gateway list.
+       Accept ICMP redirect messages only to gateways listed in the
+       interface's current gateway list. Even if disabled, RFC1122 redirect
+       rules still apply.
+       Overridden by shared_media.
        secure_redirects for the interface will be enabled if at least one of
        conf/{all,interface}/secure_redirects is set to TRUE,
        it will be disabled otherwise
index 20d0571..3849814 100644 (file)
@@ -826,7 +826,8 @@ The keyctl syscall functions are:
  (*) Compute a Diffie-Hellman shared secret or public key
 
        long keyctl(KEYCTL_DH_COMPUTE, struct keyctl_dh_params *params,
-                  char *buffer, size_t buflen);
+                  char *buffer, size_t buflen,
+                  void *reserved);
 
      The params struct contains serial numbers for three keys:
 
@@ -843,6 +844,8 @@ The keyctl syscall functions are:
      public key.  If the base is the remote public key, the result is
      the shared secret.
 
+     The reserved argument must be set to NULL.
+
      The buffer length must be at least the length of the prime, or zero.
 
      If the buffer length is nonzero, the length of the result is
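
A hedged userspace sketch of the call as documented above (not part of the patch: the wrapper name and the key IDs passed in are placeholders; the struct and command constant come from include/uapi/linux/keyctl.h).

	#include <stdint.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/keyctl.h>

	static long dh_compute(int32_t priv_key, int32_t prime_key,
			       int32_t base_key, char *buf, size_t buflen)
	{
		struct keyctl_dh_params params = {
			.private = priv_key,
			.prime   = prime_key,
			.base    = base_key,
		};

		/* The new fifth argument is reserved and must be NULL. */
		return syscall(__NR_keyctl, KEYCTL_DH_COMPUTE, &params,
			       buf, buflen, NULL);
	}
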
index 7304d2e..ed42cb6 100644 (file)
@@ -7989,6 +7989,7 @@ Q:        http://patchwork.ozlabs.org/project/netdev/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
 S:     Odd Fixes
+F:     Documentation/devicetree/bindings/net/
 F:     drivers/net/
 F:     include/linux/if_*
 F:     include/linux/netdevice.h
@@ -8944,6 +8945,7 @@ M:        Linus Walleij <linus.walleij@linaro.org>
 L:     linux-gpio@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git
 S:     Maintained
+F:     Documentation/devicetree/bindings/pinctrl/
 F:     drivers/pinctrl/
 F:     include/linux/pinctrl/
 
index 0f70de6..8d1301a 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 7
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Psychotic Stoned Sheep
 
 # *DOCUMENTATION*
index ef9119f..4d93758 100644 (file)
@@ -733,8 +733,8 @@ static int vfp_set(struct task_struct *target,
        if (ret)
                return ret;
 
-       vfp_flush_hwstate(thread);
        thread->vfpstate.hard = new_vfp;
+       vfp_flush_hwstate(thread);
 
        return 0;
 }
index 76747d9..5a0a691 100644 (file)
@@ -113,6 +113,18 @@ config ARCH_PHYS_ADDR_T_64BIT
 config MMU
        def_bool y
 
+config ARM64_PAGE_SHIFT
+       int
+       default 16 if ARM64_64K_PAGES
+       default 14 if ARM64_16K_PAGES
+       default 12
+
+config ARM64_CONT_SHIFT
+       int
+       default 5 if ARM64_64K_PAGES
+       default 7 if ARM64_16K_PAGES
+       default 4
+
 config ARCH_MMAP_RND_BITS_MIN
        default 14 if ARM64_64K_PAGES
        default 16 if ARM64_16K_PAGES
@@ -426,6 +438,15 @@ config CAVIUM_ERRATUM_22375
 
          If unsure, say Y.
 
+config CAVIUM_ERRATUM_23144
+       bool "Cavium erratum 23144: ITS SYNC hang on dual socket system"
+       depends on NUMA
+       default y
+       help
+         ITS SYNC command hang for cross node io and collections/cpu mapping.
+
+         If unsure, say Y.
+
 config CAVIUM_ERRATUM_23154
        bool "Cavium erratum 23154: Access to ICC_IAR1_EL1 is not sync'ed"
        default y
index 710fde4..0cc758c 100644 (file)
@@ -12,7 +12,8 @@ config ARM64_PTDUMP
          who are working in architecture specific areas of the kernel.
          It is probably not a good idea to enable this feature in a production
          kernel.
-         If in doubt, say "N"
+
+         If in doubt, say N.
 
 config PID_IN_CONTEXTIDR
        bool "Write the current PID to the CONTEXTIDR register"
@@ -38,15 +39,15 @@ config ARM64_RANDOMIZE_TEXT_OFFSET
          value.
 
 config DEBUG_SET_MODULE_RONX
-        bool "Set loadable kernel module data as NX and text as RO"
-        depends on MODULES
-        help
-          This option helps catch unintended modifications to loadable
-          kernel module's text and read-only data. It also prevents execution
-          of module data. Such protection may interfere with run-time code
-          patching and dynamic kernel tracing - and they might also protect
-          against certain classes of kernel exploits.
-          If in doubt, say "N".
+       bool "Set loadable kernel module data as NX and text as RO"
+       depends on MODULES
+       default y
+       help
+         If this is set, kernel module text and rodata will be made read-only.
+         This is to help catch accidental or malicious attempts to change the
+         kernel's executable code.
+
+         If in doubt, say Y.
 
 config DEBUG_RODATA
        bool "Make kernel text and rodata read-only"
@@ -56,7 +57,7 @@ config DEBUG_RODATA
          is to help catch accidental or malicious attempts to change the
          kernel's executable code.
 
-         If in doubt, say Y
+         If in doubt, say Y.
 
 config DEBUG_ALIGN_RODATA
        depends on DEBUG_RODATA
@@ -69,7 +70,7 @@ config DEBUG_ALIGN_RODATA
          alignment and potentially wasted space. Turn on this option if
          performance is more important than memory pressure.
 
-         If in doubt, say N
+         If in doubt, say N.
 
 source "drivers/hwtracing/coresight/Kconfig"
 
index 354d754..7085e32 100644 (file)
@@ -60,7 +60,9 @@ head-y                := arch/arm64/kernel/head.o
 
 # The byte offset of the kernel image in RAM from the start of RAM.
 ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y)
-TEXT_OFFSET := $(shell awk 'BEGIN {srand(); printf "0x%03x000\n", int(512 * rand())}')
+TEXT_OFFSET := $(shell awk "BEGIN {srand(); printf \"0x%06x\n\", \
+                int(2 * 1024 * 1024 / (2 ^ $(CONFIG_ARM64_PAGE_SHIFT)) * \
+                rand()) * (2 ^ $(CONFIG_ARM64_PAGE_SHIFT))}")
 else
 TEXT_OFFSET := 0x00080000
 endif
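
The awk expression above just picks a random page-aligned offset inside a 2 MiB window; a worked C equivalent of the same arithmetic (illustrative only, assuming a 4K page size) reads:

	#include <stdio.h>
	#include <stdlib.h>
	#include <time.h>

	int main(void)
	{
		const unsigned long page   = 1UL << 12;		/* CONFIG_ARM64_PAGE_SHIFT == 12 */
		const unsigned long window = 2 * 1024 * 1024;	/* 2 MiB */
		unsigned long offset;

		srand(time(NULL));
		/* Choose one of the page-sized slots inside the window. */
		offset = (rand() % (window / page)) * page;
		printf("TEXT_OFFSET := 0x%06lx\n", offset);
		return 0;
	}
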
index 7a09c48..579b6e6 100644 (file)
@@ -160,14 +160,14 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
 #define STACK_RND_MASK                 (0x3ffff >> (PAGE_SHIFT - 12))
 #endif
 
-#ifdef CONFIG_COMPAT
-
 #ifdef __AARCH64EB__
 #define COMPAT_ELF_PLATFORM            ("v8b")
 #else
 #define COMPAT_ELF_PLATFORM            ("v8l")
 #endif
 
+#ifdef CONFIG_COMPAT
+
 #define COMPAT_ELF_ET_DYN_BASE         (2 * TASK_SIZE_32 / 3)
 
 /* AArch32 registers. */
index 72a3025..31b7322 100644 (file)
@@ -55,8 +55,9 @@
 #define VMEMMAP_SIZE (UL(1) << (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT))
 
 /*
- * PAGE_OFFSET - the virtual address of the start of the kernel image (top
+ * PAGE_OFFSET - the virtual address of the start of the linear map (top
  *              (VA_BITS - 1))
+ * KIMAGE_VADDR - the virtual address of the start of the kernel image
  * VA_BITS - the maximum number of bits for virtual addresses.
  * VA_START - the first kernel virtual address.
  * TASK_SIZE - the maximum size of a user space task.
index 17b45f7..8472c6d 100644 (file)
 
 /* PAGE_SHIFT determines the page size */
 /* CONT_SHIFT determines the number of pages which can be tracked together  */
-#ifdef CONFIG_ARM64_64K_PAGES
-#define PAGE_SHIFT             16
-#define CONT_SHIFT             5
-#elif defined(CONFIG_ARM64_16K_PAGES)
-#define PAGE_SHIFT             14
-#define CONT_SHIFT             7
-#else
-#define PAGE_SHIFT             12
-#define CONT_SHIFT             4
-#endif
+#define PAGE_SHIFT             CONFIG_ARM64_PAGE_SHIFT
+#define CONT_SHIFT             CONFIG_ARM64_CONT_SHIFT
 #define PAGE_SIZE              (_AC(1, UL) << PAGE_SHIFT)
 #define PAGE_MASK              (~(PAGE_SIZE-1))
 
index 0685d74..9e397a5 100644 (file)
@@ -80,19 +80,6 @@ static inline void set_fs(mm_segment_t fs)
 
 #define segment_eq(a, b)       ((a) == (b))
 
-/*
- * Return 1 if addr < current->addr_limit, 0 otherwise.
- */
-#define __addr_ok(addr)                                                        \
-({                                                                     \
-       unsigned long flag;                                             \
-       asm("cmp %1, %0; cset %0, lo"                                   \
-               : "=&r" (flag)                                          \
-               : "r" (addr), "0" (current_thread_info()->addr_limit)   \
-               : "cc");                                                \
-       flag;                                                           \
-})
-
 /*
  * Test whether a block of memory is a valid user space address.
  * Returns 1 if the range is valid, 0 otherwise.
index 41e58fe..e78ac26 100644 (file)
@@ -44,7 +44,7 @@
 #define __ARM_NR_compat_cacheflush     (__ARM_NR_COMPAT_BASE+2)
 #define __ARM_NR_compat_set_tls                (__ARM_NR_COMPAT_BASE+5)
 
-#define __NR_compat_syscalls           390
+#define __NR_compat_syscalls           394
 #endif
 
 #define __ARCH_WANT_SYS_CLONE
index 5b925b7..b7e8ef1 100644 (file)
@@ -801,6 +801,14 @@ __SYSCALL(__NR_execveat, compat_sys_execveat)
 __SYSCALL(__NR_userfaultfd, sys_userfaultfd)
 #define __NR_membarrier 389
 __SYSCALL(__NR_membarrier, sys_membarrier)
+#define __NR_mlock2 390
+__SYSCALL(__NR_mlock2, sys_mlock2)
+#define __NR_copy_file_range 391
+__SYSCALL(__NR_copy_file_range, sys_copy_file_range)
+#define __NR_preadv2 392
+__SYSCALL(__NR_preadv2, compat_sys_preadv2)
+#define __NR_pwritev2 393
+__SYSCALL(__NR_pwritev2, compat_sys_pwritev2)
 
 /*
  * Please add new compat syscalls above this comment and update
index 3808470..c173d32 100644 (file)
@@ -22,6 +22,8 @@
 
 #include <linux/bitops.h>
 #include <linux/bug.h>
+#include <linux/compat.h>
+#include <linux/elf.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/personality.h>
@@ -104,6 +106,7 @@ static const char *const compat_hwcap2_str[] = {
 static int c_show(struct seq_file *m, void *v)
 {
        int i, j;
+       bool compat = personality(current->personality) == PER_LINUX32;
 
        for_each_online_cpu(i) {
                struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
@@ -115,6 +118,9 @@ static int c_show(struct seq_file *m, void *v)
                 * "processor".  Give glibc what it expects.
                 */
                seq_printf(m, "processor\t: %d\n", i);
+               if (compat)
+                       seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n",
+                                  MIDR_REVISION(midr), COMPAT_ELF_PLATFORM);
 
                seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
                           loops_per_jiffy / (500000UL/HZ),
@@ -127,7 +133,7 @@ static int c_show(struct seq_file *m, void *v)
                 * software which does already (at least for 32-bit).
                 */
                seq_puts(m, "Features\t:");
-               if (personality(current->personality) == PER_LINUX32) {
+               if (compat) {
 #ifdef CONFIG_COMPAT
                        for (j = 0; compat_hwcap_str[j]; j++)
                                if (compat_elf_hwcap & (1 << j))
index c539208..f7cf463 100644 (file)
@@ -477,8 +477,9 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
        void __user *pc = (void __user *)instruction_pointer(regs);
        console_verbose();
 
-       pr_crit("Bad mode in %s handler detected, code 0x%08x -- %s\n",
-               handler[reason], esr, esr_get_class_string(esr));
+       pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
+               handler[reason], smp_processor_id(), esr,
+               esr_get_class_string(esr));
        __show_regs(regs);
 
        info.si_signo = SIGILL;
index fff7cd4..5f8f80b 100644 (file)
@@ -169,7 +169,8 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
         * Make sure stores to the GIC via the memory mapped interface
         * are now visible to the system register interface.
         */
-       dsb(st);
+       if (!cpu_if->vgic_sre)
+               dsb(st);
 
        cpu_if->vgic_vmcr  = read_gicreg(ICH_VMCR_EL2);
 
@@ -190,12 +191,11 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
                        if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
                                continue;
 
-                       if (cpu_if->vgic_elrsr & (1 << i)) {
+                       if (cpu_if->vgic_elrsr & (1 << i))
                                cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
-                               continue;
-                       }
+                       else
+                               cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);
 
-                       cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);
                        __gic_v3_set_lr(0, i);
                }
 
@@ -236,8 +236,12 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 
        val = read_gicreg(ICC_SRE_EL2);
        write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);
-       isb(); /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
-       write_gicreg(1, ICC_SRE_EL1);
+
+       if (!cpu_if->vgic_sre) {
+               /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
+               isb();
+               write_gicreg(1, ICC_SRE_EL1);
+       }
 }
 
 void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
@@ -256,8 +260,10 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
         * been actually programmed with the value we want before
         * starting to mess with the rest of the GIC.
         */
-       write_gicreg(cpu_if->vgic_sre, ICC_SRE_EL1);
-       isb();
+       if (!cpu_if->vgic_sre) {
+               write_gicreg(0, ICC_SRE_EL1);
+               isb();
+       }
 
        val = read_gicreg(ICH_VTR_EL2);
        max_lr_idx = vtr_to_max_lr_idx(val);
@@ -306,18 +312,18 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
         * (re)distributors. This ensure the guest will read the
         * correct values from the memory-mapped interface.
         */
-       isb();
-       dsb(sy);
+       if (!cpu_if->vgic_sre) {
+               isb();
+               dsb(sy);
+       }
        vcpu->arch.vgic_cpu.live_lrs = live_lrs;
 
        /*
         * Prevent the guest from touching the GIC system registers if
         * SRE isn't enabled for GICv3 emulation.
         */
-       if (!cpu_if->vgic_sre) {
-               write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
-                            ICC_SRE_EL2);
-       }
+       write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
+                    ICC_SRE_EL2);
 }
 
 void __hyp_text __vgic_v3_init_lrs(void)
index 7bbe3ff..a57d650 100644 (file)
@@ -134,6 +134,17 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu,
        return true;
 }
 
+static bool access_gic_sre(struct kvm_vcpu *vcpu,
+                          struct sys_reg_params *p,
+                          const struct sys_reg_desc *r)
+{
+       if (p->is_write)
+               return ignore_write(vcpu, p);
+
+       p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
+       return true;
+}
+
 static bool trap_raz_wi(struct kvm_vcpu *vcpu,
                        struct sys_reg_params *p,
                        const struct sys_reg_desc *r)
@@ -958,7 +969,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
          access_gic_sgi },
        /* ICC_SRE_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
-         trap_raz_wi },
+         access_gic_sre },
 
        /* CONTEXTIDR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
index 8404190..ccfde23 100644 (file)
@@ -150,6 +150,7 @@ static const struct prot_bits pte_bits[] = {
 
 struct pg_level {
        const struct prot_bits *bits;
+       const char *name;
        size_t num;
        u64 mask;
 };
@@ -157,15 +158,19 @@ struct pg_level {
 static struct pg_level pg_level[] = {
        {
        }, { /* pgd */
+               .name   = "PGD",
                .bits   = pte_bits,
                .num    = ARRAY_SIZE(pte_bits),
        }, { /* pud */
+               .name   = (CONFIG_PGTABLE_LEVELS > 3) ? "PUD" : "PGD",
                .bits   = pte_bits,
                .num    = ARRAY_SIZE(pte_bits),
        }, { /* pmd */
+               .name   = (CONFIG_PGTABLE_LEVELS > 2) ? "PMD" : "PGD",
                .bits   = pte_bits,
                .num    = ARRAY_SIZE(pte_bits),
        }, { /* pte */
+               .name   = "PTE",
                .bits   = pte_bits,
                .num    = ARRAY_SIZE(pte_bits),
        },
@@ -214,7 +219,8 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
                                delta >>= 10;
                                unit++;
                        }
-                       seq_printf(st->seq, "%9lu%c", delta, *unit);
+                       seq_printf(st->seq, "%9lu%c %s", delta, *unit,
+                                  pg_level[st->level].name);
                        if (pg_level[st->level].bits)
                                dump_prot(st, pg_level[st->level].bits,
                                          pg_level[st->level].num);
index aa8aee7..2e49bd2 100644 (file)
@@ -306,6 +306,10 @@ static __init int setup_hugepagesz(char *opt)
                hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
        } else if (ps == PUD_SIZE) {
                hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
+       } else if (ps == (PAGE_SIZE * CONT_PTES)) {
+               hugetlb_add_hstate(CONT_PTE_SHIFT);
+       } else if (ps == (PMD_SIZE * CONT_PMDS)) {
+               hugetlb_add_hstate((PMD_SHIFT + CONT_PMD_SHIFT) - PAGE_SHIFT);
        } else {
                hugetlb_bad_size();
                pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
@@ -314,3 +318,13 @@ static __init int setup_hugepagesz(char *opt)
        return 1;
 }
 __setup("hugepagesz=", setup_hugepagesz);
+
+#ifdef CONFIG_ARM64_64K_PAGES
+static __init int add_default_hugepagesz(void)
+{
+       if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL)
+               hugetlb_add_hstate(CONT_PMD_SHIFT);
+       return 0;
+}
+arch_initcall(add_default_hugepagesz);
+#endif
index 4736020..5e953ab 100644 (file)
@@ -8,6 +8,8 @@ struct pt_regs;
 void parisc_terminate(char *msg, struct pt_regs *regs,
                int code, unsigned long offset) __noreturn __cold;
 
+void die_if_kernel(char *str, struct pt_regs *regs, long err);
+
 /* mm/fault.c */
 void do_page_fault(struct pt_regs *regs, unsigned long code,
                unsigned long address);
index e81ccf1..5adc339 100644 (file)
@@ -324,8 +324,9 @@ int init_per_cpu(int cpunum)
                per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
                per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
 
-               printk(KERN_INFO  "FP[%d] enabled: Rev %ld Model %ld\n",
-                       cpunum, coproc_cfg.revision, coproc_cfg.model);
+               if (cpunum == 0)
+                       printk(KERN_INFO  "FP[%d] enabled: Rev %ld Model %ld\n",
+                               cpunum, coproc_cfg.revision, coproc_cfg.model);
 
                /*
                ** store status register to stack (hopefully aligned)
index 58dd680..31ec99a 100644 (file)
@@ -309,11 +309,6 @@ void __init time_init(void)
        clocks_calc_mult_shift(&cyc2ns_mul, &cyc2ns_shift, current_cr16_khz,
                                NSEC_PER_MSEC, 0);
 
-#if defined(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK) && defined(CONFIG_64BIT)
-       /* At bootup only one 64bit CPU is online and cr16 is "stable" */
-       set_sched_clock_stable();
-#endif
-
        start_cpu_itimer();     /* get CPU 0 started */
 
        /* register at clocksource framework */
index d7c0acb..2b65c01 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/ratelimit.h>
 #include <asm/uaccess.h>
 #include <asm/hardirq.h>
+#include <asm/traps.h>
 
 /* #define DEBUG_UNALIGNED 1 */
 
 
 int unaligned_enabled __read_mostly = 1;
 
-void die_if_kernel (char *str, struct pt_regs *regs, long err);
-
 static int emulate_ldh(struct pt_regs *regs, int toreg)
 {
        unsigned long saddr = regs->ior;
@@ -666,7 +665,7 @@ void handle_unaligned(struct pt_regs *regs)
                break;
        }
 
-       if (modify && R1(regs->iir))
+       if (ret == 0 && modify && R1(regs->iir))
                regs->gr[R1(regs->iir)] = newbase;
 
 
@@ -677,6 +676,14 @@ void handle_unaligned(struct pt_regs *regs)
 
        if (ret)
        {
+               /*
+                * The unaligned handler failed.
+                * If we were called by __get_user() or __put_user() jump
+                * to its exception fixup handler instead of crashing.
+                */
+               if (!user_mode(regs) && fixup_exception(regs))
+                       return;
+
                printk(KERN_CRIT "Unaligned handler failed, ret = %d\n", ret);
                die_if_kernel("Unaligned data reference", regs, 28);
 
index ddd988b..e278a87 100644 (file)
@@ -75,7 +75,10 @@ find_unwind_entry(unsigned long addr)
        if (addr >= kernel_unwind_table.start && 
            addr <= kernel_unwind_table.end)
                e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
-       else 
+       else {
+               unsigned long flags;
+
+               spin_lock_irqsave(&unwind_lock, flags);
                list_for_each_entry(table, &unwind_tables, list) {
                        if (addr >= table->start && 
                            addr <= table->end)
@@ -86,6 +89,8 @@ find_unwind_entry(unsigned long addr)
                                break;
                        }
                }
+               spin_unlock_irqrestore(&unwind_lock, flags);
+       }
 
        return e;
 }
@@ -303,18 +308,16 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
 
                        insn = *(unsigned int *)npc;
 
-                       if ((insn & 0xffffc000) == 0x37de0000 ||
-                           (insn & 0xffe00000) == 0x6fc00000) {
+                       if ((insn & 0xffffc001) == 0x37de0000 ||
+                           (insn & 0xffe00001) == 0x6fc00000) {
                                /* ldo X(sp), sp, or stwm X,D(sp) */
-                               frame_size += (insn & 0x1 ? -1 << 13 : 0) | 
-                                       ((insn & 0x3fff) >> 1);
+                               frame_size += (insn & 0x3fff) >> 1;
                                dbg("analyzing func @ %lx, insn=%08x @ "
                                    "%lx, frame_size = %ld\n", info->ip,
                                    insn, npc, frame_size);
-                       } else if ((insn & 0xffe00008) == 0x73c00008) {
+                       } else if ((insn & 0xffe00009) == 0x73c00008) {
                                /* std,ma X,D(sp) */
-                               frame_size += (insn & 0x1 ? -1 << 13 : 0) | 
-                                       (((insn >> 4) & 0x3ff) << 3);
+                               frame_size += ((insn >> 4) & 0x3ff) << 3;
                                dbg("analyzing func @ %lx, insn=%08x @ "
                                    "%lx, frame_size = %ld\n", info->ip,
                                    insn, npc, frame_size);
@@ -333,6 +336,9 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
                        }
                }
 
+               if (frame_size > e->Total_frame_size << 3)
+                       frame_size = e->Total_frame_size << 3;
+
                if (!unwind_special(info, e->region_start, frame_size)) {
                        info->prev_sp = info->sp - frame_size;
                        if (e->Millicode)
index c1e82e9..a0948f4 100644 (file)
 #define   MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */
 #define   MMCR0_FCHV   0x00000001UL /* freeze conditions in hypervisor mode */
 #define SPRN_MMCR1     798
-#define SPRN_MMCR2     769
+#define SPRN_MMCR2     785
 #define SPRN_MMCRA     0x312
 #define   MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */
 #define   MMCRA_SDAR_DCACHE_MISS 0x40000000UL
 #define SPRN_PMC6      792
 #define SPRN_PMC7      793
 #define SPRN_PMC8      794
-#define SPRN_SIAR      780
-#define SPRN_SDAR      781
 #define SPRN_SIER      784
 #define   SIER_SIPR            0x2000000       /* Sampled MSR_PR */
 #define   SIER_SIHV            0x1000000       /* Sampled MSR_HV */
 #define   SIER_SIAR_VALID      0x0400000       /* SIAR contents valid */
 #define   SIER_SDAR_VALID      0x0200000       /* SDAR contents valid */
+#define SPRN_SIAR      796
+#define SPRN_SDAR      797
 #define SPRN_TACR      888
 #define SPRN_TCSCR     889
 #define SPRN_CSIGR     890
index da51925..ccd2037 100644 (file)
@@ -656,6 +656,7 @@ unsigned char ibm_architecture_vec[] = {
        W(0xffff0000), W(0x003e0000),   /* POWER6 */
        W(0xffff0000), W(0x003f0000),   /* POWER7 */
        W(0xffff0000), W(0x004b0000),   /* POWER8E */
+       W(0xffff0000), W(0x004c0000),   /* POWER8NVL */
        W(0xffff0000), W(0x004d0000),   /* POWER8 */
        W(0xffffffff), W(0x0f000004),   /* all 2.07-compliant */
        W(0xffffffff), W(0x0f000003),   /* all 2.06-compliant */
index 5926896..b2740c6 100644 (file)
@@ -159,6 +159,19 @@ static struct mmu_psize_def mmu_psize_defaults_gp[] = {
        },
 };
 
+/*
+ * 'R' and 'C' update notes:
+ *  - Under pHyp or KVM, the updatepp path will not set C, thus it *will*
+ *     create writeable HPTEs without C set, because the hcall H_PROTECT
+ *     that we use in that case will not update C
+ *  - The above is however not a problem, because we also don't do that
+ *     fancy "no flush" variant of eviction and we use H_REMOVE which will
+ *     do the right thing and thus we don't have the race I described earlier
+ *
+ *    - Under bare metal,  we do have the race, so we need R and C set
+ *    - We make sure R is always set and never lost
+ *    - C is _PAGE_DIRTY, and *should* always be set for a writeable mapping
+ */
 unsigned long htab_convert_pte_flags(unsigned long pteflags)
 {
        unsigned long rflags = 0;
@@ -186,9 +199,14 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
                        rflags |= 0x1;
        }
        /*
-        * Always add "C" bit for perf. Memory coherence is always enabled
+        * We can't allow hardware to update hpte bits. Hence always
+        * set 'R' bit and set 'C' if it is a write fault
+        * Memory coherence is always enabled
         */
-       rflags |=  HPTE_R_C | HPTE_R_M;
+       rflags |=  HPTE_R_R | HPTE_R_M;
+
+       if (pteflags & _PAGE_DIRTY)
+               rflags |= HPTE_R_C;
        /*
         * Add in WIG bits
         */
index eb44511..6703187 100644 (file)
@@ -33,10 +33,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
        changed = !pmd_same(*(pmdp), entry);
        if (changed) {
                __ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry));
-               /*
-                * Since we are not supporting SW TLB systems, we don't
-                * have any thing similar to flush_tlb_page_nohash()
-                */
+               flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        }
        return changed;
 }
index 18b2c11..c939e6e 100644 (file)
@@ -296,11 +296,6 @@ found:
 void __init radix__early_init_mmu(void)
 {
        unsigned long lpcr;
-       /*
-        * setup LPCR UPRT based on mmu_features
-        */
-       lpcr = mfspr(SPRN_LPCR);
-       mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
 
 #ifdef CONFIG_PPC_64K_PAGES
        /* PAGE_SIZE mappings */
@@ -343,8 +338,11 @@ void __init radix__early_init_mmu(void)
        __pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;
 
        radix_init_page_sizes();
-       if (!firmware_has_feature(FW_FEATURE_LPAR))
+       if (!firmware_has_feature(FW_FEATURE_LPAR)) {
+               lpcr = mfspr(SPRN_LPCR);
+               mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
                radix_init_partition_table();
+       }
 
        radix_init_pgtable();
 }
@@ -353,16 +351,15 @@ void radix__early_init_mmu_secondary(void)
 {
        unsigned long lpcr;
        /*
-        * setup LPCR UPRT based on mmu_features
+        * update partition table control register and UPRT
         */
-       lpcr = mfspr(SPRN_LPCR);
-       mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
-       /*
-        * update partition table control register, 64 K size.
-        */
-       if (!firmware_has_feature(FW_FEATURE_LPAR))
+       if (!firmware_has_feature(FW_FEATURE_LPAR)) {
+               lpcr = mfspr(SPRN_LPCR);
+               mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
+
                mtspr(SPRN_PTCR,
                      __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
+       }
 }
 
 void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
index ac3ffd9..3998e0f 100644 (file)
@@ -53,7 +53,6 @@ static int ibm_read_slot_reset_state2;
 static int ibm_slot_error_detail;
 static int ibm_get_config_addr_info;
 static int ibm_get_config_addr_info2;
-static int ibm_configure_bridge;
 static int ibm_configure_pe;
 
 /*
@@ -81,7 +80,14 @@ static int pseries_eeh_init(void)
        ibm_get_config_addr_info2       = rtas_token("ibm,get-config-addr-info2");
        ibm_get_config_addr_info        = rtas_token("ibm,get-config-addr-info");
        ibm_configure_pe                = rtas_token("ibm,configure-pe");
-       ibm_configure_bridge            = rtas_token("ibm,configure-bridge");
+
+       /*
+        * ibm,configure-pe and ibm,configure-bridge have the same semantics,
+        * however ibm,configure-pe can be faster.  If we can't find
+        * ibm,configure-pe then fall back to using ibm,configure-bridge.
+        */
+       if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE)
+               ibm_configure_pe        = rtas_token("ibm,configure-bridge");
 
        /*
         * Necessary sanity check. We needn't check "get-config-addr-info"
@@ -93,8 +99,7 @@ static int pseries_eeh_init(void)
            (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE &&
             ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) ||
            ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE       ||
-           (ibm_configure_pe == RTAS_UNKNOWN_SERVICE           &&
-            ibm_configure_bridge == RTAS_UNKNOWN_SERVICE)) {
+           ibm_configure_pe == RTAS_UNKNOWN_SERVICE) {
                pr_info("EEH functionality not supported\n");
                return -EINVAL;
        }
@@ -615,29 +620,41 @@ static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
 {
        int config_addr;
        int ret;
+       /* Waiting 0.2s maximum before skipping configuration */
+       int max_wait = 200;
 
        /* Figure out the PE address */
        config_addr = pe->config_addr;
        if (pe->addr)
                config_addr = pe->addr;
 
-       /* Use new configure-pe function, if supported */
-       if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
+       while (max_wait > 0) {
                ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
                                config_addr, BUID_HI(pe->phb->buid),
                                BUID_LO(pe->phb->buid));
-       } else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
-               ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
-                               config_addr, BUID_HI(pe->phb->buid),
-                               BUID_LO(pe->phb->buid));
-       } else {
-               return -EFAULT;
-       }
 
-       if (ret)
-               pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
-                       __func__, pe->phb->global_number, pe->addr, ret);
+               if (!ret)
+                       return ret;
+
+               /*
+                * If RTAS returns a delay value that's above 100ms, cut it
+                * down to 100ms in case firmware made a mistake.  For more
+                * on how these delay values work see rtas_busy_delay_time
+                */
+               if (ret > RTAS_EXTENDED_DELAY_MIN+2 &&
+                   ret <= RTAS_EXTENDED_DELAY_MAX)
+                       ret = RTAS_EXTENDED_DELAY_MIN+2;
+
+               max_wait -= rtas_busy_delay_time(ret);
+
+               if (max_wait < 0)
+                       break;
+
+               rtas_busy_delay(ret);
+       }
 
+       pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
+               __func__, pe->phb->global_number, pe->addr, ret);
        return ret;
 }
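
The hunk above replaces the single configure call with a bounded retry: each busy or extended-delay status returned by firmware is converted to a wait time and charged against a 200ms budget before the loop gives up. A minimal standalone sketch of that pattern, with stub functions standing in for the RTAS calls (all names below are illustrative, not the kernel API):

    #include <stdio.h>
    #include <unistd.h>

    /* Illustrative status codes: 0 = done, 1 = busy, retry after a delay. */
    #define FW_OK    0
    #define FW_BUSY  1

    static int fw_configure(void)          /* stand-in for the firmware call */
    {
            static int calls;
            return (++calls < 3) ? FW_BUSY : FW_OK;
    }

    static int fw_delay_ms(int status)     /* stand-in for rtas_busy_delay_time() */
    {
            return (status == FW_BUSY) ? 20 : 0;
    }

    int main(void)
    {
            int budget_ms = 200;           /* total time we are willing to wait */
            int ret = FW_BUSY;

            while (budget_ms > 0) {
                    ret = fw_configure();
                    if (ret == FW_OK)
                            break;

                    budget_ms -= fw_delay_ms(ret);   /* charge the delay to the budget */
                    if (budget_ms < 0)
                            break;

                    usleep(fw_delay_ms(ret) * 1000); /* then actually wait */
            }

            printf("configure %s\n", ret == FW_OK ? "succeeded" : "timed out");
            return ret == FW_OK ? 0 : 1;
    }

The important property is that the budget is decremented before sleeping, so firmware that keeps reporting busy cannot stall the caller indefinitely.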
 
index 0ac42cc..d5ec71b 100644 (file)
@@ -1,8 +1,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
+CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
@@ -13,19 +12,19 @@ CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_NUMA_BALANCING=y
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_PIDS=y
-CONFIG_CGROUP_DEVICE=y
-CONFIG_CPUSETS=y
-CONFIG_CGROUP_CPUACCT=y
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
-CONFIG_MEMCG_KMEM=y
-CONFIG_CGROUP_HUGETLB=y
-CONFIG_CGROUP_PERF=y
+CONFIG_BLK_CGROUP=y
 CONFIG_CFS_BANDWIDTH=y
 CONFIG_RT_GROUP_SCHED=y
-CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
@@ -55,7 +54,6 @@ CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_CFQ_GROUP_IOSCHED=y
 CONFIG_DEFAULT_DEADLINE=y
 CONFIG_LIVEPATCH=y
-CONFIG_MARCH_Z196=y
 CONFIG_TUNE_ZEC12=y
 CONFIG_NR_CPUS=256
 CONFIG_NUMA=y
@@ -65,6 +63,15 @@ CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CLEANCACHE=y
+CONFIG_FRONTSWAP=y
+CONFIG_CMA=y
+CONFIG_MEM_SOFT_DIRTY=y
+CONFIG_ZPOOL=m
+CONFIG_ZBUD=m
+CONFIG_ZSMALLOC=m
+CONFIG_ZSMALLOC_STAT=y
+CONFIG_IDLE_PAGE_TRACKING=y
 CONFIG_PCI=y
 CONFIG_PCI_DEBUG=y
 CONFIG_HOTPLUG_PCI=y
@@ -452,6 +459,7 @@ CONFIG_HW_RANDOM_VIRTIO=m
 CONFIG_RAW_DRIVER=m
 CONFIG_HANGCHECK_TIMER=m
 CONFIG_TN3270_FS=y
+# CONFIG_HWMON is not set
 CONFIG_WATCHDOG=y
 CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_SOFT_WATCHDOG=m
@@ -537,6 +545,8 @@ CONFIG_DLM=m
 CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
 CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_GDB_SCRIPTS=y
 CONFIG_FRAME_WARN=1024
 CONFIG_READABLE_ASM=y
 CONFIG_UNUSED_SYMBOLS=y
@@ -555,13 +565,17 @@ CONFIG_SLUB_DEBUG_ON=y
 CONFIG_SLUB_STATS=y
 CONFIG_DEBUG_STACK_USAGE=y
 CONFIG_DEBUG_VM=y
+CONFIG_DEBUG_VM_VMACACHE=y
 CONFIG_DEBUG_VM_RB=y
+CONFIG_DEBUG_VM_PGFLAGS=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
 CONFIG_DEBUG_PER_CPU_MAPS=y
 CONFIG_DEBUG_SHIRQ=y
 CONFIG_DETECT_HUNG_TASK=y
+CONFIG_WQ_WATCHDOG=y
 CONFIG_PANIC_ON_OOPS=y
+CONFIG_DEBUG_TIMEKEEPING=y
 CONFIG_TIMER_STATS=y
 CONFIG_DEBUG_RT_MUTEXES=y
 CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
@@ -596,6 +610,8 @@ CONFIG_FTRACE_SYSCALLS=y
 CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_UPROBE_EVENT=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_LKDTM=m
 CONFIG_TEST_LIST_SORT=y
 CONFIG_KPROBES_SANITY_TEST=y
@@ -607,7 +623,6 @@ CONFIG_TEST_STRING_HELPERS=y
 CONFIG_TEST_KSTRTOX=y
 CONFIG_DMA_API_DEBUG=y
 CONFIG_TEST_BPF=m
-# CONFIG_STRICT_DEVMEM is not set
 CONFIG_S390_PTDUMP=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
@@ -651,7 +666,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=y
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
@@ -664,7 +678,7 @@ CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_ASYMMETRIC_KEY_TYPE=m
+CONFIG_ASYMMETRIC_KEY_TYPE=y
 CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
 CONFIG_X509_CERTIFICATE_PARSER=m
 CONFIG_CRC7=m
index a31dcd5..f46a351 100644 (file)
@@ -1,8 +1,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
+CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
@@ -13,17 +12,17 @@ CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_NUMA_BALANCING=y
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_PIDS=y
-CONFIG_CGROUP_DEVICE=y
-CONFIG_CPUSETS=y
-CONFIG_CGROUP_CPUACCT=y
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
-CONFIG_MEMCG_KMEM=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_PERF=y
-CONFIG_BLK_CGROUP=y
+CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
@@ -53,7 +52,6 @@ CONFIG_SOLARIS_X86_PARTITION=y
 CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_CFQ_GROUP_IOSCHED=y
 CONFIG_DEFAULT_DEADLINE=y
-CONFIG_MARCH_Z196=y
 CONFIG_TUNE_ZEC12=y
 CONFIG_NR_CPUS=256
 CONFIG_NUMA=y
@@ -62,6 +60,14 @@ CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CLEANCACHE=y
+CONFIG_FRONTSWAP=y
+CONFIG_CMA=y
+CONFIG_ZSWAP=y
+CONFIG_ZBUD=m
+CONFIG_ZSMALLOC=m
+CONFIG_ZSMALLOC_STAT=y
+CONFIG_IDLE_PAGE_TRACKING=y
 CONFIG_PCI=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_S390=y
@@ -530,6 +536,8 @@ CONFIG_NLS_UTF8=m
 CONFIG_DLM=m
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_GDB_SCRIPTS=y
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_FRAME_WARN=1024
 CONFIG_UNUSED_SYMBOLS=y
@@ -547,13 +555,13 @@ CONFIG_LATENCYTOP=y
 CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
 CONFIG_BLK_DEV_IO_TRACE=y
 # CONFIG_KPROBE_EVENT is not set
+CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_LKDTM=m
 CONFIG_RBTREE_TEST=m
 CONFIG_INTERVAL_TREE_TEST=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
 CONFIG_TEST_BPF=m
-# CONFIG_STRICT_DEVMEM is not set
 CONFIG_S390_PTDUMP=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
@@ -597,8 +605,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=y
-CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 CONFIG_CRYPTO_USER_API_HASH=m
@@ -610,7 +616,7 @@ CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_ASYMMETRIC_KEY_TYPE=m
+CONFIG_ASYMMETRIC_KEY_TYPE=y
 CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
 CONFIG_X509_CERTIFICATE_PARSER=m
 CONFIG_CRC7=m
index 7b73bf3..ba0f2a5 100644 (file)
@@ -1,8 +1,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
+CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
@@ -14,17 +13,17 @@ CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_NUMA_BALANCING=y
 # CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_PIDS=y
-CONFIG_CGROUP_DEVICE=y
-CONFIG_CPUSETS=y
-CONFIG_CGROUP_CPUACCT=y
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
-CONFIG_MEMCG_KMEM=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_PERF=y
-CONFIG_BLK_CGROUP=y
+CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
@@ -53,7 +52,6 @@ CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_CFQ_GROUP_IOSCHED=y
 CONFIG_DEFAULT_DEADLINE=y
 CONFIG_LIVEPATCH=y
-CONFIG_MARCH_Z196=y
 CONFIG_TUNE_ZEC12=y
 CONFIG_NR_CPUS=512
 CONFIG_NUMA=y
@@ -62,6 +60,14 @@ CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CLEANCACHE=y
+CONFIG_FRONTSWAP=y
+CONFIG_CMA=y
+CONFIG_ZSWAP=y
+CONFIG_ZBUD=m
+CONFIG_ZSMALLOC=m
+CONFIG_ZSMALLOC_STAT=y
+CONFIG_IDLE_PAGE_TRACKING=y
 CONFIG_PCI=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_S390=y
@@ -447,6 +453,7 @@ CONFIG_HW_RANDOM_VIRTIO=m
 CONFIG_RAW_DRIVER=m
 CONFIG_HANGCHECK_TIMER=m
 CONFIG_TN3270_FS=y
+# CONFIG_HWMON is not set
 CONFIG_WATCHDOG=y
 CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_SOFT_WATCHDOG=m
@@ -530,6 +537,8 @@ CONFIG_NLS_UTF8=m
 CONFIG_DLM=m
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_GDB_SCRIPTS=y
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_FRAME_WARN=1024
 CONFIG_UNUSED_SYMBOLS=y
@@ -546,11 +555,12 @@ CONFIG_FTRACE_SYSCALLS=y
 CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_UPROBE_EVENT=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_LKDTM=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
 CONFIG_TEST_BPF=m
-# CONFIG_STRICT_DEVMEM is not set
 CONFIG_S390_PTDUMP=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
@@ -594,8 +604,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=y
-CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 CONFIG_CRYPTO_USER_API_HASH=m
@@ -607,7 +615,7 @@ CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_ASYMMETRIC_KEY_TYPE=m
+CONFIG_ASYMMETRIC_KEY_TYPE=y
 CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
 CONFIG_X509_CERTIFICATE_PARSER=m
 CONFIG_CRC7=m
index 1719843..4366a3e 100644 (file)
@@ -1,5 +1,5 @@
 # CONFIG_SWAP is not set
-CONFIG_NO_HZ=y
+CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
@@ -7,7 +7,6 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_DEFAULT_DEADLINE=y
-CONFIG_MARCH_Z196=y
 CONFIG_TUNE_ZEC12=y
 # CONFIG_COMPAT is not set
 CONFIG_NR_CPUS=2
@@ -64,7 +63,6 @@ CONFIG_PANIC_ON_OOPS=y
 # CONFIG_SCHED_DEBUG is not set
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 # CONFIG_FTRACE is not set
-# CONFIG_STRICT_DEVMEM is not set
 # CONFIG_PFAULT is not set
 # CONFIG_S390_HYPFS_FS is not set
 # CONFIG_VIRTUALIZATION is not set
index e24f2af..3f571ea 100644 (file)
@@ -1,8 +1,8 @@
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
+CONFIG_USELIB=y
 CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
+CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_TASKSTATS=y
 CONFIG_TASK_DELAY_ACCT=y
@@ -11,19 +11,19 @@ CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_CGROUPS=y
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_PIDS=y
-CONFIG_CGROUP_DEVICE=y
-CONFIG_CPUSETS=y
-CONFIG_CGROUP_CPUACCT=y
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
-CONFIG_MEMCG_KMEM=y
-CONFIG_CGROUP_HUGETLB=y
-CONFIG_CGROUP_PERF=y
+CONFIG_BLK_CGROUP=y
 CONFIG_CGROUP_SCHED=y
 CONFIG_RT_GROUP_SCHED=y
-CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
 CONFIG_BLK_DEV_INITRD=y
@@ -44,7 +44,6 @@ CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_DEFAULT_DEADLINE=y
 CONFIG_LIVEPATCH=y
-CONFIG_MARCH_Z196=y
 CONFIG_NR_CPUS=256
 CONFIG_NUMA=y
 CONFIG_HZ_100=y
@@ -52,6 +51,14 @@ CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CLEANCACHE=y
+CONFIG_FRONTSWAP=y
+CONFIG_CMA=y
+CONFIG_ZSWAP=y
+CONFIG_ZBUD=m
+CONFIG_ZSMALLOC=m
+CONFIG_ZSMALLOC_STAT=y
+CONFIG_IDLE_PAGE_TRACKING=y
 CONFIG_CRASH_DUMP=y
 CONFIG_BINFMT_MISC=m
 CONFIG_HIBERNATION=y
@@ -61,7 +68,6 @@ CONFIG_UNIX=y
 CONFIG_NET_KEY=y
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
-# CONFIG_INET_LRO is not set
 CONFIG_L2TP=m
 CONFIG_L2TP_DEBUGFS=m
 CONFIG_VLAN_8021Q=y
@@ -144,6 +150,9 @@ CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
 CONFIG_HUGETLBFS=y
 # CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_GDB_SCRIPTS=y
 CONFIG_UNUSED_SYMBOLS=y
 CONFIG_DEBUG_SECTION_MISMATCH=y
 CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
@@ -158,20 +167,21 @@ CONFIG_LOCK_STAT=y
 CONFIG_DEBUG_LOCKDEP=y
 CONFIG_DEBUG_ATOMIC_SLEEP=y
 CONFIG_DEBUG_LIST=y
-CONFIG_DEBUG_PI_LIST=y
 CONFIG_DEBUG_SG=y
 CONFIG_DEBUG_NOTIFIERS=y
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 CONFIG_RCU_TRACE=y
 CONFIG_LATENCYTOP=y
 CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
-CONFIG_TRACER_SNAPSHOT=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
 CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
 CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_UPROBE_EVENT=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_KPROBES_SANITY_TEST=y
-# CONFIG_STRICT_DEVMEM is not set
 CONFIG_S390_PTDUMP=y
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_AUTHENC=m
@@ -212,8 +222,6 @@ CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_DEFLATE=m
-CONFIG_CRYPTO_ZLIB=m
-CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
index 7a31440..19288c1 100644 (file)
@@ -250,6 +250,7 @@ static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
 
        report_user_fault(regs, SIGSEGV, 1);
        si.si_signo = SIGSEGV;
+       si.si_errno = 0;
        si.si_code = si_code;
        si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
        force_sig_info(SIGSEGV, &si, current);
index f010c93..fda605d 100644 (file)
@@ -37,7 +37,7 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
  *           |               |     |
  *           +---------------+     |
  *           | 8 byte skbp   |     |
- * R15+170 -> +---------------+     |
+ * R15+176 -> +---------------+     |
  *           | 8 byte hlen   |     |
  * R15+168 -> +---------------+     |
  *           | 4 byte align  |     |
@@ -58,7 +58,7 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
 #define STK_OFF                (STK_SPACE - STK_160_UNUSED)
 #define STK_OFF_TMP    160     /* Offset of tmp buffer on stack */
 #define STK_OFF_HLEN   168     /* Offset of SKB header length on stack */
-#define STK_OFF_SKBP   170     /* Offset of SKB pointer on stack */
+#define STK_OFF_SKBP   176     /* Offset of SKB pointer on stack */
 
 #define STK_OFF_R6     (160 - 11 * 8)  /* Offset of r6 on stack */
 #define STK_OFF_TCCNT  (160 - 12 * 8)  /* Offset of tail_call_cnt on stack */
index 9133b0e..bee281f 100644 (file)
@@ -45,7 +45,7 @@ struct bpf_jit {
        int labels[1];          /* Labels for local jumps */
 };
 
-#define BPF_SIZE_MAX   0x7ffff /* Max size for program (20 bit signed displ) */
+#define BPF_SIZE_MAX   0xffff  /* Max size for program (16 bit branches) */
 
 #define SEEN_SKB       1       /* skb access */
 #define SEEN_MEM       2       /* use mem[] for temporary storage */
@@ -450,7 +450,7 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
                emit_load_skb_data_hlen(jit);
        if (jit->seen & SEEN_SKB_CHANGE)
                /* stg %b1,ST_OFF_SKBP(%r0,%r15) */
-               EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15,
+               EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_1, REG_0, REG_15,
                              STK_OFF_SKBP);
 }
 
index 10e9dab..f0700cf 100644 (file)
 
 #define        PTREGS_OFF      (STACK_BIAS + STACKFRAME_SZ)
 
+#define        RTRAP_PSTATE            (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
+#define        RTRAP_PSTATE_IRQOFF     (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
+#define RTRAP_PSTATE_AG_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
+
 #define __CHEETAH_ID   0x003e0014
 #define __JALAPENO_ID  0x003e0016
 #define __SERRANO_ID   0x003e0022
index 71b5a67..781b9f1 100644 (file)
@@ -589,8 +589,8 @@ user_rtt_fill_64bit:                                        \
         restored;                                      \
        nop; nop; nop; nop; nop; nop;                   \
        nop; nop; nop; nop; nop;                        \
-       ba,a,pt %xcc, user_rtt_fill_fixup;              \
-       ba,a,pt %xcc, user_rtt_fill_fixup;              \
+       ba,a,pt %xcc, user_rtt_fill_fixup_dax;          \
+       ba,a,pt %xcc, user_rtt_fill_fixup_mna;          \
        ba,a,pt %xcc, user_rtt_fill_fixup;
 
 
@@ -652,8 +652,8 @@ user_rtt_fill_32bit:                                        \
         restored;                                      \
        nop; nop; nop; nop; nop;                        \
        nop; nop; nop;                                  \
-       ba,a,pt %xcc, user_rtt_fill_fixup;              \
-       ba,a,pt %xcc, user_rtt_fill_fixup;              \
+       ba,a,pt %xcc, user_rtt_fill_fixup_dax;          \
+       ba,a,pt %xcc, user_rtt_fill_fixup_mna;          \
        ba,a,pt %xcc, user_rtt_fill_fixup;
 
 
index 7cf9c6e..fdb1332 100644 (file)
@@ -21,6 +21,7 @@ CFLAGS_REMOVE_perf_event.o := -pg
 CFLAGS_REMOVE_pcr.o := -pg
 endif
 
+obj-$(CONFIG_SPARC64)   += urtt_fill.o
 obj-$(CONFIG_SPARC32)   += entry.o wof.o wuf.o
 obj-$(CONFIG_SPARC32)   += etrap_32.o
 obj-$(CONFIG_SPARC32)   += rtrap_32.o
index d08bdaf..216948c 100644 (file)
 #include <asm/visasm.h>
 #include <asm/processor.h>
 
-#define                RTRAP_PSTATE            (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
-#define                RTRAP_PSTATE_IRQOFF     (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
-#define                RTRAP_PSTATE_AG_IRQOFF  (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
-
 #ifdef CONFIG_CONTEXT_TRACKING
 # define SCHEDULE_USER schedule_user
 #else
@@ -242,52 +238,17 @@ rt_continue:      ldx                     [%sp + PTREGS_OFF + PT_V9_G1], %g1
                 wrpr                   %g1, %cwp
                ba,a,pt                 %xcc, user_rtt_fill_64bit
 
-user_rtt_fill_fixup:
-               rdpr    %cwp, %g1
-               add     %g1, 1, %g1
-               wrpr    %g1, 0x0, %cwp
-
-               rdpr    %wstate, %g2
-               sll     %g2, 3, %g2
-               wrpr    %g2, 0x0, %wstate
-
-               /* We know %canrestore and %otherwin are both zero.  */
-
-               sethi   %hi(sparc64_kern_pri_context), %g2
-               ldx     [%g2 + %lo(sparc64_kern_pri_context)], %g2
-               mov     PRIMARY_CONTEXT, %g1
-
-661:           stxa    %g2, [%g1] ASI_DMMU
-               .section .sun4v_1insn_patch, "ax"
-               .word   661b
-               stxa    %g2, [%g1] ASI_MMU
-               .previous
-
-               sethi   %hi(KERNBASE), %g1
-               flush   %g1
+user_rtt_fill_fixup_dax:
+               ba,pt   %xcc, user_rtt_fill_fixup_common
+                mov    1, %g3
 
-               or      %g4, FAULT_CODE_WINFIXUP, %g4
-               stb     %g4, [%g6 + TI_FAULT_CODE]
-               stx     %g5, [%g6 + TI_FAULT_ADDR]
+user_rtt_fill_fixup_mna:
+               ba,pt   %xcc, user_rtt_fill_fixup_common
+                mov    2, %g3
 
-               mov     %g6, %l1
-               wrpr    %g0, 0x0, %tl
-
-661:           nop
-               .section                .sun4v_1insn_patch, "ax"
-               .word                   661b
-               SET_GL(0)
-               .previous
-
-               wrpr    %g0, RTRAP_PSTATE, %pstate
-
-               mov     %l1, %g6
-               ldx     [%g6 + TI_TASK], %g4
-               LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
-               call    do_sparc64_fault
-                add    %sp, PTREGS_OFF, %o0
-               ba,pt   %xcc, rtrap
-                nop
+user_rtt_fill_fixup:
+               ba,pt   %xcc, user_rtt_fill_fixup_common
+                clr    %g3
 
 user_rtt_pre_restore:
                add                     %g1, 1, %g1
index 3c25241..91cc2f4 100644 (file)
@@ -138,12 +138,24 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
        return 0;
 }
 
+/* Checks if the fp is valid.  We always build signal frames which are
+ * 16-byte aligned, therefore we can always enforce that the restore
+ * frame has that property as well.
+ */
+static bool invalid_frame_pointer(void __user *fp, int fplen)
+{
+       if ((((unsigned long) fp) & 15) ||
+           ((unsigned long)fp) > 0x100000000ULL - fplen)
+               return true;
+       return false;
+}
+
 void do_sigreturn32(struct pt_regs *regs)
 {
        struct signal_frame32 __user *sf;
        compat_uptr_t fpu_save;
        compat_uptr_t rwin_save;
-       unsigned int psr;
+       unsigned int psr, ufp;
        unsigned int pc, npc;
        sigset_t set;
        compat_sigset_t seta;
@@ -158,11 +170,16 @@ void do_sigreturn32(struct pt_regs *regs)
        sf = (struct signal_frame32 __user *) regs->u_regs[UREG_FP];
 
        /* 1. Make sure we are not getting garbage from the user */
-       if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
-           (((unsigned long) sf) & 3))
+       if (invalid_frame_pointer(sf, sizeof(*sf)))
+               goto segv;
+
+       if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
+               goto segv;
+
+       if (ufp & 0x7)
                goto segv;
 
-       if (get_user(pc, &sf->info.si_regs.pc) ||
+       if (__get_user(pc, &sf->info.si_regs.pc) ||
            __get_user(npc, &sf->info.si_regs.npc))
                goto segv;
 
@@ -227,7 +244,7 @@ segv:
 asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
 {
        struct rt_signal_frame32 __user *sf;
-       unsigned int psr, pc, npc;
+       unsigned int psr, pc, npc, ufp;
        compat_uptr_t fpu_save;
        compat_uptr_t rwin_save;
        sigset_t set;
@@ -242,11 +259,16 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
        sf = (struct rt_signal_frame32 __user *) regs->u_regs[UREG_FP];
 
        /* 1. Make sure we are not getting garbage from the user */
-       if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
-           (((unsigned long) sf) & 3))
+       if (invalid_frame_pointer(sf, sizeof(*sf)))
                goto segv;
 
-       if (get_user(pc, &sf->regs.pc) || 
+       if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
+               goto segv;
+
+       if (ufp & 0x7)
+               goto segv;
+
+       if (__get_user(pc, &sf->regs.pc) || 
            __get_user(npc, &sf->regs.npc))
                goto segv;
 
@@ -307,14 +329,6 @@ segv:
        force_sig(SIGSEGV, current);
 }
 
-/* Checks if the fp is valid */
-static int invalid_frame_pointer(void __user *fp, int fplen)
-{
-       if ((((unsigned long) fp) & 7) || ((unsigned long)fp) > 0x100000000ULL - fplen)
-               return 1;
-       return 0;
-}
-
 static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
 {
        unsigned long sp;
index 52aa5e4..c3c12ef 100644 (file)
@@ -60,10 +60,22 @@ struct rt_signal_frame {
 #define SF_ALIGNEDSZ  (((sizeof(struct signal_frame) + 7) & (~7)))
 #define RT_ALIGNEDSZ  (((sizeof(struct rt_signal_frame) + 7) & (~7)))
 
+/* Checks if the fp is valid.  We always build signal frames which are
+ * 16-byte aligned, therefore we can always enforce that the restore
+ * frame has that property as well.
+ */
+static inline bool invalid_frame_pointer(void __user *fp, int fplen)
+{
+       if ((((unsigned long) fp) & 15) || !__access_ok((unsigned long)fp, fplen))
+               return true;
+
+       return false;
+}
+
 asmlinkage void do_sigreturn(struct pt_regs *regs)
 {
+       unsigned long up_psr, pc, npc, ufp;
        struct signal_frame __user *sf;
-       unsigned long up_psr, pc, npc;
        sigset_t set;
        __siginfo_fpu_t __user *fpu_save;
        __siginfo_rwin_t __user *rwin_save;
@@ -77,10 +89,13 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
        sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];
 
        /* 1. Make sure we are not getting garbage from the user */
-       if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
+       if (!invalid_frame_pointer(sf, sizeof(*sf)))
+               goto segv_and_exit;
+
+       if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
                goto segv_and_exit;
 
-       if (((unsigned long) sf) & 3)
+       if (ufp & 0x7)
                goto segv_and_exit;
 
        err = __get_user(pc,  &sf->info.si_regs.pc);
@@ -127,7 +142,7 @@ segv_and_exit:
 asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
 {
        struct rt_signal_frame __user *sf;
-       unsigned int psr, pc, npc;
+       unsigned int psr, pc, npc, ufp;
        __siginfo_fpu_t __user *fpu_save;
        __siginfo_rwin_t __user *rwin_save;
        sigset_t set;
@@ -135,8 +150,13 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
 
        synchronize_user_stack();
        sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
-       if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
-           (((unsigned long) sf) & 0x03))
+       if (!invalid_frame_pointer(sf, sizeof(*sf)))
+               goto segv;
+
+       if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
+               goto segv;
+
+       if (ufp & 0x7)
                goto segv;
 
        err = __get_user(pc, &sf->regs.pc);
@@ -178,15 +198,6 @@ segv:
        force_sig(SIGSEGV, current);
 }
 
-/* Checks if the fp is valid */
-static inline int invalid_frame_pointer(void __user *fp, int fplen)
-{
-       if ((((unsigned long) fp) & 7) || !__access_ok((unsigned long)fp, fplen))
-               return 1;
-
-       return 0;
-}
-
 static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
 {
        unsigned long sp = regs->u_regs[UREG_FP];
index 39aaec1..5ee930c 100644 (file)
@@ -234,6 +234,17 @@ do_sigsegv:
        goto out;
 }
 
+/* Checks if the fp is valid.  We always build rt signal frames which
+ * are 16-byte aligned, therefore we can always enforce that the
+ * restore frame has that property as well.
+ */
+static bool invalid_frame_pointer(void __user *fp)
+{
+       if (((unsigned long) fp) & 15)
+               return true;
+       return false;
+}
+
 struct rt_signal_frame {
        struct sparc_stackf     ss;
        siginfo_t               info;
@@ -246,8 +257,8 @@ struct rt_signal_frame {
 
 void do_rt_sigreturn(struct pt_regs *regs)
 {
+       unsigned long tpc, tnpc, tstate, ufp;
        struct rt_signal_frame __user *sf;
-       unsigned long tpc, tnpc, tstate;
        __siginfo_fpu_t __user *fpu_save;
        __siginfo_rwin_t __user *rwin_save;
        sigset_t set;
@@ -261,10 +272,16 @@ void do_rt_sigreturn(struct pt_regs *regs)
                (regs->u_regs [UREG_FP] + STACK_BIAS);
 
        /* 1. Make sure we are not getting garbage from the user */
-       if (((unsigned long) sf) & 3)
+       if (invalid_frame_pointer(sf))
+               goto segv;
+
+       if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
                goto segv;
 
-       err = get_user(tpc, &sf->regs.tpc);
+       if ((ufp + STACK_BIAS) & 0x7)
+               goto segv;
+
+       err = __get_user(tpc, &sf->regs.tpc);
        err |= __get_user(tnpc, &sf->regs.tnpc);
        if (test_thread_flag(TIF_32BIT)) {
                tpc &= 0xffffffff;
@@ -308,14 +325,6 @@ segv:
        force_sig(SIGSEGV, current);
 }
 
-/* Checks if the fp is valid */
-static int invalid_frame_pointer(void __user *fp)
-{
-       if (((unsigned long) fp) & 15)
-               return 1;
-       return 0;
-}
-
 static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
 {
        unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS;
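
The invalid_frame_pointer() helpers introduced in these signal paths all rely on the same property: the kernel only ever builds 16-byte-aligned signal frames, so a restore frame with low address bits set can be rejected before any field is read. A small standalone illustration, modeled on the compat variant above (hypothetical values, not kernel code):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Modeled on the compat (32-bit task) check: the frame must be 16-byte
     * aligned and must fit entirely below the 4GB boundary.
     */
    static bool invalid_frame_pointer(uint64_t fp, unsigned int fplen)
    {
            if (fp & 15)
                    return true;
            if (fp > 0x100000000ULL - fplen)
                    return true;
            return false;
    }

    int main(void)
    {
            printf("%d\n", invalid_frame_pointer(0xffff1a30, 0x200)); /* 0: accepted   */
            printf("%d\n", invalid_frame_pointer(0xffff1a38, 0x200)); /* 1: misaligned */
            printf("%d\n", invalid_frame_pointer(0xfffffff0, 0x200)); /* 1: crosses 4GB */
            return 0;
    }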
index 0f6eebe..e5fe8ce 100644 (file)
@@ -48,6 +48,10 @@ int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
 int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
 {
        int err;
+
+       if (((unsigned long) fpu) & 3)
+               return -EFAULT;
+
 #ifdef CONFIG_SMP
        if (test_tsk_thread_flag(current, TIF_USEDFPU))
                regs->psr &= ~PSR_EF;
@@ -97,7 +101,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp)
        struct thread_info *t = current_thread_info();
        int i, wsaved, err;
 
-       __get_user(wsaved, &rp->wsaved);
+       if (((unsigned long) rp) & 3)
+               return -EFAULT;
+
+       get_user(wsaved, &rp->wsaved);
        if (wsaved > NSWINS)
                return -EFAULT;
 
index 387834a..36aadcb 100644 (file)
@@ -37,7 +37,10 @@ int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
        unsigned long fprs;
        int err;
 
-       err = __get_user(fprs, &fpu->si_fprs);
+       if (((unsigned long) fpu) & 7)
+               return -EFAULT;
+
+       err = get_user(fprs, &fpu->si_fprs);
        fprs_write(0);
        regs->tstate &= ~TSTATE_PEF;
        if (fprs & FPRS_DL)
@@ -72,7 +75,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp)
        struct thread_info *t = current_thread_info();
        int i, wsaved, err;
 
-       __get_user(wsaved, &rp->wsaved);
+       if (((unsigned long) rp) & 7)
+               return -EFAULT;
+
+       get_user(wsaved, &rp->wsaved);
        if (wsaved > NSWINS)
                return -EFAULT;
 
diff --git a/arch/sparc/kernel/urtt_fill.S b/arch/sparc/kernel/urtt_fill.S
new file mode 100644 (file)
index 0000000..5604a2b
--- /dev/null
@@ -0,0 +1,98 @@
+#include <asm/thread_info.h>
+#include <asm/trap_block.h>
+#include <asm/spitfire.h>
+#include <asm/ptrace.h>
+#include <asm/head.h>
+
+               .text
+               .align  8
+               .globl  user_rtt_fill_fixup_common
+user_rtt_fill_fixup_common:
+               rdpr    %cwp, %g1
+               add     %g1, 1, %g1
+               wrpr    %g1, 0x0, %cwp
+
+               rdpr    %wstate, %g2
+               sll     %g2, 3, %g2
+               wrpr    %g2, 0x0, %wstate
+
+               /* We know %canrestore and %otherwin are both zero.  */
+
+               sethi   %hi(sparc64_kern_pri_context), %g2
+               ldx     [%g2 + %lo(sparc64_kern_pri_context)], %g2
+               mov     PRIMARY_CONTEXT, %g1
+
+661:           stxa    %g2, [%g1] ASI_DMMU
+               .section .sun4v_1insn_patch, "ax"
+               .word   661b
+               stxa    %g2, [%g1] ASI_MMU
+               .previous
+
+               sethi   %hi(KERNBASE), %g1
+               flush   %g1
+
+               mov     %g4, %l4
+               mov     %g5, %l5
+               brnz,pn %g3, 1f
+                mov    %g3, %l3
+
+               or      %g4, FAULT_CODE_WINFIXUP, %g4
+               stb     %g4, [%g6 + TI_FAULT_CODE]
+               stx     %g5, [%g6 + TI_FAULT_ADDR]
+1:
+               mov     %g6, %l1
+               wrpr    %g0, 0x0, %tl
+
+661:           nop
+               .section                .sun4v_1insn_patch, "ax"
+               .word                   661b
+               SET_GL(0)
+               .previous
+
+               wrpr    %g0, RTRAP_PSTATE, %pstate
+
+               mov     %l1, %g6
+               ldx     [%g6 + TI_TASK], %g4
+               LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
+
+               brnz,pn %l3, 1f
+                nop
+
+               call    do_sparc64_fault
+                add    %sp, PTREGS_OFF, %o0
+               ba,pt   %xcc, rtrap
+                nop
+
+1:             cmp     %g3, 2
+               bne,pn  %xcc, 2f
+                nop
+
+               sethi   %hi(tlb_type), %g1
+               lduw    [%g1 + %lo(tlb_type)], %g1
+               cmp     %g1, 3
+               bne,pt  %icc, 1f
+                add    %sp, PTREGS_OFF, %o0
+               mov     %l4, %o2
+               call    sun4v_do_mna
+                mov    %l5, %o1
+               ba,a,pt %xcc, rtrap
+1:             mov     %l4, %o1
+               mov     %l5, %o2
+               call    mem_address_unaligned
+                nop
+               ba,a,pt %xcc, rtrap
+
+2:             sethi   %hi(tlb_type), %g1
+               mov     %l4, %o1
+               lduw    [%g1 + %lo(tlb_type)], %g1
+               mov     %l5, %o2
+               cmp     %g1, 3
+               bne,pt  %icc, 1f
+                add    %sp, PTREGS_OFF, %o0
+               call    sun4v_data_access_exception
+                nop
+               ba,a,pt %xcc, rtrap
+
+1:             call    spitfire_data_access_exception
+                nop
+               ba,a,pt %xcc, rtrap
index 652683c..14bb0d5 100644 (file)
@@ -2824,9 +2824,10 @@ void hugetlb_setup(struct pt_regs *regs)
         * the Data-TLB for huge pages.
         */
        if (tlb_type == cheetah_plus) {
+               bool need_context_reload = false;
                unsigned long ctx;
 
-               spin_lock(&ctx_alloc_lock);
+               spin_lock_irq(&ctx_alloc_lock);
                ctx = mm->context.sparc64_ctx_val;
                ctx &= ~CTX_PGSZ_MASK;
                ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
@@ -2845,9 +2846,12 @@ void hugetlb_setup(struct pt_regs *regs)
                         * also executing in this address space.
                         */
                        mm->context.sparc64_ctx_val = ctx;
-                       on_each_cpu(context_reload, mm, 0);
+                       need_context_reload = true;
                }
-               spin_unlock(&ctx_alloc_lock);
+               spin_unlock_irq(&ctx_alloc_lock);
+
+               if (need_context_reload)
+                       on_each_cpu(context_reload, mm, 0);
        }
 }
 #endif
index 769af90..7597b42 100644 (file)
@@ -181,19 +181,22 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
                             struct kvm_cpuid_entry __user *entries)
 {
        int r, i;
-       struct kvm_cpuid_entry *cpuid_entries;
+       struct kvm_cpuid_entry *cpuid_entries = NULL;
 
        r = -E2BIG;
        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                goto out;
        r = -ENOMEM;
-       cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
-       if (!cpuid_entries)
-               goto out;
-       r = -EFAULT;
-       if (copy_from_user(cpuid_entries, entries,
-                          cpuid->nent * sizeof(struct kvm_cpuid_entry)))
-               goto out_free;
+       if (cpuid->nent) {
+               cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) *
+                                       cpuid->nent);
+               if (!cpuid_entries)
+                       goto out;
+               r = -EFAULT;
+               if (copy_from_user(cpuid_entries, entries,
+                                  cpuid->nent * sizeof(struct kvm_cpuid_entry)))
+                       goto out;
+       }
        for (i = 0; i < cpuid->nent; i++) {
                vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
                vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
@@ -212,9 +215,8 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
        kvm_x86_ops->cpuid_update(vcpu);
        r = kvm_update_cpuid(vcpu);
 
-out_free:
-       vfree(cpuid_entries);
 out:
+       vfree(cpuid_entries);
        return r;
 }
 
index 24e8001..def97b3 100644 (file)
@@ -336,12 +336,12 @@ static gfn_t pse36_gfn_delta(u32 gpte)
 #ifdef CONFIG_X86_64
 static void __set_spte(u64 *sptep, u64 spte)
 {
-       *sptep = spte;
+       WRITE_ONCE(*sptep, spte);
 }
 
 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
 {
-       *sptep = spte;
+       WRITE_ONCE(*sptep, spte);
 }
 
 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
@@ -390,7 +390,7 @@ static void __set_spte(u64 *sptep, u64 spte)
         */
        smp_wmb();
 
-       ssptep->spte_low = sspte.spte_low;
+       WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
 }
 
 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
@@ -400,7 +400,7 @@ static void __update_clear_spte_fast(u64 *sptep, u64 spte)
        ssptep = (union split_spte *)sptep;
        sspte = (union split_spte)spte;
 
-       ssptep->spte_low = sspte.spte_low;
+       WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
 
        /*
         * If we map the spte from present to nonpresent, we should clear
index c805cf4..902d9da 100644 (file)
@@ -2314,6 +2314,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case MSR_AMD64_NB_CFG:
        case MSR_FAM10H_MMIO_CONF_BASE:
        case MSR_AMD64_BU_CFG2:
+       case MSR_IA32_PERF_CTL:
                msr_info->data = 0;
                break;
        case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
@@ -2972,6 +2973,10 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
                              | KVM_VCPUEVENT_VALID_SMM))
                return -EINVAL;
 
+       if (events->exception.injected &&
+           (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
+               return -EINVAL;
+
        process_nmi(vcpu);
        vcpu->arch.exception.pending = events->exception.injected;
        vcpu->arch.exception.nr = events->exception.nr;
@@ -3036,6 +3041,11 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
        if (dbgregs->flags)
                return -EINVAL;
 
+       if (dbgregs->dr6 & ~0xffffffffull)
+               return -EINVAL;
+       if (dbgregs->dr7 & ~0xffffffffull)
+               return -EINVAL;
+
        memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
        kvm_update_dr0123(vcpu);
        vcpu->arch.dr6 = dbgregs->dr6;
@@ -7815,7 +7825,7 @@ int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
 
        slot = id_to_memslot(slots, id);
        if (size) {
-               if (WARN_ON(slot->npages))
+               if (slot->npages)
                        return -EEXIST;
 
                /*
index 7792aba..613cac7 100644 (file)
@@ -195,7 +195,7 @@ static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
        vmdirq->virq = virq;
 
        irq_domain_set_info(domain, virq, vmdirq->irq->vmd_vector, info->chip,
-                           vmdirq, handle_simple_irq, vmd, NULL);
+                           vmdirq, handle_untracked_irq, vmd, NULL);
        return 0;
 }
 
index e28e912..331f6ba 100644 (file)
@@ -13,6 +13,7 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE
        tristate "Asymmetric public-key crypto algorithm subtype"
        select MPILIB
        select CRYPTO_HASH_INFO
+       select CRYPTO_AKCIPHER
        help
          This option provides support for asymmetric public key type handling.
          If signature generation and/or verification are to be used,
index 0d92d0f..c7ba948 100644 (file)
@@ -331,15 +331,6 @@ static int acpi_processor_get_info(struct acpi_device *device)
                pr->throttling.duty_width = acpi_gbl_FADT.duty_width;
 
                pr->pblk = object.processor.pblk_address;
-
-               /*
-                * We don't care about error returns - we just try to mark
-                * these reserved so that nobody else is confused into thinking
-                * that this region might be unused..
-                *
-                * (In particular, allocating the IO range for Cardbus)
-                */
-               request_region(pr->throttling.address, 6, "ACPI CPU throttle");
        }
 
        /*
index 3d5b8a0..c1d138e 100644 (file)
@@ -754,7 +754,8 @@ static int acpi_video_bqc_quirk(struct acpi_video_device *device,
 }
 
 int acpi_video_get_levels(struct acpi_device *device,
-                         struct acpi_video_device_brightness **dev_br)
+                         struct acpi_video_device_brightness **dev_br,
+                         int *pmax_level)
 {
        union acpi_object *obj = NULL;
        int i, max_level = 0, count = 0, level_ac_battery = 0;
@@ -841,6 +842,8 @@ int acpi_video_get_levels(struct acpi_device *device,
 
        br->count = count;
        *dev_br = br;
+       if (pmax_level)
+               *pmax_level = max_level;
 
 out:
        kfree(obj);
@@ -869,7 +872,7 @@ acpi_video_init_brightness(struct acpi_video_device *device)
        struct acpi_video_device_brightness *br = NULL;
        int result = -EINVAL;
 
-       result = acpi_video_get_levels(device->dev, &br);
+       result = acpi_video_get_levels(device->dev, &br, &max_level);
        if (result)
                return result;
        device->brightness = br;
@@ -1737,7 +1740,7 @@ static void acpi_video_run_bcl_for_osi(struct acpi_video_bus *video)
 
        mutex_lock(&video->device_list_lock);
        list_for_each_entry(dev, &video->video_device_list, entry) {
-               if (!acpi_video_device_lcd_query_levels(dev, &levels))
+               if (!acpi_video_device_lcd_query_levels(dev->dev->handle, &levels))
                        kfree(levels);
        }
        mutex_unlock(&video->device_list_lock);
index 0f18dbc..daceb80 100644 (file)
@@ -83,27 +83,22 @@ acpi_hw_write_multiple(u32 value,
 static u8
 acpi_hw_get_access_bit_width(struct acpi_generic_address *reg, u8 max_bit_width)
 {
-       u64 address;
-
        if (!reg->access_width) {
+               if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+                       max_bit_width = 32;
+               }
+
                /*
                 * Detect old register descriptors where only the bit_width field
-                * makes senses. The target address is copied to handle possible
-                * alignment issues.
+                * makes sense.
                 */
-               ACPI_MOVE_64_TO_64(&address, &reg->address);
-               if (!reg->bit_offset && reg->bit_width &&
+               if (reg->bit_width < max_bit_width &&
+                   !reg->bit_offset && reg->bit_width &&
                    ACPI_IS_POWER_OF_TWO(reg->bit_width) &&
-                   ACPI_IS_ALIGNED(reg->bit_width, 8) &&
-                   ACPI_IS_ALIGNED(address, reg->bit_width)) {
+                   ACPI_IS_ALIGNED(reg->bit_width, 8)) {
                        return (reg->bit_width);
-               } else {
-                       if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
-                               return (32);
-                       } else {
-                               return (max_bit_width);
-                       }
                }
+               return (max_bit_width);
        } else {
                return (1 << (reg->access_width + 2));
        }
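
The rewritten acpi_hw_get_access_bit_width() keeps the same encoding on its normal path: a non-zero access_width selects an access size of 1 << (access_width + 2) bits. A tiny standalone check of that mapping (assumed encoding per the ACPI GAS definition; not ACPICA code):

    #include <stdio.h>

    /* GAS access_width encoding: 1..4 select 8/16/32/64-bit accesses;
     * 0 means "unspecified", which is what the fallback path above handles.
     */
    static unsigned int access_bit_width(unsigned int access_width)
    {
            return 1u << (access_width + 2);
    }

    int main(void)
    {
            for (unsigned int w = 1; w <= 4; w++)
                    printf("access_width %u -> %u-bit access\n", w, access_bit_width(w));
            return 0;
    }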
index f170d74..c72e648 100644 (file)
@@ -676,6 +676,15 @@ static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
        if (!pr->flags.throttling)
                return -ENODEV;
 
+       /*
+        * We don't care about error returns - we just try to mark
+        * these reserved so that nobody else is confused into thinking
+        * that this region might be unused.
+        *
+        * (In particular, allocating the IO range for Cardbus)
+        */
+       request_region(pr->throttling.address, 6, "ACPI CPU throttle");
+
        pr->throttling.state = 0;
 
        duty_mask = pr->throttling.state_count - 1;
index a969a7e..85aaf22 100644 (file)
@@ -181,13 +181,17 @@ static char *res_strings[] = {
        "reserved 27", 
        "reserved 28", 
        "reserved 29", 
-       "reserved 30", 
+       "reserved 30", /* FIXME: The strings between 30-40 might be wrong. */
        "reassembly abort: no buffers", 
        "receive buffer overflow", 
        "change in GFC", 
        "receive buffer full", 
        "low priority discard - no receive descriptor", 
        "low priority discard - missing end of packet", 
+       "reserved 37",
+       "reserved 38",
+       "reserved 39",
+       "reserved 40",
        "reserved 41", 
        "reserved 42", 
        "reserved 43", 
index 7d00f29..809dd1e 100644 (file)
@@ -1128,7 +1128,7 @@ static int rx_pkt(struct atm_dev *dev)
        /* make the ptr point to the corresponding buffer desc entry */  
        buf_desc_ptr += desc;     
         if (!desc || (desc > iadev->num_rx_desc) || 
-                      ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) { 
+                      ((buf_desc_ptr->vc_index & 0xffff) >= iadev->num_vc)) {
             free_desc(dev, desc);
             IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
             return -1;
index 36bc11a..9009295 100644 (file)
@@ -1832,7 +1832,7 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
 unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
                                        unsigned int target_freq)
 {
-       clamp_val(target_freq, policy->min, policy->max);
+       target_freq = clamp_val(target_freq, policy->min, policy->max);
 
        return cpufreq_driver->fast_switch(policy, target_freq);
 }
index 3a9c432..0d159b5 100644 (file)
@@ -449,7 +449,7 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
                cpu->acpi_perf_data.states[0].core_frequency =
                                        policy->cpuinfo.max_freq / 1000;
        cpu->valid_pss_table = true;
-       pr_info("_PPC limits will be enforced\n");
+       pr_debug("_PPC limits will be enforced\n");
 
        return;
 
index 52c7395..0d0d452 100644 (file)
@@ -122,6 +122,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
        struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
        unsigned int unit;
+       u32 unit_size;
        int ret;
 
        if (!ctx->u.aes.key_len)
@@ -133,11 +134,17 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
        if (!req->info)
                return -EINVAL;
 
-       for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++)
-               if (!(req->nbytes & (unit_size_map[unit].size - 1)))
-                       break;
+       unit_size = CCP_XTS_AES_UNIT_SIZE__LAST;
+       if (req->nbytes <= unit_size_map[0].size) {
+               for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) {
+                       if (!(req->nbytes & (unit_size_map[unit].size - 1))) {
+                               unit_size = unit_size_map[unit].value;
+                               break;
+                       }
+               }
+       }
 
-       if ((unit_size_map[unit].value == CCP_XTS_AES_UNIT_SIZE__LAST) ||
+       if ((unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) ||
            (ctx->u.aes.key_len != AES_KEYSIZE_128)) {
                /* Use the fallback to process the request for any
                 * unsupported unit sizes or key sizes
@@ -158,7 +165,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
        rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
        rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
                                           : CCP_AES_ACTION_DECRYPT;
-       rctx->cmd.u.xts.unit_size = unit_size_map[unit].value;
+       rctx->cmd.u.xts.unit_size = unit_size;
        rctx->cmd.u.xts.key = &ctx->u.aes.key_sg;
        rctx->cmd.u.xts.key_len = ctx->u.aes.key_len;
        rctx->cmd.u.xts.iv = &rctx->iv_sg;
index 6eefaa2..63464e8 100644 (file)
@@ -1986,7 +1986,7 @@ err_algs:
                                        &dd->pdata->algs_info[i].algs_list[j]);
 err_pm:
        pm_runtime_disable(dev);
-       if (dd->polling_mode)
+       if (!dd->polling_mode)
                dma_release_channel(dd->dma_lch);
 data_err:
        dev_err(dev, "initialization failed.\n");
index 4a2c07e..6355ab3 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/seq_file.h>
 #include <linux/poll.h>
 #include <linux/reservation.h>
+#include <linux/mm.h>
 
 #include <uapi/linux/dma-buf.h>
 
@@ -90,7 +91,7 @@ static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
        dmabuf = file->private_data;
 
        /* check for overflowing the buffer's size */
-       if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
+       if (vma->vm_pgoff + vma_pages(vma) >
            dmabuf->size >> PAGE_SHIFT)
                return -EINVAL;
 
@@ -723,11 +724,11 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
                return -EINVAL;
 
        /* check for offset overflow */
-       if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff)
+       if (pgoff + vma_pages(vma) < pgoff)
                return -EOVERFLOW;
 
        /* check for overflowing the buffer's size */
-       if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
+       if (pgoff + vma_pages(vma) >
            dmabuf->size >> PAGE_SHIFT)
                return -EINVAL;
 
index c0bd572..9566a62 100644 (file)
 #include <linux/reservation.h>
 #include <linux/export.h>
 
+/**
+ * DOC: Reservation Object Overview
+ *
+ * The reservation object provides a mechanism to manage shared and
+ * exclusive fences associated with a buffer.  A reservation object
+ * can have attached one exclusive fence (normally associated with
+ * write operations) or N shared fences (read operations).  The RCU
+ * mechanism is used to protect read access to fences from locked
+ * write-side updates.
+ */
+
 DEFINE_WW_CLASS(reservation_ww_class);
 EXPORT_SYMBOL(reservation_ww_class);
 
@@ -43,9 +54,17 @@ EXPORT_SYMBOL(reservation_seqcount_class);
 
 const char reservation_seqcount_string[] = "reservation_seqcount";
 EXPORT_SYMBOL(reservation_seqcount_string);
-/*
- * Reserve space to add a shared fence to a reservation_object,
- * must be called with obj->lock held.
+
+/**
+ * reservation_object_reserve_shared - Reserve space to add a shared
+ * fence to a reservation_object.
+ * @obj: reservation object
+ *
+ * Should be called before reservation_object_add_shared_fence().  Must
+ * be called with obj->lock held.
+ *
+ * RETURNS
+ * Zero for success, or -errno
  */
 int reservation_object_reserve_shared(struct reservation_object *obj)
 {
@@ -180,7 +199,11 @@ done:
                fence_put(old_fence);
 }
 
-/*
+/**
+ * reservation_object_add_shared_fence - Add a fence to a shared slot
+ * @obj: the reservation object
+ * @fence: the shared fence to add
+ *
  * Add a fence to a shared slot, obj->lock must be held, and
  * reservation_object_reserve_shared_fence has been called.
  */
@@ -200,6 +223,13 @@ void reservation_object_add_shared_fence(struct reservation_object *obj,
 }
 EXPORT_SYMBOL(reservation_object_add_shared_fence);
 
+/**
+ * reservation_object_add_excl_fence - Add an exclusive fence.
+ * @obj: the reservation object
+ * @fence: the exclusive fence to add
+ *
+ * Add a fence to the exclusive slot.  The obj->lock must be held.
+ */
 void reservation_object_add_excl_fence(struct reservation_object *obj,
                                       struct fence *fence)
 {
@@ -233,6 +263,18 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
 }
 EXPORT_SYMBOL(reservation_object_add_excl_fence);
 
+/**
+ * reservation_object_get_fences_rcu - Get an object's shared and exclusive
+ * fences without the update-side lock held
+ * @obj: the reservation object
+ * @pfence_excl: the returned exclusive fence (or NULL)
+ * @pshared_count: the number of shared fences returned
+ * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
+ * the required size, and must be freed by caller)
+ *
+ * RETURNS
+ * Zero or -errno
+ */
 int reservation_object_get_fences_rcu(struct reservation_object *obj,
                                      struct fence **pfence_excl,
                                      unsigned *pshared_count,
@@ -319,6 +361,18 @@ unlock:
 }
 EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
 
+/**
+ * reservation_object_wait_timeout_rcu - Wait on a reservation object's
+ * shared and/or exclusive fences.
+ * @obj: the reservation object
+ * @wait_all: if true, wait on all fences, else wait on just exclusive fence
+ * @intr: if true, do interruptible wait
+ * @timeout: timeout value in jiffies or zero to return immediately
+ *
+ * RETURNS
+ * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
+ * greater than zero on success.
+ */
 long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
                                         bool wait_all, bool intr,
                                         unsigned long timeout)
@@ -416,6 +470,16 @@ reservation_object_test_signaled_single(struct fence *passed_fence)
        return ret;
 }
 
+/**
+ * reservation_object_test_signaled_rcu - Test if a reservation object's
+ * fences have been signaled.
+ * @obj: the reservation object
+ * @test_all: if true, test all fences, otherwise only test the exclusive
+ * fence
+ *
+ * RETURNS
+ * true if all fences signaled, else false
+ */
 bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
                                          bool test_all)
 {
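
The new DOC comment above describes a reservation object as at most one exclusive (write) fence plus any number of shared (read) fences, where space for a shared slot must be reserved before a fence is published and adding an exclusive fence supersedes the shared ones. A toy standalone model of just that slot bookkeeping, not the DRM/fence API itself:

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy model: one exclusive slot, a growable array of shared slots. */
    struct toy_fence { int id; };

    struct toy_resv {
            struct toy_fence *excl;        /* at most one writer */
            struct toy_fence **shared;     /* zero or more readers */
            unsigned int shared_count, shared_max;
    };

    /* Grow the shared array before publishing a new reader fence. */
    static int toy_reserve_shared(struct toy_resv *r)
    {
            if (r->shared_count < r->shared_max)
                    return 0;
            unsigned int max = r->shared_max ? r->shared_max * 2 : 4;
            struct toy_fence **p = realloc(r->shared, max * sizeof(*p));
            if (!p)
                    return -1;
            r->shared = p;
            r->shared_max = max;
            return 0;
    }

    static void toy_add_shared(struct toy_resv *r, struct toy_fence *f)
    {
            r->shared[r->shared_count++] = f;   /* space was reserved above */
    }

    /* Adding an exclusive fence supersedes all shared fences. */
    static void toy_add_excl(struct toy_resv *r, struct toy_fence *f)
    {
            r->excl = f;
            r->shared_count = 0;
    }

    int main(void)
    {
            struct toy_resv r = { 0 };
            struct toy_fence read1 = { 1 }, read2 = { 2 }, write = { 3 };

            if (toy_reserve_shared(&r) == 0) toy_add_shared(&r, &read1);
            if (toy_reserve_shared(&r) == 0) toy_add_shared(&r, &read2);
            printf("shared=%u excl=%d\n", r.shared_count, r.excl ? r.excl->id : 0);

            toy_add_excl(&r, &write);
            printf("shared=%u excl=%d\n", r.shared_count, r.excl ? r.excl->id : 0);

            free(r.shared);
            return 0;
    }

The reserve-then-add split mirrors the real API's intent: the array can be grown first so that publishing a new fence never exposes a partially updated state to readers.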
index d39014d..fc5f197 100644 (file)
@@ -29,7 +29,6 @@
 
 #include <mach/hardware.h>
 #include <mach/platform.h>
-#include <mach/irqs.h>
 
 #define LPC32XX_GPIO_P3_INP_STATE              _GPREG(0x000)
 #define LPC32XX_GPIO_P3_OUTP_SET               _GPREG(0x004)
@@ -371,61 +370,16 @@ static int lpc32xx_gpio_request(struct gpio_chip *chip, unsigned pin)
 
 static int lpc32xx_gpio_to_irq_p01(struct gpio_chip *chip, unsigned offset)
 {
-       return IRQ_LPC32XX_P0_P1_IRQ;
+       return -ENXIO;
 }
 
-static const char lpc32xx_gpio_to_irq_gpio_p3_table[] = {
-       IRQ_LPC32XX_GPIO_00,
-       IRQ_LPC32XX_GPIO_01,
-       IRQ_LPC32XX_GPIO_02,
-       IRQ_LPC32XX_GPIO_03,
-       IRQ_LPC32XX_GPIO_04,
-       IRQ_LPC32XX_GPIO_05,
-};
-
 static int lpc32xx_gpio_to_irq_gpio_p3(struct gpio_chip *chip, unsigned offset)
 {
-       if (offset < ARRAY_SIZE(lpc32xx_gpio_to_irq_gpio_p3_table))
-               return lpc32xx_gpio_to_irq_gpio_p3_table[offset];
        return -ENXIO;
 }
 
-static const char lpc32xx_gpio_to_irq_gpi_p3_table[] = {
-       IRQ_LPC32XX_GPI_00,
-       IRQ_LPC32XX_GPI_01,
-       IRQ_LPC32XX_GPI_02,
-       IRQ_LPC32XX_GPI_03,
-       IRQ_LPC32XX_GPI_04,
-       IRQ_LPC32XX_GPI_05,
-       IRQ_LPC32XX_GPI_06,
-       IRQ_LPC32XX_GPI_07,
-       IRQ_LPC32XX_GPI_08,
-       IRQ_LPC32XX_GPI_09,
-       -ENXIO, /* 10 */
-       -ENXIO, /* 11 */
-       -ENXIO, /* 12 */
-       -ENXIO, /* 13 */
-       -ENXIO, /* 14 */
-       -ENXIO, /* 15 */
-       -ENXIO, /* 16 */
-       -ENXIO, /* 17 */
-       -ENXIO, /* 18 */
-       IRQ_LPC32XX_GPI_19,
-       -ENXIO, /* 20 */
-       -ENXIO, /* 21 */
-       -ENXIO, /* 22 */
-       -ENXIO, /* 23 */
-       -ENXIO, /* 24 */
-       -ENXIO, /* 25 */
-       -ENXIO, /* 26 */
-       -ENXIO, /* 27 */
-       IRQ_LPC32XX_GPI_28,
-};
-
 static int lpc32xx_gpio_to_irq_gpi_p3(struct gpio_chip *chip, unsigned offset)
 {
-       if (offset < ARRAY_SIZE(lpc32xx_gpio_to_irq_gpi_p3_table))
-               return lpc32xx_gpio_to_irq_gpi_p3_table[offset];
        return -ENXIO;
 }
 
index d407f90..24f60d2 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/cdev.h>
 #include <linux/fs.h>
 #include <linux/uaccess.h>
+#include <linux/compat.h>
 #include <uapi/linux/gpio.h>
 
 #include "gpiolib.h"
@@ -316,7 +317,7 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
        struct gpio_device *gdev = filp->private_data;
        struct gpio_chip *chip = gdev->chip;
-       int __user *ip = (int __user *)arg;
+       void __user *ip = (void __user *)arg;
 
        /* We fail any subsequent ioctl():s when the chip is gone */
        if (!chip)
@@ -388,6 +389,14 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        return -EINVAL;
 }
 
+#ifdef CONFIG_COMPAT
+static long gpio_ioctl_compat(struct file *filp, unsigned int cmd,
+                             unsigned long arg)
+{
+       return gpio_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
 /**
  * gpio_chrdev_open() - open the chardev for ioctl operations
  * @inode: inode for this chardev
@@ -431,7 +440,9 @@ static const struct file_operations gpio_fileops = {
        .owner = THIS_MODULE,
        .llseek = noop_llseek,
        .unlocked_ioctl = gpio_ioctl,
-       .compat_ioctl = gpio_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = gpio_ioctl_compat,
+#endif
 };
 
 static void gpiodevice_release(struct device *dev)
@@ -618,6 +629,8 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
                goto err_free_label;
        }
 
+       spin_unlock_irqrestore(&gpio_lock, flags);
+
        for (i = 0; i < chip->ngpio; i++) {
                struct gpio_desc *desc = &gdev->descs[i];
 
@@ -649,8 +662,6 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
                }
        }
 
-       spin_unlock_irqrestore(&gpio_lock, flags);
-
 #ifdef CONFIG_PINCTRL
        INIT_LIST_HEAD(&gdev->pin_ranges);
 #endif
@@ -1356,10 +1367,13 @@ done:
 /*
  * This descriptor validation needs to be inserted verbatim into each
  * function taking a descriptor, so we need to use a preprocessor
- * macro to avoid endless duplication.
+ * macro to avoid endless duplication. If the desc is NULL it is an
+ * optional GPIO and calls should just bail out.
  */
 #define VALIDATE_DESC(desc) do { \
-       if (!desc || !desc->gdev) { \
+       if (!desc) \
+               return 0; \
+       if (!desc->gdev) { \
                pr_warn("%s: invalid GPIO\n", __func__); \
                return -EINVAL; \
        } \
@@ -1370,7 +1384,9 @@ done:
        } } while (0)
 
 #define VALIDATE_DESC_VOID(desc) do { \
-       if (!desc || !desc->gdev) { \
+       if (!desc) \
+               return; \
+       if (!desc->gdev) { \
                pr_warn("%s: invalid GPIO\n", __func__); \
                return; \
        } \
@@ -2066,17 +2082,30 @@ EXPORT_SYMBOL_GPL(gpiod_to_irq);
  */
 int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
 {
-       if (offset >= chip->ngpio)
-               return -EINVAL;
+       struct gpio_desc *desc;
+
+       desc = gpiochip_get_desc(chip, offset);
+       if (IS_ERR(desc))
+               return PTR_ERR(desc);
+
+       /* Flush direction if something changed behind our back */
+       if (chip->get_direction) {
+               int dir = chip->get_direction(chip, offset);
+
+               if (dir)
+                       clear_bit(FLAG_IS_OUT, &desc->flags);
+               else
+                       set_bit(FLAG_IS_OUT, &desc->flags);
+       }
 
-       if (test_bit(FLAG_IS_OUT, &chip->gpiodev->descs[offset].flags)) {
+       if (test_bit(FLAG_IS_OUT, &desc->flags)) {
                chip_err(chip,
                          "%s: tried to flag a GPIO set as output for IRQ\n",
                          __func__);
                return -EIO;
        }
 
-       set_bit(FLAG_USED_AS_IRQ, &chip->gpiodev->descs[offset].flags);
+       set_bit(FLAG_USED_AS_IRQ, &desc->flags);
        return 0;
 }
 EXPORT_SYMBOL_GPL(gpiochip_lock_as_irq);
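
The VALIDATE_DESC()/VALIDATE_DESC_VOID() change above turns a NULL descriptor into a silent no-op instead of an -EINVAL with a warning, which is the behaviour that optional GPIOs obtained through the *_optional() getters rely on. A minimal consumer sketch, assuming a hypothetical driver with an optional "reset" line (all names are illustrative, not part of this commit):

#include <linux/gpio/consumer.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_probe(struct device *dev)
{
        struct gpio_desc *reset;

        /* Legitimately NULL when no "reset" GPIO is described for the device */
        reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
        if (IS_ERR(reset))
                return PTR_ERR(reset);

        /* With the change above, these are silent no-ops for a NULL descriptor */
        gpiod_set_value(reset, 1);
        gpiod_set_value(reset, 0);

        return 0;
}
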
index fef1b04..0813c2f 100644 (file)
  *
  */
 
+static void hdlcd_crtc_cleanup(struct drm_crtc *crtc)
+{
+       struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
+
+       /* stop the controller on cleanup */
+       hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0);
+       drm_crtc_cleanup(crtc);
+}
+
 static const struct drm_crtc_funcs hdlcd_crtc_funcs = {
-       .destroy = drm_crtc_cleanup,
+       .destroy = hdlcd_crtc_cleanup,
        .set_config = drm_atomic_helper_set_config,
        .page_flip = drm_atomic_helper_page_flip,
        .reset = drm_atomic_helper_crtc_reset,
@@ -97,7 +106,7 @@ static void hdlcd_crtc_mode_set_nofb(struct drm_crtc *crtc)
        struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
        struct drm_display_mode *m = &crtc->state->adjusted_mode;
        struct videomode vm;
-       unsigned int polarities, line_length, err;
+       unsigned int polarities, err;
 
        vm.vfront_porch = m->crtc_vsync_start - m->crtc_vdisplay;
        vm.vback_porch = m->crtc_vtotal - m->crtc_vsync_end;
@@ -113,23 +122,18 @@ static void hdlcd_crtc_mode_set_nofb(struct drm_crtc *crtc)
        if (m->flags & DRM_MODE_FLAG_PVSYNC)
                polarities |= HDLCD_POLARITY_VSYNC;
 
-       line_length = crtc->primary->state->fb->pitches[0];
-
        /* Allow max number of outstanding requests and largest burst size */
        hdlcd_write(hdlcd, HDLCD_REG_BUS_OPTIONS,
                    HDLCD_BUS_MAX_OUTSTAND | HDLCD_BUS_BURST_16);
 
-       hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, line_length);
-       hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_PITCH, line_length);
-       hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_COUNT, m->crtc_vdisplay - 1);
        hdlcd_write(hdlcd, HDLCD_REG_V_DATA, m->crtc_vdisplay - 1);
        hdlcd_write(hdlcd, HDLCD_REG_V_BACK_PORCH, vm.vback_porch - 1);
        hdlcd_write(hdlcd, HDLCD_REG_V_FRONT_PORCH, vm.vfront_porch - 1);
        hdlcd_write(hdlcd, HDLCD_REG_V_SYNC, vm.vsync_len - 1);
+       hdlcd_write(hdlcd, HDLCD_REG_H_DATA, m->crtc_hdisplay - 1);
        hdlcd_write(hdlcd, HDLCD_REG_H_BACK_PORCH, vm.hback_porch - 1);
        hdlcd_write(hdlcd, HDLCD_REG_H_FRONT_PORCH, vm.hfront_porch - 1);
        hdlcd_write(hdlcd, HDLCD_REG_H_SYNC, vm.hsync_len - 1);
-       hdlcd_write(hdlcd, HDLCD_REG_H_DATA, m->crtc_hdisplay - 1);
        hdlcd_write(hdlcd, HDLCD_REG_POLARITIES, polarities);
 
        err = hdlcd_set_pxl_fmt(crtc);
@@ -144,20 +148,19 @@ static void hdlcd_crtc_enable(struct drm_crtc *crtc)
        struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
 
        clk_prepare_enable(hdlcd->clk);
+       hdlcd_crtc_mode_set_nofb(crtc);
        hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 1);
-       drm_crtc_vblank_on(crtc);
 }
 
 static void hdlcd_crtc_disable(struct drm_crtc *crtc)
 {
        struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
 
-       if (!crtc->primary->fb)
+       if (!crtc->state->active)
                return;
 
-       clk_disable_unprepare(hdlcd->clk);
        hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0);
-       drm_crtc_vblank_off(crtc);
+       clk_disable_unprepare(hdlcd->clk);
 }
 
 static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc,
@@ -179,20 +182,17 @@ static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc,
 static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc,
                                    struct drm_crtc_state *state)
 {
-       struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
-       unsigned long flags;
-
-       if (crtc->state->event) {
-               struct drm_pending_vblank_event *event = crtc->state->event;
+       struct drm_pending_vblank_event *event = crtc->state->event;
 
+       if (event) {
                crtc->state->event = NULL;
-               event->pipe = drm_crtc_index(crtc);
-
-               WARN_ON(drm_crtc_vblank_get(crtc) != 0);
 
-               spin_lock_irqsave(&crtc->dev->event_lock, flags);
-               list_add_tail(&event->base.link, &hdlcd->event_list);
-               spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+               spin_lock_irq(&crtc->dev->event_lock);
+               if (drm_crtc_vblank_get(crtc) == 0)
+                       drm_crtc_arm_vblank_event(crtc, event);
+               else
+                       drm_crtc_send_vblank_event(crtc, event);
+               spin_unlock_irq(&crtc->dev->event_lock);
        }
 }
 
@@ -225,6 +225,15 @@ static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = {
 static int hdlcd_plane_atomic_check(struct drm_plane *plane,
                                    struct drm_plane_state *state)
 {
+       u32 src_w, src_h;
+
+       src_w = state->src_w >> 16;
+       src_h = state->src_h >> 16;
+
+       /* we can't do any scaling of the plane source */
+       if ((src_w != state->crtc_w) || (src_h != state->crtc_h))
+               return -EINVAL;
+
        return 0;
 }
 
@@ -233,20 +242,31 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane,
 {
        struct hdlcd_drm_private *hdlcd;
        struct drm_gem_cma_object *gem;
+       unsigned int depth, bpp;
+       u32 src_w, src_h, dest_w, dest_h;
        dma_addr_t scanout_start;
 
-       if (!plane->state->crtc || !plane->state->fb)
+       if (!plane->state->fb)
                return;
 
-       hdlcd = crtc_to_hdlcd_priv(plane->state->crtc);
+       drm_fb_get_bpp_depth(plane->state->fb->pixel_format, &depth, &bpp);
+       src_w = plane->state->src_w >> 16;
+       src_h = plane->state->src_h >> 16;
+       dest_w = plane->state->crtc_w;
+       dest_h = plane->state->crtc_h;
        gem = drm_fb_cma_get_gem_obj(plane->state->fb, 0);
-       scanout_start = gem->paddr;
+       scanout_start = gem->paddr + plane->state->fb->offsets[0] +
+               plane->state->crtc_y * plane->state->fb->pitches[0] +
+               plane->state->crtc_x * bpp / 8;
+
+       hdlcd = plane->dev->dev_private;
+       hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, plane->state->fb->pitches[0]);
+       hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_PITCH, plane->state->fb->pitches[0]);
+       hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_COUNT, dest_h - 1);
        hdlcd_write(hdlcd, HDLCD_REG_FB_BASE, scanout_start);
 }
 
 static const struct drm_plane_helper_funcs hdlcd_plane_helper_funcs = {
-       .prepare_fb = NULL,
-       .cleanup_fb = NULL,
        .atomic_check = hdlcd_plane_atomic_check,
        .atomic_update = hdlcd_plane_atomic_update,
 };
@@ -294,16 +314,6 @@ static struct drm_plane *hdlcd_plane_init(struct drm_device *drm)
        return plane;
 }
 
-void hdlcd_crtc_suspend(struct drm_crtc *crtc)
-{
-       hdlcd_crtc_disable(crtc);
-}
-
-void hdlcd_crtc_resume(struct drm_crtc *crtc)
-{
-       hdlcd_crtc_enable(crtc);
-}
-
 int hdlcd_setup_crtc(struct drm_device *drm)
 {
        struct hdlcd_drm_private *hdlcd = drm->dev_private;
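
With the FB_LINE_* register writes moved out of mode_set_nofb, hdlcd_plane_atomic_update() above derives the scanout base address from the plane position, the framebuffer pitch and the bits per pixel. The same arithmetic restated as a standalone helper, with purely illustrative numbers in the comment (a sketch, not part of this commit):

#include <linux/types.h>

static dma_addr_t example_scanout_addr(dma_addr_t paddr, u32 fb_offset,
                                       u32 pitch, u32 bpp,
                                       u32 crtc_x, u32 crtc_y)
{
        /*
         * e.g. XRGB8888 (bpp = 32), pitch = 4096, crtc_x = 16, crtc_y = 10:
         * offset = 10 * 4096 + 16 * 32 / 8 = 41024 bytes past paddr + fb_offset.
         */
        return paddr + fb_offset + crtc_y * pitch + crtc_x * bpp / 8;
}
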
index b987c63..a6ca36f 100644 (file)
@@ -49,8 +49,6 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags)
        atomic_set(&hdlcd->dma_end_count, 0);
 #endif
 
-       INIT_LIST_HEAD(&hdlcd->event_list);
-
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        hdlcd->mmio = devm_ioremap_resource(drm->dev, res);
        if (IS_ERR(hdlcd->mmio)) {
@@ -84,11 +82,7 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags)
                goto setup_fail;
        }
 
-       pm_runtime_enable(drm->dev);
-
-       pm_runtime_get_sync(drm->dev);
        ret = drm_irq_install(drm, platform_get_irq(pdev, 0));
-       pm_runtime_put_sync(drm->dev);
        if (ret < 0) {
                DRM_ERROR("failed to install IRQ handler\n");
                goto irq_fail;
@@ -164,24 +158,9 @@ static irqreturn_t hdlcd_irq(int irq, void *arg)
                atomic_inc(&hdlcd->vsync_count);
 
 #endif
-       if (irq_status & HDLCD_INTERRUPT_VSYNC) {
-               bool events_sent = false;
-               unsigned long flags;
-               struct drm_pending_vblank_event *e, *t;
-
+       if (irq_status & HDLCD_INTERRUPT_VSYNC)
                drm_crtc_handle_vblank(&hdlcd->crtc);
 
-               spin_lock_irqsave(&drm->event_lock, flags);
-               list_for_each_entry_safe(e, t, &hdlcd->event_list, base.link) {
-                       list_del(&e->base.link);
-                       drm_crtc_send_vblank_event(&hdlcd->crtc, e);
-                       events_sent = true;
-               }
-               if (events_sent)
-                       drm_crtc_vblank_put(&hdlcd->crtc);
-               spin_unlock_irqrestore(&drm->event_lock, flags);
-       }
-
        /* acknowledge interrupt(s) */
        hdlcd_write(hdlcd, HDLCD_REG_INT_CLEAR, irq_status);
 
@@ -275,6 +254,7 @@ static int hdlcd_show_pxlclock(struct seq_file *m, void *arg)
 static struct drm_info_list hdlcd_debugfs_list[] = {
        { "interrupt_count", hdlcd_show_underrun_count, 0 },
        { "clocks", hdlcd_show_pxlclock, 0 },
+       { "fb", drm_fb_cma_debugfs_show, 0 },
 };
 
 static int hdlcd_debugfs_init(struct drm_minor *minor)
@@ -357,6 +337,8 @@ static int hdlcd_drm_bind(struct device *dev)
                return -ENOMEM;
 
        drm->dev_private = hdlcd;
+       dev_set_drvdata(dev, drm);
+
        hdlcd_setup_mode_config(drm);
        ret = hdlcd_load(drm, 0);
        if (ret)
@@ -366,14 +348,18 @@ static int hdlcd_drm_bind(struct device *dev)
        if (ret)
                goto err_unload;
 
-       dev_set_drvdata(dev, drm);
-
        ret = component_bind_all(dev, drm);
        if (ret) {
                DRM_ERROR("Failed to bind all components\n");
                goto err_unregister;
        }
 
+       ret = pm_runtime_set_active(dev);
+       if (ret)
+               goto err_pm_active;
+
+       pm_runtime_enable(dev);
+
        ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
        if (ret < 0) {
                DRM_ERROR("failed to initialise vblank\n");
@@ -399,16 +385,16 @@ err_fbdev:
        drm_mode_config_cleanup(drm);
        drm_vblank_cleanup(drm);
 err_vblank:
+       pm_runtime_disable(drm->dev);
+err_pm_active:
        component_unbind_all(dev, drm);
 err_unregister:
        drm_dev_unregister(drm);
 err_unload:
-       pm_runtime_get_sync(drm->dev);
        drm_irq_uninstall(drm);
-       pm_runtime_put_sync(drm->dev);
-       pm_runtime_disable(drm->dev);
        of_reserved_mem_device_release(drm->dev);
 err_free:
+       dev_set_drvdata(dev, NULL);
        drm_dev_unref(drm);
 
        return ret;
@@ -495,30 +481,34 @@ MODULE_DEVICE_TABLE(of, hdlcd_of_match);
 static int __maybe_unused hdlcd_pm_suspend(struct device *dev)
 {
        struct drm_device *drm = dev_get_drvdata(dev);
-       struct drm_crtc *crtc;
+       struct hdlcd_drm_private *hdlcd = drm ? drm->dev_private : NULL;
 
-       if (pm_runtime_suspended(dev))
+       if (!hdlcd)
                return 0;
 
-       drm_modeset_lock_all(drm);
-       list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
-               hdlcd_crtc_suspend(crtc);
-       drm_modeset_unlock_all(drm);
+       drm_kms_helper_poll_disable(drm);
+
+       hdlcd->state = drm_atomic_helper_suspend(drm);
+       if (IS_ERR(hdlcd->state)) {
+               drm_kms_helper_poll_enable(drm);
+               return PTR_ERR(hdlcd->state);
+       }
+
        return 0;
 }
 
 static int __maybe_unused hdlcd_pm_resume(struct device *dev)
 {
        struct drm_device *drm = dev_get_drvdata(dev);
-       struct drm_crtc *crtc;
+       struct hdlcd_drm_private *hdlcd = drm ? drm->dev_private : NULL;
 
-       if (!pm_runtime_suspended(dev))
+       if (!hdlcd)
                return 0;
 
-       drm_modeset_lock_all(drm);
-       list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
-               hdlcd_crtc_resume(crtc);
-       drm_modeset_unlock_all(drm);
+       drm_atomic_helper_resume(drm, hdlcd->state);
+       drm_kms_helper_poll_enable(drm);
+       pm_runtime_set_active(dev);
+
        return 0;
 }
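
The suspend/resume path above now snapshots the whole atomic state with drm_atomic_helper_suspend() and replays it with drm_atomic_helper_resume() instead of walking CRTCs by hand. The pattern restated compactly for a generic atomic driver (a sketch only; struct example_priv stands in for the driver's private data):

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <linux/device.h>
#include <linux/err.h>

struct example_priv {
        struct drm_atomic_state *state;
};

static int example_pm_suspend(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);
        struct example_priv *priv = drm->dev_private;

        drm_kms_helper_poll_disable(drm);

        /* Disables all outputs and returns the state needed to restore them */
        priv->state = drm_atomic_helper_suspend(drm);
        if (IS_ERR(priv->state)) {
                drm_kms_helper_poll_enable(drm);
                return PTR_ERR(priv->state);
        }

        return 0;
}

static int example_pm_resume(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);
        struct example_priv *priv = drm->dev_private;

        drm_atomic_helper_resume(drm, priv->state);
        drm_kms_helper_poll_enable(drm);

        return 0;
}
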
 
index aa23478..e3950a0 100644 (file)
@@ -9,10 +9,9 @@ struct hdlcd_drm_private {
        void __iomem                    *mmio;
        struct clk                      *clk;
        struct drm_fbdev_cma            *fbdev;
-       struct drm_framebuffer          *fb;
-       struct list_head                event_list;
        struct drm_crtc                 crtc;
        struct drm_plane                *plane;
+       struct drm_atomic_state         *state;
 #ifdef CONFIG_DEBUG_FS
        atomic_t buffer_underrun_count;
        atomic_t bus_error_count;
@@ -36,7 +35,5 @@ static inline u32 hdlcd_read(struct hdlcd_drm_private *hdlcd, unsigned int reg)
 
 int hdlcd_setup_crtc(struct drm_device *dev);
 void hdlcd_set_scanout(struct hdlcd_drm_private *hdlcd);
-void hdlcd_crtc_suspend(struct drm_crtc *crtc);
-void hdlcd_crtc_resume(struct drm_crtc *crtc);
 
 #endif /* __HDLCD_DRV_H__ */
index cf23a75..bd12231 100644 (file)
@@ -391,12 +391,11 @@ void atmel_hlcdc_crtc_reset(struct drm_crtc *crtc)
 {
        struct atmel_hlcdc_crtc_state *state;
 
-       if (crtc->state && crtc->state->mode_blob)
-               drm_property_unreference_blob(crtc->state->mode_blob);
-
        if (crtc->state) {
+               __drm_atomic_helper_crtc_destroy_state(crtc->state);
                state = drm_crtc_state_to_atmel_hlcdc_crtc_state(crtc->state);
                kfree(state);
+               crtc->state = NULL;
        }
 
        state = kzalloc(sizeof(*state), GFP_KERNEL);
@@ -415,8 +414,9 @@ atmel_hlcdc_crtc_duplicate_state(struct drm_crtc *crtc)
                return NULL;
 
        state = kmalloc(sizeof(*state), GFP_KERNEL);
-       if (state)
-               __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
+       if (!state)
+               return NULL;
+       __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
 
        cur = drm_crtc_state_to_atmel_hlcdc_crtc_state(crtc->state);
        state->output_mode = cur->output_mode;
index 3ff1ed7..c204ef3 100644 (file)
@@ -351,6 +351,8 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
        drm_property_unreference_blob(state->mode_blob);
        state->mode_blob = NULL;
 
+       memset(&state->mode, 0, sizeof(state->mode));
+
        if (blob) {
                if (blob->length != sizeof(struct drm_mode_modeinfo) ||
                    drm_mode_convert_umode(&state->mode,
@@ -363,7 +365,6 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
                DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
                                 state->mode.name, state);
        } else {
-               memset(&state->mode, 0, sizeof(state->mode));
                state->enable = false;
                DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
                                 state);
index d2a6d95..0e3cc66 100644 (file)
@@ -2821,8 +2821,6 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
                        goto out;
                }
 
-               drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
-
                /*
                 * Check whether the primary plane supports the fb pixel format.
                 * Drivers not implementing the universal planes API use a
@@ -4841,7 +4839,8 @@ bool drm_property_change_valid_get(struct drm_property *property,
                if (value == 0)
                        return true;
 
-               return _object_find(property->dev, value, property->values[0]) != NULL;
+               *ref = _object_find(property->dev, value, property->values[0]);
+               return *ref != NULL;
        }
 
        for (i = 0; i < property->num_values; i++)
index 172cafe..5075fae 100644 (file)
@@ -445,7 +445,7 @@ err_cma_destroy:
 err_fb_info_destroy:
        drm_fb_helper_release_fbi(helper);
 err_gem_free_object:
-       dev->driver->gem_free_object(&obj->base);
+       drm_gem_object_unreference_unlocked(&obj->base);
        return ret;
 }
 EXPORT_SYMBOL(drm_fbdev_cma_create_with_funcs);
index e1ab008..1d6c335 100644 (file)
@@ -121,7 +121,7 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
        return cma_obj;
 
 error:
-       drm->driver->gem_free_object(&cma_obj->base);
+       drm_gem_object_unreference_unlocked(&cma_obj->base);
        return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(drm_gem_cma_create);
@@ -162,18 +162,12 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv,
         * and handle has the id what user can see.
         */
        ret = drm_gem_handle_create(file_priv, gem_obj, handle);
-       if (ret)
-               goto err_handle_create;
-
        /* drop reference from allocate - handle holds it now. */
        drm_gem_object_unreference_unlocked(gem_obj);
+       if (ret)
+               return ERR_PTR(ret);
 
        return cma_obj;
-
-err_handle_create:
-       drm->driver->gem_free_object(gem_obj);
-
-       return ERR_PTR(ret);
 }
 
 /**
index 7def3d5..e5e6f50 100644 (file)
@@ -1518,6 +1518,8 @@ int drm_mode_convert_umode(struct drm_display_mode *out,
        if (out->status != MODE_OK)
                goto out;
 
+       drm_mode_set_crtcinfo(out, CRTC_INTERLACE_HALVE_V);
+
        ret = 0;
 
 out:
index 1f14b60..8265665 100644 (file)
@@ -97,8 +97,8 @@ static struct imx_drm_crtc *imx_drm_find_crtc(struct drm_crtc *crtc)
        return NULL;
 }
 
-int imx_drm_set_bus_format_pins(struct drm_encoder *encoder, u32 bus_format,
-               int hsync_pin, int vsync_pin)
+int imx_drm_set_bus_config(struct drm_encoder *encoder, u32 bus_format,
+               int hsync_pin, int vsync_pin, u32 bus_flags)
 {
        struct imx_drm_crtc_helper_funcs *helper;
        struct imx_drm_crtc *imx_crtc;
@@ -110,14 +110,17 @@ int imx_drm_set_bus_format_pins(struct drm_encoder *encoder, u32 bus_format,
        helper = &imx_crtc->imx_drm_helper_funcs;
        if (helper->set_interface_pix_fmt)
                return helper->set_interface_pix_fmt(encoder->crtc,
-                                       bus_format, hsync_pin, vsync_pin);
+                                       bus_format, hsync_pin, vsync_pin,
+                                       bus_flags);
        return 0;
 }
-EXPORT_SYMBOL_GPL(imx_drm_set_bus_format_pins);
+EXPORT_SYMBOL_GPL(imx_drm_set_bus_config);
 
 int imx_drm_set_bus_format(struct drm_encoder *encoder, u32 bus_format)
 {
-       return imx_drm_set_bus_format_pins(encoder, bus_format, 2, 3);
+       return imx_drm_set_bus_config(encoder, bus_format, 2, 3,
+                                     DRM_BUS_FLAG_DE_HIGH |
+                                     DRM_BUS_FLAG_PIXDATA_NEGEDGE);
 }
 EXPORT_SYMBOL_GPL(imx_drm_set_bus_format);
 
index b0241b9..74320a1 100644 (file)
@@ -19,7 +19,8 @@ struct imx_drm_crtc_helper_funcs {
        int (*enable_vblank)(struct drm_crtc *crtc);
        void (*disable_vblank)(struct drm_crtc *crtc);
        int (*set_interface_pix_fmt)(struct drm_crtc *crtc,
-                       u32 bus_format, int hsync_pin, int vsync_pin);
+                       u32 bus_format, int hsync_pin, int vsync_pin,
+                       u32 bus_flags);
        const struct drm_crtc_helper_funcs *crtc_helper_funcs;
        const struct drm_crtc_funcs *crtc_funcs;
 };
@@ -41,8 +42,8 @@ void imx_drm_mode_config_init(struct drm_device *drm);
 
 struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb);
 
-int imx_drm_set_bus_format_pins(struct drm_encoder *encoder,
-               u32 bus_format, int hsync_pin, int vsync_pin);
+int imx_drm_set_bus_config(struct drm_encoder *encoder, u32 bus_format,
+               int hsync_pin, int vsync_pin, u32 bus_flags);
 int imx_drm_set_bus_format(struct drm_encoder *encoder,
                u32 bus_format);
 
index a58eee5..beff793 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
 #include <linux/of_device.h>
 #include <linux/of_graph.h>
+#include <video/of_display_timing.h>
 #include <video/of_videomode.h>
 #include <linux/regmap.h>
 #include <linux/videodev2.h>
@@ -59,6 +60,7 @@ struct imx_ldb_channel {
        struct drm_encoder encoder;
        struct drm_panel *panel;
        struct device_node *child;
+       struct i2c_adapter *ddc;
        int chno;
        void *edid;
        int edid_len;
@@ -107,6 +109,9 @@ static int imx_ldb_connector_get_modes(struct drm_connector *connector)
                        return num_modes;
        }
 
+       if (!imx_ldb_ch->edid && imx_ldb_ch->ddc)
+               imx_ldb_ch->edid = drm_get_edid(connector, imx_ldb_ch->ddc);
+
        if (imx_ldb_ch->edid) {
                drm_mode_connector_update_edid_property(connector,
                                                        imx_ldb_ch->edid);
@@ -553,7 +558,8 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
 
        for_each_child_of_node(np, child) {
                struct imx_ldb_channel *channel;
-               struct device_node *port;
+               struct device_node *ddc_node;
+               struct device_node *ep;
 
                ret = of_property_read_u32(child, "reg", &i);
                if (ret || i < 0 || i > 1)
@@ -576,33 +582,54 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
                 * The output port is port@4 with an external 4-port mux or
                 * port@2 with the internal 2-port mux.
                 */
-               port = of_graph_get_port_by_id(child, imx_ldb->lvds_mux ? 4 : 2);
-               if (port) {
-                       struct device_node *endpoint, *remote;
-
-                       endpoint = of_get_child_by_name(port, "endpoint");
-                       if (endpoint) {
-                               remote = of_graph_get_remote_port_parent(endpoint);
-                               if (remote)
-                                       channel->panel = of_drm_find_panel(remote);
-                               else
-                                       return -EPROBE_DEFER;
-                               if (!channel->panel) {
-                                       dev_err(dev, "panel not found: %s\n",
-                                               remote->full_name);
-                                       return -EPROBE_DEFER;
-                               }
+               ep = of_graph_get_endpoint_by_regs(child,
+                                                  imx_ldb->lvds_mux ? 4 : 2,
+                                                  -1);
+               if (ep) {
+                       struct device_node *remote;
+
+                       remote = of_graph_get_remote_port_parent(ep);
+                       of_node_put(ep);
+                       if (remote)
+                               channel->panel = of_drm_find_panel(remote);
+                       else
+                               return -EPROBE_DEFER;
+                       of_node_put(remote);
+                       if (!channel->panel) {
+                               dev_err(dev, "panel not found: %s\n",
+                                       remote->full_name);
+                               return -EPROBE_DEFER;
                        }
                }
 
-               edidp = of_get_property(child, "edid", &channel->edid_len);
-               if (edidp) {
-                       channel->edid = kmemdup(edidp, channel->edid_len,
-                                               GFP_KERNEL);
-               } else if (!channel->panel) {
-                       ret = of_get_drm_display_mode(child, &channel->mode, 0);
-                       if (!ret)
-                               channel->mode_valid = 1;
+               ddc_node = of_parse_phandle(child, "ddc-i2c-bus", 0);
+               if (ddc_node) {
+                       channel->ddc = of_find_i2c_adapter_by_node(ddc_node);
+                       of_node_put(ddc_node);
+                       if (!channel->ddc) {
+                               dev_warn(dev, "failed to get ddc i2c adapter\n");
+                               return -EPROBE_DEFER;
+                       }
+               }
+
+               if (!channel->ddc) {
+                       /* if no DDC available, fallback to hardcoded EDID */
+                       dev_dbg(dev, "no ddc available\n");
+
+                       edidp = of_get_property(child, "edid",
+                                               &channel->edid_len);
+                       if (edidp) {
+                               channel->edid = kmemdup(edidp,
+                                                       channel->edid_len,
+                                                       GFP_KERNEL);
+                       } else if (!channel->panel) {
+                               /* fallback to display-timings node */
+                               ret = of_get_drm_display_mode(child,
+                                                             &channel->mode,
+                                                             OF_USE_NATIVE_MODE);
+                               if (!ret)
+                                       channel->mode_valid = 1;
+                       }
                }
 
                channel->bus_format = of_get_bus_format(dev, child);
@@ -647,6 +674,7 @@ static void imx_ldb_unbind(struct device *dev, struct device *master,
                channel->encoder.funcs->destroy(&channel->encoder);
 
                kfree(channel->edid);
+               i2c_put_adapter(channel->ddc);
        }
 }
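
The bind path above now prefers probing EDID over DDC (via the "ddc-i2c-bus" phandle), falling back to a hardcoded "edid" property and finally to the display-timings node. The DDC lookup step restated on its own, with error handling trimmed (a sketch, not part of this commit):

#include <linux/i2c.h>
#include <linux/of.h>

/* Returns the DDC adapter for a node, or NULL to use the fallbacks */
static struct i2c_adapter *example_get_ddc(struct device_node *np)
{
        struct device_node *ddc_node;
        struct i2c_adapter *ddc = NULL;

        ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0);
        if (ddc_node) {
                ddc = of_find_i2c_adapter_by_node(ddc_node);
                of_node_put(ddc_node);
        }

        return ddc;
}
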
 
index ae7a9fb..baf7881 100644 (file)
@@ -294,8 +294,10 @@ static void imx_tve_encoder_prepare(struct drm_encoder *encoder)
 
        switch (tve->mode) {
        case TVE_MODE_VGA:
-               imx_drm_set_bus_format_pins(encoder, MEDIA_BUS_FMT_GBR888_1X24,
-                                           tve->hsync_pin, tve->vsync_pin);
+               imx_drm_set_bus_config(encoder, MEDIA_BUS_FMT_GBR888_1X24,
+                                      tve->hsync_pin, tve->vsync_pin,
+                                      DRM_BUS_FLAG_DE_HIGH |
+                                      DRM_BUS_FLAG_PIXDATA_NEGEDGE);
                break;
        case TVE_MODE_TVOUT:
                imx_drm_set_bus_format(encoder, MEDIA_BUS_FMT_YUV8_1X24);
index b2c30b8..fc04041 100644 (file)
@@ -66,6 +66,7 @@ struct ipu_crtc {
        struct ipu_flip_work    *flip_work;
        int                     irq;
        u32                     bus_format;
+       u32                     bus_flags;
        int                     di_hsync_pin;
        int                     di_vsync_pin;
 };
@@ -271,8 +272,10 @@ static int ipu_crtc_mode_set(struct drm_crtc *crtc,
        else
                sig_cfg.clkflags = 0;
 
-       sig_cfg.enable_pol = 1;
-       sig_cfg.clk_pol = 0;
+       sig_cfg.enable_pol = !(ipu_crtc->bus_flags & DRM_BUS_FLAG_DE_LOW);
+       /* Default to driving pixel data on negative clock edges */
+       sig_cfg.clk_pol = !!(ipu_crtc->bus_flags &
+                            DRM_BUS_FLAG_PIXDATA_POSEDGE);
        sig_cfg.bus_format = ipu_crtc->bus_format;
        sig_cfg.v_to_h_sync = 0;
        sig_cfg.hsync_pin = ipu_crtc->di_hsync_pin;
@@ -396,11 +399,12 @@ static void ipu_disable_vblank(struct drm_crtc *crtc)
 }
 
 static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc,
-               u32 bus_format, int hsync_pin, int vsync_pin)
+               u32 bus_format, int hsync_pin, int vsync_pin, u32 bus_flags)
 {
        struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
 
        ipu_crtc->bus_format = bus_format;
+       ipu_crtc->bus_flags = bus_flags;
        ipu_crtc->di_hsync_pin = hsync_pin;
        ipu_crtc->di_vsync_pin = vsync_pin;
 
index 681ec6e..a4bb441 100644 (file)
@@ -38,6 +38,8 @@ static const uint32_t ipu_plane_formats[] = {
        DRM_FORMAT_RGBX8888,
        DRM_FORMAT_BGRA8888,
        DRM_FORMAT_BGRA8888,
+       DRM_FORMAT_UYVY,
+       DRM_FORMAT_VYUY,
        DRM_FORMAT_YUYV,
        DRM_FORMAT_YVYU,
        DRM_FORMAT_YUV420,
@@ -428,7 +430,6 @@ static int ipu_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
        if (crtc != plane->crtc)
                dev_dbg(plane->dev->dev, "crtc change: %p -> %p\n",
                                plane->crtc, crtc);
-       plane->crtc = crtc;
 
        if (!ipu_plane->enabled)
                ipu_plane_enable(ipu_plane);
@@ -461,7 +462,7 @@ static void ipu_plane_destroy(struct drm_plane *plane)
        kfree(ipu_plane);
 }
 
-static struct drm_plane_funcs ipu_plane_funcs = {
+static const struct drm_plane_funcs ipu_plane_funcs = {
        .update_plane   = ipu_update_plane,
        .disable_plane  = ipu_disable_plane,
        .destroy        = ipu_plane_destroy,
index 363e2c7..2d1fd02 100644 (file)
@@ -35,7 +35,6 @@ struct imx_parallel_display {
        void *edid;
        int edid_len;
        u32 bus_format;
-       int mode_valid;
        struct drm_display_mode mode;
        struct drm_panel *panel;
 };
@@ -68,17 +67,6 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
                num_modes = drm_add_edid_modes(connector, imxpd->edid);
        }
 
-       if (imxpd->mode_valid) {
-               struct drm_display_mode *mode = drm_mode_create(connector->dev);
-
-               if (!mode)
-                       return -EINVAL;
-               drm_mode_copy(mode, &imxpd->mode);
-               mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
-               drm_mode_probed_add(connector, mode);
-               num_modes++;
-       }
-
        if (np) {
                struct drm_display_mode *mode = drm_mode_create(connector->dev);
 
@@ -115,8 +103,8 @@ static void imx_pd_encoder_dpms(struct drm_encoder *encoder, int mode)
 static void imx_pd_encoder_prepare(struct drm_encoder *encoder)
 {
        struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
-
-       imx_drm_set_bus_format(encoder, imxpd->bus_format);
+       imx_drm_set_bus_config(encoder, imxpd->bus_format, 2, 3,
+                              imxpd->connector.display_info.bus_flags);
 }
 
 static void imx_pd_encoder_commit(struct drm_encoder *encoder)
@@ -203,7 +191,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
 {
        struct drm_device *drm = data;
        struct device_node *np = dev->of_node;
-       struct device_node *port;
+       struct device_node *ep;
        const u8 *edidp;
        struct imx_parallel_display *imxpd;
        int ret;
@@ -230,18 +218,18 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
        }
 
        /* port@1 is the output port */
-       port = of_graph_get_port_by_id(np, 1);
-       if (port) {
-               struct device_node *endpoint, *remote;
-
-               endpoint = of_get_child_by_name(port, "endpoint");
-               if (endpoint) {
-                       remote = of_graph_get_remote_port_parent(endpoint);
-                       if (remote)
-                               imxpd->panel = of_drm_find_panel(remote);
-                       if (!imxpd->panel)
-                               return -EPROBE_DEFER;
+       ep = of_graph_get_endpoint_by_regs(np, 1, -1);
+       if (ep) {
+               struct device_node *remote;
+
+               remote = of_graph_get_remote_port_parent(ep);
+               of_node_put(ep);
+               if (remote) {
+                       imxpd->panel = of_drm_find_panel(remote);
+                       of_node_put(remote);
                }
+               if (!imxpd->panel)
+                       return -EPROBE_DEFER;
        }
 
        imxpd->dev = dev;
index d05ca79..0186e50 100644 (file)
@@ -432,11 +432,6 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
        unsigned long pll_rate;
        unsigned int factor;
 
-       if (!dpi) {
-               dev_err(dpi->dev, "invalid argument\n");
-               return -EINVAL;
-       }
-
        pix_rate = 1000UL * mode->clock;
        if (mode->clock <= 74000)
                factor = 8 * 3;
index 2d808e5..7695591 100644 (file)
@@ -695,10 +695,8 @@ static void mtk_dsi_destroy_conn_enc(struct mtk_dsi *dsi)
 {
        drm_encoder_cleanup(&dsi->encoder);
        /* Skip connector cleanup if creation was delegated to the bridge */
-       if (dsi->conn.dev) {
-               drm_connector_unregister(&dsi->conn);
+       if (dsi->conn.dev)
                drm_connector_cleanup(&dsi->conn);
-       }
 }
 
 static void mtk_dsi_ddp_start(struct mtk_ddp_comp *comp)
index 14e64e0..d347dca 100644 (file)
@@ -182,7 +182,7 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
                        }
                }
 
-               fvv = pllreffreq * testn / testm;
+               fvv = pllreffreq * (n + 1) / (m + 1);
                fvv = (fvv - 800000) / 50000;
 
                if (fvv > 15)
@@ -202,6 +202,14 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
        WREG_DAC(MGA1064_PIX_PLLC_M, m);
        WREG_DAC(MGA1064_PIX_PLLC_N, n);
        WREG_DAC(MGA1064_PIX_PLLC_P, p);
+
+       if (mdev->unique_rev_id >= 0x04) {
+               WREG_DAC(0x1a, 0x09);
+               msleep(20);
+               WREG_DAC(0x1a, 0x01);
+
+       }
+
        return 0;
 }
 
index 73241c4..336ad4d 100644 (file)
@@ -2,6 +2,7 @@ config DRM_OMAP
        tristate "OMAP DRM"
        depends on DRM
        depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
+       select OMAP2_DSS
        select DRM_KMS_HELPER
        select DRM_KMS_FB_HELPER
        select FB_SYS_FILLRECT
index 225fd8d..667ca4a 100644 (file)
@@ -9,6 +9,7 @@
  * the Free Software Foundation.
  */
 
+#include <linux/gpio/consumer.h>
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
index 8c246c2..9594ff7 100644 (file)
@@ -14,7 +14,7 @@
  * the Free Software Foundation.
  */
 
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
index 2fd5602..671806c 100644 (file)
@@ -9,7 +9,7 @@
  * the Free Software Foundation.
  */
 
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
index e780fd4..7c2331b 100644 (file)
@@ -9,7 +9,7 @@
  * the Free Software Foundation.
  */
 
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
index 36485c2..2b11807 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/backlight.h>
 #include <linux/delay.h>
 #include <linux/fb.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/interrupt.h>
 #include <linux/jiffies.h>
 #include <linux/module.h>
index 458f77b..ac680e1 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/spi/spi.h>
 #include <linux/mutex.h>
 #include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 
 #include <video/omapdss.h>
 #include <video/omap-panel-data.h>
index 780cb26..38d2920 100644 (file)
@@ -15,7 +15,7 @@
 #include <linux/delay.h>
 #include <linux/spi/spi.h>
 #include <linux/fb.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/of_gpio.h>
 
 #include <video/omapdss.h>
index 529a017..4363fff 100644 (file)
@@ -10,7 +10,7 @@
  */
 
 #include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_gpio.h>
index 31efcca..deb4167 100644 (file)
@@ -29,7 +29,7 @@
 #include <linux/sched.h>
 #include <linux/backlight.h>
 #include <linux/fb.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/of.h>
 #include <linux/of_gpio.h>
 
index 03e2beb..d93175b 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/delay.h>
 #include <linux/spi/spi.h>
 #include <linux/regulator/consumer.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/of_gpio.h>
index 8730646..9ed8272 100644 (file)
@@ -1180,15 +1180,6 @@ static int dsi_regulator_init(struct platform_device *dsidev)
                return PTR_ERR(vdds_dsi);
        }
 
-       if (regulator_can_change_voltage(vdds_dsi)) {
-               r = regulator_set_voltage(vdds_dsi, 1800000, 1800000);
-               if (r) {
-                       devm_regulator_put(vdds_dsi);
-                       DSSERR("can't set the DSI regulator voltage\n");
-                       return r;
-               }
-       }
-
        dsi->vdds_dsi_reg = vdds_dsi;
 
        return 0;
index f95ff31..3303cfa 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/delay.h>
 #include <linux/seq_file.h>
 #include <linux/clk.h>
+#include <linux/pinctrl/consumer.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/gfp.h>
index f892ae1..4d46cdf 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/gpio.h>
 #include <linux/regulator/consumer.h>
 #include <linux/component.h>
+#include <linux/of.h>
 #include <video/omapdss.h>
 #include <sound/omap-hdmi-audio.h>
 
@@ -100,7 +101,6 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data)
 
 static int hdmi_init_regulator(void)
 {
-       int r;
        struct regulator *reg;
 
        if (hdmi.vdda_reg != NULL)
@@ -114,15 +114,6 @@ static int hdmi_init_regulator(void)
                return PTR_ERR(reg);
        }
 
-       if (regulator_can_change_voltage(reg)) {
-               r = regulator_set_voltage(reg, 1800000, 1800000);
-               if (r) {
-                       devm_regulator_put(reg);
-                       DSSWARN("can't set the regulator voltage\n");
-                       return r;
-               }
-       }
-
        hdmi.vdda_reg = reg;
 
        return 0;
index fa72e73..ef3afe9 100644 (file)
@@ -211,7 +211,7 @@ static void hdmi_core_init(struct hdmi_core_video_config *video_cfg)
 static void hdmi_core_powerdown_disable(struct hdmi_core_data *core)
 {
        DSSDBG("Enter hdmi_core_powerdown_disable\n");
-       REG_FLD_MOD(core->base, HDMI_CORE_SYS_SYS_CTRL1, 0x0, 0, 0);
+       REG_FLD_MOD(core->base, HDMI_CORE_SYS_SYS_CTRL1, 0x1, 0, 0);
 }
 
 static void hdmi_core_swreset_release(struct hdmi_core_data *core)
index a43f7b1..e129245 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/gpio.h>
 #include <linux/regulator/consumer.h>
 #include <linux/component.h>
+#include <linux/of.h>
 #include <video/omapdss.h>
 #include <sound/omap-hdmi-audio.h>
 
@@ -131,15 +132,6 @@ static int hdmi_init_regulator(void)
                return PTR_ERR(reg);
        }
 
-       if (regulator_can_change_voltage(reg)) {
-               r = regulator_set_voltage(reg, 1800000, 1800000);
-               if (r) {
-                       devm_regulator_put(reg);
-                       DSSWARN("can't set the regulator voltage\n");
-                       return r;
-               }
-       }
-
        hdmi.vdda_reg = reg;
 
        return 0;
index 6a39752..8ab2093 100644 (file)
@@ -51,8 +51,8 @@ static void hdmi_core_ddc_init(struct hdmi_core_data *core)
 {
        void __iomem *base = core->base;
        const unsigned long long iclk = 266000000;      /* DSS L3 ICLK */
-       const unsigned ss_scl_high = 4000;              /* ns */
-       const unsigned ss_scl_low = 4700;               /* ns */
+       const unsigned ss_scl_high = 4600;              /* ns */
+       const unsigned ss_scl_low = 5400;               /* ns */
        const unsigned fs_scl_high = 600;               /* ns */
        const unsigned fs_scl_low = 1300;               /* ns */
        const unsigned sda_hold = 1000;                 /* ns */
@@ -458,7 +458,7 @@ static void hdmi_core_write_avi_infoframe(struct hdmi_core_data *core,
 
        c = (ptr[1] >> 6) & 0x3;
        m = (ptr[1] >> 4) & 0x3;
-       r = (ptr[1] >> 0) & 0x3;
+       r = (ptr[1] >> 0) & 0xf;
 
        itc = (ptr[2] >> 7) & 0x1;
        ec = (ptr[2] >> 4) & 0x7;
index 1f5d19c..f98b750 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
+#include <linux/seq_file.h>
 #include <video/omapdss.h>
 
 #include "dss.h"
index 06e23a7..f1015e8 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
+#include <linux/seq_file.h>
 
 #include <video/omapdss.h>
 
index 13442b9..055f62f 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/err.h>
 #include <linux/io.h>
 #include <linux/platform_device.h>
+#include <linux/seq_file.h>
 #include <video/omapdss.h>
 
 #include "dss.h"
index 6f5fc14..479bf24 100644 (file)
@@ -17,6 +17,8 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/seq_file.h>
+
 #include <drm/drm_crtc.h>
 #include <drm/drm_fb_helper.h>
 
index de275a5..4ceed7a 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h> /* platform_device() */
 #include <linux/sched.h>
+#include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/time.h>
 #include <linux/vmalloc.h>
index 94ec06d..f84570d 100644 (file)
@@ -17,6 +17,8 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/seq_file.h>
+
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
 
index b97afc2..03698b6 100644 (file)
@@ -17,6 +17,7 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/seq_file.h>
 #include <linux/shmem_fs.h>
 #include <linux/spinlock.h>
 #include <linux/pfn_t.h>
index 505620c..e04deed 100644 (file)
@@ -51,15 +51,6 @@ static void sti_crtc_disabling(struct drm_crtc *crtc)
        mixer->status = STI_MIXER_DISABLING;
 }
 
-static bool sti_crtc_mode_fixup(struct drm_crtc *crtc,
-                               const struct drm_display_mode *mode,
-                               struct drm_display_mode *adjusted_mode)
-{
-       /* accept the provided drm_display_mode, do not fix it up */
-       drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
-       return true;
-}
-
 static int
 sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
 {
@@ -230,7 +221,6 @@ static void sti_crtc_atomic_flush(struct drm_crtc *crtc,
 static const struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
        .enable = sti_crtc_enable,
        .disable = sti_crtc_disabling,
-       .mode_fixup = sti_crtc_mode_fixup,
        .mode_set = drm_helper_crtc_mode_set,
        .mode_set_nofb = sti_crtc_mode_set_nofb,
        .mode_set_base = drm_helper_crtc_mode_set_base,
index fa33c50..5495a5b 100644 (file)
@@ -8,6 +8,12 @@ config ARM_GIC
        select IRQ_DOMAIN_HIERARCHY
        select MULTI_IRQ_HANDLER
 
+config ARM_GIC_PM
+       bool
+       depends on PM
+       select ARM_GIC
+       select PM_CLK
+
 config ARM_GIC_MAX_NR
        int
        default 2 if ARCH_REALVIEW
index bb5dc3d..4c203b6 100644 (file)
@@ -24,6 +24,7 @@ obj-$(CONFIG_ARCH_SUNXI)              += irq-sun4i.o
 obj-$(CONFIG_ARCH_SUNXI)               += irq-sunxi-nmi.o
 obj-$(CONFIG_ARCH_SPEAR3XX)            += spear-shirq.o
 obj-$(CONFIG_ARM_GIC)                  += irq-gic.o irq-gic-common.o
+obj-$(CONFIG_ARM_GIC_PM)               += irq-gic-pm.o
 obj-$(CONFIG_REALVIEW_DT)              += irq-gic-realview.o
 obj-$(CONFIG_ARM_GIC_V2M)              += irq-gic-v2m.o
 obj-$(CONFIG_ARM_GIC_V3)               += irq-gic-v3.o irq-gic-common.o
index 89e7423..9ae7180 100644 (file)
@@ -90,8 +90,8 @@ int gic_configure_irq(unsigned int irq, unsigned int type,
        return ret;
 }
 
-void __init gic_dist_config(void __iomem *base, int gic_irqs,
-                           void (*sync_access)(void))
+void gic_dist_config(void __iomem *base, int gic_irqs,
+                    void (*sync_access)(void))
 {
        unsigned int i;
 
diff --git a/drivers/irqchip/irq-gic-pm.c b/drivers/irqchip/irq-gic-pm.c
new file mode 100644 (file)
index 0000000..4cbffba
--- /dev/null
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2016 NVIDIA CORPORATION, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/irqchip/arm-gic.h>
+#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+struct gic_clk_data {
+       unsigned int num_clocks;
+       const char *const *clocks;
+};
+
+static int gic_runtime_resume(struct device *dev)
+{
+       struct gic_chip_data *gic = dev_get_drvdata(dev);
+       int ret;
+
+       ret = pm_clk_resume(dev);
+       if (ret)
+               return ret;
+
+       /*
+        * On the very first resume, the pointer to the driver data
+        * will be NULL and this is intentional, because we do not
+        * want to restore the GIC on the very first resume. So if
+        * the pointer is not valid just return.
+        */
+       if (!gic)
+               return 0;
+
+       gic_dist_restore(gic);
+       gic_cpu_restore(gic);
+
+       return 0;
+}
+
+static int gic_runtime_suspend(struct device *dev)
+{
+       struct gic_chip_data *gic = dev_get_drvdata(dev);
+
+       gic_dist_save(gic);
+       gic_cpu_save(gic);
+
+       return pm_clk_suspend(dev);
+}
+
+static int gic_get_clocks(struct device *dev, const struct gic_clk_data *data)
+{
+       struct clk *clk;
+       unsigned int i;
+       int ret;
+
+       if (!dev || !data)
+               return -EINVAL;
+
+       ret = pm_clk_create(dev);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < data->num_clocks; i++) {
+               clk = of_clk_get_by_name(dev->of_node, data->clocks[i]);
+               if (IS_ERR(clk)) {
+                       dev_err(dev, "failed to get clock %s\n",
+                               data->clocks[i]);
+                       ret = PTR_ERR(clk);
+                       goto error;
+               }
+
+               ret = pm_clk_add_clk(dev, clk);
+               if (ret) {
+                       dev_err(dev, "failed to add clock at index %d\n", i);
+                       clk_put(clk);
+                       goto error;
+               }
+       }
+
+       return 0;
+
+error:
+       pm_clk_destroy(dev);
+
+       return ret;
+}
+
+static int gic_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       const struct gic_clk_data *data;
+       struct gic_chip_data *gic;
+       int ret, irq;
+
+       data = of_device_get_match_data(&pdev->dev);
+       if (!data) {
+               dev_err(&pdev->dev, "no device match found\n");
+               return -ENODEV;
+       }
+
+       irq = irq_of_parse_and_map(dev->of_node, 0);
+       if (!irq) {
+               dev_err(dev, "no parent interrupt found!\n");
+               return -EINVAL;
+       }
+
+       ret = gic_get_clocks(dev, data);
+       if (ret)
+               goto irq_dispose;
+
+       pm_runtime_enable(dev);
+
+       ret = pm_runtime_get_sync(dev);
+       if (ret < 0)
+               goto rpm_disable;
+
+       ret = gic_of_init_child(dev, &gic, irq);
+       if (ret)
+               goto rpm_put;
+
+       platform_set_drvdata(pdev, gic);
+
+       pm_runtime_put(dev);
+
+       dev_info(dev, "GIC IRQ controller registered\n");
+
+       return 0;
+
+rpm_put:
+       pm_runtime_put_sync(dev);
+rpm_disable:
+       pm_runtime_disable(dev);
+       pm_clk_destroy(dev);
+irq_dispose:
+       irq_dispose_mapping(irq);
+
+       return ret;
+}
+
+static const struct dev_pm_ops gic_pm_ops = {
+       SET_RUNTIME_PM_OPS(gic_runtime_suspend,
+                          gic_runtime_resume, NULL)
+};
+
+static const char * const gic400_clocks[] = {
+       "clk",
+};
+
+static const struct gic_clk_data gic400_data = {
+       .num_clocks = ARRAY_SIZE(gic400_clocks),
+       .clocks = gic400_clocks,
+};
+
+static const struct of_device_id gic_match[] = {
+       { .compatible = "nvidia,tegra210-agic", .data = &gic400_data },
+       {},
+};
+MODULE_DEVICE_TABLE(of, gic_match);
+
+static struct platform_driver gic_driver = {
+       .probe          = gic_probe,
+       .driver         = {
+               .name   = "gic",
+               .of_match_table = gic_match,
+               .pm     = &gic_pm_ops,
+       }
+};
+
+builtin_platform_driver(gic_driver);
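
Because gic_get_clocks() only consumes the per-match gic_clk_data, supporting another SoC variant is a matter of adding a compatible entry with its own clock list to the match table above. A hypothetical extension sketch (the compatible string and clock names are made up and not part of this commit; it reuses the gic_clk_data definition from the file above):

static const char * const example_agic_clocks[] = {
        "iface",
        "core",
};

static const struct gic_clk_data example_agic_data = {
        .num_clocks = ARRAY_SIZE(example_agic_clocks),
        .clocks = example_agic_clocks,
};

/*
 * ...plus one extra entry in gic_match[] above:
 *      { .compatible = "vendor,example-agic", .data = &example_agic_data },
 */
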
index 6bd881b..7ceaba8 100644 (file)
@@ -41,6 +41,7 @@
 
 #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING          (1ULL << 0)
 #define ITS_FLAGS_WORKAROUND_CAVIUM_22375      (1ULL << 1)
+#define ITS_FLAGS_WORKAROUND_CAVIUM_23144      (1ULL << 2)
 
 #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING    (1 << 0)
 
@@ -55,13 +56,14 @@ struct its_collection {
 };
 
 /*
- * The ITS_BASER structure - contains memory information and cached
- * value of BASER register configuration.
+ * The ITS_BASER structure - contains memory information, cached
+ * value of BASER register configuration and ITS page size.
  */
 struct its_baser {
        void            *base;
        u64             val;
        u32             order;
+       u32             psz;
 };
 
 /*
@@ -82,6 +84,7 @@ struct its_node {
        u64                     flags;
        u32                     ite_size;
        u32                     device_ids;
+       int                     numa_node;
 };
 
 #define ITS_ITT_ALIGN          SZ_256
@@ -613,11 +616,23 @@ static void its_unmask_irq(struct irq_data *d)
 static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                            bool force)
 {
-       unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
+       unsigned int cpu;
+       const struct cpumask *cpu_mask = cpu_online_mask;
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
        struct its_collection *target_col;
        u32 id = its_get_event_id(d);
 
+       /* lpi cannot be routed to a redistributor that is on a foreign node */
+       if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
+               if (its_dev->its->numa_node >= 0) {
+                       cpu_mask = cpumask_of_node(its_dev->its->numa_node);
+                       if (!cpumask_intersects(mask_val, cpu_mask))
+                               return -EINVAL;
+               }
+       }
+
+       cpu = cpumask_any_and(mask_val, cpu_mask);
+
        if (cpu >= nr_cpu_ids)
                return -EINVAL;
 
@@ -810,180 +825,241 @@ static const char *its_base_type_string[] = {
        [GITS_BASER_TYPE_RESERVED7]     = "Reserved (7)",
 };
 
-static void its_free_tables(struct its_node *its)
+static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
 {
-       int i;
+       u32 idx = baser - its->tables;
 
-       for (i = 0; i < GITS_BASER_NR_REGS; i++) {
-               if (its->tables[i].base) {
-                       free_pages((unsigned long)its->tables[i].base,
-                                  its->tables[i].order);
-                       its->tables[i].base = NULL;
-               }
-       }
+       return readq_relaxed(its->base + GITS_BASER + (idx << 3));
 }
 
-static int its_alloc_tables(const char *node_name, struct its_node *its)
+static void its_write_baser(struct its_node *its, struct its_baser *baser,
+                           u64 val)
 {
-       int err;
-       int i;
-       int psz = SZ_64K;
-       u64 shr = GITS_BASER_InnerShareable;
-       u64 cache;
-       u64 typer;
-       u32 ids;
+       u32 idx = baser - its->tables;
 
-       if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) {
-               /*
-                * erratum 22375: only alloc 8MB table size
-                * erratum 24313: ignore memory access type
-                */
-               cache   = 0;
-               ids     = 0x14;                 /* 20 bits, 8MB */
-       } else {
-               cache   = GITS_BASER_WaWb;
-               typer   = readq_relaxed(its->base + GITS_TYPER);
-               ids     = GITS_TYPER_DEVBITS(typer);
+       writeq_relaxed(val, its->base + GITS_BASER + (idx << 3));
+       baser->val = its_read_baser(its, baser);
+}
+
+static int its_setup_baser(struct its_node *its, struct its_baser *baser,
+                          u64 cache, u64 shr, u32 psz, u32 order,
+                          bool indirect)
+{
+       u64 val = its_read_baser(its, baser);
+       u64 esz = GITS_BASER_ENTRY_SIZE(val);
+       u64 type = GITS_BASER_TYPE(val);
+       u32 alloc_pages;
+       void *base;
+       u64 tmp;
+
+retry_alloc_baser:
+       alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
+       if (alloc_pages > GITS_BASER_PAGES_MAX) {
+               pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
+                       &its->phys_base, its_base_type_string[type],
+                       alloc_pages, GITS_BASER_PAGES_MAX);
+               alloc_pages = GITS_BASER_PAGES_MAX;
+               order = get_order(GITS_BASER_PAGES_MAX * psz);
        }
 
-       its->device_ids = ids;
+       base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+       if (!base)
+               return -ENOMEM;
 
-       for (i = 0; i < GITS_BASER_NR_REGS; i++) {
-               u64 val = readq_relaxed(its->base + GITS_BASER + i * 8);
-               u64 type = GITS_BASER_TYPE(val);
-               u64 entry_size = GITS_BASER_ENTRY_SIZE(val);
-               int order = get_order(psz);
-               int alloc_pages;
-               u64 tmp;
-               void *base;
+retry_baser:
+       val = (virt_to_phys(base)                                |
+               (type << GITS_BASER_TYPE_SHIFT)                  |
+               ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT)       |
+               ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT)    |
+               cache                                            |
+               shr                                              |
+               GITS_BASER_VALID);
+
+       val |=  indirect ? GITS_BASER_INDIRECT : 0x0;
+
+       switch (psz) {
+       case SZ_4K:
+               val |= GITS_BASER_PAGE_SIZE_4K;
+               break;
+       case SZ_16K:
+               val |= GITS_BASER_PAGE_SIZE_16K;
+               break;
+       case SZ_64K:
+               val |= GITS_BASER_PAGE_SIZE_64K;
+               break;
+       }
 
-               if (type == GITS_BASER_TYPE_NONE)
-                       continue;
+       its_write_baser(its, baser, val);
+       tmp = baser->val;
 
+       if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
                /*
-                * Allocate as many entries as required to fit the
-                * range of device IDs that the ITS can grok... The ID
-                * space being incredibly sparse, this results in a
-                * massive waste of memory.
-                *
-                * For other tables, only allocate a single page.
+                * Shareability didn't stick. Just use
+                * whatever the read reported, which is likely
+                * to be the only thing this redistributor
+                * supports. If that's zero, make it
+                * non-cacheable as well.
                 */
-               if (type == GITS_BASER_TYPE_DEVICE) {
-                       /*
-                        * 'order' was initialized earlier to the default page
-                        * granule of the the ITS.  We can't have an allocation
-                        * smaller than that.  If the requested allocation
-                        * is smaller, round up to the default page granule.
-                        */
-                       order = max(get_order((1UL << ids) * entry_size),
-                                   order);
-                       if (order >= MAX_ORDER) {
-                               order = MAX_ORDER - 1;
-                               pr_warn("%s: Device Table too large, reduce its page order to %u\n",
-                                       node_name, order);
-                       }
+               shr = tmp & GITS_BASER_SHAREABILITY_MASK;
+               if (!shr) {
+                       cache = GITS_BASER_nC;
+                       __flush_dcache_area(base, PAGE_ORDER_TO_SIZE(order));
                }
+               goto retry_baser;
+       }
 
-retry_alloc_baser:
-               alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
-               if (alloc_pages > GITS_BASER_PAGES_MAX) {
-                       alloc_pages = GITS_BASER_PAGES_MAX;
-                       order = get_order(GITS_BASER_PAGES_MAX * psz);
-                       pr_warn("%s: Device Table too large, reduce its page order to %u (%u pages)\n",
-                               node_name, order, alloc_pages);
-               }
-
-               base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
-               if (!base) {
-                       err = -ENOMEM;
-                       goto out_free;
-               }
-
-               its->tables[i].base = base;
-               its->tables[i].order = order;
-
-retry_baser:
-               val = (virt_to_phys(base)                                |
-                      (type << GITS_BASER_TYPE_SHIFT)                   |
-                      ((entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
-                      cache                                             |
-                      shr                                               |
-                      GITS_BASER_VALID);
+       if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
+               /*
+                * Page size didn't stick. Let's try a smaller
+                * size and retry. If we reach 4K, then
+                * something is horribly wrong...
+                */
+               free_pages((unsigned long)base, order);
+               baser->base = NULL;
 
                switch (psz) {
-               case SZ_4K:
-                       val |= GITS_BASER_PAGE_SIZE_4K;
-                       break;
                case SZ_16K:
-                       val |= GITS_BASER_PAGE_SIZE_16K;
-                       break;
+                       psz = SZ_4K;
+                       goto retry_alloc_baser;
                case SZ_64K:
-                       val |= GITS_BASER_PAGE_SIZE_64K;
-                       break;
+                       psz = SZ_16K;
+                       goto retry_alloc_baser;
                }
+       }
+
+       if (val != tmp) {
+               pr_err("ITS@%pa: %s doesn't stick: %lx %lx\n",
+                      &its->phys_base, its_base_type_string[type],
+                      (unsigned long) val, (unsigned long) tmp);
+               free_pages((unsigned long)base, order);
+               return -ENXIO;
+       }
+
+       baser->order = order;
+       baser->base = base;
+       baser->psz = psz;
+       tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
+
+       pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
+               &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / tmp),
+               its_base_type_string[type],
+               (unsigned long)virt_to_phys(base),
+               indirect ? "indirect" : "flat", (int)esz,
+               psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
 
-               val |= alloc_pages - 1;
-               its->tables[i].val = val;
+       return 0;
+}
 
-               writeq_relaxed(val, its->base + GITS_BASER + i * 8);
-               tmp = readq_relaxed(its->base + GITS_BASER + i * 8);
+static bool its_parse_baser_device(struct its_node *its, struct its_baser *baser,
+                                  u32 psz, u32 *order)
+{
+       u64 esz = GITS_BASER_ENTRY_SIZE(its_read_baser(its, baser));
+       u64 val = GITS_BASER_InnerShareable | GITS_BASER_WaWb;
+       u32 ids = its->device_ids;
+       u32 new_order = *order;
+       bool indirect = false;
+
+       /* No need to enable Indirection if memory requirement < (psz*2)bytes */
+       if ((esz << ids) > (psz * 2)) {
+               /*
+                * Find out whether hw supports a single or two-level table by
+                * reading bit at offset '62' after writing '1' to it.
+                */
+               its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
+               indirect = !!(baser->val & GITS_BASER_INDIRECT);
 
-               if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
+               if (indirect) {
                        /*
-                        * Shareability didn't stick. Just use
-                        * whatever the read reported, which is likely
-                        * to be the only thing this redistributor
-                        * supports. If that's zero, make it
-                        * non-cacheable as well.
+                        * The size of each lvl2 table is equal to the ITS
+                        * page size, 'psz'. To size the lvl1 table, subtract
+                        * the ID bits covered by one lvl2 table from the
+                        * 'ids' reported by the ITS hardware; the lvl1 table
+                        * then needs one GITS_LVL1_ENTRY_SIZE-byte entry per
+                        * remaining ID.
                         */
-                       shr = tmp & GITS_BASER_SHAREABILITY_MASK;
-                       if (!shr) {
-                               cache = GITS_BASER_nC;
-                               __flush_dcache_area(base, PAGE_ORDER_TO_SIZE(order));
-                       }
-                       goto retry_baser;
+                       ids -= ilog2(psz / esz);
+                       esz = GITS_LVL1_ENTRY_SIZE;
                }
+       }
 
-               if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
-                       /*
-                        * Page size didn't stick. Let's try a smaller
-                        * size and retry. If we reach 4K, then
-                        * something is horribly wrong...
-                        */
-                       free_pages((unsigned long)base, order);
-                       its->tables[i].base = NULL;
+       /*
+        * Allocate as many entries as required to fit the
+        * range of device IDs that the ITS can grok... The ID
+        * space being incredibly sparse, this results in a
+        * massive waste of memory if two-level device table
+        * feature is not supported by hardware.
+        */
+       new_order = max_t(u32, get_order(esz << ids), new_order);
+       if (new_order >= MAX_ORDER) {
+               new_order = MAX_ORDER - 1;
+               ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / esz);
+               pr_warn("ITS@%pa: Device Table too large, reduce ids %u->%u\n",
+                       &its->phys_base, its->device_ids, ids);
+       }
 
-                       switch (psz) {
-                       case SZ_16K:
-                               psz = SZ_4K;
-                               goto retry_alloc_baser;
-                       case SZ_64K:
-                               psz = SZ_16K;
-                               goto retry_alloc_baser;
-                       }
-               }
+       *order = new_order;
 
-               if (val != tmp) {
-                       pr_err("ITS: %s: GITS_BASER%d doesn't stick: %lx %lx\n",
-                              node_name, i,
-                              (unsigned long) val, (unsigned long) tmp);
-                       err = -ENXIO;
-                       goto out_free;
+       return indirect;
+}
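
(Illustrative sketch, not part of the patch: the sizing arithmetic behind the two-level device table selected above. Assumes power-of-two 'esz' and 'psz'; ilog2() is the helper from <linux/log2.h>, and the worked numbers are only an example.)

static unsigned long lvl1_table_bytes(unsigned long esz, unsigned long psz,
                                      unsigned int ids)
{
        /* one lvl2 page of 'psz' bytes covers psz/esz DeviceIDs */
        unsigned int ids_per_lvl2 = ilog2(psz / esz);

        /* one 8-byte lvl1 descriptor (GITS_LVL1_ENTRY_SIZE) per lvl2 page */
        return (1UL << (ids - ids_per_lvl2)) * 8;
}

/* e.g. esz = 8, psz = SZ_64K, ids = 20: a flat table would need 8MB,
 * while the lvl1 table is only 2^(20 - 13) * 8 = 1KB, with the 64K
 * lvl2 pages allocated on demand in its_alloc_device_table() below. */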
+
+static void its_free_tables(struct its_node *its)
+{
+       int i;
+
+       for (i = 0; i < GITS_BASER_NR_REGS; i++) {
+               if (its->tables[i].base) {
+                       free_pages((unsigned long)its->tables[i].base,
+                                  its->tables[i].order);
+                       its->tables[i].base = NULL;
                }
+       }
+}
 
-               pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n",
-                       (int)(PAGE_ORDER_TO_SIZE(order) / entry_size),
-                       its_base_type_string[type],
-                       (unsigned long)virt_to_phys(base),
-                       psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
+static int its_alloc_tables(struct its_node *its)
+{
+       u64 typer = readq_relaxed(its->base + GITS_TYPER);
+       u32 ids = GITS_TYPER_DEVBITS(typer);
+       u64 shr = GITS_BASER_InnerShareable;
+       u64 cache = GITS_BASER_WaWb;
+       u32 psz = SZ_64K;
+       int err, i;
+
+       if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) {
+               /*
+                * erratum 22375: only alloc 8MB table size
+                * erratum 24313: ignore memory access type
+                */
+               cache   = GITS_BASER_nCnB;
+               ids     = 0x14;                 /* 20 bits, 8MB */
        }
 
-       return 0;
+       its->device_ids = ids;
 
-out_free:
-       its_free_tables(its);
+       for (i = 0; i < GITS_BASER_NR_REGS; i++) {
+               struct its_baser *baser = its->tables + i;
+               u64 val = its_read_baser(its, baser);
+               u64 type = GITS_BASER_TYPE(val);
+               u32 order = get_order(psz);
+               bool indirect = false;
 
-       return err;
+               if (type == GITS_BASER_TYPE_NONE)
+                       continue;
+
+               if (type == GITS_BASER_TYPE_DEVICE)
+                       indirect = its_parse_baser_device(its, baser, psz, &order);
+
+               err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
+               if (err < 0) {
+                       its_free_tables(its);
+                       return err;
+               }
+
+               /* Update settings which will be used for next BASERn */
+               psz = baser->psz;
+               cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
+               shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
+       }
+
+       return 0;
 }
 
 static int its_alloc_collections(struct its_node *its)
@@ -1101,6 +1177,16 @@ static void its_cpu_init_collection(void)
        list_for_each_entry(its, &its_nodes, entry) {
                u64 target;
 
+               /* avoid cross-node collections and their mapping */
+               if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
+                       struct device_node *cpu_node;
+
+                       cpu_node = of_get_cpu_node(cpu, NULL);
+                       if (its->numa_node != NUMA_NO_NODE &&
+                               its->numa_node != of_node_to_nid(cpu_node))
+                               continue;
+               }
+
                /*
                 * We now have to bind each collection to its target
                 * redistributor.
@@ -1161,10 +1247,57 @@ static struct its_baser *its_get_baser(struct its_node *its, u32 type)
        return NULL;
 }
 
+static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
+{
+       struct its_baser *baser;
+       struct page *page;
+       u32 esz, idx;
+       __le64 *table;
+
+       baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
+
+       /* Don't allow device id that exceeds ITS hardware limit */
+       if (!baser)
+               return (ilog2(dev_id) < its->device_ids);
+
+       /* Don't allow device id that exceeds single, flat table limit */
+       esz = GITS_BASER_ENTRY_SIZE(baser->val);
+       if (!(baser->val & GITS_BASER_INDIRECT))
+               return (dev_id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
+
+       /* Compute 1st level table index & check if that exceeds table limit */
+       idx = dev_id >> ilog2(baser->psz / esz);
+       if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
+               return false;
+
+       table = baser->base;
+
+       /* Allocate memory for 2nd level table */
+       if (!table[idx]) {
+               page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(baser->psz));
+               if (!page)
+                       return false;
+
+               /* Flush Lvl2 table to PoC if hw doesn't support coherency */
+               if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
+                       __flush_dcache_area(page_address(page), baser->psz);
+
+               table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
+
+               /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
+               if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
+                       __flush_dcache_area(table + idx, GITS_LVL1_ENTRY_SIZE);
+
+               /* Ensure updated table contents are visible to ITS hardware */
+               dsb(sy);
+       }
+
+       return true;
+}
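
(Illustrative note, continuing the example sizes above; the lookup in its_alloc_device_table() then works out as:)

/* psz = SZ_64K, esz = 8:  ilog2(65536 / 8) = 13 */
/* dev_id = 20000  ->  idx = 20000 >> 13 = 2 */
/* lvl1 slot 2 gets its 64K lvl2 page only on first use */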
+
 static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
                                            int nvecs)
 {
-       struct its_baser *baser;
        struct its_device *dev;
        unsigned long *lpi_map;
        unsigned long flags;
@@ -1175,14 +1308,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
        int nr_ites;
        int sz;
 
-       baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
-
-       /* Don't allow 'dev_id' that exceeds single, flat table limit */
-       if (baser) {
-               if (dev_id >= (PAGE_ORDER_TO_SIZE(baser->order) /
-                             GITS_BASER_ENTRY_SIZE(baser->val)))
-                       return NULL;
-       } else if (ilog2(dev_id) >= its->device_ids)
+       if (!its_alloc_device_table(its, dev_id))
                return NULL;
 
        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -1351,9 +1477,14 @@ static void its_irq_domain_activate(struct irq_domain *domain,
 {
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
        u32 event = its_get_event_id(d);
+       const struct cpumask *cpu_mask = cpu_online_mask;
+
+       /* get the cpu_mask of local node */
+       if (its_dev->its->numa_node >= 0)
+               cpu_mask = cpumask_of_node(its_dev->its->numa_node);
 
        /* Bind the LPI to the first possible CPU */
-       its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask);
+       its_dev->event_map.col_map[event] = cpumask_first(cpu_mask);
 
        /* Map the GIC IRQ and event to the device */
        its_send_mapvi(its_dev, d->hwirq, event);
@@ -1443,6 +1574,13 @@ static void __maybe_unused its_enable_quirk_cavium_22375(void *data)
        its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
 }
 
+static void __maybe_unused its_enable_quirk_cavium_23144(void *data)
+{
+       struct its_node *its = data;
+
+       its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
+}
+
 static const struct gic_quirk its_quirks[] = {
 #ifdef CONFIG_CAVIUM_ERRATUM_22375
        {
@@ -1451,6 +1589,14 @@ static const struct gic_quirk its_quirks[] = {
                .mask   = 0xffff0fff,
                .init   = its_enable_quirk_cavium_22375,
        },
+#endif
+#ifdef CONFIG_CAVIUM_ERRATUM_23144
+       {
+               .desc   = "ITS: Cavium erratum 23144",
+               .iidr   = 0xa100034c,   /* ThunderX pass 1.x */
+               .mask   = 0xffff0fff,
+               .init   = its_enable_quirk_cavium_23144,
+       },
 #endif
        {
        }
@@ -1514,6 +1660,7 @@ static int __init its_probe(struct device_node *node,
        its->base = its_base;
        its->phys_base = res.start;
        its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
+       its->numa_node = of_node_to_nid(node);
 
        its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
        if (!its->cmd_base) {
@@ -1524,7 +1671,7 @@ static int __init its_probe(struct device_node *node,
 
        its_enable_quirks(its);
 
-       err = its_alloc_tables(node->full_name, its);
+       err = its_alloc_tables(its);
        if (err)
                goto out_free_cmd;
 
index fb042ba..2c5ba0e 100644 (file)
@@ -155,7 +155,7 @@ static void gic_enable_redist(bool enable)
 
        while (count--) {
                val = readl_relaxed(rbase + GICR_WAKER);
-               if (enable ^ (val & GICR_WAKER_ChildrenAsleep))
+               if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
                        break;
                cpu_relax();
                udelay(1);
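
(Editorial note on the cast added above, with illustrative values: in GICv3, GICR_WAKER.ChildrenAsleep is bit 2, so the masked value is 0x4 rather than 1, and xor-ing it directly with a bool never compares like with like.)

bool enable = true;
u32 val = GICR_WAKER_ChildrenAsleep;            /* bit 2 -> 0x4 */

/* without the cast: 1 ^ 0x4 == 0x5 (truthy), so the poll loop breaks
 * out immediately even while the children are still asleep */
/* with the cast:    1 ^ (bool)0x4 == 1 ^ 1 == 0, so the loop keeps
 * waiting until ChildrenAsleep actually clears */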
index fbc4ae2..1de07eb 100644 (file)
@@ -75,7 +75,7 @@ struct gic_chip_data {
        void __iomem *raw_dist_base;
        void __iomem *raw_cpu_base;
        u32 percpu_offset;
-#ifdef CONFIG_CPU_PM
+#if defined(CONFIG_CPU_PM) || defined(CONFIG_ARM_GIC_PM)
        u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
        u32 saved_spi_active[DIV_ROUND_UP(1020, 32)];
        u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
@@ -449,7 +449,7 @@ static void gic_cpu_if_up(struct gic_chip_data *gic)
 }
 
 
-static void __init gic_dist_init(struct gic_chip_data *gic)
+static void gic_dist_init(struct gic_chip_data *gic)
 {
        unsigned int i;
        u32 cpumask;
@@ -528,14 +528,14 @@ int gic_cpu_if_down(unsigned int gic_nr)
        return 0;
 }
 
-#ifdef CONFIG_CPU_PM
+#if defined(CONFIG_CPU_PM) || defined(CONFIG_ARM_GIC_PM)
 /*
  * Saves the GIC distributor registers during suspend or idle.  Must be called
  * with interrupts disabled but before powering down the GIC.  After calling
  * this function, no interrupts will be delivered by the GIC, and another
  * platform-specific wakeup source must be enabled.
  */
-static void gic_dist_save(struct gic_chip_data *gic)
+void gic_dist_save(struct gic_chip_data *gic)
 {
        unsigned int gic_irqs;
        void __iomem *dist_base;
@@ -574,7 +574,7 @@ static void gic_dist_save(struct gic_chip_data *gic)
  * handled normally, but any edge interrupts that occurred will not be seen by
  * the GIC and need to be handled by the platform-specific wakeup source.
  */
-static void gic_dist_restore(struct gic_chip_data *gic)
+void gic_dist_restore(struct gic_chip_data *gic)
 {
        unsigned int gic_irqs;
        unsigned int i;
@@ -620,7 +620,7 @@ static void gic_dist_restore(struct gic_chip_data *gic)
        writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL);
 }
 
-static void gic_cpu_save(struct gic_chip_data *gic)
+void gic_cpu_save(struct gic_chip_data *gic)
 {
        int i;
        u32 *ptr;
@@ -650,7 +650,7 @@ static void gic_cpu_save(struct gic_chip_data *gic)
 
 }
 
-static void gic_cpu_restore(struct gic_chip_data *gic)
+void gic_cpu_restore(struct gic_chip_data *gic)
 {
        int i;
        u32 *ptr;
@@ -727,7 +727,7 @@ static struct notifier_block gic_notifier_block = {
        .notifier_call = gic_notifier,
 };
 
-static int __init gic_pm_init(struct gic_chip_data *gic)
+static int gic_pm_init(struct gic_chip_data *gic)
 {
        gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
                sizeof(u32));
@@ -757,7 +757,7 @@ free_ppi_enable:
        return -ENOMEM;
 }
 #else
-static int __init gic_pm_init(struct gic_chip_data *gic)
+static int gic_pm_init(struct gic_chip_data *gic)
 {
        return 0;
 }
@@ -1032,32 +1032,31 @@ static const struct irq_domain_ops gic_irq_domain_ops = {
        .unmap = gic_irq_domain_unmap,
 };
 
-static int __init __gic_init_bases(struct gic_chip_data *gic, int irq_start,
-                                  struct fwnode_handle *handle)
+static void gic_init_chip(struct gic_chip_data *gic, struct device *dev,
+                         const char *name, bool use_eoimode1)
 {
-       irq_hw_number_t hwirq_base;
-       int gic_irqs, irq_base, i, ret;
-
-       if (WARN_ON(!gic || gic->domain))
-               return -EINVAL;
-
        /* Initialize irq_chip */
        gic->chip = gic_chip;
+       gic->chip.name = name;
+       gic->chip.parent_device = dev;
 
-       if (static_key_true(&supports_deactivate) && gic == &gic_data[0]) {
+       if (use_eoimode1) {
                gic->chip.irq_mask = gic_eoimode1_mask_irq;
                gic->chip.irq_eoi = gic_eoimode1_eoi_irq;
                gic->chip.irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity;
-               gic->chip.name = kasprintf(GFP_KERNEL, "GICv2");
-       } else {
-               gic->chip.name = kasprintf(GFP_KERNEL, "GIC-%d",
-                                          (int)(gic - &gic_data[0]));
        }
 
 #ifdef CONFIG_SMP
        if (gic == &gic_data[0])
                gic->chip.irq_set_affinity = gic_set_affinity;
 #endif
+}
+
+static int gic_init_bases(struct gic_chip_data *gic, int irq_start,
+                         struct fwnode_handle *handle)
+{
+       irq_hw_number_t hwirq_base;
+       int gic_irqs, irq_base, ret;
 
        if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
                /* Franken-GIC without banked registers... */
@@ -1138,6 +1137,36 @@ static int __init __gic_init_bases(struct gic_chip_data *gic, int irq_start,
                goto error;
        }
 
+       gic_dist_init(gic);
+       ret = gic_cpu_init(gic);
+       if (ret)
+               goto error;
+
+       ret = gic_pm_init(gic);
+       if (ret)
+               goto error;
+
+       return 0;
+
+error:
+       if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
+               free_percpu(gic->dist_base.percpu_base);
+               free_percpu(gic->cpu_base.percpu_base);
+       }
+
+       return ret;
+}
+
+static int __init __gic_init_bases(struct gic_chip_data *gic,
+                                  int irq_start,
+                                  struct fwnode_handle *handle)
+{
+       char *name;
+       int i, ret;
+
+       if (WARN_ON(!gic || gic->domain))
+               return -EINVAL;
+
        if (gic == &gic_data[0]) {
                /*
                 * Initialize the CPU interface map to all CPUs.
@@ -1155,24 +1184,17 @@ static int __init __gic_init_bases(struct gic_chip_data *gic, int irq_start,
                        pr_info("GIC: Using split EOI/Deactivate mode\n");
        }
 
-       gic_dist_init(gic);
-       ret = gic_cpu_init(gic);
-       if (ret)
-               goto error;
-
-       ret = gic_pm_init(gic);
-       if (ret)
-               goto error;
-
-       return 0;
-
-error:
-       if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
-               free_percpu(gic->dist_base.percpu_base);
-               free_percpu(gic->cpu_base.percpu_base);
+       if (static_key_true(&supports_deactivate) && gic == &gic_data[0]) {
+               name = kasprintf(GFP_KERNEL, "GICv2");
+               gic_init_chip(gic, NULL, name, true);
+       } else {
+               name = kasprintf(GFP_KERNEL, "GIC-%d", (int)(gic-&gic_data[0]));
+               gic_init_chip(gic, NULL, name, false);
        }
 
-       kfree(gic->chip.name);
+       ret = gic_init_bases(gic, irq_start, handle);
+       if (ret)
+               kfree(name);
 
        return ret;
 }
@@ -1250,7 +1272,7 @@ static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
        return true;
 }
 
-static int __init gic_of_setup(struct gic_chip_data *gic, struct device_node *node)
+static int gic_of_setup(struct gic_chip_data *gic, struct device_node *node)
 {
        if (!gic || !node)
                return -EINVAL;
@@ -1274,6 +1296,34 @@ error:
        return -ENOMEM;
 }
 
+int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq)
+{
+       int ret;
+
+       if (!dev || !dev->of_node || !gic || !irq)
+               return -EINVAL;
+
+       *gic = devm_kzalloc(dev, sizeof(**gic), GFP_KERNEL);
+       if (!*gic)
+               return -ENOMEM;
+
+       gic_init_chip(*gic, dev, dev->of_node->name, false);
+
+       ret = gic_of_setup(*gic, dev->of_node);
+       if (ret)
+               return ret;
+
+       ret = gic_init_bases(*gic, -1, &dev->of_node->fwnode);
+       if (ret) {
+               gic_teardown(*gic);
+               return ret;
+       }
+
+       irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq, *gic);
+
+       return 0;
+}
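
(Sketch of how the new entry point might be consumed, assuming a platform driver along the lines of the CONFIG_ARM_GIC_PM user this refactor prepares for; example_gic_probe and its device are illustrative, not part of the patch.)

static int example_gic_probe(struct platform_device *pdev)
{
        struct gic_chip_data *gic;
        int irq = platform_get_irq(pdev, 0);   /* cascade IRQ into the parent GIC */

        if (irq < 0)
                return irq;

        /* maps dist/cpu bases from pdev's DT node, registers the irq domain
         * and chains 'irq' to gic_handle_cascade_irq() for this child GIC */
        return gic_of_init_child(&pdev->dev, &gic, irq);
}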
+
 static void __init gic_of_setup_kvm_info(struct device_node *node)
 {
        int ret;
@@ -1353,7 +1403,11 @@ IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
 IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
 IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
 IRQCHIP_DECLARE(pl390, "arm,pl390", gic_of_init);
-
+#else
+int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq)
+{
+       return -ENOTSUPP;
+}
 #endif
 
 #ifdef CONFIG_ACPI
index 3b5e10a..df9a1fe 100644 (file)
@@ -1032,12 +1032,14 @@ static void __init __gic_init(unsigned long gic_base_addr,
                                               &gic_irq_domain_ops, NULL);
        if (!gic_irq_domain)
                panic("Failed to add GIC IRQ domain");
+       gic_irq_domain->name = "mips-gic-irq";
 
        gic_dev_domain = irq_domain_add_hierarchy(gic_irq_domain, 0,
                                                  GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
                                                  node, &gic_dev_domain_ops, NULL);
        if (!gic_dev_domain)
                panic("Failed to add GIC DEV domain");
+       gic_dev_domain->name = "mips-gic-dev";
 
        gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
                                                  IRQ_DOMAIN_FLAG_IPI_PER_CPU,
@@ -1046,6 +1048,7 @@ static void __init __gic_init(unsigned long gic_base_addr,
        if (!gic_ipi_domain)
                panic("Failed to add GIC IPI domain");
 
+       gic_ipi_domain->name = "mips-gic-ipi";
        gic_ipi_domain->bus_token = DOMAIN_BUS_IPI;
 
        if (node &&
index e7155db..73addb4 100644 (file)
@@ -91,7 +91,7 @@ static int pic32_set_type_edge(struct irq_data *data,
        /* set polarity for external interrupts only */
        for (i = 0; i < ARRAY_SIZE(priv->ext_irqs); i++) {
                if (priv->ext_irqs[i] == data->hwirq) {
-                       ret = pic32_set_ext_polarity(i + 1, flow_type);
+                       ret = pic32_set_ext_polarity(i, flow_type);
                        if (ret)
                                return ret;
                }
index c984321..5d438ad 100644 (file)
@@ -1276,7 +1276,7 @@ static int mmc_select_hs200(struct mmc_card *card)
         * switch to HS200 mode if bus width is set successfully.
         */
        err = mmc_select_bus_width(card);
-       if (!err) {
+       if (err >= 0) {
                val = EXT_CSD_TIMING_HS200 |
                      card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
                err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
@@ -1583,7 +1583,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
        } else if (mmc_card_hs(card)) {
                /* Select the desired bus width optionally */
                err = mmc_select_bus_width(card);
-               if (!err) {
+               if (err >= 0) {
                        err = mmc_select_hs_ddr(card);
                        if (err)
                                goto free_card;
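
(Context for the two '!err' -> 'err >= 0' hunks above, judging by mmc_select_bus_width()'s return convention: it returns the selected EXT_CSD bus-width value (0 for 1-bit, positive for 4-/8-bit) or a negative errno, so a plain '!err' treated a successful wide-bus switch as a failure and skipped the HS-DDR/HS200 steps. Annotated call, illustrative only:)

err = mmc_select_bus_width(card);
/* err < 0: switch failed; err == 0: 1-bit only; err > 0: 4-/8-bit
 * selected -- all non-negative values mean "carry on" */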
index 7fc8b7a..2ee4c21 100644 (file)
@@ -970,8 +970,8 @@ static const struct sunxi_mmc_clk_delay sun9i_mmc_clk_delays[] = {
        [SDXC_CLK_400K]         = { .output = 180, .sample = 180 },
        [SDXC_CLK_25M]          = { .output = 180, .sample =  75 },
        [SDXC_CLK_50M]          = { .output = 150, .sample = 120 },
-       [SDXC_CLK_50M_DDR]      = { .output =  90, .sample = 120 },
-       [SDXC_CLK_50M_DDR_8BIT] = { .output =  90, .sample = 120 },
+       [SDXC_CLK_50M_DDR]      = { .output =  54, .sample =  36 },
+       [SDXC_CLK_50M_DDR_8BIT] = { .output =  72, .sample =  72 },
 };
 
 static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
@@ -1129,11 +1129,6 @@ static int sunxi_mmc_probe(struct platform_device *pdev)
                                  MMC_CAP_1_8V_DDR |
                                  MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ;
 
-       /* TODO MMC DDR is not working on A80 */
-       if (of_device_is_compatible(pdev->dev.of_node,
-                                   "allwinner,sun9i-a80-mmc"))
-               mmc->caps &= ~MMC_CAP_1_8V_DDR;
-
        ret = mmc_of_parse(mmc);
        if (ret)
                goto error_free_dma;
index 16419f5..058460b 100644 (file)
@@ -141,7 +141,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
        priv->bus = bus;
        bus->priv = priv;
        bus->parent = priv->dev;
-       bus->name = "Synopsys MII Bus",
+       bus->name = "Synopsys MII Bus";
        bus->read = &arc_mdio_read;
        bus->write = &arc_mdio_write;
        bus->reset = &arc_mdio_reset;
index 8fc93c5..d02c424 100644 (file)
@@ -96,6 +96,10 @@ struct alx_priv {
        unsigned int rx_ringsz;
        unsigned int rxbuf_size;
 
+       struct page  *rx_page;
+       unsigned int rx_page_offset;
+       unsigned int rx_frag_size;
+
        struct napi_struct napi;
        struct alx_tx_queue txq;
        struct alx_rx_queue rxq;
index 9fe8b5e..c98acdc 100644 (file)
@@ -70,6 +70,35 @@ static void alx_free_txbuf(struct alx_priv *alx, int entry)
        }
 }
 
+static struct sk_buff *alx_alloc_skb(struct alx_priv *alx, gfp_t gfp)
+{
+       struct sk_buff *skb;
+       struct page *page;
+
+       if (alx->rx_frag_size > PAGE_SIZE)
+               return __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
+
+       page = alx->rx_page;
+       if (!page) {
+               alx->rx_page = page = alloc_page(gfp);
+               if (unlikely(!page))
+                       return NULL;
+               alx->rx_page_offset = 0;
+       }
+
+       skb = build_skb(page_address(page) + alx->rx_page_offset,
+                       alx->rx_frag_size);
+       if (likely(skb)) {
+               alx->rx_page_offset += alx->rx_frag_size;
+               if (alx->rx_page_offset >= PAGE_SIZE)
+                       alx->rx_page = NULL;
+               else
+                       get_page(page);
+       }
+       return skb;
+}
+
+
 static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
 {
        struct alx_rx_queue *rxq = &alx->rxq;
@@ -86,7 +115,7 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
        while (!cur_buf->skb && next != rxq->read_idx) {
                struct alx_rfd *rfd = &rxq->rfd[cur];
 
-               skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
+               skb = alx_alloc_skb(alx, gfp);
                if (!skb)
                        break;
                dma = dma_map_single(&alx->hw.pdev->dev,
@@ -124,6 +153,7 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
                alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
        }
 
+
        return count;
 }
 
@@ -592,6 +622,11 @@ static void alx_free_rings(struct alx_priv *alx)
        kfree(alx->txq.bufs);
        kfree(alx->rxq.bufs);
 
+       if (alx->rx_page) {
+               put_page(alx->rx_page);
+               alx->rx_page = NULL;
+       }
+
        dma_free_coherent(&alx->hw.pdev->dev,
                          alx->descmem.size,
                          alx->descmem.virt,
@@ -646,6 +681,7 @@ static int alx_request_irq(struct alx_priv *alx)
                                  alx->dev->name, alx);
                if (!err)
                        goto out;
+
                /* fall back to legacy interrupt */
                pci_disable_msi(alx->hw.pdev);
        }
@@ -689,6 +725,7 @@ static int alx_init_sw(struct alx_priv *alx)
        struct pci_dev *pdev = alx->hw.pdev;
        struct alx_hw *hw = &alx->hw;
        int err;
+       unsigned int head_size;
 
        err = alx_identify_hw(alx);
        if (err) {
@@ -704,7 +741,12 @@ static int alx_init_sw(struct alx_priv *alx)
 
        hw->smb_timer = 400;
        hw->mtu = alx->dev->mtu;
+
        alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu);
+       head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) +
+                   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+       alx->rx_frag_size = roundup_pow_of_two(head_size);
+
        alx->tx_ringsz = 256;
        alx->rx_ringsz = 512;
        hw->imt = 200;
@@ -806,6 +848,7 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
 {
        struct alx_priv *alx = netdev_priv(netdev);
        int max_frame = ALX_MAX_FRAME_LEN(mtu);
+       unsigned int head_size;
 
        if ((max_frame < ALX_MIN_FRAME_SIZE) ||
            (max_frame > ALX_MAX_FRAME_SIZE))
@@ -817,6 +860,9 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
        netdev->mtu = mtu;
        alx->hw.mtu = mtu;
        alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
+       head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) +
+                   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+       alx->rx_frag_size = roundup_pow_of_two(head_size);
        netdev_update_features(netdev);
        if (netif_running(netdev))
                alx_reinit(alx);
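
(Rough numbers behind rx_frag_size, assuming a 1500-byte MTU, 64-byte cache lines and NET_SKB_PAD of 64; the exact constants depend on the configuration, so this only illustrates why alx_alloc_skb() can normally carve two receive buffers out of one 4K page with build_skb(), and why it falls back to __netdev_alloc_skb() whenever rx_frag_size would exceed PAGE_SIZE.)

rxbuf_size   ~= 1528                                /* ALX_MAX_FRAME_LEN(1500) */
head_size    ~= SKB_DATA_ALIGN(1528 + 64)
              + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
             ~= 1600 + 320 = 1920
rx_frag_size  = roundup_pow_of_two(1920) = 2048     /* two frags per 4K page */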
index 0a5b770..c5fe915 100644 (file)
@@ -13941,14 +13941,14 @@ static int bnx2x_init_one(struct pci_dev *pdev,
                bp->doorbells = bnx2x_vf_doorbells(bp);
                rc = bnx2x_vf_pci_alloc(bp);
                if (rc)
-                       goto init_one_exit;
+                       goto init_one_freemem;
        } else {
                doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
                if (doorbell_size > pci_resource_len(pdev, 2)) {
                        dev_err(&bp->pdev->dev,
                                "Cannot map doorbells, bar size too small, aborting\n");
                        rc = -ENOMEM;
-                       goto init_one_exit;
+                       goto init_one_freemem;
                }
                bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
                                                doorbell_size);
@@ -13957,19 +13957,19 @@ static int bnx2x_init_one(struct pci_dev *pdev,
                dev_err(&bp->pdev->dev,
                        "Cannot map doorbell space, aborting\n");
                rc = -ENOMEM;
-               goto init_one_exit;
+               goto init_one_freemem;
        }
 
        if (IS_VF(bp)) {
                rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
                if (rc)
-                       goto init_one_exit;
+                       goto init_one_freemem;
        }
 
        /* Enable SRIOV if capability found in configuration space */
        rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
        if (rc)
-               goto init_one_exit;
+               goto init_one_freemem;
 
        /* calc qm_cid_count */
        bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
@@ -13988,7 +13988,7 @@ static int bnx2x_init_one(struct pci_dev *pdev,
        rc = bnx2x_set_int_mode(bp);
        if (rc) {
                dev_err(&pdev->dev, "Cannot set interrupts\n");
-               goto init_one_exit;
+               goto init_one_freemem;
        }
        BNX2X_DEV_INFO("set interrupts successfully\n");
 
@@ -13996,7 +13996,7 @@ static int bnx2x_init_one(struct pci_dev *pdev,
        rc = register_netdev(dev);
        if (rc) {
                dev_err(&pdev->dev, "Cannot register net device\n");
-               goto init_one_exit;
+               goto init_one_freemem;
        }
        BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);
 
@@ -14029,6 +14029,9 @@ static int bnx2x_init_one(struct pci_dev *pdev,
 
        return 0;
 
+init_one_freemem:
+       bnx2x_free_mem_bp(bp);
+
 init_one_exit:
        bnx2x_disable_pcie_error_reporting(bp);
 
index 085f912..06f0317 100644 (file)
@@ -205,8 +205,10 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
                 * re-adding ourselves to the poll list.
                 */
 
-               if (priv->tx_skb && !tx_ctrl_ct)
+               if (priv->tx_skb && !tx_ctrl_ct) {
+                       nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
                        napi_reschedule(napi);
+               }
        }
 
        return work_done;
index ca2cccc..3c0255e 100644 (file)
@@ -1197,10 +1197,8 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
                                         fec16_to_cpu(bdp->cbd_datlen),
                                         DMA_TO_DEVICE);
                bdp->cbd_bufaddr = cpu_to_fec32(0);
-               if (!skb) {
-                       bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
-                       continue;
-               }
+               if (!skb)
+                       goto skb_done;
 
                /* Check for errors. */
                if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -1239,7 +1237,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 
                /* Free the sk buffer associated with this last transmit */
                dev_kfree_skb_any(skb);
-
+skb_done:
                /* Make sure the update to bdp and tx_skbuff are performed
                 * before dirty_tx
                 */
index 3d746c8..67a648c 100644 (file)
@@ -46,7 +46,6 @@ static u32 hns_nic_get_link(struct net_device *net_dev)
        u32 link_stat = priv->link;
        struct hnae_handle *h;
 
-       assert(priv && priv->ae_handle);
        h = priv->ae_handle;
 
        if (priv->phy) {
@@ -646,8 +645,6 @@ static void hns_nic_get_drvinfo(struct net_device *net_dev,
 {
        struct hns_nic_priv *priv = netdev_priv(net_dev);
 
-       assert(priv);
-
        strncpy(drvinfo->version, HNAE_DRIVER_VERSION,
                sizeof(drvinfo->version));
        drvinfo->version[sizeof(drvinfo->version) - 1] = '\0';
@@ -720,8 +717,6 @@ static int hns_set_pauseparam(struct net_device *net_dev,
        struct hnae_handle *h;
        struct hnae_ae_ops *ops;
 
-       assert(priv || priv->ae_handle);
-
        h = priv->ae_handle;
        ops = h->dev->ops;
 
@@ -780,8 +775,6 @@ static int hns_set_coalesce(struct net_device *net_dev,
        struct hnae_ae_ops *ops;
        int ret;
 
-       assert(priv || priv->ae_handle);
-
        ops = priv->ae_handle->dev->ops;
 
        if (ec->tx_coalesce_usecs != ec->rx_coalesce_usecs)
@@ -1111,8 +1104,6 @@ void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd,
        struct hns_nic_priv *priv = netdev_priv(net_dev);
        struct hnae_ae_ops *ops;
 
-       assert(priv || priv->ae_handle);
-
        ops = priv->ae_handle->dev->ops;
 
        cmd->version = HNS_CHIP_VERSION;
@@ -1135,8 +1126,6 @@ static int hns_get_regs_len(struct net_device *net_dev)
        struct hns_nic_priv *priv = netdev_priv(net_dev);
        struct hnae_ae_ops *ops;
 
-       assert(priv || priv->ae_handle);
-
        ops = priv->ae_handle->dev->ops;
        if (!ops->get_regs_len) {
                netdev_err(net_dev, "ops->get_regs_len is null!\n");
index 01fccec..466939f 100644 (file)
@@ -189,6 +189,7 @@ struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
                        SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
                hwbm_pool->construct = mvneta_bm_construct;
                hwbm_pool->priv = new_pool;
+               spin_lock_init(&hwbm_pool->lock);
 
                /* Create new pool */
                err = mvneta_bm_pool_create(priv, new_pool);
index c761194..fc95aff 100644 (file)
@@ -362,7 +362,7 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
 
        for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it))
                if (bitmap_iterator_test(&it))
-                       data[index++] = ((unsigned long *)&priv->stats)[i];
+                       data[index++] = ((unsigned long *)&dev->stats)[i];
 
        for (i = 0; i < NUM_PORT_STATS; i++, bitmap_iterator_inc(&it))
                if (bitmap_iterator_test(&it))
index 92e0624..19ceced 100644 (file)
@@ -1296,15 +1296,16 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
 }
 
 
-static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *
+mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
 
        spin_lock_bh(&priv->stats_lock);
-       memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
+       netdev_stats_to_stats64(stats, &dev->stats);
        spin_unlock_bh(&priv->stats_lock);
 
-       return &priv->ret_stats;
+       return stats;
 }
 
 static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
@@ -1876,7 +1877,6 @@ static void mlx4_en_clear_stats(struct net_device *dev)
        if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
                en_dbg(HW, priv, "Failed dumping statistics\n");
 
-       memset(&priv->stats, 0, sizeof(priv->stats));
        memset(&priv->pstats, 0, sizeof(priv->pstats));
        memset(&priv->pkstats, 0, sizeof(priv->pkstats));
        memset(&priv->port_stats, 0, sizeof(priv->port_stats));
@@ -1892,6 +1892,11 @@ static void mlx4_en_clear_stats(struct net_device *dev)
                priv->tx_ring[i]->bytes = 0;
                priv->tx_ring[i]->packets = 0;
                priv->tx_ring[i]->tx_csum = 0;
+               priv->tx_ring[i]->tx_dropped = 0;
+               priv->tx_ring[i]->queue_stopped = 0;
+               priv->tx_ring[i]->wake_queue = 0;
+               priv->tx_ring[i]->tso_packets = 0;
+               priv->tx_ring[i]->xmit_more = 0;
        }
        for (i = 0; i < priv->rx_ring_num; i++) {
                priv->rx_ring[i]->bytes = 0;
@@ -2482,7 +2487,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
        .ndo_stop               = mlx4_en_close,
        .ndo_start_xmit         = mlx4_en_xmit,
        .ndo_select_queue       = mlx4_en_select_queue,
-       .ndo_get_stats          = mlx4_en_get_stats,
+       .ndo_get_stats64        = mlx4_en_get_stats64,
        .ndo_set_rx_mode        = mlx4_en_set_rx_mode,
        .ndo_set_mac_address    = mlx4_en_set_mac,
        .ndo_validate_addr      = eth_validate_addr,
@@ -2514,7 +2519,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
        .ndo_stop               = mlx4_en_close,
        .ndo_start_xmit         = mlx4_en_xmit,
        .ndo_select_queue       = mlx4_en_select_queue,
-       .ndo_get_stats          = mlx4_en_get_stats,
+       .ndo_get_stats64        = mlx4_en_get_stats64,
        .ndo_set_rx_mode        = mlx4_en_set_rx_mode,
        .ndo_set_mac_address    = mlx4_en_set_mac,
        .ndo_validate_addr      = eth_validate_addr,
index 20b6c2e..5aa8b75 100644 (file)
@@ -152,8 +152,9 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
        struct mlx4_counter tmp_counter_stats;
        struct mlx4_en_stat_out_mbox *mlx4_en_stats;
        struct mlx4_en_stat_out_flow_control_mbox *flowstats;
-       struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
-       struct net_device_stats *stats = &priv->stats;
+       struct net_device *dev = mdev->pndev[port];
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct net_device_stats *stats = &dev->stats;
        struct mlx4_cmd_mailbox *mailbox;
        u64 in_mod = reset << 8 | port;
        int err;
@@ -188,6 +189,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
        }
        stats->tx_packets = 0;
        stats->tx_bytes = 0;
+       stats->tx_dropped = 0;
        priv->port_stats.tx_chksum_offload = 0;
        priv->port_stats.queue_stopped = 0;
        priv->port_stats.wake_queue = 0;
@@ -199,6 +201,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 
                stats->tx_packets += ring->packets;
                stats->tx_bytes += ring->bytes;
+               stats->tx_dropped += ring->tx_dropped;
                priv->port_stats.tx_chksum_offload += ring->tx_csum;
                priv->port_stats.queue_stopped     += ring->queue_stopped;
                priv->port_stats.wake_queue        += ring->wake_queue;
@@ -237,21 +240,12 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
        stats->multicast = en_stats_adder(&mlx4_en_stats->MCAST_prio_0,
                                          &mlx4_en_stats->MCAST_prio_1,
                                          NUM_PRIORITIES);
-       stats->collisions = 0;
        stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) +
                            sw_rx_dropped;
        stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
-       stats->rx_over_errors = 0;
        stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
-       stats->rx_frame_errors = 0;
        stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
-       stats->rx_missed_errors = 0;
-       stats->tx_aborted_errors = 0;
-       stats->tx_carrier_errors = 0;
-       stats->tx_fifo_errors = 0;
-       stats->tx_heartbeat_errors = 0;
-       stats->tx_window_errors = 0;
-       stats->tx_dropped = be32_to_cpu(mlx4_en_stats->TDROP);
+       stats->tx_dropped += be32_to_cpu(mlx4_en_stats->TDROP);
 
        /* RX stats */
        priv->pkstats.rx_multicast_packets = stats->multicast;
index f6e6157..76aa4d2 100644 (file)
@@ -726,12 +726,12 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
        bool inline_ok;
        u32 ring_cons;
 
-       if (!priv->port_up)
-               goto tx_drop;
-
        tx_ind = skb_get_queue_mapping(skb);
        ring = priv->tx_ring[tx_ind];
 
+       if (!priv->port_up)
+               goto tx_drop;
+
        /* fetch ring->cons far ahead before needing it to avoid stall */
        ring_cons = ACCESS_ONCE(ring->cons);
 
@@ -1030,7 +1030,7 @@ tx_drop_unmap:
 
 tx_drop:
        dev_kfree_skb_any(skb);
-       priv->stats.tx_dropped++;
+       ring->tx_dropped++;
        return NETDEV_TX_OK;
 }
 
index cc84e09..467d47e 100644 (file)
@@ -270,6 +270,7 @@ struct mlx4_en_tx_ring {
        unsigned long           tx_csum;
        unsigned long           tso_packets;
        unsigned long           xmit_more;
+       unsigned int            tx_dropped;
        struct mlx4_bf          bf;
        unsigned long           queue_stopped;
 
@@ -482,8 +483,6 @@ struct mlx4_en_priv {
        struct mlx4_en_port_profile *prof;
        struct net_device *dev;
        unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
-       struct net_device_stats stats;
-       struct net_device_stats ret_stats;
        struct mlx4_en_port_state port_state;
        spinlock_t stats_lock;
        struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES];
index cbf58e1..21ec1c2 100644 (file)
@@ -192,9 +192,10 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
                     struct dcbx_app_priority_entry *p_tbl,
                     u32 pri_tc_tbl, int count, bool dcbx_enabled)
 {
-       u8 tc, priority, priority_map;
+       u8 tc, priority_map;
        enum dcbx_protocol_type type;
        u16 protocol_id;
+       int priority;
        bool enable;
        int i;
 
@@ -221,7 +222,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
                         * indication, but we only got here if there was an
                         * app tlv for the protocol, so dcbx must be enabled.
                         */
-                       enable = !!(type == DCBX_PROTOCOL_ETH);
+                       enable = !(type == DCBX_PROTOCOL_ETH);
 
                        qed_dcbx_update_app_info(p_data, p_hwfn, enable, true,
                                                 priority, tc, type);
index 089016f..2d89e8c 100644 (file)
@@ -155,12 +155,14 @@ void qed_resc_free(struct qed_dev *cdev)
        }
 }
 
-static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
+static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
 {
        u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
        struct qed_qm_info *qm_info = &p_hwfn->qm_info;
        struct init_qm_port_params *p_qm_port;
        u16 num_pqs, multi_cos_tcs = 1;
+       u8 pf_wfq = qm_info->pf_wfq;
+       u32 pf_rl = qm_info->pf_rl;
        u16 num_vfs = 0;
 
 #ifdef CONFIG_QED_SRIOV
@@ -182,23 +184,28 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
 
        /* PQs will be arranged as follows: First per-TC PQ then pure-LB queue.
         */
-       qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
-                                       num_pqs, GFP_KERNEL);
+       qm_info->qm_pq_params = kcalloc(num_pqs,
+                                       sizeof(struct init_qm_pq_params),
+                                       b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
        if (!qm_info->qm_pq_params)
                goto alloc_err;
 
-       qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
-                                          num_vports, GFP_KERNEL);
+       qm_info->qm_vport_params = kcalloc(num_vports,
+                                          sizeof(struct init_qm_vport_params),
+                                          b_sleepable ? GFP_KERNEL
+                                                      : GFP_ATOMIC);
        if (!qm_info->qm_vport_params)
                goto alloc_err;
 
-       qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
-                                         MAX_NUM_PORTS, GFP_KERNEL);
+       qm_info->qm_port_params = kcalloc(MAX_NUM_PORTS,
+                                         sizeof(struct init_qm_port_params),
+                                         b_sleepable ? GFP_KERNEL
+                                                     : GFP_ATOMIC);
        if (!qm_info->qm_port_params)
                goto alloc_err;
 
-       qm_info->wfq_data = kcalloc(num_vports, sizeof(*qm_info->wfq_data),
-                                   GFP_KERNEL);
+       qm_info->wfq_data = kcalloc(num_vports, sizeof(struct qed_wfq_data),
+                                   b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
        if (!qm_info->wfq_data)
                goto alloc_err;
 
@@ -264,10 +271,10 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
        for (i = 0; i < qm_info->num_vports; i++)
                qm_info->qm_vport_params[i].vport_wfq = 1;
 
-       qm_info->pf_wfq = 0;
-       qm_info->pf_rl = 0;
        qm_info->vport_rl_en = 1;
        qm_info->vport_wfq_en = 1;
+       qm_info->pf_rl = pf_rl;
+       qm_info->pf_wfq = pf_wfq;
 
        return 0;
 
@@ -299,7 +306,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
        qed_qm_info_free(p_hwfn);
 
        /* initialize qed's qm data structure */
-       rc = qed_init_qm_info(p_hwfn);
+       rc = qed_init_qm_info(p_hwfn, false);
        if (rc)
                return rc;
 
@@ -388,7 +395,7 @@ int qed_resc_alloc(struct qed_dev *cdev)
                        goto alloc_err;
 
                /* Prepare and process QM requirements */
-               rc = qed_init_qm_info(p_hwfn);
+               rc = qed_init_qm_info(p_hwfn, true);
                if (rc)
                        goto alloc_err;
 
@@ -581,7 +588,14 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
 
        hw_mode |= 1 << MODE_ASIC;
 
+       if (p_hwfn->cdev->num_hwfns > 1)
+               hw_mode |= 1 << MODE_100G;
+
        p_hwfn->hw_info.hw_mode = hw_mode;
+
+       DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP),
+                  "Configuring function for hw_mode: 0x%08x\n",
+                  p_hwfn->hw_info.hw_mode);
 }
 
 /* Init run time data for all PFs on an engine. */
@@ -821,6 +835,11 @@ int qed_hw_init(struct qed_dev *cdev,
        u32 load_code, param;
        int rc, mfw_rc, i;
 
+       if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
+               DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
+               return -EINVAL;
+       }
+
        if (IS_PF(cdev)) {
                rc = qed_init_fw_data(cdev, bin_fw_data);
                if (rc != 0)
@@ -2086,6 +2105,13 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
 {
        int i;
 
+       if (cdev->num_hwfns > 1) {
+               DP_VERBOSE(cdev,
+                          NETIF_MSG_LINK,
+                          "WFQ configuration is not supported for this device\n");
+               return;
+       }
+
        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 
index 8b22f87..7530646 100644 (file)
@@ -413,15 +413,17 @@ static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
                /* Fallthrough */
 
        case QED_INT_MODE_MSI:
-               rc = pci_enable_msi(cdev->pdev);
-               if (!rc) {
-                       int_params->out.int_mode = QED_INT_MODE_MSI;
-                       goto out;
-               }
+               if (cdev->num_hwfns == 1) {
+                       rc = pci_enable_msi(cdev->pdev);
+                       if (!rc) {
+                               int_params->out.int_mode = QED_INT_MODE_MSI;
+                               goto out;
+                       }
 
-               DP_NOTICE(cdev, "Failed to enable MSI\n");
-               if (force_mode)
-                       goto out;
+                       DP_NOTICE(cdev, "Failed to enable MSI\n");
+                       if (force_mode)
+                               goto out;
+               }
                /* Fallthrough */
 
        case QED_INT_MODE_INTA:
index 1bc7535..ad3cae3 100644 (file)
@@ -230,7 +230,10 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
        case ETH_SS_PRIV_FLAGS:
                return QEDE_PRI_FLAG_LEN;
        case ETH_SS_TEST:
-               return QEDE_ETHTOOL_TEST_MAX;
+               if (!IS_VF(edev))
+                       return QEDE_ETHTOOL_TEST_MAX;
+               else
+                       return 0;
        default:
                DP_VERBOSE(edev, QED_MSG_DEBUG,
                           "Unsupported stringset 0x%08x\n", stringset);
index 337e839..5d00d14 100644 (file)
@@ -1824,7 +1824,7 @@ static int qede_set_vf_rate(struct net_device *dev, int vfidx,
 {
        struct qede_dev *edev = netdev_priv(dev);
 
-       return edev->ops->iov->set_rate(edev->cdev, vfidx, max_tx_rate,
+       return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
                                        max_tx_rate);
 }
 
@@ -2091,6 +2091,29 @@ static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
        edev->accept_any_vlan = false;
 }
 
+int qede_set_features(struct net_device *dev, netdev_features_t features)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       netdev_features_t changes = features ^ dev->features;
+       bool need_reload = false;
+
+       /* No action needed if hardware GRO is disabled during driver load */
+       if (changes & NETIF_F_GRO) {
+               if (dev->features & NETIF_F_GRO)
+                       need_reload = !edev->gro_disable;
+               else
+                       need_reload = edev->gro_disable;
+       }
+
+       if (need_reload && netif_running(edev->ndev)) {
+               dev->features = features;
+               qede_reload(edev, NULL, NULL);
+               return 1;
+       }
+
+       return 0;
+}
+
 #ifdef CONFIG_QEDE_VXLAN
 static void qede_add_vxlan_port(struct net_device *dev,
                                sa_family_t sa_family, __be16 port)
@@ -2175,6 +2198,7 @@ static const struct net_device_ops qede_netdev_ops = {
 #endif
        .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+       .ndo_set_features = qede_set_features,
        .ndo_get_stats64 = qede_get_stats64,
 #ifdef CONFIG_QED_SRIOV
        .ndo_set_vf_link_state = qede_set_vf_link_state,
index 83d7210..fd5d1c9 100644 (file)
@@ -4846,7 +4846,6 @@ static void ql_eeh_close(struct net_device *ndev)
        }
 
        /* Disabling the timer */
-       del_timer_sync(&qdev->timer);
        ql_cancel_all_work_sync(qdev);
 
        for (i = 0; i < qdev->rss_ring_count; i++)
@@ -4873,6 +4872,7 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
                return PCI_ERS_RESULT_CAN_RECOVER;
        case pci_channel_io_frozen:
                netif_device_detach(ndev);
+               del_timer_sync(&qdev->timer);
                if (netif_running(ndev))
                        ql_eeh_close(ndev);
                pci_disable_device(pdev);
@@ -4880,6 +4880,7 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
        case pci_channel_io_perm_failure:
                dev_err(&pdev->dev,
                        "%s: pci_channel_io_perm_failure.\n", __func__);
+               del_timer_sync(&qdev->timer);
                ql_eeh_close(ndev);
                set_bit(QL_EEH_FATAL, &qdev->flags);
                return PCI_ERS_RESULT_DISCONNECT;
index 1681084..1f30912 100644 (file)
@@ -619,6 +619,17 @@ fail:
        return rc;
 }
 
+static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
+{
+       struct efx_channel *channel;
+       struct efx_tx_queue *tx_queue;
+
+       /* All our existing PIO buffers went away */
+       efx_for_each_channel(channel, efx)
+               efx_for_each_channel_tx_queue(tx_queue, channel)
+                       tx_queue->piobuf = NULL;
+}
+
 #else /* !EFX_USE_PIO */
 
 static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
@@ -635,6 +646,10 @@ static void efx_ef10_free_piobufs(struct efx_nic *efx)
 {
 }
 
+static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
+{
+}
+
 #endif /* EFX_USE_PIO */
 
 static void efx_ef10_remove(struct efx_nic *efx)
@@ -1018,6 +1033,7 @@ static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
        nic_data->must_realloc_vis = true;
        nic_data->must_restore_filters = true;
        nic_data->must_restore_piobufs = true;
+       efx_ef10_forget_old_piobufs(efx);
        nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
 
        /* Driver-created vswitches and vports must be re-created */
index 0705ec8..097f363 100644 (file)
@@ -1726,14 +1726,33 @@ static int efx_probe_filters(struct efx_nic *efx)
 
 #ifdef CONFIG_RFS_ACCEL
        if (efx->type->offload_features & NETIF_F_NTUPLE) {
-               efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters,
-                                          sizeof(*efx->rps_flow_id),
-                                          GFP_KERNEL);
-               if (!efx->rps_flow_id) {
+               struct efx_channel *channel;
+               int i, success = 1;
+
+               efx_for_each_channel(channel, efx) {
+                       channel->rps_flow_id =
+                               kcalloc(efx->type->max_rx_ip_filters,
+                                       sizeof(*channel->rps_flow_id),
+                                       GFP_KERNEL);
+                       if (!channel->rps_flow_id)
+                               success = 0;
+                       else
+                               for (i = 0;
+                                    i < efx->type->max_rx_ip_filters;
+                                    ++i)
+                                       channel->rps_flow_id[i] =
+                                               RPS_FLOW_ID_INVALID;
+               }
+
+               if (!success) {
+                       efx_for_each_channel(channel, efx)
+                               kfree(channel->rps_flow_id);
                        efx->type->filter_table_remove(efx);
                        rc = -ENOMEM;
                        goto out_unlock;
                }
+
+               efx->rps_expire_index = efx->rps_expire_channel = 0;
        }
 #endif
 out_unlock:
@@ -1744,7 +1763,10 @@ out_unlock:
 static void efx_remove_filters(struct efx_nic *efx)
 {
 #ifdef CONFIG_RFS_ACCEL
-       kfree(efx->rps_flow_id);
+       struct efx_channel *channel;
+
+       efx_for_each_channel(channel, efx)
+               kfree(channel->rps_flow_id);
 #endif
        down_write(&efx->filter_sem);
        efx->type->filter_table_remove(efx);
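The efx_probe_filters() hunk allocates one flow-ID table per channel, fills each with an invalid sentinel, and releases every table again if any allocation fails. The all-or-nothing allocation pattern on its own, as a hedged sketch (plain malloc, unwinding in place rather than the driver's success flag):

#include <stdint.h>
#include <stdlib.h>

#define FLOW_ID_INVALID 0xFFFFFFFFu

struct channel {
	uint32_t *flow_id;		/* one entry per filter slot */
};

/* Allocate a table per channel; on any failure, free everything allocated. */
static int alloc_flow_tables(struct channel *ch, int n_channels, int n_filters)
{
	int c, i;

	for (c = 0; c < n_channels; c++) {
		ch[c].flow_id = calloc(n_filters, sizeof(*ch[c].flow_id));
		if (!ch[c].flow_id)
			goto fail;
		for (i = 0; i < n_filters; i++)
			ch[c].flow_id[i] = FLOW_ID_INVALID;	/* "no flow" */
	}
	return 0;

fail:
	while (c--)
		free(ch[c].flow_id);
	return -1;			/* caller treats this like -ENOMEM */
}

int main(void)
{
	struct channel ch[4] = { 0 };

	return alloc_flow_tables(ch, 4, 128);
}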
index 38c4223..d13ddf9 100644 (file)
@@ -403,6 +403,8 @@ enum efx_sync_events_state {
  * @event_test_cpu: Last CPU to handle interrupt or test event for this channel
  * @irq_count: Number of IRQs since last adaptive moderation decision
  * @irq_mod_score: IRQ moderation score
+ * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
+ *      indexed by filter ID
  * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
  * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
  * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
@@ -446,6 +448,8 @@ struct efx_channel {
        unsigned int irq_mod_score;
 #ifdef CONFIG_RFS_ACCEL
        unsigned int rfs_filters_added;
+#define RPS_FLOW_ID_INVALID 0xFFFFFFFF
+       u32 *rps_flow_id;
 #endif
 
        unsigned n_rx_tobe_disc;
@@ -889,9 +893,9 @@ struct vfdi_status;
  * @filter_sem: Filter table rw_semaphore, for freeing the table
  * @filter_lock: Filter table lock, for mere content changes
  * @filter_state: Architecture-dependent filter table state
- * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
- *     indexed by filter ID
- * @rps_expire_index: Next index to check for expiry in @rps_flow_id
+ * @rps_expire_channel: Next channel to check for expiry
+ * @rps_expire_index: Next index to check for expiry in
+ *     @rps_expire_channel's @rps_flow_id
  * @active_queues: Count of RX and TX queues that haven't been flushed and drained.
  * @rxq_flush_pending: Count of number of receive queues that need to be flushed.
  *     Decremented when the efx_flush_rx_queue() is called.
@@ -1035,7 +1039,7 @@ struct efx_nic {
        spinlock_t filter_lock;
        void *filter_state;
 #ifdef CONFIG_RFS_ACCEL
-       u32 *rps_flow_id;
+       unsigned int rps_expire_channel;
        unsigned int rps_expire_index;
 #endif
 
index 8956995..02b0b52 100644 (file)
@@ -842,33 +842,18 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
        struct efx_nic *efx = netdev_priv(net_dev);
        struct efx_channel *channel;
        struct efx_filter_spec spec;
-       const __be16 *ports;
-       __be16 ether_type;
-       int nhoff;
+       struct flow_keys fk;
        int rc;
 
-       /* The core RPS/RFS code has already parsed and validated
-        * VLAN, IP and transport headers.  We assume they are in the
-        * header area.
-        */
-
-       if (skb->protocol == htons(ETH_P_8021Q)) {
-               const struct vlan_hdr *vh =
-                       (const struct vlan_hdr *)skb->data;
+       if (flow_id == RPS_FLOW_ID_INVALID)
+               return -EINVAL;
 
-               /* We can't filter on the IP 5-tuple and the vlan
-                * together, so just strip the vlan header and filter
-                * on the IP part.
-                */
-               EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh));
-               ether_type = vh->h_vlan_encapsulated_proto;
-               nhoff = sizeof(struct vlan_hdr);
-       } else {
-               ether_type = skb->protocol;
-               nhoff = 0;
-       }
+       if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
+               return -EPROTONOSUPPORT;
 
-       if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6))
+       if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6))
+               return -EPROTONOSUPPORT;
+       if (fk.control.flags & FLOW_DIS_IS_FRAGMENT)
                return -EPROTONOSUPPORT;
 
        efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
@@ -878,56 +863,41 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
                EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
                EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
                EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
-       spec.ether_type = ether_type;
-
-       if (ether_type == htons(ETH_P_IP)) {
-               const struct iphdr *ip =
-                       (const struct iphdr *)(skb->data + nhoff);
-
-               EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
-               if (ip_is_fragment(ip))
-                       return -EPROTONOSUPPORT;
-               spec.ip_proto = ip->protocol;
-               spec.rem_host[0] = ip->saddr;
-               spec.loc_host[0] = ip->daddr;
-               EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
-               ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
+       spec.ether_type = fk.basic.n_proto;
+       spec.ip_proto = fk.basic.ip_proto;
+
+       if (fk.basic.n_proto == htons(ETH_P_IP)) {
+               spec.rem_host[0] = fk.addrs.v4addrs.src;
+               spec.loc_host[0] = fk.addrs.v4addrs.dst;
        } else {
-               const struct ipv6hdr *ip6 =
-                       (const struct ipv6hdr *)(skb->data + nhoff);
-
-               EFX_BUG_ON_PARANOID(skb_headlen(skb) <
-                                   nhoff + sizeof(*ip6) + 4);
-               spec.ip_proto = ip6->nexthdr;
-               memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr));
-               memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr));
-               ports = (const __be16 *)(ip6 + 1);
+               memcpy(spec.rem_host, &fk.addrs.v6addrs.src, sizeof(struct in6_addr));
+               memcpy(spec.loc_host, &fk.addrs.v6addrs.dst, sizeof(struct in6_addr));
        }
 
-       spec.rem_port = ports[0];
-       spec.loc_port = ports[1];
+       spec.rem_port = fk.ports.src;
+       spec.loc_port = fk.ports.dst;
 
        rc = efx->type->filter_rfs_insert(efx, &spec);
        if (rc < 0)
                return rc;
 
        /* Remember this so we can check whether to expire the filter later */
-       efx->rps_flow_id[rc] = flow_id;
-       channel = efx_get_channel(efx, skb_get_rx_queue(skb));
+       channel = efx_get_channel(efx, rxq_index);
+       channel->rps_flow_id[rc] = flow_id;
        ++channel->rfs_filters_added;
 
-       if (ether_type == htons(ETH_P_IP))
+       if (spec.ether_type == htons(ETH_P_IP))
                netif_info(efx, rx_status, efx->net_dev,
                           "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
                           (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
-                          spec.rem_host, ntohs(ports[0]), spec.loc_host,
-                          ntohs(ports[1]), rxq_index, flow_id, rc);
+                          spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
+                          ntohs(spec.loc_port), rxq_index, flow_id, rc);
        else
                netif_info(efx, rx_status, efx->net_dev,
                           "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
                           (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
-                          spec.rem_host, ntohs(ports[0]), spec.loc_host,
-                          ntohs(ports[1]), rxq_index, flow_id, rc);
+                          spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
+                          ntohs(spec.loc_port), rxq_index, flow_id, rc);
 
        return rc;
 }
@@ -935,24 +905,34 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
 bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
 {
        bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
-       unsigned int index, size;
+       unsigned int channel_idx, index, size;
        u32 flow_id;
 
        if (!spin_trylock_bh(&efx->filter_lock))
                return false;
 
        expire_one = efx->type->filter_rfs_expire_one;
+       channel_idx = efx->rps_expire_channel;
        index = efx->rps_expire_index;
        size = efx->type->max_rx_ip_filters;
        while (quota--) {
-               flow_id = efx->rps_flow_id[index];
-               if (expire_one(efx, flow_id, index))
+               struct efx_channel *channel = efx_get_channel(efx, channel_idx);
+               flow_id = channel->rps_flow_id[index];
+
+               if (flow_id != RPS_FLOW_ID_INVALID &&
+                   expire_one(efx, flow_id, index)) {
                        netif_info(efx, rx_status, efx->net_dev,
-                                  "expired filter %d [flow %u]\n",
-                                  index, flow_id);
-               if (++index == size)
+                                  "expired filter %d [queue %u flow %u]\n",
+                                  index, channel_idx, flow_id);
+                       channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
+               }
+               if (++index == size) {
+                       if (++channel_idx == efx->n_channels)
+                               channel_idx = 0;
                        index = 0;
+               }
        }
+       efx->rps_expire_channel = channel_idx;
        efx->rps_expire_index = index;
 
        spin_unlock_bh(&efx->filter_lock);
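The rewritten __efx_filter_rfs_expire() visits a fixed quota of entries per call, wrapping from the end of one channel's table to the start of the next and remembering where it stopped. The cursor arithmetic in isolation, as a standalone sketch:

#include <stdio.h>

/* Persistent cursor across calls: (channel, index) into per-channel tables. */
static unsigned int expire_channel, expire_index;

/* Visit up to `quota` slots, resuming where the previous call left off. */
static void expire_scan(unsigned int n_channels, unsigned int table_size,
			unsigned int quota)
{
	unsigned int ch = expire_channel, idx = expire_index;

	while (quota--) {
		printf("check channel %u slot %u\n", ch, idx);
		if (++idx == table_size) {	/* wrap to the next channel */
			if (++ch == n_channels)
				ch = 0;
			idx = 0;
		}
	}
	expire_channel = ch;			/* remember for the next call */
	expire_index = idx;
}

int main(void)
{
	expire_scan(2, 4, 5);
	expire_scan(2, 4, 5);	/* resumes where the first call stopped */
	return 0;
}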
index 3f83c36..ec29585 100644 (file)
@@ -297,7 +297,7 @@ int stmmac_mdio_register(struct net_device *ndev)
                return -ENOMEM;
 
        if (mdio_bus_data->irqs)
-               memcpy(new_bus->irq, mdio_bus_data, sizeof(new_bus->irq));
+               memcpy(new_bus->irq, mdio_bus_data->irqs, sizeof(new_bus->irq));
 
 #ifdef CONFIG_OF
        if (priv->device->of_node)
index a0f64cb..2ace126 100644 (file)
@@ -990,7 +990,7 @@ static void team_port_disable(struct team *team,
 #define TEAM_ENC_FEATURES      (NETIF_F_HW_CSUM | NETIF_F_SG | \
                                 NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
 
-static void __team_compute_features(struct team *team)
+static void ___team_compute_features(struct team *team)
 {
        struct team_port *port;
        u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
@@ -1021,15 +1021,20 @@ static void __team_compute_features(struct team *team)
        team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
        if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
                team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
+}
 
+static void __team_compute_features(struct team *team)
+{
+       ___team_compute_features(team);
        netdev_change_features(team->dev);
 }
 
 static void team_compute_features(struct team *team)
 {
        mutex_lock(&team->lock);
-       __team_compute_features(team);
+       ___team_compute_features(team);
        mutex_unlock(&team->lock);
+       netdev_change_features(team->dev);
 }
 
 static int team_port_enter(struct team *team, struct team_port *port)
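The team split above keeps the feature recomputation under team->lock but moves netdev_change_features() outside it; the usual motivation for this shape is that the notification may re-enter code wanting the same lock. A tiny pthread sketch of "compute under the lock, notify after unlock" (illustrative only, not the team locking rules):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int features;

static void recompute_locked(void)	/* caller holds `lock` */
{
	features = 42;			/* stand-in for the real aggregation */
}

static void notify(void)		/* may call back into us; no lock held */
{
	printf("features now %d\n", features);
}

static void compute_features(void)
{
	pthread_mutex_lock(&lock);
	recompute_locked();
	pthread_mutex_unlock(&lock);
	notify();			/* deliberately outside the mutex */
}

int main(void)
{
	compute_features();
	return 0;
}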
index 36cd7f0..9bbe016 100644 (file)
@@ -473,7 +473,7 @@ static void read_bulk_callback(struct urb *urb)
                goto goon;
        }
 
-       if (!count || count < 4)
+       if (count < 4)
                goto goon;
 
        rx_status = buf[count - 2];
index d9d2806..dc989a8 100644 (file)
@@ -61,6 +61,8 @@
 #define SUSPEND_ALLMODES               (SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \
                                         SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3)
 
+#define CARRIER_CHECK_DELAY (2 * HZ)
+
 struct smsc95xx_priv {
        u32 mac_cr;
        u32 hash_hi;
@@ -69,6 +71,9 @@ struct smsc95xx_priv {
        spinlock_t mac_cr_lock;
        u8 features;
        u8 suspend_flags;
+       bool link_ok;
+       struct delayed_work carrier_check;
+       struct usbnet *dev;
 };
 
 static bool turbo_mode = true;
@@ -624,6 +629,44 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb)
                            intdata);
 }
 
+static void set_carrier(struct usbnet *dev, bool link)
+{
+       struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+
+       if (pdata->link_ok == link)
+               return;
+
+       pdata->link_ok = link;
+
+       if (link)
+               usbnet_link_change(dev, 1, 0);
+       else
+               usbnet_link_change(dev, 0, 0);
+}
+
+static void check_carrier(struct work_struct *work)
+{
+       struct smsc95xx_priv *pdata = container_of(work, struct smsc95xx_priv,
+                                               carrier_check.work);
+       struct usbnet *dev = pdata->dev;
+       int ret;
+
+       if (pdata->suspend_flags != 0)
+               return;
+
+       ret = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMSR);
+       if (ret < 0) {
+               netdev_warn(dev->net, "Failed to read MII_BMSR\n");
+               return;
+       }
+       if (ret & BMSR_LSTATUS)
+               set_carrier(dev, 1);
+       else
+               set_carrier(dev, 0);
+
+       schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
+}
+
 /* Enable or disable Tx & Rx checksum offload engines */
 static int smsc95xx_set_features(struct net_device *netdev,
        netdev_features_t features)
@@ -1165,13 +1208,20 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
        dev->net->flags |= IFF_MULTICAST;
        dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM;
        dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+
+       pdata->dev = dev;
+       INIT_DELAYED_WORK(&pdata->carrier_check, check_carrier);
+       schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
+
        return 0;
 }
 
 static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
 {
        struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+
        if (pdata) {
+               cancel_delayed_work(&pdata->carrier_check);
                netif_dbg(dev, ifdown, dev->net, "free pdata\n");
                kfree(pdata);
                pdata = NULL;
@@ -1695,6 +1745,7 @@ static int smsc95xx_resume(struct usb_interface *intf)
 
        /* do this first to ensure it's cleared even in error case */
        pdata->suspend_flags = 0;
+       schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
 
        if (suspend_flags & SUSPEND_ALLMODES) {
                /* clear wake-up sources */
index 49d84e5..e0638e5 100644 (file)
@@ -1925,24 +1925,11 @@ static int virtnet_probe(struct virtio_device *vdev)
 
        virtio_device_ready(vdev);
 
-       /* Last of all, set up some receive buffers. */
-       for (i = 0; i < vi->curr_queue_pairs; i++) {
-               try_fill_recv(vi, &vi->rq[i], GFP_KERNEL);
-
-               /* If we didn't even get one input buffer, we're useless. */
-               if (vi->rq[i].vq->num_free ==
-                   virtqueue_get_vring_size(vi->rq[i].vq)) {
-                       free_unused_bufs(vi);
-                       err = -ENOMEM;
-                       goto free_recv_bufs;
-               }
-       }
-
        vi->nb.notifier_call = &virtnet_cpu_callback;
        err = register_hotcpu_notifier(&vi->nb);
        if (err) {
                pr_debug("virtio_net: registering cpu notifier failed\n");
-               goto free_recv_bufs;
+               goto free_unregister_netdev;
        }
 
        /* Assume link up if device can't report link status,
@@ -1960,10 +1947,9 @@ static int virtnet_probe(struct virtio_device *vdev)
 
        return 0;
 
-free_recv_bufs:
+free_unregister_netdev:
        vi->vdev->config->reset(vdev);
 
-       free_receive_bufs(vi);
        unregister_netdev(dev);
 free_vqs:
        cancel_delayed_work_sync(&vi->refill);
index 8ff30c3..f999db2 100644 (file)
@@ -3086,6 +3086,9 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
        if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
                conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL;
 
+       if (tb[IFLA_MTU])
+               conf.mtu = nla_get_u32(tb[IFLA_MTU]);
+
        err = vxlan_dev_configure(src_net, dev, &conf);
        switch (err) {
        case -ENODEV:
index f2d01d4..1b8304e 100644 (file)
@@ -950,17 +950,14 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
 
                /* For SPIs, we need to track the affinity per IRQ */
                if (using_spi) {
-                       if (i >= pdev->num_resources) {
-                               of_node_put(dn);
+                       if (i >= pdev->num_resources)
                                break;
-                       }
 
                        irqs[i] = cpu;
                }
 
                /* Keep track of the CPUs containing this PMU type */
                cpumask_set_cpu(cpu, &pmu->supported_cpus);
-               of_node_put(dn);
                i++;
        } while (1);
 
@@ -995,9 +992,6 @@ int arm_pmu_device_probe(struct platform_device *pdev,
 
        armpmu_init(pmu);
 
-       if (!__oprofile_cpu_pmu)
-               __oprofile_cpu_pmu = pmu;
-
        pmu->plat_device = pdev;
 
        if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
@@ -1033,6 +1027,9 @@ int arm_pmu_device_probe(struct platform_device *pdev,
        if (ret)
                goto out_destroy;
 
+       if (!__oprofile_cpu_pmu)
+               __oprofile_cpu_pmu = pmu;
+
        pr_info("enabled with %s PMU driver, %d counters available\n",
                        pmu->name, pmu->num_events);
 
@@ -1043,6 +1040,7 @@ out_destroy:
 out_free:
        pr_info("%s: failed to register PMU devices!\n",
                of_node_full_name(node));
+       kfree(pmu->irq_affinity);
        kfree(pmu);
        return ret;
 }
index 207b13b..a607655 100644 (file)
@@ -1256,9 +1256,10 @@ static void mtk_eint_irq_handler(struct irq_desc *desc)
        const struct mtk_desc_pin *pin;
 
        chained_irq_enter(chip, desc);
-       for (eint_num = 0; eint_num < pctl->devdata->ap_num; eint_num += 32) {
+       for (eint_num = 0;
+            eint_num < pctl->devdata->ap_num;
+            eint_num += 32, reg += 4) {
                status = readl(reg);
-               reg += 4;
                while (status) {
                        offset = __ffs(status);
                        index = eint_num + offset;
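The pinctrl loop above now advances the status register address together with the EINT number and then services each pending bit via __ffs(). The bit-walking part is a generic idiom; a standalone sketch using the GCC/Clang count-trailing-zeros builtin in place of __ffs():

#include <stdint.h>
#include <stdio.h>

/* Handle every set bit of a 32-bit pending-interrupt word, lowest first. */
static void handle_pending(uint32_t status, unsigned int base)
{
	while (status) {
		unsigned int offset = __builtin_ctz(status);	/* like __ffs() */

		printf("service interrupt %u\n", base + offset);
		status &= status - 1;				/* clear that bit */
	}
}

int main(void)
{
	handle_pending(0x00000005u, 32);	/* bits 0 and 2 -> irqs 32 and 34 */
	return 0;
}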
index ccbfc32..38facef 100644 (file)
@@ -854,7 +854,7 @@ static int nmk_gpio_get_dir(struct gpio_chip *chip, unsigned offset)
 
        clk_enable(nmk_chip->clk);
 
-       dir = !!(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset));
+       dir = !(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset));
 
        clk_disable(nmk_chip->clk);
 
index 579fd65..d637c93 100644 (file)
@@ -208,14 +208,10 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
                break;
 
        case PTP_SYS_OFFSET:
-               sysoff = kmalloc(sizeof(*sysoff), GFP_KERNEL);
-               if (!sysoff) {
-                       err = -ENOMEM;
-                       break;
-               }
-               if (copy_from_user(sysoff, (void __user *)arg,
-                                  sizeof(*sysoff))) {
-                       err = -EFAULT;
+               sysoff = memdup_user((void __user *)arg, sizeof(*sysoff));
+               if (IS_ERR(sysoff)) {
+                       err = PTR_ERR(sysoff);
+                       sysoff = NULL;
                        break;
                }
                if (sysoff->n_samples > PTP_MAX_SAMPLES) {
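The PTP change collapses a kmalloc()+copy_from_user() pair into memdup_user(), which returns either the copied buffer or an error encoded in the pointer itself. The error-pointer convention it relies on can be sketched in userspace like this (simplified IS_ERR/PTR_ERR analogues, not the kernel headers):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified error-pointer helpers in the spirit of ERR_PTR()/IS_ERR(). */
static void *err_ptr(long err)      { return (void *)err; }
static int   is_err(const void *p)  { return (unsigned long)p >= (unsigned long)-4095; }
static long  ptr_err(const void *p) { return (long)p; }

/* memdup-style helper: allocate a copy of `src` or return an encoded error. */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (!p)
		return err_ptr(-ENOMEM);
	memcpy(p, src, len);
	return p;
}

int main(void)
{
	int value = 7;
	int *copy = memdup(&value, sizeof(value));

	if (is_err(copy)) {
		fprintf(stderr, "memdup failed: %ld\n", ptr_err(copy));
		return 1;
	}
	printf("copied %d\n", *copy);
	free(copy);
	return 0;
}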
index 8f90d9e..969c312 100644 (file)
@@ -620,6 +620,11 @@ struct aac_driver_ident
  */
 #define AAC_QUIRK_SCSI_32      0x0020
 
+/*
+ * SRC based adapters support the AifReqEvent functions
+ */
+#define AAC_QUIRK_SRC 0x0040
+
 /*
  *     The adapter interface specs all queues to be located in the same
  *     physically contiguous block. The host structure that defines the
index a943bd2..79871f3 100644 (file)
@@ -236,10 +236,10 @@ static struct aac_driver_ident aac_drivers[] = {
        { aac_rx_init, "aacraid",  "ADAPTEC ", "RAID            ", 2 }, /* Adaptec Catch All */
        { aac_rkt_init, "aacraid", "ADAPTEC ", "RAID            ", 2 }, /* Adaptec Rocket Catch All */
        { aac_nark_init, "aacraid", "ADAPTEC ", "RAID           ", 2 }, /* Adaptec NEMER/ARK Catch All */
-       { aac_src_init, "aacraid", "ADAPTEC ", "RAID            ", 2 }, /* Adaptec PMC Series 6 (Tupelo) */
-       { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2 }, /* Adaptec PMC Series 7 (Denali) */
-       { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2 }, /* Adaptec PMC Series 8 */
-       { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2 } /* Adaptec PMC Series 9 */
+       { aac_src_init, "aacraid", "ADAPTEC ", "RAID            ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 6 (Tupelo) */
+       { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 7 (Denali) */
+       { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 8 */
+       { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2, AAC_QUIRK_SRC } /* Adaptec PMC Series 9 */
 };
 
 /**
@@ -1299,7 +1299,8 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        else
                shost->this_id = shost->max_id;
 
-       aac_intr_normal(aac, 0, 2, 0, NULL);
+       if (aac_drivers[index].quirks & AAC_QUIRK_SRC)
+               aac_intr_normal(aac, 0, 2, 0, NULL);
 
        /*
         * dmb - we may need to move the setting of these parms somewhere else once
index 6a4df5a..6bff13e 100644 (file)
@@ -7975,13 +7975,14 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
                ActiveCableEventData =
                    (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
                if (ActiveCableEventData->ReasonCode ==
-                               MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER)
+                               MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER) {
                        pr_info(MPT3SAS_FMT "Currently an active cable with ReceptacleID %d",
                            ioc->name, ActiveCableEventData->ReceptacleID);
                        pr_info("cannot be powered and devices connected to this active cable");
                        pr_info("will not be seen. This active cable");
                        pr_info("requires %d mW of power",
                            ActiveCableEventData->ActiveCablePowerRequirement);
+               }
                break;
 
        default: /* ignore the rest */
index b2e332a..c71344a 100644 (file)
@@ -821,9 +821,12 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
        }
 
        /*
-        * If we finished all bytes in the request we are done now.
+        * special case: failed zero length commands always need to
+        * drop down into the retry code. Otherwise, if we finished
+        * all bytes in the request we are done now.
         */
-       if (!scsi_end_request(req, error, good_bytes, 0))
+       if (!(blk_rq_bytes(req) == 0 && error) &&
+           !scsi_end_request(req, error, good_bytes, 0))
                return;
 
        /*
index 428c03e..f459dff 100644 (file)
@@ -1398,11 +1398,15 @@ static int media_not_present(struct scsi_disk *sdkp,
  **/
 static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
 {
-       struct scsi_disk *sdkp = scsi_disk(disk);
-       struct scsi_device *sdp = sdkp->device;
+       struct scsi_disk *sdkp = scsi_disk_get(disk);
+       struct scsi_device *sdp;
        struct scsi_sense_hdr *sshdr = NULL;
        int retval;
 
+       if (!sdkp)
+               return 0;
+
+       sdp = sdkp->device;
        SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));
 
        /*
@@ -1459,6 +1463,7 @@ out:
        kfree(sshdr);
        retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
        sdp->changed = 0;
+       scsi_disk_put(sdkp);
        return retval;
 }
 
index 13d431c..a578cd2 100644 (file)
@@ -177,7 +177,7 @@ static int int3406_thermal_probe(struct platform_device *pdev)
                return -ENODEV;
        d->raw_bd = bd;
 
-       ret = acpi_video_get_levels(ACPI_COMPANION(&pdev->dev), &d->br);
+       ret = acpi_video_get_levels(ACPI_COMPANION(&pdev->dev), &d->br, NULL);
        if (ret)
                return ret;
 
index 82c4d2e..9510305 100644 (file)
@@ -120,17 +120,6 @@ config UNIX98_PTYS
          All modern Linux systems use the Unix98 ptys.  Say Y unless
          you're on an embedded system and want to conserve memory.
 
-config DEVPTS_MULTIPLE_INSTANCES
-       bool "Support multiple instances of devpts"
-       depends on UNIX98_PTYS
-       default n
-       ---help---
-         Enable support for multiple instances of devpts filesystem.
-         If you want to have isolated PTY namespaces (eg: in containers),
-         say Y here.  Otherwise, say N. If enabled, each mount of devpts
-         filesystem with the '-o newinstance' option will create an
-         independent PTY namespace.
-
 config LEGACY_PTYS
        bool "Legacy (BSD) PTY support"
        default y
index dd4b841..f856c45 100644 (file)
@@ -668,7 +668,7 @@ static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
        else
                fsi = tty->link->driver_data;
        devpts_kill_index(fsi, tty->index);
-       devpts_put_ref(fsi);
+       devpts_release(fsi);
 }
 
 static const struct tty_operations ptm_unix98_ops = {
@@ -733,10 +733,11 @@ static int ptmx_open(struct inode *inode, struct file *filp)
        if (retval)
                return retval;
 
-       fsi = devpts_get_ref(inode, filp);
-       retval = -ENODEV;
-       if (!fsi)
+       fsi = devpts_acquire(filp);
+       if (IS_ERR(fsi)) {
+               retval = PTR_ERR(fsi);
                goto out_free_file;
+       }
 
        /* find a device that is not in use. */
        mutex_lock(&devpts_mutex);
@@ -745,7 +746,7 @@ static int ptmx_open(struct inode *inode, struct file *filp)
 
        retval = index;
        if (index < 0)
-               goto out_put_ref;
+               goto out_put_fsi;
 
 
        mutex_lock(&tty_mutex);
@@ -789,8 +790,8 @@ err_release:
        return retval;
 out:
        devpts_kill_index(fsi, index);
-out_put_ref:
-       devpts_put_ref(fsi);
+out_put_fsi:
+       devpts_release(fsi);
 out_free_file:
        tty_free_file(filp);
        return retval;
index 9360140..688691d 100644 (file)
@@ -749,7 +749,8 @@ static int vfio_vpd_config_write(struct vfio_pci_device *vdev, int pos,
                if (pci_write_vpd(pdev, addr & ~PCI_VPD_ADDR_F, 4, &data) != 4)
                        return count;
        } else {
-               if (pci_read_vpd(pdev, addr, 4, &data) != 4)
+               data = 0;
+               if (pci_read_vpd(pdev, addr, 4, &data) < 0)
                        return count;
                *pdata = cpu_to_le32(data);
        }
index e9ea3fe..15ecfc9 100644 (file)
@@ -228,9 +228,9 @@ static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
 
 static void vfio_intx_disable(struct vfio_pci_device *vdev)
 {
-       vfio_intx_set_signal(vdev, -1);
        vfio_virqfd_disable(&vdev->ctx[0].unmask);
        vfio_virqfd_disable(&vdev->ctx[0].mask);
+       vfio_intx_set_signal(vdev, -1);
        vdev->irq_type = VFIO_PCI_NUM_IRQS;
        vdev->num_ctx = 0;
        kfree(vdev->ctx);
@@ -401,13 +401,13 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
        struct pci_dev *pdev = vdev->pdev;
        int i;
 
-       vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
-
        for (i = 0; i < vdev->num_ctx; i++) {
                vfio_virqfd_disable(&vdev->ctx[i].unmask);
                vfio_virqfd_disable(&vdev->ctx[i].mask);
        }
 
+       vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
+
        if (msix) {
                pci_disable_msix(vdev->pdev);
                kfree(vdev->msix);
index 15a6582..2ba1942 100644 (file)
@@ -515,7 +515,7 @@ static int map_try_harder(struct vfio_domain *domain, dma_addr_t iova,
                          unsigned long pfn, long npage, int prot)
 {
        long i;
-       int ret;
+       int ret = 0;
 
        for (i = 0; i < npage; i++, pfn++, iova += PAGE_SIZE) {
                ret = iommu_map(domain->domain, iova,
index 8ea531d..bbfe7e2 100644 (file)
@@ -51,8 +51,8 @@ static void hdmi_core_ddc_init(struct hdmi_core_data *core)
 {
        void __iomem *base = core->base;
        const unsigned long long iclk = 266000000;      /* DSS L3 ICLK */
-       const unsigned ss_scl_high = 4000;              /* ns */
-       const unsigned ss_scl_low = 4700;               /* ns */
+       const unsigned ss_scl_high = 4600;              /* ns */
+       const unsigned ss_scl_low = 5400;               /* ns */
        const unsigned fs_scl_high = 600;               /* ns */
        const unsigned fs_scl_low = 1300;               /* ns */
        const unsigned sda_hold = 1000;                 /* ns */
@@ -442,7 +442,7 @@ static void hdmi_core_write_avi_infoframe(struct hdmi_core_data *core,
 
        c = (ptr[1] >> 6) & 0x3;
        m = (ptr[1] >> 4) & 0x3;
-       r = (ptr[1] >> 0) & 0x3;
+       r = (ptr[1] >> 0) & 0xf;
 
        itc = (ptr[2] >> 7) & 0x1;
        ec = (ptr[2] >> 4) & 0x7;
index a400951..689d25a 100644 (file)
@@ -2042,6 +2042,11 @@ int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
        struct btrfs_bio *bbio = NULL;
 
 
+       /*
+        * Avoid races with device replace and make sure our bbio has devices
+        * associated to its stripes that don't go away while we are discarding.
+        */
+       btrfs_bio_counter_inc_blocked(root->fs_info);
        /* Tell the block device(s) that the sectors can be discarded */
        ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
                              bytenr, &num_bytes, &bbio, 0);
@@ -2074,6 +2079,7 @@ int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
                }
                btrfs_put_bbio(bbio);
        }
+       btrfs_bio_counter_dec(root->fs_info);
 
        if (actual_bytes)
                *actual_bytes = discarded_bytes;
index 3cd5782..6e953de 100644 (file)
@@ -2025,9 +2025,16 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
        bio->bi_iter.bi_size = 0;
        map_length = length;
 
+       /*
+        * Avoid races with device replace and make sure our bbio has devices
+        * associated to its stripes that don't go away while we are doing the
+        * read repair operation.
+        */
+       btrfs_bio_counter_inc_blocked(fs_info);
        ret = btrfs_map_block(fs_info, WRITE, logical,
                              &map_length, &bbio, mirror_num);
        if (ret) {
+               btrfs_bio_counter_dec(fs_info);
                bio_put(bio);
                return -EIO;
        }
@@ -2037,6 +2044,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
        dev = bbio->stripes[mirror_num-1].dev;
        btrfs_put_bbio(bbio);
        if (!dev || !dev->bdev || !dev->writeable) {
+               btrfs_bio_counter_dec(fs_info);
                bio_put(bio);
                return -EIO;
        }
@@ -2045,6 +2053,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
 
        if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) {
                /* try to remap that extent elsewhere? */
+               btrfs_bio_counter_dec(fs_info);
                bio_put(bio);
                btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
                return -EIO;
@@ -2054,6 +2063,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
                "read error corrected: ino %llu off %llu (dev %s sector %llu)",
                                  btrfs_ino(inode), start,
                                  rcu_str_deref(dev->name), sector);
+       btrfs_bio_counter_dec(fs_info);
        bio_put(bio);
        return 0;
 }
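Both btrfs hunks above bracket a map-then-submit sequence with btrfs_bio_counter_inc_blocked()/btrfs_bio_counter_dec(), so every early error return also has to drop the counter. The balanced-counter-with-error-paths shape, sketched with a plain counter and a single cleanup label instead of the per-return decrements used in the patch:

#include <stdio.h>

static int blocked_ops;			/* stand-in for the fs_info bio counter */

static int do_mapping(void)  { return 0; }	/* pretend these can fail */
static int do_io(void)       { return -1; }

/* Take the counter, run both steps, and drop it on every exit path. */
static int mapped_io(void)
{
	int ret;

	blocked_ops++;			/* like btrfs_bio_counter_inc_blocked() */

	ret = do_mapping();
	if (ret)
		goto out;
	ret = do_io();
out:
	blocked_ops--;			/* like btrfs_bio_counter_dec() */
	return ret;
}

int main(void)
{
	printf("result %d, counter back to %d\n", mapped_io(), blocked_ops);
	return 0;
}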
index 2704995..8b1212e 100644 (file)
@@ -6979,7 +6979,18 @@ insert:
                 * existing will always be non-NULL, since there must be
                 * extent causing the -EEXIST.
                 */
-               if (start >= extent_map_end(existing) ||
+               if (existing->start == em->start &&
+                   extent_map_end(existing) == extent_map_end(em) &&
+                   em->block_start == existing->block_start) {
+                       /*
+                        * these two extents are the same; this happens
+                        * especially with inline extents
+                        */
+                       free_extent_map(em);
+                       em = existing;
+                       err = 0;
+
+               } else if (start >= extent_map_end(existing) ||
                    start <= existing->start) {
                        /*
                         * The existing extent map is the one nearest to
index 5591704..e96634a 100644 (file)
@@ -718,12 +718,13 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr,
        return count;
 }
 
-void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr,
+int btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr,
                              const u64 range_start, const u64 range_len)
 {
        struct btrfs_root *root;
        struct list_head splice;
        int done;
+       int total_done = 0;
 
        INIT_LIST_HEAD(&splice);
 
@@ -742,6 +743,7 @@ void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr,
                done = btrfs_wait_ordered_extents(root, nr,
                                                  range_start, range_len);
                btrfs_put_fs_root(root);
+               total_done += done;
 
                spin_lock(&fs_info->ordered_root_lock);
                if (nr != -1) {
@@ -752,6 +754,8 @@ void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr,
        list_splice_tail(&splice, &fs_info->ordered_roots);
        spin_unlock(&fs_info->ordered_root_lock);
        mutex_unlock(&fs_info->ordered_operations_mutex);
+
+       return total_done;
 }
 
 /*
index 2049c9b..4515077 100644 (file)
@@ -199,7 +199,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
                           u32 *sum, int len);
 int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr,
                               const u64 range_start, const u64 range_len);
-void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr,
+int btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr,
                              const u64 range_start, const u64 range_len);
 void btrfs_get_logged_extents(struct inode *inode,
                              struct list_head *logged_list,
index 298631e..8428db7 100644 (file)
@@ -761,12 +761,14 @@ static void __reada_start_machine(struct btrfs_fs_info *fs_info)
 
        do {
                enqueued = 0;
+               mutex_lock(&fs_devices->device_list_mutex);
                list_for_each_entry(device, &fs_devices->devices, dev_list) {
                        if (atomic_read(&device->reada_in_flight) <
                            MAX_IN_FLIGHT)
                                enqueued += reada_start_machine_dev(fs_info,
                                                                    device);
                }
+               mutex_unlock(&fs_devices->device_list_mutex);
                total += enqueued;
        } while (enqueued && total < 10000);
 
index 46d847f..70427ef 100644 (file)
@@ -3582,6 +3582,46 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
                 */
                scrub_pause_on(fs_info);
                ret = btrfs_inc_block_group_ro(root, cache);
+               if (!ret && is_dev_replace) {
+                       /*
+                        * If we are doing a device replace wait for any tasks
+                        * that started delalloc right before we set the block
+                        * group to RO mode, as they might have just allocated
+                        * an extent from it or decided they could do a nocow
+                        * write. And if any such tasks did that, wait for their
+                        * ordered extents to complete and then commit the
+                        * current transaction, so that we can later see the new
+                        * extent items in the extent tree - the ordered extents
+                        * create delayed data references (for cow writes) when
+                        * they complete, which will be run and insert the
+                        * corresponding extent items into the extent tree when
+                        * we commit the transaction they used when running
+                        * inode.c:btrfs_finish_ordered_io(). We later use
+                        * the commit root of the extent tree to find extents
+                        * to copy from the srcdev into the tgtdev, and we don't
+                        * want to miss any new extents.
+                        */
+                       btrfs_wait_block_group_reservations(cache);
+                       btrfs_wait_nocow_writers(cache);
+                       ret = btrfs_wait_ordered_roots(fs_info, -1,
+                                                      cache->key.objectid,
+                                                      cache->key.offset);
+                       if (ret > 0) {
+                               struct btrfs_trans_handle *trans;
+
+                               trans = btrfs_join_transaction(root);
+                               if (IS_ERR(trans))
+                                       ret = PTR_ERR(trans);
+                               else
+                                       ret = btrfs_commit_transaction(trans,
+                                                                      root);
+                               if (ret) {
+                                       scrub_pause_off(fs_info);
+                                       btrfs_put_block_group(cache);
+                                       break;
+                               }
+                       }
+               }
                scrub_pause_off(fs_info);
 
                if (ret == 0) {
@@ -3602,9 +3642,11 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
                        break;
                }
 
+               btrfs_dev_replace_lock(&fs_info->dev_replace, 1);
                dev_replace->cursor_right = found_key.offset + length;
                dev_replace->cursor_left = found_key.offset;
                dev_replace->item_needs_writeback = 1;
+               btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);
                ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
                                  found_key.offset, cache, is_dev_replace);
 
@@ -3640,6 +3682,11 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 
                scrub_pause_off(fs_info);
 
+               btrfs_dev_replace_lock(&fs_info->dev_replace, 1);
+               dev_replace->cursor_left = dev_replace->cursor_right;
+               dev_replace->item_needs_writeback = 1;
+               btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);
+
                if (ro_set)
                        btrfs_dec_block_group_ro(root, cache);
 
@@ -3677,9 +3724,6 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
                        ret = -ENOMEM;
                        break;
                }
-
-               dev_replace->cursor_left = dev_replace->cursor_right;
-               dev_replace->item_needs_writeback = 1;
 skip:
                key.offset = found_key.offset + length;
                btrfs_release_path(path);
index bdc6256..da9e003 100644 (file)
@@ -2761,6 +2761,7 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
        u64 dev_extent_len = 0;
        u64 chunk_objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
        int i, ret = 0;
+       struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
 
        /* Just in case */
        root = root->fs_info->chunk_root;
@@ -2787,12 +2788,19 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
        check_system_chunk(trans, extent_root, map->type);
        unlock_chunks(root->fs_info->chunk_root);
 
+       /*
+        * Take the device list mutex to prevent races with the final phase of
+        * a device replace operation that replaces the device object associated
+        * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()).
+        */
+       mutex_lock(&fs_devices->device_list_mutex);
        for (i = 0; i < map->num_stripes; i++) {
                struct btrfs_device *device = map->stripes[i].dev;
                ret = btrfs_free_dev_extent(trans, device,
                                            map->stripes[i].physical,
                                            &dev_extent_len);
                if (ret) {
+                       mutex_unlock(&fs_devices->device_list_mutex);
                        btrfs_abort_transaction(trans, root, ret);
                        goto out;
                }
@@ -2811,11 +2819,14 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
                if (map->stripes[i].dev) {
                        ret = btrfs_update_device(trans, map->stripes[i].dev);
                        if (ret) {
+                               mutex_unlock(&fs_devices->device_list_mutex);
                                btrfs_abort_transaction(trans, root, ret);
                                goto out;
                        }
                }
        }
+       mutex_unlock(&fs_devices->device_list_mutex);
+
        ret = btrfs_free_chunk(trans, root, chunk_objectid, chunk_offset);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
@@ -5762,20 +5773,17 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                        }
                }
                if (found) {
-                       if (physical_of_found + map->stripe_len <=
-                           dev_replace->cursor_left) {
-                               struct btrfs_bio_stripe *tgtdev_stripe =
-                                       bbio->stripes + num_stripes;
+                       struct btrfs_bio_stripe *tgtdev_stripe =
+                               bbio->stripes + num_stripes;
 
-                               tgtdev_stripe->physical = physical_of_found;
-                               tgtdev_stripe->length =
-                                       bbio->stripes[index_srcdev].length;
-                               tgtdev_stripe->dev = dev_replace->tgtdev;
-                               bbio->tgtdev_map[index_srcdev] = num_stripes;
+                       tgtdev_stripe->physical = physical_of_found;
+                       tgtdev_stripe->length =
+                               bbio->stripes[index_srcdev].length;
+                       tgtdev_stripe->dev = dev_replace->tgtdev;
+                       bbio->tgtdev_map[index_srcdev] = num_stripes;
 
-                               tgtdev_indexes++;
-                               num_stripes++;
-                       }
+                       tgtdev_indexes++;
+                       num_stripes++;
                }
        }
 
index 861d611..ce5f345 100644 (file)
@@ -380,7 +380,7 @@ static void cachefiles_sync_cache(struct fscache_cache *_cache)
  * check if the backing cache is updated to FS-Cache
  * - called by FS-Cache when evaluates if need to invalidate the cache
  */
-static bool cachefiles_check_consistency(struct fscache_operation *op)
+static int cachefiles_check_consistency(struct fscache_operation *op)
 {
        struct cachefiles_object *object;
        struct cachefiles_cache *cache;
index eeb71e5..26a9d10 100644 (file)
@@ -276,8 +276,10 @@ static void finish_read(struct ceph_osd_request *req)
        for (i = 0; i < num_pages; i++) {
                struct page *page = osd_data->pages[i];
 
-               if (rc < 0 && rc != -ENOENT)
+               if (rc < 0 && rc != -ENOENT) {
+                       ceph_fscache_readpage_cancel(inode, page);
                        goto unlock;
+               }
                if (bytes < (int)PAGE_SIZE) {
                        /* zero (remainder of) page */
                        int s = bytes < 0 ? 0 : bytes;
@@ -535,8 +537,6 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
            CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
                set_bdi_congested(&fsc->backing_dev_info, BLK_RW_ASYNC);
 
-       ceph_readpage_to_fscache(inode, page);
-
        set_page_writeback(page);
        err = ceph_osdc_writepages(osdc, ceph_vino(inode),
                                   &ci->i_layout, snapc,
index c052b5b..238c55b 100644 (file)
@@ -25,6 +25,7 @@
 #include "cache.h"
 
 struct ceph_aux_inode {
+       u64             version;
        struct timespec mtime;
        loff_t          size;
 };
@@ -69,15 +70,8 @@ int ceph_fscache_register_fs(struct ceph_fs_client* fsc)
        fsc->fscache = fscache_acquire_cookie(ceph_cache_netfs.primary_index,
                                              &ceph_fscache_fsid_object_def,
                                              fsc, true);
-
-       if (fsc->fscache == NULL) {
+       if (!fsc->fscache)
                pr_err("Unable to register fsid: %p fscache cookie", fsc);
-               return 0;
-       }
-
-       fsc->revalidate_wq = alloc_workqueue("ceph-revalidate", 0, 1);
-       if (fsc->revalidate_wq == NULL)
-               return -ENOMEM;
 
        return 0;
 }
@@ -105,6 +99,7 @@ static uint16_t ceph_fscache_inode_get_aux(const void *cookie_netfs_data,
        const struct inode* inode = &ci->vfs_inode;
 
        memset(&aux, 0, sizeof(aux));
+       aux.version = ci->i_version;
        aux.mtime = inode->i_mtime;
        aux.size = i_size_read(inode);
 
@@ -131,6 +126,7 @@ static enum fscache_checkaux ceph_fscache_inode_check_aux(
                return FSCACHE_CHECKAUX_OBSOLETE;
 
        memset(&aux, 0, sizeof(aux));
+       aux.version = ci->i_version;
        aux.mtime = inode->i_mtime;
        aux.size = i_size_read(inode);
 
@@ -181,32 +177,26 @@ static const struct fscache_cookie_def ceph_fscache_inode_object_def = {
        .now_uncached   = ceph_fscache_inode_now_uncached,
 };
 
-void ceph_fscache_register_inode_cookie(struct ceph_fs_client* fsc,
-                                       struct ceph_inode_info* ci)
+void ceph_fscache_register_inode_cookie(struct inode *inode)
 {
-       struct inode* inode = &ci->vfs_inode;
+       struct ceph_inode_info *ci = ceph_inode(inode);
+       struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
 
        /* No caching for filesystem */
        if (fsc->fscache == NULL)
                return;
 
        /* Only cache for regular files that are read only */
-       if ((ci->vfs_inode.i_mode & S_IFREG) == 0)
+       if (!S_ISREG(inode->i_mode))
                return;
 
-       /* Avoid multiple racing open requests */
-       inode_lock(inode);
-
-       if (ci->fscache)
-               goto done;
-
-       ci->fscache = fscache_acquire_cookie(fsc->fscache,
-                                            &ceph_fscache_inode_object_def,
-                                            ci, true);
-       fscache_check_consistency(ci->fscache);
-done:
+       inode_lock_nested(inode, I_MUTEX_CHILD);
+       if (!ci->fscache) {
+               ci->fscache = fscache_acquire_cookie(fsc->fscache,
+                                       &ceph_fscache_inode_object_def,
+                                       ci, false);
+       }
        inode_unlock(inode);
-
 }
 
 void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
@@ -222,6 +212,34 @@ void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
        fscache_relinquish_cookie(cookie, 0);
 }
 
+static bool ceph_fscache_can_enable(void *data)
+{
+       struct inode *inode = data;
+       return !inode_is_open_for_write(inode);
+}
+
+void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp)
+{
+       struct ceph_inode_info *ci = ceph_inode(inode);
+
+       if (!fscache_cookie_valid(ci->fscache))
+               return;
+
+       if (inode_is_open_for_write(inode)) {
+               dout("fscache_file_set_cookie %p %p disabling cache\n",
+                    inode, filp);
+               fscache_disable_cookie(ci->fscache, false);
+               fscache_uncache_all_inode_pages(ci->fscache, inode);
+       } else {
+               fscache_enable_cookie(ci->fscache, ceph_fscache_can_enable,
+                               inode);
+               if (fscache_cookie_enabled(ci->fscache)) {
+                       dout("fscache_file_set_cookie %p %p enabling cache\n",
+                            inode, filp);
+               }
+       }
+}
+
 static void ceph_vfs_readpage_complete(struct page *page, void *data, int error)
 {
        if (!error)
@@ -238,8 +256,7 @@ static void ceph_vfs_readpage_complete_unlock(struct page *page, void *data, int
 
 static inline bool cache_valid(struct ceph_inode_info *ci)
 {
-       return ((ceph_caps_issued(ci) & CEPH_CAP_FILE_CACHE) &&
-               (ci->i_fscache_gen == ci->i_rdcache_gen));
+       return ci->i_fscache_gen == ci->i_rdcache_gen;
 }
 
 
@@ -332,69 +349,27 @@ void ceph_invalidate_fscache_page(struct inode* inode, struct page *page)
 
 void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc)
 {
-       if (fsc->revalidate_wq)
-               destroy_workqueue(fsc->revalidate_wq);
-
        fscache_relinquish_cookie(fsc->fscache, 0);
        fsc->fscache = NULL;
 }
 
-static void ceph_revalidate_work(struct work_struct *work)
-{
-       int issued;
-       u32 orig_gen;
-       struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
-                                                 i_revalidate_work);
-       struct inode *inode = &ci->vfs_inode;
-
-       spin_lock(&ci->i_ceph_lock);
-       issued = __ceph_caps_issued(ci, NULL);
-       orig_gen = ci->i_rdcache_gen;
-       spin_unlock(&ci->i_ceph_lock);
-
-       if (!(issued & CEPH_CAP_FILE_CACHE)) {
-               dout("revalidate_work lost cache before validation %p\n",
-                    inode);
-               goto out;
-       }
-
-       if (!fscache_check_consistency(ci->fscache))
-               fscache_invalidate(ci->fscache);
-
-       spin_lock(&ci->i_ceph_lock);
-       /* Update the new valid generation (backwards sanity check too) */
-       if (orig_gen > ci->i_fscache_gen) {
-               ci->i_fscache_gen = orig_gen;
-       }
-       spin_unlock(&ci->i_ceph_lock);
-
-out:
-       iput(&ci->vfs_inode);
-}
-
-void ceph_queue_revalidate(struct inode *inode)
+/*
+ * caller should hold CEPH_CAP_FILE_{RD,CACHE}
+ */
+void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci)
 {
-       struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
-       struct ceph_inode_info *ci = ceph_inode(inode);
-
-       if (fsc->revalidate_wq == NULL || ci->fscache == NULL)
+       if (cache_valid(ci))
                return;
 
-       ihold(inode);
-
-       if (queue_work(ceph_sb_to_client(inode->i_sb)->revalidate_wq,
-                      &ci->i_revalidate_work)) {
-               dout("ceph_queue_revalidate %p\n", inode);
-       } else {
-               dout("ceph_queue_revalidate %p failed\n)", inode);
-               iput(inode);
+       /* reuse i_truncate_mutex. There should be no pending
+        * truncate while the caller holds CEPH_CAP_FILE_RD */
+       mutex_lock(&ci->i_truncate_mutex);
+       if (!cache_valid(ci)) {
+               if (fscache_check_consistency(ci->fscache))
+                       fscache_invalidate(ci->fscache);
+               spin_lock(&ci->i_ceph_lock);
+               ci->i_fscache_gen = ci->i_rdcache_gen;
+               spin_unlock(&ci->i_ceph_lock);
        }
-}
-
-void ceph_fscache_inode_init(struct ceph_inode_info *ci)
-{
-       ci->fscache = NULL;
-       /* The first load is verifed cookie open time */
-       ci->i_fscache_gen = 1;
-       INIT_WORK(&ci->i_revalidate_work, ceph_revalidate_work);
+       mutex_unlock(&ci->i_truncate_mutex);
 }
index 5ac591b..7e72c75 100644 (file)
@@ -34,10 +34,10 @@ void ceph_fscache_unregister(void);
 int ceph_fscache_register_fs(struct ceph_fs_client* fsc);
 void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc);
 
-void ceph_fscache_inode_init(struct ceph_inode_info *ci);
-void ceph_fscache_register_inode_cookie(struct ceph_fs_client* fsc,
-                                       struct ceph_inode_info* ci);
+void ceph_fscache_register_inode_cookie(struct inode *inode);
 void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci);
+void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp);
+void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci);
 
 int ceph_readpage_from_fscache(struct inode *inode, struct page *page);
 int ceph_readpages_from_fscache(struct inode *inode,
@@ -46,12 +46,11 @@ int ceph_readpages_from_fscache(struct inode *inode,
                                unsigned *nr_pages);
 void ceph_readpage_to_fscache(struct inode *inode, struct page *page);
 void ceph_invalidate_fscache_page(struct inode* inode, struct page *page);
-void ceph_queue_revalidate(struct inode *inode);
 
-static inline void ceph_fscache_update_objectsize(struct inode *inode)
+static inline void ceph_fscache_inode_init(struct ceph_inode_info *ci)
 {
-       struct ceph_inode_info *ci = ceph_inode(inode);
-       fscache_attr_changed(ci->fscache);
+       ci->fscache = NULL;
+       ci->i_fscache_gen = 0;
 }
 
 static inline void ceph_fscache_invalidate(struct inode *inode)
@@ -88,6 +87,11 @@ static inline void ceph_fscache_readpages_cancel(struct inode *inode,
        return fscache_readpages_cancel(ci->fscache, pages);
 }
 
+static inline void ceph_disable_fscache_readpage(struct ceph_inode_info *ci)
+{
+       ci->i_fscache_gen = ci->i_rdcache_gen - 1;
+}
+
 #else
 
 static inline int ceph_fscache_register(void)
@@ -112,8 +116,20 @@ static inline void ceph_fscache_inode_init(struct ceph_inode_info *ci)
 {
 }
 
-static inline void ceph_fscache_register_inode_cookie(struct ceph_fs_client* parent_fsc,
-                                                     struct ceph_inode_info* ci)
+static inline void ceph_fscache_register_inode_cookie(struct inode *inode)
+{
+}
+
+static inline void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
+{
+}
+
+static inline void ceph_fscache_file_set_cookie(struct inode *inode,
+                                               struct file *filp)
+{
+}
+
+static inline void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci)
 {
 }
 
@@ -141,10 +157,6 @@ static inline void ceph_readpage_to_fscache(struct inode *inode,
 {
 }
 
-static inline void ceph_fscache_update_objectsize(struct inode *inode)
-{
-}
-
 static inline void ceph_fscache_invalidate(struct inode *inode)
 {
 }
@@ -154,10 +166,6 @@ static inline void ceph_invalidate_fscache_page(struct inode *inode,
 {
 }
 
-static inline void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
-{
-}
-
 static inline int ceph_release_fscache_page(struct page *page, gfp_t gfp)
 {
        return 1;
@@ -173,7 +181,7 @@ static inline void ceph_fscache_readpages_cancel(struct inode *inode,
 {
 }
 
-static inline void ceph_queue_revalidate(struct inode *inode)
+static inline void ceph_disable_fscache_readpage(struct ceph_inode_info *ci)
 {
 }
 
index c17b5d7..6f60d0a 100644 (file)
@@ -2393,6 +2393,9 @@ again:
                                snap_rwsem_locked = true;
                        }
                        *got = need | (have & want);
+                       if ((need & CEPH_CAP_FILE_RD) &&
+                           !(*got & CEPH_CAP_FILE_CACHE))
+                               ceph_disable_fscache_readpage(ci);
                        __take_cap_refs(ci, *got, true);
                        ret = 1;
                }
@@ -2554,6 +2557,9 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
                break;
        }
 
+       if ((_got & CEPH_CAP_FILE_RD) && (_got & CEPH_CAP_FILE_CACHE))
+               ceph_fscache_revalidate_cookie(ci);
+
        *got = _got;
        return 0;
 }
@@ -2795,7 +2801,6 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc,
        bool writeback = false;
        bool queue_trunc = false;
        bool queue_invalidate = false;
-       bool queue_revalidate = false;
        bool deleted_inode = false;
        bool fill_inline = false;
 
@@ -2837,8 +2842,6 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc,
                                ci->i_rdcache_revoking = ci->i_rdcache_gen;
                        }
                }
-
-               ceph_fscache_invalidate(inode);
        }
 
        /* side effects now are allowed */
@@ -2880,11 +2883,6 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc,
                }
        }
 
-       /* Do we need to revalidate our fscache cookie. Don't bother on the
-        * first cache cap as we already validate at cookie creation time. */
-       if ((issued & CEPH_CAP_FILE_CACHE) && ci->i_rdcache_gen > 1)
-               queue_revalidate = true;
-
        if (newcaps & CEPH_CAP_ANY_RD) {
                /* ctime/mtime/atime? */
                ceph_decode_timespec(&mtime, &grant->mtime);
@@ -2993,11 +2991,8 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc,
        if (fill_inline)
                ceph_fill_inline_data(inode, NULL, inline_data, inline_len);
 
-       if (queue_trunc) {
+       if (queue_trunc)
                ceph_queue_vmtruncate(inode);
-               ceph_queue_revalidate(inode);
-       } else if (queue_revalidate)
-               ceph_queue_revalidate(inode);
 
        if (writeback)
                /*
@@ -3199,10 +3194,8 @@ static void handle_cap_trunc(struct inode *inode,
                                          truncate_seq, truncate_size, size);
        spin_unlock(&ci->i_ceph_lock);
 
-       if (queue_trunc) {
+       if (queue_trunc)
                ceph_queue_vmtruncate(inode);
-               ceph_fscache_invalidate(inode);
-       }
 }
 
 /*
index a888df6..ce2f579 100644 (file)
@@ -137,23 +137,11 @@ static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
 {
        struct ceph_file_info *cf;
        int ret = 0;
-       struct ceph_inode_info *ci = ceph_inode(inode);
-       struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
-       struct ceph_mds_client *mdsc = fsc->mdsc;
 
        switch (inode->i_mode & S_IFMT) {
        case S_IFREG:
-               /* First file open request creates the cookie, we want to keep
-                * this cookie around for the filetime of the inode as not to
-                * have to worry about fscache register / revoke / operation
-                * races.
-                *
-                * Also, if we know the operation is going to invalidate data
-                * (non readonly) just nuke the cache right away.
-                */
-               ceph_fscache_register_inode_cookie(mdsc->fsc, ci);
-               if ((fmode & CEPH_FILE_MODE_WR))
-                       ceph_fscache_invalidate(inode);
+               ceph_fscache_register_inode_cookie(inode);
+               ceph_fscache_file_set_cookie(inode, file);
        case S_IFDIR:
                dout("init_file %p %p 0%o (regular)\n", inode, file,
                     inode->i_mode);
@@ -1349,7 +1337,7 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
        }
 
 retry_snap:
-       if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) {
+       if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL)) {
                err = -ENOSPC;
                goto out;
        }
@@ -1407,7 +1395,6 @@ retry_snap:
                        iov_iter_advance(from, written);
                ceph_put_snap_context(snapc);
        } else {
-               loff_t old_size = i_size_read(inode);
                /*
                 * No need to acquire the i_truncate_mutex. Because
                 * the MDS revokes Fwb caps before sending truncate
@@ -1418,8 +1405,6 @@ retry_snap:
                written = generic_perform_write(file, from, pos);
                if (likely(written >= 0))
                        iocb->ki_pos = pos + written;
-               if (i_size_read(inode) > old_size)
-                       ceph_fscache_update_objectsize(inode);
                inode_unlock(inode);
        }
 
@@ -1440,7 +1425,7 @@ retry_snap:
        ceph_put_cap_refs(ci, got);
 
        if (written >= 0) {
-               if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))
+               if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_NEARFULL))
                        iocb->ki_flags |= IOCB_DSYNC;
 
                written = generic_write_sync(iocb, written);
@@ -1672,8 +1657,8 @@ static long ceph_fallocate(struct file *file, int mode,
                goto unlock;
        }
 
-       if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) &&
-               !(mode & FALLOC_FL_PUNCH_HOLE)) {
+       if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) &&
+           !(mode & FALLOC_FL_PUNCH_HOLE)) {
                ret = -ENOSPC;
                goto unlock;
        }
index 0130a85..0168b49 100644 (file)
@@ -103,7 +103,6 @@ struct ceph_fs_client {
 
 #ifdef CONFIG_CEPH_FSCACHE
        struct fscache_cookie *fscache;
-       struct workqueue_struct *revalidate_wq;
 #endif
 };
 
@@ -360,8 +359,7 @@ struct ceph_inode_info {
 
 #ifdef CONFIG_CEPH_FSCACHE
        struct fscache_cookie *fscache;
-       u32 i_fscache_gen; /* sequence, for delayed fscache validate */
-       struct work_struct i_revalidate_work;
+       u32 i_fscache_gen;
 #endif
        struct inode vfs_inode; /* at end */
 };
index 0b2954d..37c134a 100644 (file)
@@ -95,8 +95,6 @@ static struct ctl_table pty_root_table[] = {
 
 static DEFINE_MUTEX(allocated_ptys_lock);
 
-static struct vfsmount *devpts_mnt;
-
 struct pts_mount_opts {
        int setuid;
        int setgid;
@@ -104,7 +102,7 @@ struct pts_mount_opts {
        kgid_t   gid;
        umode_t mode;
        umode_t ptmxmode;
-       int newinstance;
+       int reserve;
        int max;
 };
 
@@ -117,11 +115,9 @@ static const match_table_t tokens = {
        {Opt_uid, "uid=%u"},
        {Opt_gid, "gid=%u"},
        {Opt_mode, "mode=%o"},
-#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
        {Opt_ptmxmode, "ptmxmode=%o"},
        {Opt_newinstance, "newinstance"},
        {Opt_max, "max=%d"},
-#endif
        {Opt_err, NULL}
 };
 
@@ -137,15 +133,48 @@ static inline struct pts_fs_info *DEVPTS_SB(struct super_block *sb)
        return sb->s_fs_info;
 }
 
-static inline struct super_block *pts_sb_from_inode(struct inode *inode)
+struct pts_fs_info *devpts_acquire(struct file *filp)
 {
-#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
-       if (inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC)
-               return inode->i_sb;
-#endif
-       if (!devpts_mnt)
-               return NULL;
-       return devpts_mnt->mnt_sb;
+       struct pts_fs_info *result;
+       struct path path;
+       struct super_block *sb;
+       int err;
+
+       path = filp->f_path;
+       path_get(&path);
+
+       /* Has the devpts filesystem already been found? */
+       sb = path.mnt->mnt_sb;
+       if (sb->s_magic != DEVPTS_SUPER_MAGIC) {
+               /* Is a devpts filesystem at "pts" in the same directory? */
+               err = path_pts(&path);
+               if (err) {
+                       result = ERR_PTR(err);
+                       goto out;
+               }
+
+               /* Is the path the root of a devpts filesystem? */
+               result = ERR_PTR(-ENODEV);
+               sb = path.mnt->mnt_sb;
+               if ((sb->s_magic != DEVPTS_SUPER_MAGIC) ||
+                   (path.mnt->mnt_root != sb->s_root))
+                       goto out;
+       }
+
+       /*
+        * pty code needs to hold extra references in case of last /dev/tty close
+        */
+       atomic_inc(&sb->s_active);
+       result = DEVPTS_SB(sb);
+
+out:
+       path_put(&path);
+       return result;
+}
+
+void devpts_release(struct pts_fs_info *fsi)
+{
+       deactivate_super(fsi->sb);
 }
 
 #define PARSE_MOUNT    0
@@ -154,9 +183,7 @@ static inline struct super_block *pts_sb_from_inode(struct inode *inode)
 /*
  * parse_mount_options():
  *     Set @opts to mount options specified in @data. If an option is not
- *     specified in @data, set it to its default value. The exception is
- *     'newinstance' option which can only be set/cleared on a mount (i.e.
- *     cannot be changed during remount).
+ *     specified in @data, set it to its default value.
  *
  * Note: @data may be NULL (in which case all options are set to default).
  */
@@ -174,9 +201,12 @@ static int parse_mount_options(char *data, int op, struct pts_mount_opts *opts)
        opts->ptmxmode = DEVPTS_DEFAULT_PTMX_MODE;
        opts->max     = NR_UNIX98_PTY_MAX;
 
-       /* newinstance makes sense only on initial mount */
+       /* Only allow instances mounted from the initial mount
+        * namespace to tap the reserve pool of ptys.
+        */
        if (op == PARSE_MOUNT)
-               opts->newinstance = 0;
+               opts->reserve =
+                       (current->nsproxy->mnt_ns == init_task.nsproxy->mnt_ns);
 
        while ((p = strsep(&data, ",")) != NULL) {
                substring_t args[MAX_OPT_ARGS];
@@ -211,16 +241,12 @@ static int parse_mount_options(char *data, int op, struct pts_mount_opts *opts)
                                return -EINVAL;
                        opts->mode = option & S_IALLUGO;
                        break;
-#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
                case Opt_ptmxmode:
                        if (match_octal(&args[0], &option))
                                return -EINVAL;
                        opts->ptmxmode = option & S_IALLUGO;
                        break;
                case Opt_newinstance:
-                       /* newinstance makes sense only on initial mount */
-                       if (op == PARSE_MOUNT)
-                               opts->newinstance = 1;
                        break;
                case Opt_max:
                        if (match_int(&args[0], &option) ||
@@ -228,7 +254,6 @@ static int parse_mount_options(char *data, int op, struct pts_mount_opts *opts)
                                return -EINVAL;
                        opts->max = option;
                        break;
-#endif
                default:
                        pr_err("called with bogus options\n");
                        return -EINVAL;
@@ -238,7 +263,6 @@ static int parse_mount_options(char *data, int op, struct pts_mount_opts *opts)
        return 0;
 }
 
-#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
 static int mknod_ptmx(struct super_block *sb)
 {
        int mode;
@@ -305,12 +329,6 @@ static void update_ptmx_mode(struct pts_fs_info *fsi)
                inode->i_mode = S_IFCHR|fsi->mount_opts.ptmxmode;
        }
 }
-#else
-static inline void update_ptmx_mode(struct pts_fs_info *fsi)
-{
-       return;
-}
-#endif
 
 static int devpts_remount(struct super_block *sb, int *flags, char *data)
 {
@@ -344,11 +362,9 @@ static int devpts_show_options(struct seq_file *seq, struct dentry *root)
                seq_printf(seq, ",gid=%u",
                           from_kgid_munged(&init_user_ns, opts->gid));
        seq_printf(seq, ",mode=%03o", opts->mode);
-#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
        seq_printf(seq, ",ptmxmode=%03o", opts->ptmxmode);
        if (opts->max < NR_UNIX98_PTY_MAX)
                seq_printf(seq, ",max=%d", opts->max);
-#endif
 
        return 0;
 }
@@ -410,40 +426,11 @@ fail:
        return -ENOMEM;
 }
 
-#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
-static int compare_init_pts_sb(struct super_block *s, void *p)
-{
-       if (devpts_mnt)
-               return devpts_mnt->mnt_sb == s;
-       return 0;
-}
-
 /*
  * devpts_mount()
  *
- *     If the '-o newinstance' mount option was specified, mount a new
- *     (private) instance of devpts.  PTYs created in this instance are
- *     independent of the PTYs in other devpts instances.
- *
- *     If the '-o newinstance' option was not specified, mount/remount the
- *     initial kernel mount of devpts.  This type of mount gives the
- *     legacy, single-instance semantics.
- *
- *     The 'newinstance' option is needed to support multiple namespace
- *     semantics in devpts while preserving backward compatibility of the
- *     current 'single-namespace' semantics. i.e all mounts of devpts
- *     without the 'newinstance' mount option should bind to the initial
- *     kernel mount, like mount_single().
- *
- *     Mounts with 'newinstance' option create a new, private namespace.
- *
- *     NOTE:
- *
- *     For single-mount semantics, devpts cannot use mount_single(),
- *     because mount_single()/sget() find and use the super-block from
- *     the most recent mount of devpts. But that recent mount may be a
- *     'newinstance' mount and mount_single() would pick the newinstance
- *     super-block instead of the initial super-block.
+ *     Mount a new (private) instance of devpts.  PTYs created in this
+ *     instance are independent of the PTYs in other devpts instances.
  */
 static struct dentry *devpts_mount(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data)
@@ -456,18 +443,7 @@ static struct dentry *devpts_mount(struct file_system_type *fs_type,
        if (error)
                return ERR_PTR(error);
 
-       /* Require newinstance for all user namespace mounts to ensure
-        * the mount options are not changed.
-        */
-       if ((current_user_ns() != &init_user_ns) && !opts.newinstance)
-               return ERR_PTR(-EINVAL);
-
-       if (opts.newinstance)
-               s = sget(fs_type, NULL, set_anon_super, flags, NULL);
-       else
-               s = sget(fs_type, compare_init_pts_sb, set_anon_super, flags,
-                        NULL);
-
+       s = sget(fs_type, NULL, set_anon_super, flags, NULL);
        if (IS_ERR(s))
                return ERR_CAST(s);
 
@@ -491,18 +467,6 @@ out_undo_sget:
        return ERR_PTR(error);
 }
 
-#else
-/*
- * This supports only the legacy single-instance semantics (no
- * multiple-instance semantics)
- */
-static struct dentry *devpts_mount(struct file_system_type *fs_type, int flags,
-               const char *dev_name, void *data)
-{
-       return mount_single(fs_type, flags, data, devpts_fill_super);
-}
-#endif
-
 static void devpts_kill_sb(struct super_block *sb)
 {
        struct pts_fs_info *fsi = DEVPTS_SB(sb);
@@ -516,9 +480,7 @@ static struct file_system_type devpts_fs_type = {
        .name           = "devpts",
        .mount          = devpts_mount,
        .kill_sb        = devpts_kill_sb,
-#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
        .fs_flags       = FS_USERNS_MOUNT | FS_USERNS_DEV_MOUNT,
-#endif
 };
 
 /*
@@ -531,16 +493,13 @@ int devpts_new_index(struct pts_fs_info *fsi)
        int index;
        int ida_ret;
 
-       if (!fsi)
-               return -ENODEV;
-
 retry:
        if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL))
                return -ENOMEM;
 
        mutex_lock(&allocated_ptys_lock);
-       if (pty_count >= pty_limit -
-                       (fsi->mount_opts.newinstance ? pty_reserve : 0)) {
+       if (pty_count >= (pty_limit -
+                         (fsi->mount_opts.reserve ? 0 : pty_reserve))) {
                mutex_unlock(&allocated_ptys_lock);
                return -ENOSPC;
        }
@@ -571,30 +530,6 @@ void devpts_kill_index(struct pts_fs_info *fsi, int idx)
        mutex_unlock(&allocated_ptys_lock);
 }
 
-/*
- * pty code needs to hold extra references in case of last /dev/tty close
- */
-struct pts_fs_info *devpts_get_ref(struct inode *ptmx_inode, struct file *file)
-{
-       struct super_block *sb;
-       struct pts_fs_info *fsi;
-
-       sb = pts_sb_from_inode(ptmx_inode);
-       if (!sb)
-               return NULL;
-       fsi = DEVPTS_SB(sb);
-       if (!fsi)
-               return NULL;
-
-       atomic_inc(&sb->s_active);
-       return fsi;
-}
-
-void devpts_put_ref(struct pts_fs_info *fsi)
-{
-       deactivate_super(fsi->sb);
-}
-
 /**
  * devpts_pty_new -- create a new inode in /dev/pts/
  * @ptmx_inode: inode of the master
@@ -607,16 +542,12 @@ void devpts_put_ref(struct pts_fs_info *fsi)
 struct dentry *devpts_pty_new(struct pts_fs_info *fsi, int index, void *priv)
 {
        struct dentry *dentry;
-       struct super_block *sb;
+       struct super_block *sb = fsi->sb;
        struct inode *inode;
        struct dentry *root;
        struct pts_mount_opts *opts;
        char s[12];
 
-       if (!fsi)
-               return ERR_PTR(-ENODEV);
-
-       sb = fsi->sb;
        root = sb->s_root;
        opts = &fsi->mount_opts;
 
@@ -676,20 +607,8 @@ void devpts_pty_kill(struct dentry *dentry)
 static int __init init_devpts_fs(void)
 {
        int err = register_filesystem(&devpts_fs_type);
-       struct ctl_table_header *table;
-
        if (!err) {
-               struct vfsmount *mnt;
-
-               table = register_sysctl_table(pty_root_table);
-               mnt = kern_mount(&devpts_fs_type);
-               if (IS_ERR(mnt)) {
-                       err = PTR_ERR(mnt);
-                       unregister_filesystem(&devpts_fs_type);
-                       unregister_sysctl_table(table);
-               } else {
-                       devpts_mnt = mnt;
-               }
+               register_sysctl_table(pty_root_table);
        }
        return err;
 }
index 3078b67..c8c4f79 100644 (file)
@@ -887,6 +887,8 @@ void fscache_invalidate_writes(struct fscache_cookie *cookie)
                        put_page(results[i]);
        }
 
+       wake_up_bit(&cookie->flags, 0);
+
        _leave("");
 }
 
index 4c4f95a..6a82fb7 100644 (file)
@@ -1416,21 +1416,28 @@ static void follow_mount(struct path *path)
        }
 }
 
+static int path_parent_directory(struct path *path)
+{
+       struct dentry *old = path->dentry;
+       /* rare case of legitimate dget_parent()... */
+       path->dentry = dget_parent(path->dentry);
+       dput(old);
+       if (unlikely(!path_connected(path)))
+               return -ENOENT;
+       return 0;
+}
+
 static int follow_dotdot(struct nameidata *nd)
 {
        while(1) {
-               struct dentry *old = nd->path.dentry;
-
                if (nd->path.dentry == nd->root.dentry &&
                    nd->path.mnt == nd->root.mnt) {
                        break;
                }
                if (nd->path.dentry != nd->path.mnt->mnt_root) {
-                       /* rare case of legitimate dget_parent()... */
-                       nd->path.dentry = dget_parent(nd->path.dentry);
-                       dput(old);
-                       if (unlikely(!path_connected(&nd->path)))
-                               return -ENOENT;
+                       int ret = path_parent_directory(&nd->path);
+                       if (ret)
+                               return ret;
                        break;
                }
                if (!follow_up(&nd->path))
@@ -2514,6 +2521,34 @@ struct dentry *lookup_one_len_unlocked(const char *name,
 }
 EXPORT_SYMBOL(lookup_one_len_unlocked);
 
+#ifdef CONFIG_UNIX98_PTYS
+int path_pts(struct path *path)
+{
+       /* Find something mounted on "pts" in the same directory as
+        * the input path.
+        */
+       struct dentry *child, *parent;
+       struct qstr this;
+       int ret;
+
+       ret = path_parent_directory(path);
+       if (ret)
+               return ret;
+
+       parent = path->dentry;
+       this.name = "pts";
+       this.len = 3;
+       child = d_hash_and_lookup(parent, &this);
+       if (!child)
+               return -ENOENT;
+
+       path->dentry = child;
+       dput(parent);
+       follow_mount(path);
+       return 0;
+}
+#endif
+
 int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
                 struct path *path, int *empty)
 {
index 70a41f7..5731ccb 100644 (file)
@@ -51,7 +51,8 @@ extern void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type);
  */
 extern bool acpi_video_handles_brightness_key_presses(void);
 extern int acpi_video_get_levels(struct acpi_device *device,
-                                struct acpi_video_device_brightness **dev_br);
+                                struct acpi_video_device_brightness **dev_br,
+                                int *pmax_level);
 #else
 static inline int acpi_video_register(void) { return 0; }
 static inline void acpi_video_unregister(void) { return; }
@@ -72,7 +73,8 @@ static inline bool acpi_video_handles_brightness_key_presses(void)
        return false;
 }
 static inline int acpi_video_get_levels(struct acpi_device *device,
-                       struct acpi_video_device_brightness **dev_br)
+                       struct acpi_video_device_brightness **dev_br,
+                       int *pmax_level)
 {
        return -ENODEV;
 }
index 19b1486..1b3b6e1 100644 (file)
@@ -279,6 +279,11 @@ struct ceph_osd_client {
        struct workqueue_struct *notify_wq;
 };
 
+static inline bool ceph_osdmap_flag(struct ceph_osd_client *osdc, int flag)
+{
+       return osdc->osdmap->flags & flag;
+}
+
 extern int ceph_osdc_setup(void);
 extern void ceph_osdc_cleanup(void);
 
index ddc426b..9ccf4db 100644 (file)
@@ -189,11 +189,6 @@ static inline bool ceph_osd_is_down(struct ceph_osdmap *map, int osd)
        return !ceph_osd_is_up(map, osd);
 }
 
-static inline bool ceph_osdmap_flag(struct ceph_osdmap *map, int flag)
-{
-       return map && (map->flags & flag);
-}
-
 extern char *ceph_osdmap_state_str(char *str, int len, int state);
 extern u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd);
 
index 5871f29..277ab9a 100644 (file)
 
 #include <linux/errno.h>
 
-struct pts_fs_info;
-
 #ifdef CONFIG_UNIX98_PTYS
 
-/* Look up a pts fs info and get a ref to it */
-struct pts_fs_info *devpts_get_ref(struct inode *, struct file *);
-void devpts_put_ref(struct pts_fs_info *);
+struct pts_fs_info;
+
+struct pts_fs_info *devpts_acquire(struct file *);
+void devpts_release(struct pts_fs_info *);
 
 int devpts_new_index(struct pts_fs_info *);
 void devpts_kill_index(struct pts_fs_info *, int);
index 3fe90d4..4551c6f 100644 (file)
@@ -112,19 +112,24 @@ struct dma_buf_ops {
  * @file: file pointer used for sharing buffers across, and for refcounting.
  * @attachments: list of dma_buf_attachment that denotes all devices attached.
  * @ops: dma_buf_ops associated with this buffer object.
+ * @lock: used internally to serialize list manipulation, attach/detach and vmap/unmap
+ * @vmapping_counter: used internally to refcnt the vmaps
+ * @vmap_ptr: the current vmap ptr if vmapping_counter > 0
  * @exp_name: name of the exporter; useful for debugging.
  * @owner: pointer to exporter module; used for refcounting when exporter is a
  *         kernel module.
  * @list_node: node for dma_buf accounting and debugging.
  * @priv: exporter specific private data for this buffer object.
  * @resv: reservation object linked to this dma-buf
+ * @poll: for userspace poll support
+ * @cb_excl: for userspace poll support
+ * @cb_shared: for userspace poll support
  */
 struct dma_buf {
        size_t size;
        struct file *file;
        struct list_head attachments;
        const struct dma_buf_ops *ops;
-       /* mutex to serialize list manipulation, attach/detach and vmap/unmap */
        struct mutex lock;
        unsigned vmapping_counter;
        void *vmap_ptr;
@@ -188,9 +193,11 @@ struct dma_buf_export_info {
 
 /**
  * helper macro for exporters; zeros and fills in most common values
+ *
+ * @name: export-info name
  */
-#define DEFINE_DMA_BUF_EXPORT_INFO(a)  \
-       struct dma_buf_export_info a = { .exp_name = KBUILD_MODNAME, \
+#define DEFINE_DMA_BUF_EXPORT_INFO(name)       \
+       struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \
                                         .owner = THIS_MODULE }
 
 /**
index 2b17698..2056e9f 100644 (file)
@@ -49,6 +49,8 @@ struct fence_cb;
  * @timestamp: Timestamp when the fence was signaled.
  * @status: Optional, only valid if < 0, must be set before calling
  * fence_signal, indicates that the fence has completed with an error.
+ * @child_list: list of children fences
+ * @active_list: list of active fences
  *
  * the flags member must be manipulated and read using the appropriate
  * atomic ops (bit_*), so taking the spinlock will not be needed most
index 604e152..13ba552 100644 (file)
@@ -241,7 +241,7 @@ struct fscache_cache_ops {
 
        /* check the consistency between the backing cache and the FS-Cache
         * cookie */
-       bool (*check_consistency)(struct fscache_operation *op);
+       int (*check_consistency)(struct fscache_operation *op);
 
        /* store the updated auxiliary data on an object */
        void (*update_object)(struct fscache_object *object);
index 4d758a7..562cef0 100644 (file)
@@ -315,6 +315,7 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
 /**
  * struct irq_chip - hardware interrupt chip descriptor
  *
+ * @parent_device:     pointer to parent device for irqchip
  * @name:              name for /proc/interrupts
  * @irq_startup:       start up the interrupt (defaults to ->enable if NULL)
  * @irq_shutdown:      shut down the interrupt (defaults to ->disable if NULL)
@@ -354,6 +355,7 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
  * @flags:             chip specific flags
  */
 struct irq_chip {
+       struct device   *parent_device;
        const char      *name;
        unsigned int    (*irq_startup)(struct irq_data *data);
        void            (*irq_shutdown)(struct irq_data *data);
@@ -482,12 +484,15 @@ extern void handle_fasteoi_irq(struct irq_desc *desc);
 extern void handle_edge_irq(struct irq_desc *desc);
 extern void handle_edge_eoi_irq(struct irq_desc *desc);
 extern void handle_simple_irq(struct irq_desc *desc);
+extern void handle_untracked_irq(struct irq_desc *desc);
 extern void handle_percpu_irq(struct irq_desc *desc);
 extern void handle_percpu_devid_irq(struct irq_desc *desc);
 extern void handle_bad_irq(struct irq_desc *desc);
 extern void handle_nested_irq(unsigned int irq);
 
 extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
+extern int irq_chip_pm_get(struct irq_data *data);
+extern int irq_chip_pm_put(struct irq_data *data);
 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
 extern void irq_chip_enable_parent(struct irq_data *data);
 extern void irq_chip_disable_parent(struct irq_data *data);
index bfbd707..107eed4 100644 (file)
 #define GITS_BASER_NR_REGS             8
 
 #define GITS_BASER_VALID               (1UL << 63)
+#define GITS_BASER_INDIRECT            (1UL << 62)
 #define GITS_BASER_nCnB                        (0UL << 59)
 #define GITS_BASER_nC                  (1UL << 59)
 #define GITS_BASER_RaWt                        (2UL << 59)
 #define GITS_BASER_PAGE_SIZE_64K       (2UL << GITS_BASER_PAGE_SIZE_SHIFT)
 #define GITS_BASER_PAGE_SIZE_MASK      (3UL << GITS_BASER_PAGE_SIZE_SHIFT)
 #define GITS_BASER_PAGES_MAX           256
+#define GITS_BASER_PAGES_SHIFT         (0)
 
 #define GITS_BASER_TYPE_NONE           0
 #define GITS_BASER_TYPE_DEVICE         1
 #define GITS_BASER_TYPE_RESERVED6      6
 #define GITS_BASER_TYPE_RESERVED7      7
 
+#define GITS_LVL1_ENTRY_SIZE           (8UL)
+
 /*
  * ITS commands
  */
 #define ICC_SGI1R_AFFINITY_1_SHIFT     16
 #define ICC_SGI1R_AFFINITY_1_MASK      (0xff << ICC_SGI1R_AFFINITY_1_SHIFT)
 #define ICC_SGI1R_SGI_ID_SHIFT         24
-#define ICC_SGI1R_SGI_ID_MASK          (0xff << ICC_SGI1R_SGI_ID_SHIFT)
+#define ICC_SGI1R_SGI_ID_MASK          (0xfULL << ICC_SGI1R_SGI_ID_SHIFT)
 #define ICC_SGI1R_AFFINITY_2_SHIFT     32
-#define ICC_SGI1R_AFFINITY_2_MASK      (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT)
+#define ICC_SGI1R_AFFINITY_2_MASK      (0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT)
 #define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40
 #define ICC_SGI1R_AFFINITY_3_SHIFT     48
-#define ICC_SGI1R_AFFINITY_3_MASK      (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT)
+#define ICC_SGI1R_AFFINITY_3_MASK      (0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT)
 
 #include <asm/arch_gicv3.h>
 
index fd05185..eafc965 100644 (file)
 #include <linux/irqdomain.h>
 
 struct device_node;
+struct gic_chip_data;
 
 void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
 int gic_cpu_if_down(unsigned int gic_nr);
+void gic_cpu_save(struct gic_chip_data *gic);
+void gic_cpu_restore(struct gic_chip_data *gic);
+void gic_dist_save(struct gic_chip_data *gic);
+void gic_dist_restore(struct gic_chip_data *gic);
 
 /*
  * Subdrivers that need some preparatory work can initialize their
@@ -111,6 +116,12 @@ int gic_cpu_if_down(unsigned int gic_nr);
  */
 int gic_of_init(struct device_node *node, struct device_node *parent);
 
+/*
+ * Initialises and registers a non-root or child GIC chip. Memory for
+ * the gic_chip_data structure is dynamically allocated.
+ */
+int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq);
+
 /*
  * Legacy platforms not converted to DT yet must use this to init
  * their GIC
index f1f36e0..3175037 100644 (file)
@@ -452,6 +452,9 @@ static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
        return -1;
 }
 
+static inline void irq_domain_free_irqs(unsigned int virq,
+                                       unsigned int nr_irqs) { }
+
 static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
 {
        return false;
index ec5ec28..d3d0398 100644 (file)
@@ -45,6 +45,8 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
 #define LOOKUP_ROOT            0x2000
 #define LOOKUP_EMPTY           0x4000
 
+extern int path_pts(struct path *path);
+
 extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty);
 
 static inline int user_path_at(int dfd, const char __user *name, unsigned flags,
index bf268fa..fec4027 100644 (file)
@@ -46,33 +46,62 @@ extern struct page_ext_operations page_idle_ops;
 
 static inline bool page_is_young(struct page *page)
 {
-       return test_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
+       struct page_ext *page_ext = lookup_page_ext(page);
+
+       if (unlikely(!page_ext))
+               return false;
+
+       return test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
 }
 
 static inline void set_page_young(struct page *page)
 {
-       set_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
+       struct page_ext *page_ext = lookup_page_ext(page);
+
+       if (unlikely(!page_ext))
+               return;
+
+       set_bit(PAGE_EXT_YOUNG, &page_ext->flags);
 }
 
 static inline bool test_and_clear_page_young(struct page *page)
 {
-       return test_and_clear_bit(PAGE_EXT_YOUNG,
-                                 &lookup_page_ext(page)->flags);
+       struct page_ext *page_ext = lookup_page_ext(page);
+
+       if (unlikely(!page_ext))
+               return false;
+
+       return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
 }
 
 static inline bool page_is_idle(struct page *page)
 {
-       return test_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+       struct page_ext *page_ext = lookup_page_ext(page);
+
+       if (unlikely(!page_ext))
+               return false;
+
+       return test_bit(PAGE_EXT_IDLE, &page_ext->flags);
 }
 
 static inline void set_page_idle(struct page *page)
 {
-       set_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+       struct page_ext *page_ext = lookup_page_ext(page);
+
+       if (unlikely(!page_ext))
+               return;
+
+       set_bit(PAGE_EXT_IDLE, &page_ext->flags);
 }
 
 static inline void clear_page_idle(struct page *page)
 {
-       clear_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+       struct page_ext *page_ext = lookup_page_ext(page);
+
+       if (unlikely(!page_ext))
+               return;
+
+       clear_bit(PAGE_EXT_IDLE, &page_ext->flags);
 }
 #endif /* CONFIG_64BIT */
 
index 49d0576..b0f305e 100644 (file)
@@ -49,12 +49,27 @@ extern struct ww_class reservation_ww_class;
 extern struct lock_class_key reservation_seqcount_class;
 extern const char reservation_seqcount_string[];
 
+/**
+ * struct reservation_object_list - a list of shared fences
+ * @rcu: for internal use
+ * @shared_count: number of fences in the shared table
+ * @shared_max: for growing shared fence table
+ * @shared: shared fence table
+ */
 struct reservation_object_list {
        struct rcu_head rcu;
        u32 shared_count, shared_max;
        struct fence __rcu *shared[];
 };
 
+/**
+ * struct reservation_object - a reservation object manages fences for a buffer
+ * @lock: update side lock
+ * @seq: sequence count for managing RCU read-side synchronization
+ * @fence_excl: the exclusive fence, if there is one currently
+ * @fence: list of current shared fences
+ * @staged: staged copy of shared fences for RCU updates
+ */
 struct reservation_object {
        struct ww_mutex lock;
        seqcount_t seq;
@@ -68,6 +83,10 @@ struct reservation_object {
 #define reservation_object_assert_held(obj) \
        lockdep_assert_held(&(obj)->lock.base)
 
+/**
+ * reservation_object_init - initialize a reservation object
+ * @obj: the reservation object
+ */
 static inline void
 reservation_object_init(struct reservation_object *obj)
 {
@@ -79,6 +98,10 @@ reservation_object_init(struct reservation_object *obj)
        obj->staged = NULL;
 }
 
+/**
+ * reservation_object_fini - destroys a reservation object
+ * @obj: the reservation object
+ */
 static inline void
 reservation_object_fini(struct reservation_object *obj)
 {
@@ -106,6 +129,14 @@ reservation_object_fini(struct reservation_object *obj)
        ww_mutex_destroy(&obj->lock);
 }
 
+/**
+ * reservation_object_get_list - get the reservation object's
+ * shared fence list, with update-side lock held
+ * @obj: the reservation object
+ *
+ * Returns the shared fence list.  Does NOT take references to
+ * the fence.  The obj->lock must be held.
+ */
 static inline struct reservation_object_list *
 reservation_object_get_list(struct reservation_object *obj)
 {
@@ -113,6 +144,17 @@ reservation_object_get_list(struct reservation_object *obj)
                                         reservation_object_held(obj));
 }
 
+/**
+ * reservation_object_get_excl - get the reservation object's
+ * exclusive fence, with update-side lock held
+ * @obj: the reservation object
+ *
+ * Returns the exclusive fence (if any).  Does NOT take a
+ * reference.  The obj->lock must be held.
+ *
+ * RETURNS
+ * The exclusive fence or NULL
+ */
 static inline struct fence *
 reservation_object_get_excl(struct reservation_object *obj)
 {
@@ -120,6 +162,17 @@ reservation_object_get_excl(struct reservation_object *obj)
                                         reservation_object_held(obj));
 }
 
+/**
+ * reservation_object_get_excl_rcu - get the reservation object's
+ * exclusive fence, without lock held.
+ * @obj: the reservation object
+ *
+ * If there is an exclusive fence, this atomically increments its
+ * reference count and returns it.
+ *
+ * RETURNS
+ * The exclusive fence or NULL if none
+ */
 static inline struct fence *
 reservation_object_get_excl_rcu(struct reservation_object *obj)
 {
index dacb5e7..de1f643 100644 (file)
@@ -765,6 +765,8 @@ struct sctp_info {
        __u8    sctpi_s_disable_fragments;
        __u8    sctpi_s_v4mapped;
        __u8    sctpi_s_frag_interleave;
+       __u32   sctpi_s_type;
+       __u32   __reserved3;
 };
 
 struct sctp_infox {
index 37dbacf..816b754 100644 (file)
@@ -21,6 +21,9 @@ static inline int do_sys_settimeofday(const struct timespec *tv,
        struct timespec64 ts64;
 
        if (!tv)
+               return do_sys_settimeofday64(NULL, tz);
+
+       if (!timespec_valid(tv))
                return -EINVAL;
 
        ts64 = timespec_to_timespec64(*tv);
index d325c81..43a5a0e 100644 (file)
@@ -63,6 +63,8 @@ struct ip6_tnl_encap_ops {
                            u8 *protocol, struct flowi6 *fl6);
 };
 
+#ifdef CONFIG_INET
+
 extern const struct ip6_tnl_encap_ops __rcu *
                ip6tun_encaps[MAX_IPTUN_ENCAP_OPS];
 
@@ -138,7 +140,6 @@ struct net *ip6_tnl_get_link_net(const struct net_device *dev);
 int ip6_tnl_get_iflink(const struct net_device *dev);
 int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu);
 
-#ifdef CONFIG_INET
 static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
                                  struct net_device *dev)
 {
index 401038d..fea53f4 100644 (file)
@@ -61,6 +61,7 @@ psched_tdiff_bounded(psched_time_t tv1, psched_time_t tv2, psched_time_t bound)
 }
 
 struct qdisc_watchdog {
+       u64             last_expires;
        struct hrtimer  timer;
        struct Qdisc    *qdisc;
 };
index 9222db8..5f030b4 100644 (file)
@@ -1353,6 +1353,15 @@ enum ethtool_link_mode_bit_indices {
        ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT = 28,
        ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT = 29,
        ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT = 30,
+       ETHTOOL_LINK_MODE_25000baseCR_Full_BIT  = 31,
+       ETHTOOL_LINK_MODE_25000baseKR_Full_BIT  = 32,
+       ETHTOOL_LINK_MODE_25000baseSR_Full_BIT  = 33,
+       ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT = 34,
+       ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT = 35,
+       ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT        = 36,
+       ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT        = 37,
+       ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT        = 38,
+       ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT    = 39,
 
        /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit
         * 31. Please do NOT define any SUPPORTED_* or ADVERTISED_*
@@ -1361,7 +1370,7 @@ enum ethtool_link_mode_bit_indices {
         */
 
        __ETHTOOL_LINK_MODE_LAST
-         = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
+         = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
 };
 
 #define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name)     \
index eba5914..f4297c8 100644 (file)
@@ -145,6 +145,8 @@ enum {
        TCA_POLICE_PEAKRATE,
        TCA_POLICE_AVRATE,
        TCA_POLICE_RESULT,
+       TCA_POLICE_TM,
+       TCA_POLICE_PAD,
        __TCA_POLICE_MAX
 #define TCA_POLICE_RESULT TCA_POLICE_RESULT
 };
@@ -173,7 +175,7 @@ enum {
        TCA_U32_DIVISOR,
        TCA_U32_SEL,
        TCA_U32_POLICE,
-       TCA_U32_ACT,   
+       TCA_U32_ACT,
        TCA_U32_INDEV,
        TCA_U32_PCNT,
        TCA_U32_MARK,
index 04be702..318858e 100644 (file)
@@ -365,7 +365,6 @@ static struct file_system_type bpf_fs_type = {
        .name           = "bpf",
        .mount          = bpf_mount,
        .kill_sb        = kill_litter_super,
-       .fs_flags       = FS_USERNS_MOUNT,
 };
 
 MODULE_ALIAS_FS("bpf");
index 2f9f2b0..b4c1bc7 100644 (file)
@@ -426,6 +426,49 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(handle_simple_irq);
 
+/**
+ *     handle_untracked_irq - Simple and software-decoded IRQs.
+ *     @desc:  the interrupt description structure for this irq
+ *
+ *     Untracked interrupts are sent from a demultiplexing interrupt
+ *     handler when the demultiplexer does not know which device in its
+ *     multiplexed irq domain generated the interrupt. IRQs handled
+ *     through here are not subjected to stats tracking, randomness, or
+ *     spurious interrupt detection.
+ *
+ *     Note: Like handle_simple_irq, the caller is expected to handle
+ *     the ack, clear, mask and unmask issues if necessary.
+ */
+void handle_untracked_irq(struct irq_desc *desc)
+{
+       unsigned int flags = 0;
+
+       raw_spin_lock(&desc->lock);
+
+       if (!irq_may_run(desc))
+               goto out_unlock;
+
+       desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
+
+       if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
+               desc->istate |= IRQS_PENDING;
+               goto out_unlock;
+       }
+
+       desc->istate &= ~IRQS_PENDING;
+       irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
+       raw_spin_unlock(&desc->lock);
+
+       __handle_irq_event_percpu(desc, &flags);
+
+       raw_spin_lock(&desc->lock);
+       irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
+
+out_unlock:
+       raw_spin_unlock(&desc->lock);
+}
+EXPORT_SYMBOL_GPL(handle_untracked_irq);
+
 /*
  * Called unconditionally from handle_level_irq() and only for oneshot
  * interrupts from handle_fasteoi_irq()
@@ -1093,3 +1136,43 @@ int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 
        return 0;
 }
+
+/**
+ * irq_chip_pm_get - Enable power for an IRQ chip
+ * @data:      Pointer to interrupt specific data
+ *
+ * Enable the power to the IRQ chip referenced by the interrupt data
+ * structure.
+ */
+int irq_chip_pm_get(struct irq_data *data)
+{
+       int retval;
+
+       if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) {
+               retval = pm_runtime_get_sync(data->chip->parent_device);
+               if (retval < 0) {
+                       pm_runtime_put_noidle(data->chip->parent_device);
+                       return retval;
+               }
+       }
+
+       return 0;
+}
+
+/**
+ * irq_chip_pm_put - Disable power for an IRQ chip
+ * @data:      Pointer to interrupt specific data
+ *
+ * Disable the power to the IRQ chip referenced by the interrupt data
+ * structure. Note that power will only be disabled once this
+ * function has been called for all IRQs that have called irq_chip_pm_get().
+ */
+int irq_chip_pm_put(struct irq_data *data)
+{
+       int retval = 0;
+
+       if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device)
+               retval = pm_runtime_put(data->chip->parent_device);
+
+       return (retval < 0) ? retval : 0;
+}
index a15b548..d3f2490 100644 (file)
@@ -132,10 +132,10 @@ void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
        wake_up_process(action->thread);
 }
 
-irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
+irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags)
 {
        irqreturn_t retval = IRQ_NONE;
-       unsigned int flags = 0, irq = desc->irq_data.irq;
+       unsigned int irq = desc->irq_data.irq;
        struct irqaction *action;
 
        for_each_action_of_desc(desc, action) {
@@ -164,7 +164,7 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
 
                        /* Fall through to add to randomness */
                case IRQ_HANDLED:
-                       flags |= action->flags;
+                       *flags |= action->flags;
                        break;
 
                default:
@@ -174,7 +174,17 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
                retval |= res;
        }
 
-       add_interrupt_randomness(irq, flags);
+       return retval;
+}
+
+irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
+{
+       irqreturn_t retval;
+       unsigned int flags = 0;
+
+       retval = __handle_irq_event_percpu(desc, &flags);
+
+       add_interrupt_randomness(desc->irq_data.irq, flags);
 
        if (!noirqdebug)
                note_interrupt(desc, retval);
index 09be2c9..0c6f35b 100644 (file)
@@ -7,6 +7,7 @@
  */
 #include <linux/irqdesc.h>
 #include <linux/kernel_stat.h>
+#include <linux/pm_runtime.h>
 
 #ifdef CONFIG_SPARSE_IRQ
 # define IRQ_BITMAP_BITS       (NR_IRQS + 8196)
@@ -83,6 +84,7 @@ extern void irq_mark_irq(unsigned int irq);
 
 extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
 
+irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags);
 irqreturn_t handle_irq_event_percpu(struct irq_desc *desc);
 irqreturn_t handle_irq_event(struct irq_desc *desc);
 
index c427422..89b49f6 100644 (file)
@@ -125,7 +125,7 @@ int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
 
        domain = data->domain;
        if (WARN_ON(domain == NULL))
-               return;
+               return -EINVAL;
 
        if (!irq_domain_is_ipi(domain)) {
                pr_warn("Trying to destroy a non IPI domain!\n");
index 8798b6c..5d89d72 100644 (file)
@@ -567,6 +567,7 @@ static void of_phandle_args_to_fwspec(struct of_phandle_args *irq_data,
 unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
 {
        struct irq_domain *domain;
+       struct irq_data *irq_data;
        irq_hw_number_t hwirq;
        unsigned int type = IRQ_TYPE_NONE;
        int virq;
@@ -588,15 +589,46 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
        if (irq_domain_translate(domain, fwspec, &hwirq, &type))
                return 0;
 
-       if (irq_domain_is_hierarchy(domain)) {
+       /*
+        * WARN if the irqchip returns a type with bits
+        * outside the sense mask set, and clear those bits.
+        */
+       if (WARN_ON(type & ~IRQ_TYPE_SENSE_MASK))
+               type &= IRQ_TYPE_SENSE_MASK;
+
+       /*
+        * If we've already configured this interrupt,
+        * don't do it again, or hell will break loose.
+        */
+       virq = irq_find_mapping(domain, hwirq);
+       if (virq) {
                /*
-                * If we've already configured this interrupt,
-                * don't do it again, or hell will break loose.
+                * If the trigger type is not specified or matches the
+                * current trigger type then we are done so return the
+                * interrupt number.
                 */
-               virq = irq_find_mapping(domain, hwirq);
-               if (virq)
+               if (type == IRQ_TYPE_NONE || type == irq_get_trigger_type(virq))
+                       return virq;
+
+               /*
+                * If the trigger type has not been set yet, then set
+                * it now and return the interrupt number.
+                */
+               if (irq_get_trigger_type(virq) == IRQ_TYPE_NONE) {
+                       irq_data = irq_get_irq_data(virq);
+                       if (!irq_data)
+                               return 0;
+
+                       irqd_set_trigger_type(irq_data, type);
                        return virq;
+               }
+
+               pr_warn("type mismatch, failed to map hwirq-%lu for %s!\n",
+                       hwirq, of_node_full_name(to_of_node(fwspec->fwnode)));
+               return 0;
+       }
 
+       if (irq_domain_is_hierarchy(domain)) {
                virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec);
                if (virq <= 0)
                        return 0;
@@ -607,10 +639,18 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
                        return virq;
        }
 
-       /* Set type if specified and different than the current one */
-       if (type != IRQ_TYPE_NONE &&
-           type != irq_get_trigger_type(virq))
-               irq_set_irq_type(virq, type);
+       irq_data = irq_get_irq_data(virq);
+       if (!irq_data) {
+               if (irq_domain_is_hierarchy(domain))
+                       irq_domain_free_irqs(virq, 1);
+               else
+                       irq_dispose_mapping(virq);
+               return 0;
+       }
+
+       /* Store trigger type */
+       irqd_set_trigger_type(irq_data, type);
+
        return virq;
 }
 EXPORT_SYMBOL_GPL(irq_create_fwspec_mapping);
@@ -640,8 +680,12 @@ void irq_dispose_mapping(unsigned int virq)
        if (WARN_ON(domain == NULL))
                return;
 
-       irq_domain_disassociate(domain, virq);
-       irq_free_desc(virq);
+       if (irq_domain_is_hierarchy(domain)) {
+               irq_domain_free_irqs(virq, 1);
+       } else {
+               irq_domain_disassociate(domain, virq);
+               irq_free_desc(virq);
+       }
 }
 EXPORT_SYMBOL_GPL(irq_dispose_mapping);
 
index ef0bc02..00cfc85 100644 (file)
@@ -1116,6 +1116,13 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 
        new->irq = irq;
 
+       /*
+        * If the trigger type is not specified by the caller,
+        * then use the default for this interrupt.
+        */
+       if (!(new->flags & IRQF_TRIGGER_MASK))
+               new->flags |= irqd_get_trigger_type(&desc->irq_data);
+
        /*
         * Check whether the interrupt nests into another interrupt
         * thread.
@@ -1409,10 +1416,18 @@ int setup_irq(unsigned int irq, struct irqaction *act)
 
        if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
                return -EINVAL;
+
+       retval = irq_chip_pm_get(&desc->irq_data);
+       if (retval < 0)
+               return retval;
+
        chip_bus_lock(desc);
        retval = __setup_irq(irq, desc, act);
        chip_bus_sync_unlock(desc);
 
+       if (retval)
+               irq_chip_pm_put(&desc->irq_data);
+
        return retval;
 }
 EXPORT_SYMBOL_GPL(setup_irq);
@@ -1506,6 +1521,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
                }
        }
 
+       irq_chip_pm_put(&desc->irq_data);
        module_put(desc->owner);
        kfree(action->secondary);
        return action;
@@ -1648,11 +1664,16 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
        action->name = devname;
        action->dev_id = dev_id;
 
+       retval = irq_chip_pm_get(&desc->irq_data);
+       if (retval < 0)
+               return retval;
+
        chip_bus_lock(desc);
        retval = __setup_irq(irq, desc, action);
        chip_bus_sync_unlock(desc);
 
        if (retval) {
+               irq_chip_pm_put(&desc->irq_data);
                kfree(action->secondary);
                kfree(action);
        }
@@ -1730,7 +1751,14 @@ void enable_percpu_irq(unsigned int irq, unsigned int type)
        if (!desc)
                return;
 
+       /*
+        * If the trigger type is not specified by the caller, then
+        * use the default for this interrupt.
+        */
        type &= IRQ_TYPE_SENSE_MASK;
+       if (type == IRQ_TYPE_NONE)
+               type = irqd_get_trigger_type(&desc->irq_data);
+
        if (type != IRQ_TYPE_NONE) {
                int ret;
 
@@ -1822,6 +1850,7 @@ static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_
 
        unregister_handler_proc(irq, action);
 
+       irq_chip_pm_put(&desc->irq_data);
        module_put(desc->owner);
        return action;
 
@@ -1884,10 +1913,18 @@ int setup_percpu_irq(unsigned int irq, struct irqaction *act)
 
        if (!desc || !irq_settings_is_per_cpu_devid(desc))
                return -EINVAL;
+
+       retval = irq_chip_pm_get(&desc->irq_data);
+       if (retval < 0)
+               return retval;
+
        chip_bus_lock(desc);
        retval = __setup_irq(irq, desc, act);
        chip_bus_sync_unlock(desc);
 
+       if (retval)
+               irq_chip_pm_put(&desc->irq_data);
+
        return retval;
 }
 
@@ -1931,12 +1968,18 @@ int request_percpu_irq(unsigned int irq, irq_handler_t handler,
        action->name = devname;
        action->percpu_dev_id = dev_id;
 
+       retval = irq_chip_pm_get(&desc->irq_data);
+       if (retval < 0)
+               return retval;
+
        chip_bus_lock(desc);
        retval = __setup_irq(irq, desc, action);
        chip_bus_sync_unlock(desc);
 
-       if (retval)
+       if (retval) {
+               irq_chip_pm_put(&desc->irq_data);
                kfree(action);
+       }
 
        return retval;
 }
index 4e1b947..f30425d 100644 (file)
@@ -311,7 +311,6 @@ void register_handler_proc(unsigned int irq, struct irqaction *action)
                                        !name_unique(irq, action))
                return;
 
-       memset(name, 0, MAX_NAMELEN);
        snprintf(name, MAX_NAMELEN, "%s", action->name);
 
        /* create /proc/irq/1234/handler/ */
@@ -340,7 +339,6 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
        if (desc->dir)
                goto out_unlock;
 
-       memset(name, 0, MAX_NAMELEN);
        sprintf(name, "%d", irq);
 
        /* create /proc/irq/1234 */
@@ -386,7 +384,6 @@ void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
 #endif
        remove_proc_entry("spurious", desc->dir);
 
-       memset(name, 0, MAX_NAMELEN);
        sprintf(name, "%u", irq);
        remove_proc_entry(name, root_irq_dir);
 }
@@ -421,12 +418,8 @@ void init_irq_proc(void)
        /*
         * Create entries for all existing IRQs.
         */
-       for_each_irq_desc(irq, desc) {
-               if (!desc)
-                       continue;
-
+       for_each_irq_desc(irq, desc)
                register_irq_proc(irq, desc);
-       }
 }
 
 #ifdef CONFIG_GENERIC_IRQ_SHOW
index 8c7392c..e99df0f 100644 (file)
@@ -425,6 +425,7 @@ void destroy_hrtimer_on_stack(struct hrtimer *timer)
 {
        debug_object_free(timer, &hrtimer_debug_descr);
 }
+EXPORT_SYMBOL_GPL(destroy_hrtimer_on_stack);
 
 #else
 static inline void debug_hrtimer_init(struct hrtimer *timer) { }
index 77d7d03..b9cfdbf 100644 (file)
@@ -1841,6 +1841,9 @@ config TEST_BITMAP
 
          If unsure, say N.
 
+config TEST_UUID
+       tristate "Test functions located in the uuid module at runtime"
+
 config TEST_RHASHTABLE
        tristate "Perform selftest on resizable hash table"
        default n
index 499fb35..ff6a7a6 100644 (file)
@@ -58,6 +58,7 @@ obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o
 obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
 obj-$(CONFIG_TEST_PRINTF) += test_printf.o
 obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
+obj-$(CONFIG_TEST_UUID) += test_uuid.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
diff --git a/lib/test_uuid.c b/lib/test_uuid.c
new file mode 100644 (file)
index 0000000..547d312
--- /dev/null
@@ -0,0 +1,133 @@
+/*
+ * Test cases for lib/uuid.c module.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/uuid.h>
+
+struct test_uuid_data {
+       const char *uuid;
+       uuid_le le;
+       uuid_be be;
+};
+
+static const struct test_uuid_data test_uuid_test_data[] = {
+       {
+               .uuid = "c33f4995-3701-450e-9fbf-206a2e98e576",
+               .le = UUID_LE(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
+               .be = UUID_BE(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
+       },
+       {
+               .uuid = "64b4371c-77c1-48f9-8221-29f054fc023b",
+               .le = UUID_LE(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
+               .be = UUID_BE(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
+       },
+       {
+               .uuid = "0cb4ddff-a545-4401-9d06-688af53e7f84",
+               .le = UUID_LE(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
+               .be = UUID_BE(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
+       },
+};
+
+static const char * const test_uuid_wrong_data[] = {
+       "c33f4995-3701-450e-9fbf206a2e98e576 ", /* no hyphen(s) */
+       "64b4371c-77c1-48f9-8221-29f054XX023b", /* invalid character(s) */
+       "0cb4ddff-a545-4401-9d06-688af53e",     /* not enough data */
+};
+
+static unsigned total_tests __initdata;
+static unsigned failed_tests __initdata;
+
+static void __init test_uuid_failed(const char *prefix, bool wrong, bool be,
+                                   const char *data, const char *actual)
+{
+       pr_err("%s test #%u %s %s data: '%s'\n",
+              prefix,
+              total_tests,
+              wrong ? "passed on wrong" : "failed on",
+              be ? "BE" : "LE",
+              data);
+       if (actual && *actual)
+               pr_err("%s test #%u actual data: '%s'\n",
+                      prefix,
+                      total_tests,
+                      actual);
+       failed_tests++;
+}
+
+static void __init test_uuid_test(const struct test_uuid_data *data)
+{
+       uuid_le le;
+       uuid_be be;
+       char buf[48];
+
+       /* LE */
+       total_tests++;
+       if (uuid_le_to_bin(data->uuid, &le))
+               test_uuid_failed("conversion", false, false, data->uuid, NULL);
+
+       total_tests++;
+       if (uuid_le_cmp(data->le, le)) {
+               sprintf(buf, "%pUl", &le);
+               test_uuid_failed("cmp", false, false, data->uuid, buf);
+       }
+
+       /* BE */
+       total_tests++;
+       if (uuid_be_to_bin(data->uuid, &be))
+               test_uuid_failed("conversion", false, true, data->uuid, NULL);
+
+       total_tests++;
+       if (uuid_be_cmp(data->be, be)) {
+               sprintf(buf, "%pUb", &be);
+               test_uuid_failed("cmp", false, true, data->uuid, buf);
+       }
+}
+
+static void __init test_uuid_wrong(const char *data)
+{
+       uuid_le le;
+       uuid_be be;
+
+       /* LE */
+       total_tests++;
+       if (!uuid_le_to_bin(data, &le))
+               test_uuid_failed("negative", true, false, data, NULL);
+
+       /* BE */
+       total_tests++;
+       if (!uuid_be_to_bin(data, &be))
+               test_uuid_failed("negative", true, true, data, NULL);
+}
+
+static int __init test_uuid_init(void)
+{
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(test_uuid_test_data); i++)
+               test_uuid_test(&test_uuid_test_data[i]);
+
+       for (i = 0; i < ARRAY_SIZE(test_uuid_wrong_data); i++)
+               test_uuid_wrong(test_uuid_wrong_data[i]);
+
+       if (failed_tests == 0)
+               pr_info("all %u tests passed\n", total_tests);
+       else
+               pr_err("failed %u out of %u tests\n", failed_tests, total_tests);
+
+       return failed_tests ? -EINVAL : 0;
+}
+module_init(test_uuid_init);
+
+static void __exit test_uuid_exit(void)
+{
+       /* do nothing */
+}
+module_exit(test_uuid_exit);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_LICENSE("Dual BSD/GPL");
index e116ae5..37687af 100644 (file)
@@ -106,8 +106,8 @@ static int __uuid_to_bin(const char *uuid, __u8 b[16], const u8 ei[16])
                return -EINVAL;
 
        for (i = 0; i < 16; i++) {
-               int hi = hex_to_bin(uuid[si[i]] + 0);
-               int lo = hex_to_bin(uuid[si[i]] + 1);
+               int hi = hex_to_bin(uuid[si[i] + 0]);
+               int lo = hex_to_bin(uuid[si[i] + 1]);
 
                b[ei[i]] = (hi << 4) | lo;
        }
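
The uuid.c fix moves the `+ 0`/`+ 1` inside the array index: the old expression `hex_to_bin(uuid[si[i]] + 1)` added 1 to the character value itself, so both nibbles were derived from the same input character, while the corrected `uuid[si[i] + 1]` reads the next character of the string. A standalone illustration of the difference, with a minimal hex_to_bin() lookalike:

    #include <stdio.h>

    /* Minimal userspace lookalike of hex_to_bin(): 0-15, or -1 if not hex. */
    static int hex_to_bin(char c)
    {
            if (c >= '0' && c <= '9') return c - '0';
            if (c >= 'a' && c <= 'f') return c - 'a' + 10;
            if (c >= 'A' && c <= 'F') return c - 'A' + 10;
            return -1;
    }

    int main(void)
    {
            const char *s = "4e";           /* one byte of a UUID, as text */
            int i = 0;

            /* buggy form: offset applied to the character value */
            int hi_bug = hex_to_bin(s[i] + 0);      /* '4'       -> 4 */
            int lo_bug = hex_to_bin(s[i] + 1);      /* '4'+1='5' -> 5 */

            /* fixed form: offset applied to the index */
            int hi_fix = hex_to_bin(s[i + 0]);      /* '4' -> 4  */
            int lo_fix = hex_to_bin(s[i + 1]);      /* 'e' -> 14 */

            printf("buggy byte 0x%02x, fixed byte 0x%02x\n",
                   (hi_bug << 4) | lo_bug, (hi_fix << 4) | lo_fix);
            return 0;
    }
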
index 925b431..58c69c9 100644 (file)
@@ -2896,6 +2896,7 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg)
         * ordering is imposed by list_lru_node->lock taken by
         * memcg_drain_all_list_lrus().
         */
+       rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
        css_for_each_descendant_pre(css, &memcg->css) {
                child = mem_cgroup_from_css(css);
                BUG_ON(child->kmemcg_id != kmemcg_id);
@@ -2903,6 +2904,8 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg)
                if (!memcg->use_hierarchy)
                        break;
        }
+       rcu_read_unlock();
+
        memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
 
        memcg_free_cache_id(kmemcg_id);
index dfb1ab6..acbc432 100644 (file)
@@ -625,8 +625,6 @@ void try_oom_reaper(struct task_struct *tsk)
        if (atomic_read(&mm->mm_users) > 1) {
                rcu_read_lock();
                for_each_process(p) {
-                       bool exiting;
-
                        if (!process_shares_mm(p, mm))
                                continue;
                        if (fatal_signal_pending(p))
@@ -636,10 +634,7 @@ void try_oom_reaper(struct task_struct *tsk)
                         * If the task is exiting make sure the whole thread group
                         * is exiting and cannot access mm anymore.
                         */
-                       spin_lock_irq(&p->sighand->siglock);
-                       exiting = signal_group_exit(p->signal);
-                       spin_unlock_irq(&p->sighand->siglock);
-                       if (exiting)
+                       if (signal_group_exit(p->signal))
                                continue;
 
                        /* Give up */
index f8f3bfc..6903b69 100644 (file)
@@ -656,6 +656,9 @@ static inline void set_page_guard(struct zone *zone, struct page *page,
                return;
 
        page_ext = lookup_page_ext(page);
+       if (unlikely(!page_ext))
+               return;
+
        __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
 
        INIT_LIST_HEAD(&page->lru);
@@ -673,6 +676,9 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
                return;
 
        page_ext = lookup_page_ext(page);
+       if (unlikely(!page_ext))
+               return;
+
        __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
 
        set_page_private(page, 0);
@@ -2609,11 +2615,12 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
                                page = list_last_entry(list, struct page, lru);
                        else
                                page = list_first_entry(list, struct page, lru);
-               } while (page && check_new_pcp(page));
 
-               __dec_zone_state(zone, NR_ALLOC_BATCH);
-               list_del(&page->lru);
-               pcp->count--;
+                       __dec_zone_state(zone, NR_ALLOC_BATCH);
+                       list_del(&page->lru);
+                       pcp->count--;
+
+               } while (check_new_pcp(page));
        } else {
                /*
                 * We most definitely don't want callers attempting to
@@ -3023,6 +3030,7 @@ reset_fair:
                apply_fair = false;
                fair_skipped = false;
                reset_alloc_batches(ac->preferred_zoneref->zone);
+               z = ac->preferred_zoneref;
                goto zonelist_scan;
        }
 
@@ -3596,6 +3604,17 @@ retry:
         */
        alloc_flags = gfp_to_alloc_flags(gfp_mask);
 
+       /*
+        * Reset the zonelist iterators if memory policies can be ignored.
+        * These allocations are high priority and system rather than user
+        * orientated.
+        */
+       if ((alloc_flags & ALLOC_NO_WATERMARKS) || !(alloc_flags & ALLOC_CPUSET)) {
+               ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
+               ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
+                                       ac->high_zoneidx, ac->nodemask);
+       }
+
        /* This is the last chance, in general, before the goto nopage. */
        page = get_page_from_freelist(gfp_mask, order,
                                alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
@@ -3604,12 +3623,6 @@ retry:
 
        /* Allocate without watermarks if the context allows */
        if (alloc_flags & ALLOC_NO_WATERMARKS) {
-               /*
-                * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds
-                * the allocation is high priority and these type of
-                * allocations are system rather than user orientated
-                */
-               ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
                page = get_page_from_freelist(gfp_mask, order,
                                                ALLOC_NO_WATERMARKS, ac);
                if (page)
@@ -3808,7 +3821,11 @@ retry_cpuset:
        /* Dirty zone balancing only done in the fast path */
        ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
 
-       /* The preferred zone is used for statistics later */
+       /*
+        * The preferred zone is used for statistics but crucially it is
+        * also used as the starting point for the zonelist iterator. It
+        * may get reset for allocations that ignore memory policies.
+        */
        ac.preferred_zoneref = first_zones_zonelist(ac.zonelist,
                                        ac.high_zoneidx, ac.nodemask);
        if (!ac.preferred_zoneref) {
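
Two separate corrections sit in the page_alloc.c hunks: buffered_rmqueue() now unlinks a per-cpu page inside the retry loop, so a page rejected by check_new_pcp() is actually removed before the next candidate is examined, and the zonelist iterator is reset once, up front, for allocations that may ignore memory policies. A small standalone sketch of the first point, draining a list while validating each entry inside the loop; is_bad() plays the role of check_new_pcp():

    #include <stdio.h>
    #include <stdlib.h>

    struct node { int value; struct node *next; };

    /* stand-in for check_new_pcp(): treat even values as bad entries */
    static int is_bad(const struct node *n) { return (n->value & 1) == 0; }

    /*
     * Pop entries until one passes validation.  Bad entries must be unlinked
     * inside the loop; unlinking only once, after the loop, would keep
     * re-examining the same head entry.
     */
    static struct node *take_first_good(struct node **head)
    {
            struct node *n;

            do {
                    n = *head;
                    if (!n)
                            return NULL;

                    *head = n->next;        /* unlink inside the loop */
                    if (is_bad(n)) {
                            free(n);
                            n = NULL;
                    }
            } while (!n);

            return n;
    }

    int main(void)
    {
            struct node *head = NULL;

            for (int v = 5; v >= 2; v--) {  /* list ends up as 2, 3, 4, 5 */
                    struct node *n = malloc(sizeof(*n));
                    n->value = v;
                    n->next = head;
                    head = n;
            }

            struct node *good = take_first_good(&head);
            printf("first good value: %d\n", good ? good->value : -1);

            free(good);
            while (head) {
                    struct node *tmp = head;
                    head = head->next;
                    free(tmp);
            }
            return 0;
    }
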
index 792b56d..c6cda3e 100644 (file)
@@ -55,6 +55,8 @@ void __reset_page_owner(struct page *page, unsigned int order)
 
        for (i = 0; i < (1 << order); i++) {
                page_ext = lookup_page_ext(page + i);
+               if (unlikely(!page_ext))
+                       continue;
                __clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
        }
 }
@@ -62,6 +64,7 @@ void __reset_page_owner(struct page *page, unsigned int order)
 void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
 {
        struct page_ext *page_ext = lookup_page_ext(page);
+
        struct stack_trace trace = {
                .nr_entries = 0,
                .max_entries = ARRAY_SIZE(page_ext->trace_entries),
@@ -69,6 +72,9 @@ void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
                .skip = 3,
        };
 
+       if (unlikely(!page_ext))
+               return;
+
        save_stack_trace(&trace);
 
        page_ext->order = order;
@@ -82,6 +88,8 @@ void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
 void __set_page_owner_migrate_reason(struct page *page, int reason)
 {
        struct page_ext *page_ext = lookup_page_ext(page);
+       if (unlikely(!page_ext))
+               return;
 
        page_ext->last_migrate_reason = reason;
 }
@@ -89,6 +97,12 @@ void __set_page_owner_migrate_reason(struct page *page, int reason)
 gfp_t __get_page_owner_gfp(struct page *page)
 {
        struct page_ext *page_ext = lookup_page_ext(page);
+       if (unlikely(!page_ext))
+               /*
+                * The caller just returns 0 if no valid gfp
+                * The caller just returns 0 if there is no valid gfp,
+                * so return 0 here too.
+               return 0;
 
        return page_ext->gfp_mask;
 }
@@ -99,6 +113,9 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage)
        struct page_ext *new_ext = lookup_page_ext(newpage);
        int i;
 
+       if (unlikely(!old_ext || !new_ext))
+               return;
+
        new_ext->order = old_ext->order;
        new_ext->gfp_mask = old_ext->gfp_mask;
        new_ext->nr_entries = old_ext->nr_entries;
@@ -193,6 +210,11 @@ void __dump_page_owner(struct page *page)
        gfp_t gfp_mask = page_ext->gfp_mask;
        int mt = gfpflags_to_migratetype(gfp_mask);
 
+       if (unlikely(!page_ext)) {
+               pr_alert("There is no page extension available.\n");
+               return;
+       }
+
        if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
                pr_alert("page_owner info is not active (free page?)\n");
                return;
@@ -251,6 +273,8 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
                }
 
                page_ext = lookup_page_ext(page);
+               if (unlikely(!page_ext))
+                       continue;
 
                /*
                 * Some pages could be missed by concurrent allocation or free,
@@ -317,6 +341,8 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
                                continue;
 
                        page_ext = lookup_page_ext(page);
+                       if (unlikely(!page_ext))
+                               continue;
 
                         /* Maybe overlapping zone */
                        if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
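
Each page_owner hunk applies the same guard: skip the entry or bail out when lookup_page_ext() returns NULL. In the __dump_page_owner hunk the gfp_mask read still sits above the new check, so the safer ordering, sketched below with a hypothetical lookup helper, is to test the pointer before touching any field:

    #include <stddef.h>
    #include <stdio.h>

    struct page_meta { unsigned int gfp_mask; };

    /* Hypothetical lookup that may legitimately return NULL. */
    static struct page_meta *lookup_meta(int present)
    {
            static struct page_meta meta = { .gfp_mask = 0xd0 };
            return present ? &meta : NULL;
    }

    static void dump_meta(int present)
    {
            struct page_meta *meta = lookup_meta(present);

            if (!meta) {                    /* check first ...            */
                    puts("no page metadata available");
                    return;
            }

            printf("gfp_mask=0x%x\n", meta->gfp_mask);  /* ... read after */
    }

    int main(void)
    {
            dump_meta(1);
            dump_meta(0);
            return 0;
    }
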
index 1eae5fa..2e647c6 100644 (file)
@@ -54,6 +54,9 @@ static inline void set_page_poison(struct page *page)
        struct page_ext *page_ext;
 
        page_ext = lookup_page_ext(page);
+       if (unlikely(!page_ext))
+               return;
+
        __set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
 }
 
@@ -62,6 +65,9 @@ static inline void clear_page_poison(struct page *page)
        struct page_ext *page_ext;
 
        page_ext = lookup_page_ext(page);
+       if (unlikely(!page_ext))
+               return;
+
        __clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
 }
 
@@ -70,7 +76,7 @@ bool page_is_poisoned(struct page *page)
        struct page_ext *page_ext;
 
        page_ext = lookup_page_ext(page);
-       if (!page_ext)
+       if (unlikely(!page_ext))
                return false;
 
        return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
index cf7ad1a..e11475c 100644 (file)
@@ -1105,7 +1105,7 @@ EXPORT_SYMBOL_GPL(vm_unmap_aliases);
  */
 void vm_unmap_ram(const void *mem, unsigned int count)
 {
-       unsigned long size = count << PAGE_SHIFT;
+       unsigned long size = (unsigned long)count << PAGE_SHIFT;
        unsigned long addr = (unsigned long)mem;
 
        BUG_ON(!addr);
@@ -1140,7 +1140,7 @@ EXPORT_SYMBOL(vm_unmap_ram);
  */
 void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
 {
-       unsigned long size = count << PAGE_SHIFT;
+       unsigned long size = (unsigned long)count << PAGE_SHIFT;
        unsigned long addr;
        void *mem;
 
@@ -1574,14 +1574,15 @@ void *vmap(struct page **pages, unsigned int count,
                unsigned long flags, pgprot_t prot)
 {
        struct vm_struct *area;
+       unsigned long size;             /* In bytes */
 
        might_sleep();
 
        if (count > totalram_pages)
                return NULL;
 
-       area = get_vm_area_caller((count << PAGE_SHIFT), flags,
-                                       __builtin_return_address(0));
+       size = (unsigned long)count << PAGE_SHIFT;
+       area = get_vm_area_caller(size, flags, __builtin_return_address(0));
        if (!area)
                return NULL;
 
index 77e42ef..cb2a67b 100644 (file)
@@ -1061,6 +1061,8 @@ static void pagetypeinfo_showmixedcount_print(struct seq_file *m,
                                continue;
 
                        page_ext = lookup_page_ext(page);
+                       if (unlikely(!page_ext))
+                               continue;
 
                        if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
                                continue;
index 34917d5..8f9e89c 100644 (file)
@@ -412,7 +412,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
                /* HEADLESS page stored */
                bud = HEADLESS;
        } else {
-               bud = (handle - zhdr->first_num) & BUDDY_MASK;
+               bud = handle_to_buddy(handle);
 
                switch (bud) {
                case FIRST:
@@ -572,15 +572,19 @@ next:
                        pool->pages_nr--;
                        spin_unlock(&pool->lock);
                        return 0;
-               } else if (zhdr->first_chunks != 0 &&
-                          zhdr->last_chunks != 0 && zhdr->middle_chunks != 0) {
-                       /* Full, add to buddied list */
-                       list_add(&zhdr->buddy, &pool->buddied);
-               } else if (!test_bit(PAGE_HEADLESS, &page->private)) {
-                       z3fold_compact_page(zhdr);
-                       /* add to unbuddied list */
-                       freechunks = num_free_chunks(zhdr);
-                       list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
+               } else if (!test_bit(PAGE_HEADLESS, &page->private)) {
+                       if (zhdr->first_chunks != 0 &&
+                           zhdr->last_chunks != 0 &&
+                           zhdr->middle_chunks != 0) {
+                               /* Full, add to buddied list */
+                               list_add(&zhdr->buddy, &pool->buddied);
+                       } else {
+                               z3fold_compact_page(zhdr);
+                               /* add to unbuddied list */
+                               freechunks = num_free_chunks(zhdr);
+                               list_add(&zhdr->buddy,
+                                        &pool->unbuddied[freechunks]);
+                       }
                }
 
                /* add to beginning of LRU */
index a1e273a..82a116b 100644 (file)
@@ -290,6 +290,10 @@ static void vlan_sync_address(struct net_device *dev,
        if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr))
                return;
 
+       /* vlan continues to inherit address of lower device */
+       if (vlan_dev_inherit_address(vlandev, dev))
+               goto out;
+
        /* vlan address was different from the old address and is equal to
         * the new address */
        if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
@@ -302,6 +306,7 @@ static void vlan_sync_address(struct net_device *dev,
            !ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
                dev_uc_add(dev, vlandev->dev_addr);
 
+out:
        ether_addr_copy(vlan->real_dev_addr, dev->dev_addr);
 }
 
index 9d010a0..cc15579 100644 (file)
@@ -109,6 +109,8 @@ int vlan_check_real_dev(struct net_device *real_dev,
 void vlan_setup(struct net_device *dev);
 int register_vlan_dev(struct net_device *dev);
 void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
+bool vlan_dev_inherit_address(struct net_device *dev,
+                             struct net_device *real_dev);
 
 static inline u32 vlan_get_ingress_priority(struct net_device *dev,
                                            u16 vlan_tci)
index e7e6257..86ae75b 100644 (file)
@@ -245,6 +245,17 @@ void vlan_dev_get_realdev_name(const struct net_device *dev, char *result)
        strncpy(result, vlan_dev_priv(dev)->real_dev->name, 23);
 }
 
+bool vlan_dev_inherit_address(struct net_device *dev,
+                             struct net_device *real_dev)
+{
+       if (dev->addr_assign_type != NET_ADDR_STOLEN)
+               return false;
+
+       ether_addr_copy(dev->dev_addr, real_dev->dev_addr);
+       call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+       return true;
+}
+
 static int vlan_dev_open(struct net_device *dev)
 {
        struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
@@ -255,7 +266,8 @@ static int vlan_dev_open(struct net_device *dev)
            !(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
                return -ENETDOWN;
 
-       if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) {
+       if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr) &&
+           !vlan_dev_inherit_address(dev, real_dev)) {
                err = dev_uc_add(real_dev, dev->dev_addr);
                if (err < 0)
                        goto out;
@@ -560,8 +572,10 @@ static int vlan_dev_init(struct net_device *dev)
        /* ipv6 shared card related stuff */
        dev->dev_id = real_dev->dev_id;
 
-       if (is_zero_ether_addr(dev->dev_addr))
-               eth_hw_addr_inherit(dev, real_dev);
+       if (is_zero_ether_addr(dev->dev_addr)) {
+               ether_addr_copy(dev->dev_addr, real_dev->dev_addr);
+               dev->addr_assign_type = NET_ADDR_STOLEN;
+       }
        if (is_zero_ether_addr(dev->broadcast))
                memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
 
index 4fd6af4..adb6e3d 100644 (file)
@@ -124,7 +124,7 @@ as_indicate_complete:
                break;
        case as_addparty:
        case as_dropparty:
-               sk->sk_err_soft = msg->reply;
+               sk->sk_err_soft = -msg->reply;
                                        /* < 0 failure, otherwise ep_ref */
                clear_bit(ATM_VF_WAITING, &vcc->flags);
                break;
index 3fa0a9e..878563a 100644 (file)
@@ -546,7 +546,7 @@ static int svc_addparty(struct socket *sock, struct sockaddr *sockaddr,
                schedule();
        }
        finish_wait(sk_sleep(sk), &wait);
-       error = xchg(&sk->sk_err_soft, 0);
+       error = -xchg(&sk->sk_err_soft, 0);
 out:
        release_sock(sk);
        return error;
@@ -573,7 +573,7 @@ static int svc_dropparty(struct socket *sock, int ep_ref)
                error = -EUNATCH;
                goto out;
        }
-       error = xchg(&sk->sk_err_soft, 0);
+       error = -xchg(&sk->sk_err_soft, 0);
 out:
        release_sock(sk);
        return error;
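
The negations in both ATM hunks line up the sign conventions: msg->reply carries a negative errno on failure, sk_err_soft is expected to hold a positive errno, and the path returning to the caller flips it back to a negative errno. A tiny sketch of that round trip, with a plain variable standing in for sk_err_soft:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    static int pending_err;     /* plays the role of sk->sk_err_soft: positive errno */

    /* internal completion: reply is a negative errno on failure */
    static void record_reply(int reply)
    {
            if (reply < 0)
                    pending_err = -reply;   /* store as positive */
    }

    /* what the caller eventually sees: a negative errno again */
    static int consume_error(void)
    {
            int err = pending_err;

            pending_err = 0;
            return -err;
    }

    int main(void)
    {
            record_reply(-EUNATCH);
            int ret = consume_error();
            printf("caller sees %d (%s)\n", ret, strerror(-ret));
            return 0;
    }
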
index 0160d7d..8946959 100644 (file)
@@ -1276,9 +1276,9 @@ static bool target_should_be_paused(struct ceph_osd_client *osdc,
                                    const struct ceph_osd_request_target *t,
                                    struct ceph_pg_pool_info *pi)
 {
-       bool pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD);
-       bool pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) ||
-                      ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
+       bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
+       bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
+                      ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
                       __pool_full(pi);
 
        WARN_ON(pi->id != t->base_oloc.pool);
@@ -1303,8 +1303,7 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
        bool force_resend = false;
        bool need_check_tiering = false;
        bool need_resend = false;
-       bool sort_bitwise = ceph_osdmap_flag(osdc->osdmap,
-                                            CEPH_OSDMAP_SORTBITWISE);
+       bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
        enum calc_target_result ct_res;
        int ret;
 
@@ -1540,9 +1539,9 @@ static void encode_request(struct ceph_osd_request *req, struct ceph_msg *msg)
         */
        msg->hdr.data_off = cpu_to_le16(req->r_data_offset);
 
-       dout("%s req %p oid %*pE oid_len %d front %zu data %u\n", __func__,
-            req, req->r_t.target_oid.name_len, req->r_t.target_oid.name,
-            req->r_t.target_oid.name_len, msg->front.iov_len, data_len);
+       dout("%s req %p oid %s oid_len %d front %zu data %u\n", __func__,
+            req, req->r_t.target_oid.name, req->r_t.target_oid.name_len,
+            msg->front.iov_len, data_len);
 }
 
 /*
@@ -1590,9 +1589,9 @@ static void maybe_request_map(struct ceph_osd_client *osdc)
        verify_osdc_locked(osdc);
        WARN_ON(!osdc->osdmap->epoch);
 
-       if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
-           ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD) ||
-           ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR)) {
+       if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
+           ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) ||
+           ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
                dout("%s osdc %p continuous\n", __func__, osdc);
                continuous = true;
        } else {
@@ -1629,19 +1628,19 @@ again:
        }
 
        if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
-           ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR)) {
+           ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
                dout("req %p pausewr\n", req);
                req->r_t.paused = true;
                maybe_request_map(osdc);
        } else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
-                  ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD)) {
+                  ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
                dout("req %p pauserd\n", req);
                req->r_t.paused = true;
                maybe_request_map(osdc);
        } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
                   !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
                                     CEPH_OSD_FLAG_FULL_FORCE)) &&
-                  (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
+                  (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
                    pool_full(osdc, req->r_t.base_oloc.pool))) {
                dout("req %p full/pool_full\n", req);
                pr_warn_ratelimited("FULL or reached pool quota\n");
@@ -2280,7 +2279,7 @@ static void send_linger_ping(struct ceph_osd_linger_request *lreq)
        struct ceph_osd_request *req = lreq->ping_req;
        struct ceph_osd_req_op *op = &req->r_ops[0];
 
-       if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD)) {
+       if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
                dout("%s PAUSERD\n", __func__);
                return;
        }
@@ -2893,6 +2892,9 @@ static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
                        dout("req %p tid %llu cb\n", req, req->r_tid);
                        __complete_request(req);
                }
+               if (m.flags & CEPH_OSD_FLAG_ONDISK)
+                       complete_all(&req->r_safe_completion);
+               ceph_osdc_put_request(req);
        } else {
                if (req->r_unsafe_callback) {
                        dout("req %p tid %llu unsafe-cb\n", req, req->r_tid);
@@ -2901,10 +2903,7 @@ static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
                        WARN_ON(1);
                }
        }
-       if (m.flags & CEPH_OSD_FLAG_ONDISK)
-               complete_all(&req->r_safe_completion);
 
-       ceph_osdc_put_request(req);
        return;
 
 fail_request:
@@ -3050,7 +3049,7 @@ static int handle_one_map(struct ceph_osd_client *osdc,
        bool skipped_map = false;
        bool was_full;
 
-       was_full = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL);
+       was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
        set_pool_was_full(osdc);
 
        if (incremental)
@@ -3088,7 +3087,7 @@ static int handle_one_map(struct ceph_osd_client *osdc,
                osdc->osdmap = newmap;
        }
 
-       was_full &= !ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL);
+       was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
        scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
                      need_resend, need_resend_linger);
 
@@ -3174,9 +3173,9 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
        if (ceph_check_fsid(osdc->client, &fsid) < 0)
                goto bad;
 
-       was_pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD);
-       was_pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) ||
-                     ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
+       was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
+       was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
+                     ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
                      have_pool_full(osdc);
 
        /* incremental maps */
@@ -3238,9 +3237,9 @@ done:
         * we find out when we are no longer full and stop returning
         * ENOSPC.
         */
-       pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD);
-       pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) ||
-                 ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
+       pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
+       pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
+                 ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
                  have_pool_full(osdc);
        if (was_pauserd || was_pausewr || pauserd || pausewr)
                maybe_request_map(osdc);
index cde52e9..03062bb 100644 (file)
@@ -1778,8 +1778,8 @@ int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap,
        raw_pgid->seed = ceph_str_hash(pi->object_hash, oid->name,
                                       oid->name_len);
 
-       dout("%s %*pE -> raw_pgid %llu.%x\n", __func__, oid->name_len,
-            oid->name, raw_pgid->pool, raw_pgid->seed);
+       dout("%s %s -> raw_pgid %llu.%x\n", __func__, oid->name,
+            raw_pgid->pool, raw_pgid->seed);
        return 0;
 }
 EXPORT_SYMBOL(ceph_object_locator_to_pg);
index 941c284..2cab489 100644 (file)
@@ -55,18 +55,21 @@ int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num, gfp_t gfp)
        spin_lock_irqsave(&bm_pool->lock, flags);
        if (bm_pool->buf_num == bm_pool->size) {
                pr_warn("pool already filled\n");
+               spin_unlock_irqrestore(&bm_pool->lock, flags);
                return bm_pool->buf_num;
        }
 
        if (buf_num + bm_pool->buf_num > bm_pool->size) {
                pr_warn("cannot allocate %d buffers for pool\n",
                        buf_num);
+               spin_unlock_irqrestore(&bm_pool->lock, flags);
                return 0;
        }
 
        if ((buf_num + bm_pool->buf_num) < bm_pool->buf_num) {
                pr_warn("Adding %d buffers to the %d current buffers will overflow\n",
                        buf_num,  bm_pool->buf_num);
+               spin_unlock_irqrestore(&bm_pool->lock, flags);
                return 0;
        }
 
index 8604ae2..8b02df0 100644 (file)
@@ -2245,10 +2245,8 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
        hrtimer_set_expires(&t.timer, spin_until);
 
        remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer));
-       if (remaining <= 0) {
-               pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
-               return;
-       }
+       if (remaining <= 0)
+               goto out;
 
        start_time = ktime_get();
        if (remaining < 100000) {
@@ -2273,7 +2271,9 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
        }
 
        pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
+out:
        pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
+       destroy_hrtimer_on_stack(&t.timer);
 }
 
 static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
index ca207db..116187b 100644 (file)
@@ -1289,8 +1289,8 @@ ieee802154_llsec_parse_dev_addr(struct nlattr *nla,
                                     nl802154_dev_addr_policy))
                return -EINVAL;
 
-       if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] &&
-           !attrs[NL802154_DEV_ADDR_ATTR_MODE] &&
+       if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] ||
+           !attrs[NL802154_DEV_ADDR_ATTR_MODE] ||
            !(attrs[NL802154_DEV_ADDR_ATTR_SHORT] ||
              attrs[NL802154_DEV_ADDR_ATTR_EXTENDED]))
                return -EINVAL;
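
The one-character change turns the test from "reject only when every attribute is absent" into "reject when any required attribute is absent": a device address needs a PAN ID, an addressing mode, and at least one of a short or an extended address. The corrected predicate, restated as a standalone check:

    #include <stdbool.h>
    #include <stdio.h>

    struct dev_addr_attrs {
            bool has_pan_id;
            bool has_mode;
            bool has_short;
            bool has_extended;
    };

    /* valid only with PAN ID and MODE present, plus SHORT or EXTENDED */
    static bool dev_addr_valid(const struct dev_addr_attrs *a)
    {
            return a->has_pan_id && a->has_mode &&
                   (a->has_short || a->has_extended);
    }

    int main(void)
    {
            struct dev_addr_attrs missing_mode = { .has_pan_id = true, .has_short = true };
            struct dev_addr_attrs complete = {
                    .has_pan_id = true, .has_mode = true, .has_extended = true,
            };

            printf("missing mode -> %s\n", dev_addr_valid(&missing_mode) ? "accept" : "reject");
            printf("complete     -> %s\n", dev_addr_valid(&complete) ? "accept" : "reject");
            return 0;
    }
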
index 377424e..d39e9e4 100644 (file)
@@ -1681,6 +1681,14 @@ static __net_init int inet_init_net(struct net *net)
         */
        net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1);
        net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0);
+
+       /* Default values for sysctl-controlled parameters.
+        * We set them here, in case sysctl is not compiled.
+        */
+       net->ipv4.sysctl_ip_default_ttl = IPDEFTTL;
+       net->ipv4.sysctl_ip_dynaddr = 0;
+       net->ipv4.sysctl_ip_early_demux = 1;
+
        return 0;
 }
 
index bb04195..1cb67de 100644 (file)
@@ -999,10 +999,6 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
        if (!net->ipv4.sysctl_local_reserved_ports)
                goto err_ports;
 
-       net->ipv4.sysctl_ip_default_ttl = IPDEFTTL;
-       net->ipv4.sysctl_ip_dynaddr = 0;
-       net->ipv4.sysctl_ip_early_demux = 1;
-
        return 0;
 
 err_ports:
index 3f84113..2343e4f 100644 (file)
@@ -232,6 +232,15 @@ config IPV6_GRE
 
          Saying M here will produce a module called ip6_gre. If unsure, say N.
 
+config IPV6_FOU
+       tristate
+       default NET_FOU && IPV6
+
+config IPV6_FOU_TUNNEL
+       tristate
+       default NET_FOU_IP_TUNNELS && IPV6_FOU
+       select IPV6_TUNNEL
+
 config IPV6_MULTIPLE_TABLES
        bool "IPv6: Multiple Routing Tables"
        select FIB_RULES
index 7ec3129..6d8ea09 100644 (file)
@@ -42,7 +42,7 @@ obj-$(CONFIG_IPV6_VTI) += ip6_vti.o
 obj-$(CONFIG_IPV6_SIT) += sit.o
 obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o
 obj-$(CONFIG_IPV6_GRE) += ip6_gre.o
-obj-$(CONFIG_NET_FOU) += fou6.o
+obj-$(CONFIG_IPV6_FOU) += fou6.o
 
 obj-y += addrconf_core.o exthdrs_core.o ip6_checksum.o ip6_icmp.o
 obj-$(CONFIG_INET) += output_core.o protocol.o $(ipv6-offload)
index c972d0b..9ea249b 100644 (file)
@@ -69,7 +69,7 @@ int gue6_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
 }
 EXPORT_SYMBOL(gue6_build_header);
 
-#ifdef CONFIG_NET_FOU_IP_TUNNELS
+#if IS_ENABLED(CONFIG_IPV6_FOU_TUNNEL)
 
 static const struct ip6_tnl_encap_ops fou_ip6tun_ops = {
        .encap_hlen = fou_encap_hlen,
index af503f5..f4ac284 100644 (file)
@@ -712,6 +712,7 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
        fl6->daddr = p->raddr;
        fl6->flowi6_oif = p->link;
        fl6->flowlabel = 0;
+       fl6->flowi6_proto = IPPROTO_GRE;
 
        if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
                fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
@@ -1027,6 +1028,8 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
 
        dev->hard_header_len = LL_MAX_HEADER + t_hlen;
        dev->mtu = ETH_DATA_LEN - t_hlen;
+       if (dev->type == ARPHRD_ETHER)
+               dev->mtu -= ETH_HLEN;
        if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
                dev->mtu -= 8;
 
index c6f5df1..6c54e03 100644 (file)
@@ -128,6 +128,7 @@ static inline struct sock *l2tp_ip6_bind_lookup(struct net *net,
  */
 static int l2tp_ip6_recv(struct sk_buff *skb)
 {
+       struct net *net = dev_net(skb->dev);
        struct sock *sk;
        u32 session_id;
        u32 tunnel_id;
@@ -154,7 +155,7 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
        }
 
        /* Ok, this is a data packet. Lookup the session. */
-       session = l2tp_session_find(&init_net, NULL, session_id);
+       session = l2tp_session_find(net, NULL, session_id);
        if (session == NULL)
                goto discard;
 
@@ -188,14 +189,14 @@ pass_up:
                goto discard;
 
        tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
-       tunnel = l2tp_tunnel_find(&init_net, tunnel_id);
+       tunnel = l2tp_tunnel_find(net, tunnel_id);
        if (tunnel != NULL)
                sk = tunnel->sock;
        else {
                struct ipv6hdr *iph = ipv6_hdr(skb);
 
                read_lock_bh(&l2tp_ip6_lock);
-               sk = __l2tp_ip6_bind_lookup(&init_net, &iph->daddr,
+               sk = __l2tp_ip6_bind_lookup(net, &iph->daddr,
                                            0, tunnel_id);
                read_unlock_bh(&l2tp_ip6_lock);
        }
@@ -263,6 +264,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        struct inet_sock *inet = inet_sk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *) uaddr;
+       struct net *net = sock_net(sk);
        __be32 v4addr = 0;
        int addr_type;
        int err;
@@ -286,7 +288,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 
        err = -EADDRINUSE;
        read_lock_bh(&l2tp_ip6_lock);
-       if (__l2tp_ip6_bind_lookup(&init_net, &addr->l2tp_addr,
+       if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr,
                                   sk->sk_bound_dev_if, addr->l2tp_conn_id))
                goto out_in_use;
        read_unlock_bh(&l2tp_ip6_lock);
@@ -456,7 +458,7 @@ static int l2tp_ip6_backlog_recv(struct sock *sk, struct sk_buff *skb)
        return 0;
 
 drop:
-       IP_INC_STATS(&init_net, IPSTATS_MIB_INDISCARDS);
+       IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
        kfree_skb(skb);
        return -1;
 }
index 5dba899..1824708 100644 (file)
@@ -444,10 +444,9 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
                break;
 
        case LAPB_FRMR:
-               lapb_dbg(1, "(%p) S3 RX FRMR(%d) %02X %02X %02X %02X %02X\n",
+               lapb_dbg(1, "(%p) S3 RX FRMR(%d) %5ph\n",
                         lapb->dev, frame->pf,
-                        skb->data[0], skb->data[1], skb->data[2],
-                        skb->data[3], skb->data[4]);
+                        skb->data);
                lapb_establish_data_link(lapb);
                lapb_dbg(0, "(%p) S3 -> S1\n", lapb->dev);
                lapb_requeue_frames(lapb);
index ba4d015..482c94d 100644 (file)
@@ -148,9 +148,7 @@ void lapb_transmit_buffer(struct lapb_cb *lapb, struct sk_buff *skb, int type)
                }
        }
 
-       lapb_dbg(2, "(%p) S%d TX %02X %02X %02X\n",
-                lapb->dev, lapb->state,
-                skb->data[0], skb->data[1], skb->data[2]);
+       lapb_dbg(2, "(%p) S%d TX %3ph\n", lapb->dev, lapb->state, skb->data);
 
        if (!lapb_data_transmit(lapb, skb))
                kfree_skb(skb);
index 9d0a426..3c1914d 100644 (file)
@@ -113,9 +113,7 @@ int lapb_decode(struct lapb_cb *lapb, struct sk_buff *skb,
 {
        frame->type = LAPB_ILLEGAL;
 
-       lapb_dbg(2, "(%p) S%d RX %02X %02X %02X\n",
-                lapb->dev, lapb->state,
-                skb->data[0], skb->data[1], skb->data[2]);
+       lapb_dbg(2, "(%p) S%d RX %3ph\n", lapb->dev, lapb->state, skb->data);
 
        /* We always need to look at 2 bytes, sometimes we need
         * to look at 3 and those cases are handled below.
@@ -284,10 +282,9 @@ void lapb_transmit_frmr(struct lapb_cb *lapb)
                dptr++;
                *dptr++ = lapb->frmr_type;
 
-               lapb_dbg(1, "(%p) S%d TX FRMR %02X %02X %02X %02X %02X\n",
+               lapb_dbg(1, "(%p) S%d TX FRMR %5ph\n",
                         lapb->dev, lapb->state,
-                        skb->data[1], skb->data[2], skb->data[3],
-                        skb->data[4], skb->data[5]);
+                        &skb->data[1]);
        } else {
                dptr    = skb_put(skb, 4);
                *dptr++ = LAPB_FRMR;
@@ -299,9 +296,8 @@ void lapb_transmit_frmr(struct lapb_cb *lapb)
                dptr++;
                *dptr++ = lapb->frmr_type;
 
-               lapb_dbg(1, "(%p) S%d TX FRMR %02X %02X %02X\n",
-                        lapb->dev, lapb->state, skb->data[1],
-                        skb->data[2], skb->data[3]);
+               lapb_dbg(1, "(%p) S%d TX FRMR %3ph\n",
+                        lapb->dev, lapb->state, &skb->data[1]);
        }
 
        lapb_transmit_buffer(lapb, skb, LAPB_RESPONSE);
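
These lapb hunks switch the byte-by-byte "%02X %02X ..." debug output to the kernel's printk %ph extension, where the field width (as in %3ph or %5ph) selects how many bytes of the buffer are dumped as hex. That specifier is kernel-only; a userspace helper producing roughly the same output could look like this:

    #include <stddef.h>
    #include <stdio.h>

    /* print n bytes as "01 3f 87", roughly what the kernel's %*ph emits */
    static void print_hex_bytes(const unsigned char *buf, size_t n)
    {
            for (size_t i = 0; i < n; i++)
                    printf("%s%02x", i ? " " : "", buf[i]);
            putchar('\n');
    }

    int main(void)
    {
            const unsigned char frame[] = { 0x01, 0x3f, 0x87, 0x10, 0x00 };

            print_hex_bytes(frame, 3);      /* like "%3ph" applied to frame */
            print_hex_bytes(frame, 5);      /* like "%5ph" applied to frame */
            return 0;
    }
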
index 879185f..9a3eb7a 100644 (file)
@@ -137,11 +137,23 @@ static bool is_flow_key_valid(const struct sw_flow_key *key)
        return !!key->eth.type;
 }
 
+static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr,
+                            __be16 ethertype)
+{
+       if (skb->ip_summed == CHECKSUM_COMPLETE) {
+               __be16 diff[] = { ~(hdr->h_proto), ethertype };
+
+               skb->csum = ~csum_partial((char *)diff, sizeof(diff),
+                                       ~skb->csum);
+       }
+
+       hdr->h_proto = ethertype;
+}
+
 static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
                     const struct ovs_action_push_mpls *mpls)
 {
        __be32 *new_mpls_lse;
-       struct ethhdr *hdr;
 
        /* Networking stack do not allow simultaneous Tunnel and MPLS GSO. */
        if (skb->encapsulation)
@@ -160,9 +172,7 @@ static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
 
        skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);
 
-       hdr = eth_hdr(skb);
-       hdr->h_proto = mpls->mpls_ethertype;
-
+       update_ethertype(skb, eth_hdr(skb), mpls->mpls_ethertype);
        if (!skb->inner_protocol)
                skb_set_inner_protocol(skb, skb->protocol);
        skb->protocol = mpls->mpls_ethertype;
@@ -193,7 +203,7 @@ static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
         * field correctly in the presence of VLAN tags.
         */
        hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN);
-       hdr->h_proto = ethertype;
+       update_ethertype(skb, hdr, ethertype);
        if (eth_p_mpls(skb->protocol))
                skb->protocol = ethertype;
 
index 330f14e..b884dae 100644 (file)
@@ -239,6 +239,8 @@ override:
        police->tcfp_t_c = ktime_get_ns();
        police->tcf_index = parm->index ? parm->index :
                tcf_hash_new_index(tn);
+       police->tcf_tm.install = jiffies;
+       police->tcf_tm.lastuse = jiffies;
        h = tcf_hash(police->tcf_index, POL_TAB_MASK);
        spin_lock_bh(&hinfo->lock);
        hlist_add_head(&police->tcf_head, &hinfo->htab[h]);
@@ -268,6 +270,7 @@ static int tcf_act_police(struct sk_buff *skb, const struct tc_action *a,
        spin_lock(&police->tcf_lock);
 
        bstats_update(&police->tcf_bstats, skb);
+       tcf_lastuse_update(&police->tcf_tm);
 
        if (police->tcfp_ewma_rate &&
            police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
@@ -327,6 +330,7 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
                .refcnt = police->tcf_refcnt - ref,
                .bindcnt = police->tcf_bindcnt - bind,
        };
+       struct tcf_t t;
 
        if (police->rate_present)
                psched_ratecfg_getrate(&opt.rate, &police->rate);
@@ -340,6 +344,13 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
        if (police->tcfp_ewma_rate &&
            nla_put_u32(skb, TCA_POLICE_AVRATE, police->tcfp_ewma_rate))
                goto nla_put_failure;
+
+       t.install = jiffies_to_clock_t(jiffies - police->tcf_tm.install);
+       t.lastuse = jiffies_to_clock_t(jiffies - police->tcf_tm.lastuse);
+       t.expires = jiffies_to_clock_t(police->tcf_tm.expires);
+       if (nla_put_64bit(skb, TCA_POLICE_TM, sizeof(t), &t, TCA_POLICE_PAD))
+               goto nla_put_failure;
+
        return skb->len;
 
 nla_put_failure:
index 64f71a2..ddf047d 100644 (file)
@@ -607,6 +607,10 @@ void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool thr
        if (throttle)
                qdisc_throttled(wd->qdisc);
 
+       if (wd->last_expires == expires)
+               return;
+
+       wd->last_expires = expires;
        hrtimer_start(&wd->timer,
                      ns_to_ktime(expires),
                      HRTIMER_MODE_ABS_PINNED);
index f6bf581..d4b4218 100644 (file)
@@ -928,17 +928,10 @@ ok:
                }
        }
        qdisc_qstats_overlimit(sch);
-       if (likely(next_event > q->now)) {
-               if (!test_bit(__QDISC_STATE_DEACTIVATED,
-                             &qdisc_root_sleeping(q->watchdog.qdisc)->state)) {
-                       ktime_t time = ns_to_ktime(next_event);
-                       qdisc_throttled(q->watchdog.qdisc);
-                       hrtimer_start(&q->watchdog.timer, time,
-                                     HRTIMER_MODE_ABS_PINNED);
-               }
-       } else {
+       if (likely(next_event > q->now))
+               qdisc_watchdog_schedule_ns(&q->watchdog, next_event, true);
+       else
                schedule_work(&q->work);
-       }
 fin:
        return skb;
 }
index 8e3e769..1ce724b 100644 (file)
@@ -356,6 +356,9 @@ static int sctp_ep_dump(struct sctp_endpoint *ep, void *p)
        if (cb->args[4] < cb->args[1])
                goto next;
 
+       if ((r->idiag_states & ~TCPF_LISTEN) && !list_empty(&ep->asocs))
+               goto next;
+
        if (r->sdiag_family != AF_UNSPEC &&
            sk->sk_family != r->sdiag_family)
                goto next;
index 777d032..67154b8 100644 (file)
@@ -4220,6 +4220,7 @@ int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
                info->sctpi_s_disable_fragments = sp->disable_fragments;
                info->sctpi_s_v4mapped = sp->v4mapped;
                info->sctpi_s_frag_interleave = sp->frag_interleave;
+               info->sctpi_s_type = sp->type;
 
                return 0;
        }
index 4dfc5c1..f795b1d 100644 (file)
@@ -346,9 +346,15 @@ static int tipc_nl_compat_bearer_dump(struct tipc_nl_compat_msg *msg,
                                      struct nlattr **attrs)
 {
        struct nlattr *bearer[TIPC_NLA_BEARER_MAX + 1];
+       int err;
+
+       if (!attrs[TIPC_NLA_BEARER])
+               return -EINVAL;
 
-       nla_parse_nested(bearer, TIPC_NLA_BEARER_MAX, attrs[TIPC_NLA_BEARER],
-                        NULL);
+       err = nla_parse_nested(bearer, TIPC_NLA_BEARER_MAX,
+                              attrs[TIPC_NLA_BEARER], NULL);
+       if (err)
+               return err;
 
        return tipc_add_tlv(msg->rep, TIPC_TLV_BEARER_NAME,
                            nla_data(bearer[TIPC_NLA_BEARER_NAME]),
@@ -460,14 +466,31 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
        struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
        struct nlattr *prop[TIPC_NLA_PROP_MAX + 1];
        struct nlattr *stats[TIPC_NLA_STATS_MAX + 1];
+       int err;
 
-       nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], NULL);
+       if (!attrs[TIPC_NLA_LINK])
+               return -EINVAL;
 
-       nla_parse_nested(prop, TIPC_NLA_PROP_MAX, link[TIPC_NLA_LINK_PROP],
-                        NULL);
+       err = nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK],
+                              NULL);
+       if (err)
+               return err;
+
+       if (!link[TIPC_NLA_LINK_PROP])
+               return -EINVAL;
 
-       nla_parse_nested(stats, TIPC_NLA_STATS_MAX, link[TIPC_NLA_LINK_STATS],
-                        NULL);
+       err = nla_parse_nested(prop, TIPC_NLA_PROP_MAX,
+                              link[TIPC_NLA_LINK_PROP], NULL);
+       if (err)
+               return err;
+
+       if (!link[TIPC_NLA_LINK_STATS])
+               return -EINVAL;
+
+       err = nla_parse_nested(stats, TIPC_NLA_STATS_MAX,
+                              link[TIPC_NLA_LINK_STATS], NULL);
+       if (err)
+               return err;
 
        name = (char *)TLV_DATA(msg->req);
        if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0)
@@ -569,8 +592,15 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
 {
        struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
        struct tipc_link_info link_info;
+       int err;
 
-       nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], NULL);
+       if (!attrs[TIPC_NLA_LINK])
+               return -EINVAL;
+
+       err = nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK],
+                              NULL);
+       if (err)
+               return err;
 
        link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
        link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
@@ -758,12 +788,23 @@ static int tipc_nl_compat_name_table_dump(struct tipc_nl_compat_msg *msg,
        u32 node, depth, type, lowbound, upbound;
        static const char * const scope_str[] = {"", " zone", " cluster",
                                                 " node"};
+       int err;
 
-       nla_parse_nested(nt, TIPC_NLA_NAME_TABLE_MAX,
-                        attrs[TIPC_NLA_NAME_TABLE], NULL);
+       if (!attrs[TIPC_NLA_NAME_TABLE])
+               return -EINVAL;
 
-       nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, nt[TIPC_NLA_NAME_TABLE_PUBL],
-                        NULL);
+       err = nla_parse_nested(nt, TIPC_NLA_NAME_TABLE_MAX,
+                              attrs[TIPC_NLA_NAME_TABLE], NULL);
+       if (err)
+               return err;
+
+       if (!nt[TIPC_NLA_NAME_TABLE_PUBL])
+               return -EINVAL;
+
+       err = nla_parse_nested(publ, TIPC_NLA_PUBL_MAX,
+                              nt[TIPC_NLA_NAME_TABLE_PUBL], NULL);
+       if (err)
+               return err;
 
        ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
 
@@ -815,8 +856,15 @@ static int __tipc_nl_compat_publ_dump(struct tipc_nl_compat_msg *msg,
 {
        u32 type, lower, upper;
        struct nlattr *publ[TIPC_NLA_PUBL_MAX + 1];
+       int err;
 
-       nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, attrs[TIPC_NLA_PUBL], NULL);
+       if (!attrs[TIPC_NLA_PUBL])
+               return -EINVAL;
+
+       err = nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, attrs[TIPC_NLA_PUBL],
+                              NULL);
+       if (err)
+               return err;
 
        type = nla_get_u32(publ[TIPC_NLA_PUBL_TYPE]);
        lower = nla_get_u32(publ[TIPC_NLA_PUBL_LOWER]);
@@ -876,7 +924,13 @@ static int tipc_nl_compat_sk_dump(struct tipc_nl_compat_msg *msg,
        u32 sock_ref;
        struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
 
-       nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, attrs[TIPC_NLA_SOCK], NULL);
+       if (!attrs[TIPC_NLA_SOCK])
+               return -EINVAL;
+
+       err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, attrs[TIPC_NLA_SOCK],
+                              NULL);
+       if (err)
+               return err;
 
        sock_ref = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
        tipc_tlv_sprintf(msg->rep, "%u:", sock_ref);
@@ -917,9 +971,15 @@ static int tipc_nl_compat_media_dump(struct tipc_nl_compat_msg *msg,
                                     struct nlattr **attrs)
 {
        struct nlattr *media[TIPC_NLA_MEDIA_MAX + 1];
+       int err;
+
+       if (!attrs[TIPC_NLA_MEDIA])
+               return -EINVAL;
 
-       nla_parse_nested(media, TIPC_NLA_MEDIA_MAX, attrs[TIPC_NLA_MEDIA],
-                        NULL);
+       err = nla_parse_nested(media, TIPC_NLA_MEDIA_MAX, attrs[TIPC_NLA_MEDIA],
+                              NULL);
+       if (err)
+               return err;
 
        return tipc_add_tlv(msg->rep, TIPC_TLV_MEDIA_NAME,
                            nla_data(media[TIPC_NLA_MEDIA_NAME]),
@@ -931,8 +991,15 @@ static int tipc_nl_compat_node_dump(struct tipc_nl_compat_msg *msg,
 {
        struct tipc_node_info node_info;
        struct nlattr *node[TIPC_NLA_NODE_MAX + 1];
+       int err;
 
-       nla_parse_nested(node, TIPC_NLA_NODE_MAX, attrs[TIPC_NLA_NODE], NULL);
+       if (!attrs[TIPC_NLA_NODE])
+               return -EINVAL;
+
+       err = nla_parse_nested(node, TIPC_NLA_NODE_MAX, attrs[TIPC_NLA_NODE],
+                              NULL);
+       if (err)
+               return err;
 
        node_info.addr = htonl(nla_get_u32(node[TIPC_NLA_NODE_ADDR]));
        node_info.up = htonl(nla_get_flag(node[TIPC_NLA_NODE_UP]));
@@ -971,8 +1038,16 @@ static int tipc_nl_compat_net_dump(struct tipc_nl_compat_msg *msg,
 {
        __be32 id;
        struct nlattr *net[TIPC_NLA_NET_MAX + 1];
+       int err;
+
+       if (!attrs[TIPC_NLA_NET])
+               return -EINVAL;
+
+       err = nla_parse_nested(net, TIPC_NLA_NET_MAX, attrs[TIPC_NLA_NET],
+                              NULL);
+       if (err)
+               return err;
 
-       nla_parse_nested(net, TIPC_NLA_NET_MAX, attrs[TIPC_NLA_NET], NULL);
        id = htonl(nla_get_u32(net[TIPC_NLA_NET_ID]));
 
        return tipc_add_tlv(msg->rep, TIPC_TLV_UNSIGNED, &id, sizeof(id));
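
Every dump helper in netlink_compat.c now applies the same two-step guard before using a nested attribute table: the outer attribute must be present, and nla_parse_nested() must succeed, with either failure turned into an error instead of being ignored. The shape of that pattern, with a hypothetical parse_nested() standing in for the netlink helper:

    #include <errno.h>
    #include <stdio.h>

    #define ATTR_MAX 4

    /* hypothetical nested parser: fails on a missing or empty payload */
    static int parse_nested(const char *outer, const char *table[ATTR_MAX])
    {
            if (!outer || !*outer)
                    return -EINVAL;
            table[0] = outer;       /* pretend the payload was split into fields */
            return 0;
    }

    static int dump_one(const char *outer_attr)
    {
            const char *nested[ATTR_MAX] = { NULL };
            int err;

            if (!outer_attr)                /* attribute missing entirely */
                    return -EINVAL;

            err = parse_nested(outer_attr, nested);
            if (err)                        /* malformed nested payload */
                    return err;

            printf("parsed: %s\n", nested[0]);
            return 0;
    }

    int main(void)
    {
            printf("missing attr -> %d\n", dump_one(NULL));
            printf("good attr    -> %d\n", dump_one("bearer:eth0"));
            return 0;
    }
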
index 6750595..4904ced 100755 (executable)
@@ -2454,6 +2454,7 @@ sub process {
 
 # Check for git id commit length and improperly formed commit descriptions
                if ($in_commit_log && !$commit_log_possible_stack_dump &&
+                   $line !~ /^\s*(?:Link|Patchwork|http|BugLink):/i &&
                    ($line =~ /\bcommit\s+[0-9a-f]{5,}\b/i ||
                     ($line =~ /\b[0-9a-f]{12,40}\b/i &&
                      $line !~ /[\<\[][0-9a-f]{12,40}[\>\]]/i &&
index c8783b3..36c80bf 100644 (file)
@@ -134,7 +134,7 @@ COMPAT_SYSCALL_DEFINE5(keyctl, u32, option,
 
        case KEYCTL_DH_COMPUTE:
                return keyctl_dh_compute(compat_ptr(arg2), compat_ptr(arg3),
-                                        arg4);
+                                        arg4, compat_ptr(arg5));
 
        default:
                return -EOPNOTSUPP;
index 880505a..531ed2e 100644 (file)
@@ -78,7 +78,8 @@ error:
 }
 
 long keyctl_dh_compute(struct keyctl_dh_params __user *params,
-                      char __user *buffer, size_t buflen)
+                      char __user *buffer, size_t buflen,
+                      void __user *reserved)
 {
        long ret;
        MPI base, private, prime, result;
@@ -97,6 +98,11 @@ long keyctl_dh_compute(struct keyctl_dh_params __user *params,
                goto out;
        }
 
+       if (reserved) {
+               ret = -EINVAL;
+               goto out;
+       }
+
        keylen = mpi_from_key(pcopy.prime, buflen, &prime);
        if (keylen < 0 || !prime) {
                /* buflen == 0 may be used to query the required buffer size,
index 8ec7a52..a705a7d 100644 (file)
@@ -260,10 +260,11 @@ static inline long keyctl_get_persistent(uid_t uid, key_serial_t destring)
 
 #ifdef CONFIG_KEY_DH_OPERATIONS
 extern long keyctl_dh_compute(struct keyctl_dh_params __user *, char __user *,
-                             size_t);
+                             size_t, void __user *);
 #else
 static inline long keyctl_dh_compute(struct keyctl_dh_params __user *params,
-                                    char __user *buffer, size_t buflen)
+                                    char __user *buffer, size_t buflen,
+                                    void __user *reserved)
 {
        return -EOPNOTSUPP;
 }
index 3b135a0..d580ad0 100644 (file)
@@ -1688,8 +1688,8 @@ SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3,
 
        case KEYCTL_DH_COMPUTE:
                return keyctl_dh_compute((struct keyctl_dh_params __user *) arg2,
-                                        (char __user *) arg3,
-                                        (size_t) arg4);
+                                        (char __user *) arg3, (size_t) arg4,
+                                        (void __user *) arg5);
 
        default:
                return -EOPNOTSUPP;
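
keyctl_dh_compute() grows a fifth parameter that is reserved for now: the new code rejects any non-NULL value with -EINVAL, which keeps the slot available for later extensions without ambiguity about what older callers passed. A minimal sketch of that reserved-argument check around a hypothetical operation:

    #include <errno.h>
    #include <stddef.h>
    #include <stdio.h>

    /* hypothetical extensible operation: 'reserved' must be NULL for now */
    static long do_compute(const char *input, char *out, size_t outlen,
                           void *reserved)
    {
            if (reserved)
                    return -EINVAL;         /* reject until a meaning is defined */

            return snprintf(out, outlen, "computed(%s)", input);
    }

    int main(void)
    {
            char buf[64];
            int dummy;

            printf("reserved=NULL   -> %ld\n", do_compute("params", buf, sizeof(buf), NULL));
            printf("reserved=&dummy -> %ld\n", do_compute("params", buf, sizeof(buf), &dummy));
            return 0;
    }
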
index a3f12b3..3a3a699 100644 (file)
@@ -100,12 +100,11 @@ static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
                if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
                        continue;
 
-               if (cpu_if->vgic_elrsr & (1UL << i)) {
+               if (cpu_if->vgic_elrsr & (1UL << i))
                        cpu_if->vgic_lr[i] &= ~GICH_LR_STATE;
-                       continue;
-               }
+               else
+                       cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
 
-               cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
                writel_relaxed(0, base + GICH_LR0 + (i * 4));
        }
 }
index 059595e..9f6fab7 100644 (file)
@@ -191,10 +191,8 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
         * other thread sync back the IRQ.
         */
        while (irq->vcpu && /* IRQ may have state in an LR somewhere */
-              irq->vcpu->cpu != -1) { /* VCPU thread is running */
-               BUG_ON(irq->intid < VGIC_NR_PRIVATE_IRQS);
+              irq->vcpu->cpu != -1) /* VCPU thread is running */
                cond_resched_lock(&irq->irq_lock);
-       }
 
        irq->active = new_active_state;
        if (new_active_state)
index 8ad42c2..e31405e 100644 (file)
@@ -112,11 +112,15 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
                        }
                }
 
-               /* Clear soft pending state when level IRQs have been acked */
-               if (irq->config == VGIC_CONFIG_LEVEL &&
-                   !(val & GICH_LR_PENDING_BIT)) {
-                       irq->soft_pending = false;
-                       irq->pending = irq->line_level;
+               /*
+                * Clear soft pending state when level irqs have been acked.
+                * Always regenerate the pending state.
+                */
+               if (irq->config == VGIC_CONFIG_LEVEL) {
+                       if (!(val & GICH_LR_PENDING_BIT))
+                               irq->soft_pending = false;
+
+                       irq->pending = irq->line_level || irq->soft_pending;
                }
 
                spin_unlock(&irq->irq_lock);
index 336a461..346b4ad 100644 (file)
@@ -101,11 +101,15 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
                        }
                }
 
-               /* Clear soft pending state when level irqs have been acked */
-               if (irq->config == VGIC_CONFIG_LEVEL &&
-                   !(val & ICH_LR_PENDING_BIT)) {
-                       irq->soft_pending = false;
-                       irq->pending = irq->line_level;
+               /*
+                * Clear soft pending state when level irqs have been acked.
+                * Always regenerate the pending state.
+                */
+               if (irq->config == VGIC_CONFIG_LEVEL) {
+                       if (!(val & ICH_LR_PENDING_BIT))
+                               irq->soft_pending = false;
+
+                       irq->pending = irq->line_level || irq->soft_pending;
                }
 
                spin_unlock(&irq->irq_lock);
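
Both hunks above make the same change for GICv2 and GICv3. As a stand-alone sketch, with the struct vgic_irq fields replaced by local booleans, the recomputed state looks like this; the visible difference from the old code is that pending is now regenerated even while the LR still shows its pending bit set.

/* Sketch only: mirrors the new fold-LR logic for a level-configured IRQ. */
static bool fold_level_irq_pending(bool lr_pending_bit, bool soft_pending,
				   bool line_level)
{
	/* Soft pending is cleared once the LR no longer shows pending. */
	if (!lr_pending_bit)
		soft_pending = false;

	/* The pending state is always regenerated, never left stale. */
	return line_level || soft_pending;
}
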
index fe84e1a..8db197b 100644 (file)
@@ -40,7 +40,7 @@ int kvm_irq_map_gsi(struct kvm *kvm,
 
        irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
                                        lockdep_is_held(&kvm->irq_lock));
-       if (gsi < irq_rt->nr_rt_entries) {
+       if (irq_rt && gsi < irq_rt->nr_rt_entries) {
                hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
                        entries[n] = *e;
                        ++n;
index 37af230..02e98f3 100644 (file)
@@ -2935,7 +2935,7 @@ static long kvm_vm_ioctl(struct file *filp,
        case KVM_SET_GSI_ROUTING: {
                struct kvm_irq_routing routing;
                struct kvm_irq_routing __user *urouting;
-               struct kvm_irq_routing_entry *entries;
+               struct kvm_irq_routing_entry *entries = NULL;
 
                r = -EFAULT;
                if (copy_from_user(&routing, argp, sizeof(routing)))
@@ -2945,15 +2945,17 @@ static long kvm_vm_ioctl(struct file *filp,
                        goto out;
                if (routing.flags)
                        goto out;
-               r = -ENOMEM;
-               entries = vmalloc(routing.nr * sizeof(*entries));
-               if (!entries)
-                       goto out;
-               r = -EFAULT;
-               urouting = argp;
-               if (copy_from_user(entries, urouting->entries,
-                                  routing.nr * sizeof(*entries)))
-                       goto out_free_irq_routing;
+               if (routing.nr) {
+                       r = -ENOMEM;
+                       entries = vmalloc(routing.nr * sizeof(*entries));
+                       if (!entries)
+                               goto out;
+                       r = -EFAULT;
+                       urouting = argp;
+                       if (copy_from_user(entries, urouting->entries,
+                                          routing.nr * sizeof(*entries)))
+                               goto out_free_irq_routing;
+               }
                r = kvm_set_irq_routing(kvm, entries, routing.nr,
                                        routing.flags);
 out_free_irq_routing:
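
A hedged userspace sketch of the case this hunk newly tolerates: issuing KVM_SET_GSI_ROUTING with nr == 0 installs an empty routing table without the kernel attempting a zero-sized vmalloc(). vm_fd is assumed to be an already-created KVM VM descriptor; flags must remain zero.

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <string.h>

/* Sketch: clear all GSI routes for a VM. */
static int clear_gsi_routing(int vm_fd)
{
	struct kvm_irq_routing routing;

	memset(&routing, 0, sizeof(routing));	/* nr = 0, flags = 0 */
	return ioctl(vm_fd, KVM_SET_GSI_ROUTING, &routing);
}
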